xref: /linux/drivers/scsi/csiostor/csio_rnode.c (revision d0b73b488c55df905ea8faaad079f8535629ed26)
1 /*
2  * This file is part of the Chelsio FCoE driver for Linux.
3  *
4  * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
5  *
6  * This software is available to you under a choice of one of two
7  * licenses.  You may choose to be licensed under the terms of the GNU
8  * General Public License (GPL) Version 2, available from the file
9  * COPYING in the main directory of this source tree, or the
10  * OpenIB.org BSD license below:
11  *
12  *     Redistribution and use in source and binary forms, with or
13  *     without modification, are permitted provided that the following
14  *     conditions are met:
15  *
16  *      - Redistributions of source code must retain the above
17  *        copyright notice, this list of conditions and the following
18  *        disclaimer.
19  *
20  *      - Redistributions in binary form must reproduce the above
21  *        copyright notice, this list of conditions and the following
22  *        disclaimer in the documentation and/or other materials
23  *        provided with the distribution.
24  *
25  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32  * SOFTWARE.
33  */
34 
35 #include <linux/string.h>
36 #include <scsi/scsi_device.h>
37 #include <scsi/scsi_transport_fc.h>
38 #include <scsi/fc/fc_els.h>
39 #include <scsi/fc/fc_fs.h>
40 
41 #include "csio_hw.h"
42 #include "csio_lnode.h"
43 #include "csio_rnode.h"
44 
45 static int csio_rnode_init(struct csio_rnode *, struct csio_lnode *);
46 static void csio_rnode_exit(struct csio_rnode *);
47 
48 /* Static machine forward declarations */
49 static void csio_rns_uninit(struct csio_rnode *, enum csio_rn_ev);
50 static void csio_rns_ready(struct csio_rnode *, enum csio_rn_ev);
51 static void csio_rns_offline(struct csio_rnode *, enum csio_rn_ev);
52 static void csio_rns_disappeared(struct csio_rnode *, enum csio_rn_ev);
53 
/*
 * RNF event mapping: translates firmware rdev event codes into rnode
 * state-machine events.  The array is indexed directly by the firmware
 * event value (see CSIO_FWE_TO_RNFE below), so the entry order MUST
 * match the firmware rdev event enumeration exactly — the trailing
 * comment on each entry names the firmware event it corresponds to.
 * CSIO_RNFE_NONE marks firmware events the rnode SM does not act on.
 */
static enum csio_rn_ev fwevt_to_rnevt[] = {
	CSIO_RNFE_NONE,		/* None */
	CSIO_RNFE_LOGGED_IN,	/* PLOGI_ACC_RCVD  */
	CSIO_RNFE_NONE,		/* PLOGI_RJT_RCVD  */
	CSIO_RNFE_PLOGI_RECV,	/* PLOGI_RCVD	   */
	CSIO_RNFE_LOGO_RECV,	/* PLOGO_RCVD	   */
	CSIO_RNFE_PRLI_DONE,	/* PRLI_ACC_RCVD   */
	CSIO_RNFE_NONE,		/* PRLI_RJT_RCVD   */
	CSIO_RNFE_PRLI_RECV,	/* PRLI_RCVD	   */
	CSIO_RNFE_PRLO_RECV,	/* PRLO_RCVD	   */
	CSIO_RNFE_NONE,		/* NPORT_ID_CHGD   */
	CSIO_RNFE_LOGO_RECV,	/* FLOGO_RCVD	   */
	CSIO_RNFE_NONE,		/* CLR_VIRT_LNK_RCVD */
	CSIO_RNFE_LOGGED_IN,	/* FLOGI_ACC_RCVD   */
	CSIO_RNFE_NONE,		/* FLOGI_RJT_RCVD   */
	CSIO_RNFE_LOGGED_IN,	/* FDISC_ACC_RCVD   */
	CSIO_RNFE_NONE,		/* FDISC_RJT_RCVD   */
	CSIO_RNFE_NONE,		/* FLOGI_TMO_MAX_RETRY */
	CSIO_RNFE_NONE,		/* IMPL_LOGO_ADISC_ACC */
	CSIO_RNFE_NONE,		/* IMPL_LOGO_ADISC_RJT */
	CSIO_RNFE_NONE,		/* IMPL_LOGO_ADISC_CNFLT */
	CSIO_RNFE_NONE,		/* PRLI_TMO		*/
	CSIO_RNFE_NONE,		/* ADISC_TMO		*/
	CSIO_RNFE_NAME_MISSING,	/* RSCN_DEV_LOST  */
	CSIO_RNFE_NONE,		/* SCR_ACC_RCVD	*/
	CSIO_RNFE_NONE,		/* ADISC_RJT_RCVD */
	CSIO_RNFE_NONE,		/* LOGO_SNT */
	CSIO_RNFE_LOGO_RECV,	/* PROTO_ERR_IMPL_LOGO */
};
84 
/*
 * Map a firmware rdev event to its rnode SM event; out-of-range values
 * map to CSIO_RNFE_NONE.  The argument is parenthesized so the macro
 * expands safely when passed a compound expression.
 */
#define CSIO_FWE_TO_RNFE(_evt)	(((_evt) > PROTO_ERR_IMPL_LOGO) ?	\
						CSIO_RNFE_NONE :	\
						fwevt_to_rnevt[(_evt)])
/*
 * csio_is_rnode_ready - Returns non-zero if the rnode SM is in the
 * READY state (logged in and registered), zero otherwise.
 */
int
csio_is_rnode_ready(struct csio_rnode *rn)
{
	return csio_match_state(rn, csio_rns_ready);
}
93 
/*
 * csio_is_rnode_uninit - Returns non-zero if the rnode SM is in the
 * UNINIT state (safe to free), zero otherwise.
 */
static int
csio_is_rnode_uninit(struct csio_rnode *rn)
{
	return csio_match_state(rn, csio_rns_uninit);
}
99 
100 static int
101 csio_is_rnode_wka(uint8_t rport_type)
102 {
103 	if ((rport_type == FLOGI_VFPORT) ||
104 	    (rport_type == FDISC_VFPORT) ||
105 	    (rport_type == NS_VNPORT) ||
106 	    (rport_type == FDMI_VNPORT))
107 		return 1;
108 
109 	return 0;
110 }
111 
112 /*
113  * csio_rn_lookup - Finds the rnode with the given flowid
114  * @ln - lnode
115  * @flowid - flowid.
116  *
117  * Does the rnode lookup on the given lnode and flowid.If no matching entry
118  * found, NULL is returned.
119  */
120 static struct csio_rnode *
121 csio_rn_lookup(struct csio_lnode *ln, uint32_t flowid)
122 {
123 	struct csio_rnode *rnhead = (struct csio_rnode *) &ln->rnhead;
124 	struct list_head *tmp;
125 	struct csio_rnode *rn;
126 
127 	list_for_each(tmp, &rnhead->sm.sm_list) {
128 		rn = (struct csio_rnode *) tmp;
129 		if (rn->flowid == flowid)
130 			return rn;
131 	}
132 
133 	return NULL;
134 }
135 
136 /*
137  * csio_rn_lookup_wwpn - Finds the rnode with the given wwpn
138  * @ln: lnode
139  * @wwpn: wwpn
140  *
141  * Does the rnode lookup on the given lnode and wwpn. If no matching entry
142  * found, NULL is returned.
143  */
144 static struct csio_rnode *
145 csio_rn_lookup_wwpn(struct csio_lnode *ln, uint8_t *wwpn)
146 {
147 	struct csio_rnode *rnhead = (struct csio_rnode *) &ln->rnhead;
148 	struct list_head *tmp;
149 	struct csio_rnode *rn;
150 
151 	list_for_each(tmp, &rnhead->sm.sm_list) {
152 		rn = (struct csio_rnode *) tmp;
153 		if (!memcmp(csio_rn_wwpn(rn), wwpn, 8))
154 			return rn;
155 	}
156 
157 	return NULL;
158 }
159 
160 /**
161  * csio_rnode_lookup_portid - Finds the rnode with the given portid
162  * @ln:		lnode
163  * @portid:	port id
164  *
165  * Lookup the rnode list for a given portid. If no matching entry
166  * found, NULL is returned.
167  */
168 struct csio_rnode *
169 csio_rnode_lookup_portid(struct csio_lnode *ln, uint32_t portid)
170 {
171 	struct csio_rnode *rnhead = (struct csio_rnode *) &ln->rnhead;
172 	struct list_head *tmp;
173 	struct csio_rnode *rn;
174 
175 	list_for_each(tmp, &rnhead->sm.sm_list) {
176 		rn = (struct csio_rnode *) tmp;
177 		if (rn->nport_id == portid)
178 			return rn;
179 	}
180 
181 	return NULL;
182 }
183 
/*
 * csio_rn_dup_flowid - Checks whether a flowid is already active on
 * another lnode of the same HW.
 * @ln: lnode the event arrived on (skipped during the scan).
 * @rdev_flowid: remote device flowid to look for.
 * @vnp_flowid: out-param; on a hit, set to the flowid of the lnode that
 *              already owns a READY rnode with @rdev_flowid.
 *
 * Walks every other lnode hanging off the HW and, within each, every
 * rnode; returns 1 (duplicate) as soon as a READY rnode with the same
 * flowid is found, 0 otherwise.  Only READY rnodes count — stale or
 * offline rnodes with the same flowid are not duplicates.
 */
static int
csio_rn_dup_flowid(struct csio_lnode *ln, uint32_t rdev_flowid,
		    uint32_t *vnp_flowid)
{
	struct csio_rnode *rnhead;
	struct list_head *tmp, *tmp1;
	struct csio_rnode *rn;
	struct csio_lnode *ln_tmp;
	struct csio_hw *hw = csio_lnode_to_hw(ln);

	list_for_each(tmp1, &hw->sln_head) {
		/* Cast assumes the lnode's list linkage is its first member */
		ln_tmp = (struct csio_lnode *) tmp1;
		if (ln_tmp == ln)
			continue;

		rnhead = (struct csio_rnode *)&ln_tmp->rnhead;
		list_for_each(tmp, &rnhead->sm.sm_list) {

			rn = (struct csio_rnode *) tmp;
			if (csio_is_rnode_ready(rn)) {
				if (rn->flowid == rdev_flowid) {
					*vnp_flowid = csio_ln_flowid(ln_tmp);
					return 1;
				}
			}
		}
	}

	return 0;
}
214 
215 static struct csio_rnode *
216 csio_alloc_rnode(struct csio_lnode *ln)
217 {
218 	struct csio_hw *hw = csio_lnode_to_hw(ln);
219 
220 	struct csio_rnode *rn = mempool_alloc(hw->rnode_mempool, GFP_ATOMIC);
221 	if (!rn)
222 		goto err;
223 
224 	memset(rn, 0, sizeof(struct csio_rnode));
225 	if (csio_rnode_init(rn, ln))
226 		goto err_free;
227 
228 	CSIO_INC_STATS(ln, n_rnode_alloc);
229 
230 	return rn;
231 
232 err_free:
233 	mempool_free(rn, hw->rnode_mempool);
234 err:
235 	CSIO_INC_STATS(ln, n_rnode_nomem);
236 	return NULL;
237 }
238 
239 static void
240 csio_free_rnode(struct csio_rnode *rn)
241 {
242 	struct csio_hw *hw = csio_lnode_to_hw(csio_rnode_to_lnode(rn));
243 
244 	csio_rnode_exit(rn);
245 	CSIO_INC_STATS(rn->lnp, n_rnode_free);
246 	mempool_free(rn, hw->rnode_mempool);
247 }
248 
249 /*
250  * csio_get_rnode - Gets rnode with the given flowid
251  * @ln - lnode
252  * @flowid - flow id.
253  *
254  * Does the rnode lookup on the given lnode and flowid. If no matching
255  * rnode found, then new rnode with given npid is allocated and returned.
256  */
257 static struct csio_rnode *
258 csio_get_rnode(struct csio_lnode *ln, uint32_t flowid)
259 {
260 	struct csio_rnode *rn;
261 
262 	rn = csio_rn_lookup(ln, flowid);
263 	if (!rn) {
264 		rn = csio_alloc_rnode(ln);
265 		if (!rn)
266 			return NULL;
267 
268 		rn->flowid = flowid;
269 	}
270 
271 	return rn;
272 }
273 
/*
 * csio_put_rnode - Frees the given rnode
 * @ln - lnode (unused; kept for API symmetry with csio_get_rnode)
 * @rn - rnode to free.
 *
 * The rnode must already be in the UNINIT state (asserted below);
 * it is unlinked and returned to the HW rnode mempool.
 */
void
csio_put_rnode(struct csio_lnode *ln, struct csio_rnode *rn)
{
	CSIO_DB_ASSERT(csio_is_rnode_uninit(rn) != 0);
	csio_free_rnode(rn);
}
288 
/*
 * csio_confirm_rnode - confirms rnode based on wwpn.
 * @ln: lnode
 * @rdev_flowid: remote device flowid
 * @rdevp: remote device params
 *
 * Resolves a firmware rdev event to a single rnode, handling re-logins
 * and flowid changes:
 *   - fabric-controller events are dropped outright;
 *   - a flowid hit with matching wwpn reuses that rnode;
 *   - well-known ports are matched by nport id, then confirmed by wwpn;
 *   - a wwpn hit under a new flowid migrates the rnode to the new flowid
 *     (the stale rnode is marked with CSIO_INVALID_IDX so devloss can
 *     reap it);
 *   - otherwise a fresh rnode is allocated.
 * On success the rnode's flowid and rdev_entry are updated.
 * Returns the rnode, or NULL on drop/duplicate/allocation failure.
 */
struct csio_rnode *
csio_confirm_rnode(struct csio_lnode *ln, uint32_t rdev_flowid,
		   struct fcoe_rdev_entry *rdevp)
{
	uint8_t rport_type;
	struct csio_rnode *rn, *match_rn;
	uint32_t vnp_flowid;
	__be32 *port_id;

	/* r_id holds the 24-bit FC port id in the upper bytes of this word */
	port_id = (__be32 *)&rdevp->r_id[0];
	rport_type =
		FW_RDEV_WR_RPORT_TYPE_GET(rdevp->rd_xfer_rdy_to_rport_type);

	/* Drop rdev event for cntrl port */
	if (rport_type == FAB_CTLR_VNPORT) {
		csio_ln_dbg(ln,
			    "Unhandled rport_type:%d recv in rdev evt "
			    "ssni:x%x\n", rport_type, rdev_flowid);
		return NULL;
	}

	/* Lookup on flowid */
	rn = csio_rn_lookup(ln, rdev_flowid);
	if (!rn) {

		/* Drop events with duplicate flowid */
		if (csio_rn_dup_flowid(ln, rdev_flowid, &vnp_flowid)) {
			csio_ln_warn(ln,
				     "ssni:%x already active on vnpi:%x",
				     rdev_flowid, vnp_flowid);
			return NULL;
		}

		/* Lookup on wwpn for NPORTs */
		rn = csio_rn_lookup_wwpn(ln, rdevp->wwpn);
		if (!rn)
			goto alloc_rnode;

	} else {
		/* Lookup well-known ports with nport id */
		if (csio_is_rnode_wka(rport_type)) {
			match_rn = csio_rnode_lookup_portid(ln,
				      ((ntohl(*port_id) >> 8) & CSIO_DID_MASK));
			if (match_rn == NULL) {
				/* Invalidate the flowid-matched rnode so it
				 * is not found again; a new one is allocated.
				 */
				csio_rn_flowid(rn) = CSIO_INVALID_IDX;
				goto alloc_rnode;
			}

			/*
			 * Now compare the wwpn to confirm that
			 * same port relogged in. If so update the matched rn.
			 * Else, go ahead and alloc a new rnode.
			 */
			if (!memcmp(csio_rn_wwpn(match_rn), rdevp->wwpn, 8)) {
				if (csio_is_rnode_ready(rn)) {
					csio_ln_warn(ln,
						     "rnode is already"
						     "active ssni:x%x\n",
						     rdev_flowid);
					CSIO_ASSERT(0);
				}
				csio_rn_flowid(rn) = CSIO_INVALID_IDX;
				rn = match_rn;

				/* Update rn */
				goto found_rnode;
			}
			csio_rn_flowid(rn) = CSIO_INVALID_IDX;
			goto alloc_rnode;
		}

		/* wwpn match */
		if (!memcmp(csio_rn_wwpn(rn), rdevp->wwpn, 8))
			goto found_rnode;

		/* Search for rnode that have same wwpn */
		match_rn = csio_rn_lookup_wwpn(ln, rdevp->wwpn);
		if (match_rn != NULL) {
			/* Same port came back under a new flowid: retire the
			 * old rnode and continue with the wwpn match.
			 */
			csio_ln_dbg(ln,
				"ssni:x%x changed for rport name(wwpn):%llx "
				"did:x%x\n", rdev_flowid,
				wwn_to_u64(rdevp->wwpn),
				match_rn->nport_id);
			csio_rn_flowid(rn) = CSIO_INVALID_IDX;
			rn = match_rn;
		} else {
			csio_ln_dbg(ln,
				"rnode wwpn mismatch found ssni:x%x "
				"name(wwpn):%llx\n",
				rdev_flowid,
				wwn_to_u64(csio_rn_wwpn(rn)));
			if (csio_is_rnode_ready(rn)) {
				csio_ln_warn(ln,
					     "rnode is already active "
					     "wwpn:%llx ssni:x%x\n",
					     wwn_to_u64(csio_rn_wwpn(rn)),
					     rdev_flowid);
				CSIO_ASSERT(0);
			}
			csio_rn_flowid(rn) = CSIO_INVALID_IDX;
			goto alloc_rnode;
		}
	}

found_rnode:
	csio_ln_dbg(ln, "found rnode:%p ssni:x%x name(wwpn):%llx\n",
		rn, rdev_flowid, wwn_to_u64(rdevp->wwpn));

	/* Update flowid */
	csio_rn_flowid(rn) = rdev_flowid;

	/* update rdev entry */
	rn->rdev_entry = rdevp;
	CSIO_INC_STATS(ln, n_rnode_match);
	return rn;

alloc_rnode:
	rn = csio_get_rnode(ln, rdev_flowid);
	if (!rn)
		return NULL;

	csio_ln_dbg(ln, "alloc rnode:%p ssni:x%x name(wwpn):%llx\n",
		rn, rdev_flowid, wwn_to_u64(rdevp->wwpn));

	/* update rdev entry */
	rn->rdev_entry = rdevp;
	return rn;
}
427 
/*
 * csio_rn_verify_rparams - verify rparams.
 * @ln: lnode
 * @rn: rnode
 * @rdevp: remote device params
 *
 * Validates the firmware-supplied remote device parameters against the
 * rport type, sets the rnode role (fabric/NS/nport/target/initiator),
 * and on success copies wwnn/wwpn/nport_id and service parameters into
 * the rnode.  Returns 0 on success, -EINVAL on any validation failure.
 */
static int
csio_rn_verify_rparams(struct csio_lnode *ln, struct csio_rnode *rn,
			struct fcoe_rdev_entry *rdevp)
{
	uint8_t null[8];
	uint8_t rport_type;
	uint8_t fc_class;
	__be32 *did;

	did = (__be32 *) &rdevp->r_id[0];
	rport_type =
		FW_RDEV_WR_RPORT_TYPE_GET(rdevp->rd_xfer_rdy_to_rport_type);
	switch (rport_type) {
	case FLOGI_VFPORT:
		/* Fabric login: port id must be the well-known FLOGI DID */
		rn->role = CSIO_RNFR_FABRIC;
		if (((ntohl(*did) >> 8) & CSIO_DID_MASK) != FC_FID_FLOGI) {
			csio_ln_err(ln, "ssni:x%x invalid fabric portid\n",
				csio_rn_flowid(rn));
			return -EINVAL;
		}
		/* NPIV support */
		if (FW_RDEV_WR_NPIV_GET(rdevp->vft_to_qos))
			ln->flags |= CSIO_LNF_NPIVSUPP;

		break;

	case NS_VNPORT:
		/* Name server: port id must be the directory-server DID */
		rn->role = CSIO_RNFR_NS;
		if (((ntohl(*did) >> 8) & CSIO_DID_MASK) != FC_FID_DIR_SERV) {
			csio_ln_err(ln, "ssni:x%x invalid fabric portid\n",
				csio_rn_flowid(rn));
			return -EINVAL;
		}
		break;

	case REG_FC4_VNPORT:
	case REG_VNPORT:
		rn->role = CSIO_RNFR_NPORT;
		/* FCP service parameters are only meaningful once PRLI
		 * has completed (accepted or received).
		 */
		if (rdevp->event_cause == PRLI_ACC_RCVD ||
			rdevp->event_cause == PRLI_RCVD) {
			if (FW_RDEV_WR_TASK_RETRY_ID_GET(
							rdevp->enh_disc_to_tgt))
				rn->fcp_flags |= FCP_SPPF_OVLY_ALLOW;

			if (FW_RDEV_WR_RETRY_GET(rdevp->enh_disc_to_tgt))
				rn->fcp_flags |= FCP_SPPF_RETRY;

			if (FW_RDEV_WR_CONF_CMPL_GET(rdevp->enh_disc_to_tgt))
				rn->fcp_flags |= FCP_SPPF_CONF_COMPL;

			if (FW_RDEV_WR_TGT_GET(rdevp->enh_disc_to_tgt))
				rn->role |= CSIO_RNFR_TARGET;

			if (FW_RDEV_WR_INI_GET(rdevp->enh_disc_to_tgt))
				rn->role |= CSIO_RNFR_INITIATOR;
		}

		break;

	case FDMI_VNPORT:
	case FAB_CTLR_VNPORT:
		rn->role = 0;
		break;

	default:
		csio_ln_err(ln, "ssni:x%x invalid rport type recv x%x\n",
			csio_rn_flowid(rn), rport_type);
		return -EINVAL;
	}

	/* validate wwpn/wwnn for Name server/remote port */
	if (rport_type == REG_VNPORT || rport_type == NS_VNPORT) {
		memset(null, 0, 8);
		if (!memcmp(rdevp->wwnn, null, 8)) {
			csio_ln_err(ln,
				    "ssni:x%x invalid wwnn received from"
				    " rport did:x%x\n",
				    csio_rn_flowid(rn),
				    (ntohl(*did) & CSIO_DID_MASK));
			return -EINVAL;
		}

		if (!memcmp(rdevp->wwpn, null, 8)) {
			csio_ln_err(ln,
				    "ssni:x%x invalid wwpn received from"
				    " rport did:x%x\n",
				    csio_rn_flowid(rn),
				    (ntohl(*did) & CSIO_DID_MASK));
			return -EINVAL;
		}

	}

	/* Copy wwnn, wwpn and nport id */
	rn->nport_id = (ntohl(*did) >> 8) & CSIO_DID_MASK;
	memcpy(csio_rn_wwnn(rn), rdevp->wwnn, 8);
	memcpy(csio_rn_wwpn(rn), rdevp->wwpn, 8);
	rn->rn_sparm.csp.sp_bb_data = rdevp->rcv_fr_sz;
	fc_class = FW_RDEV_WR_CLASS_GET(rdevp->vft_to_qos);
	/* NOTE(review): assumes firmware reports fc_class >= 1; a value of
	 * 0 would index clsp[-1] — confirm the firmware contract.
	 */
	rn->rn_sparm.clsp[fc_class - 1].cp_class = htons(FC_CPC_VALID);

	return 0;
}
538 
/*
 * __csio_reg_rnode - Registers the rnode with the upper layers.
 * @rn: rnode to register.
 *
 * Must be called with hw->lock held; the lock is dropped around
 * csio_reg_rnode() because registration may sleep, then reacquired.
 * Also bumps the lnode's SCSI target count for target-role rnodes and
 * kicks off FDMI on the management server port.
 */
static void
__csio_reg_rnode(struct csio_rnode *rn)
{
	struct csio_lnode *ln = csio_rnode_to_lnode(rn);
	struct csio_hw *hw = csio_lnode_to_hw(ln);

	/* csio_reg_rnode() may block; release the hw spinlock around it */
	spin_unlock_irq(&hw->lock);
	csio_reg_rnode(rn);
	spin_lock_irq(&hw->lock);

	if (rn->role & CSIO_RNFR_TARGET)
		ln->n_scsi_tgts++;

	if (rn->nport_id == FC_FID_MGMT_SERV)
		csio_ln_fdmi_start(ln, (void *) rn);
}
555 
/*
 * __csio_unreg_rnode - Unregisters the rnode from the upper layers.
 * @rn: rnode to unregister.
 *
 * Must be called with hw->lock held; the lock is dropped around
 * csio_unreg_rnode() and reacquired.  Any I/Os parked on the rnode's
 * host completion queue are spliced to a local list first (while still
 * under the lock) and cleaned up after unregistration completes.
 */
static void
__csio_unreg_rnode(struct csio_rnode *rn)
{
	struct csio_lnode *ln = csio_rnode_to_lnode(rn);
	struct csio_hw *hw = csio_lnode_to_hw(ln);
	LIST_HEAD(tmp_q);
	int cmpl = 0;

	/* Detach pending completions under the lock; drain them later */
	if (!list_empty(&rn->host_cmpl_q)) {
		csio_dbg(hw, "Returning completion queue I/Os\n");
		list_splice_tail_init(&rn->host_cmpl_q, &tmp_q);
		cmpl = 1;
	}

	if (rn->role & CSIO_RNFR_TARGET) {
		ln->n_scsi_tgts--;
		ln->last_scan_ntgts--;
	}

	/* csio_unreg_rnode() may block; release the hw spinlock around it */
	spin_unlock_irq(&hw->lock);
	csio_unreg_rnode(rn);
	spin_lock_irq(&hw->lock);

	/* Cleanup I/Os that were waiting for rnode to unregister */
	if (cmpl)
		csio_scsi_cleanup_io_q(csio_hw_to_scsim(hw), &tmp_q);

}
584 
585 /*****************************************************************************/
586 /* START: Rnode SM                                                           */
587 /*****************************************************************************/
588 
/*
 * csio_rns_uninit - SM handler for the UNINIT state.
 * @rn - rnode
 * @evt - SM event.
 *
 * A login event with valid rparams moves the rnode to READY and
 * registers it; LOGO is silently dropped (nothing to tear down);
 * everything else is counted as unexpected.
 */
static void
csio_rns_uninit(struct csio_rnode *rn, enum csio_rn_ev evt)
{
	struct csio_lnode *ln = csio_rnode_to_lnode(rn);
	int ret = 0;

	CSIO_INC_STATS(rn, n_evt_sm[evt]);

	switch (evt) {
	case CSIO_RNFE_LOGGED_IN:
	case CSIO_RNFE_PLOGI_RECV:
		ret = csio_rn_verify_rparams(ln, rn, rn->rdev_entry);
		if (!ret) {
			csio_set_state(&rn->sm, csio_rns_ready);
			__csio_reg_rnode(rn);
		} else {
			CSIO_INC_STATS(rn, n_err_inval);
		}
		break;
	case CSIO_RNFE_LOGO_RECV:
		csio_ln_dbg(ln,
			    "ssni:x%x Ignoring event %d recv "
			    "in rn state[uninit]\n", csio_rn_flowid(rn), evt);
		CSIO_INC_STATS(rn, n_evt_drop);
		break;
	default:
		csio_ln_dbg(ln,
			    "ssni:x%x unexp event %d recv "
			    "in rn state[uninit]\n", csio_rn_flowid(rn), evt);
		CSIO_INC_STATS(rn, n_evt_unexp);
		break;
	}
}
628 
629 /*
630  * csio_rns_ready -
631  * @rn - rnode
632  * @evt - SM event.
633  *
634  */
635 static void
636 csio_rns_ready(struct csio_rnode *rn, enum csio_rn_ev evt)
637 {
638 	struct csio_lnode *ln = csio_rnode_to_lnode(rn);
639 	int ret = 0;
640 
641 	CSIO_INC_STATS(rn, n_evt_sm[evt]);
642 
643 	switch (evt) {
644 	case CSIO_RNFE_LOGGED_IN:
645 	case CSIO_RNFE_PLOGI_RECV:
646 		csio_ln_dbg(ln,
647 			"ssni:x%x Ignoring event %d recv from did:x%x "
648 			"in rn state[ready]\n", csio_rn_flowid(rn), evt,
649 			rn->nport_id);
650 		CSIO_INC_STATS(rn, n_evt_drop);
651 		break;
652 
653 	case CSIO_RNFE_PRLI_DONE:
654 	case CSIO_RNFE_PRLI_RECV:
655 		ret = csio_rn_verify_rparams(ln, rn, rn->rdev_entry);
656 		if (!ret)
657 			__csio_reg_rnode(rn);
658 		else
659 			CSIO_INC_STATS(rn, n_err_inval);
660 
661 		break;
662 	case CSIO_RNFE_DOWN:
663 		csio_set_state(&rn->sm, csio_rns_offline);
664 		__csio_unreg_rnode(rn);
665 
666 		/* FW expected to internally aborted outstanding SCSI WRs
667 		 * and return all SCSI WRs to host with status "ABORTED".
668 		 */
669 		break;
670 
671 	case CSIO_RNFE_LOGO_RECV:
672 		csio_set_state(&rn->sm, csio_rns_offline);
673 
674 		__csio_unreg_rnode(rn);
675 
676 		/* FW expected to internally aborted outstanding SCSI WRs
677 		 * and return all SCSI WRs to host with status "ABORTED".
678 		 */
679 		break;
680 
681 	case CSIO_RNFE_CLOSE:
682 		/*
683 		 * Each rnode receives CLOSE event when driver is removed or
684 		 * device is reset
685 		 * Note: All outstanding IOs on remote port need to returned
686 		 * to uppper layer with appropriate error before sending
687 		 * CLOSE event
688 		 */
689 		csio_set_state(&rn->sm, csio_rns_uninit);
690 		__csio_unreg_rnode(rn);
691 		break;
692 
693 	case CSIO_RNFE_NAME_MISSING:
694 		csio_set_state(&rn->sm, csio_rns_disappeared);
695 		__csio_unreg_rnode(rn);
696 
697 		/*
698 		 * FW expected to internally aborted outstanding SCSI WRs
699 		 * and return all SCSI WRs to host with status "ABORTED".
700 		 */
701 
702 		break;
703 
704 	default:
705 		csio_ln_dbg(ln,
706 			"ssni:x%x unexp event %d recv from did:x%x "
707 			"in rn state[uninit]\n", csio_rn_flowid(rn), evt,
708 			rn->nport_id);
709 		CSIO_INC_STATS(rn, n_evt_unexp);
710 		break;
711 	}
712 }
713 
/*
 * csio_rns_offline - SM handler for the OFFLINE state.
 * @rn - rnode
 * @evt - SM event.
 *
 * A re-login with valid rparams brings the rnode back to READY and
 * re-registers it; an invalid re-login posts CLOSE to tear the rnode
 * down.  DOWN is dropped (already offline); CLOSE moves to UNINIT;
 * NAME_MISSING moves to DISAPPEARED (already unregistered).
 */
static void
csio_rns_offline(struct csio_rnode *rn, enum csio_rn_ev evt)
{
	struct csio_lnode *ln = csio_rnode_to_lnode(rn);
	int ret = 0;

	CSIO_INC_STATS(rn, n_evt_sm[evt]);

	switch (evt) {
	case CSIO_RNFE_LOGGED_IN:
	case CSIO_RNFE_PLOGI_RECV:
		ret = csio_rn_verify_rparams(ln, rn, rn->rdev_entry);
		if (!ret) {
			csio_set_state(&rn->sm, csio_rns_ready);
			__csio_reg_rnode(rn);
		} else {
			CSIO_INC_STATS(rn, n_err_inval);
			/* Bad rparams on re-login: tear the rnode down */
			csio_post_event(&rn->sm, CSIO_RNFE_CLOSE);
		}
		break;

	case CSIO_RNFE_DOWN:
		csio_ln_dbg(ln,
			"ssni:x%x Ignoring event %d recv from did:x%x "
			"in rn state[offline]\n", csio_rn_flowid(rn), evt,
			rn->nport_id);
		CSIO_INC_STATS(rn, n_evt_drop);
		break;

	case CSIO_RNFE_CLOSE:
		/* Each rnode receives CLOSE event when driver is removed or
		 * device is reset
		 * Note: All outstanding IOs on remote port need to returned
		 * to uppper layer with appropriate error before sending
		 * CLOSE event
		 */
		csio_set_state(&rn->sm, csio_rns_uninit);
		break;

	case CSIO_RNFE_NAME_MISSING:
		csio_set_state(&rn->sm, csio_rns_disappeared);
		break;

	default:
		csio_ln_dbg(ln,
			"ssni:x%x unexp event %d recv from did:x%x "
			"in rn state[offline]\n", csio_rn_flowid(rn), evt,
			rn->nport_id);
		CSIO_INC_STATS(rn, n_evt_unexp);
		break;
	}
}
772 
773 /*
774  * csio_rns_disappeared -
775  * @rn - rnode
776  * @evt - SM event.
777  *
778  */
779 static void
780 csio_rns_disappeared(struct csio_rnode *rn, enum csio_rn_ev evt)
781 {
782 	struct csio_lnode *ln = csio_rnode_to_lnode(rn);
783 	int ret = 0;
784 
785 	CSIO_INC_STATS(rn, n_evt_sm[evt]);
786 
787 	switch (evt) {
788 	case CSIO_RNFE_LOGGED_IN:
789 	case CSIO_RNFE_PLOGI_RECV:
790 		ret = csio_rn_verify_rparams(ln, rn, rn->rdev_entry);
791 		if (!ret) {
792 			csio_set_state(&rn->sm, csio_rns_ready);
793 			__csio_reg_rnode(rn);
794 		} else {
795 			CSIO_INC_STATS(rn, n_err_inval);
796 			csio_post_event(&rn->sm, CSIO_RNFE_CLOSE);
797 		}
798 		break;
799 
800 	case CSIO_RNFE_CLOSE:
801 		/* Each rnode receives CLOSE event when driver is removed or
802 		 * device is reset.
803 		 * Note: All outstanding IOs on remote port need to returned
804 		 * to uppper layer with appropriate error before sending
805 		 * CLOSE event
806 		 */
807 		csio_set_state(&rn->sm, csio_rns_uninit);
808 		break;
809 
810 	case CSIO_RNFE_DOWN:
811 	case CSIO_RNFE_NAME_MISSING:
812 		csio_ln_dbg(ln,
813 			"ssni:x%x Ignoring event %d recv from did x%x"
814 			"in rn state[disappeared]\n", csio_rn_flowid(rn),
815 			evt, rn->nport_id);
816 		break;
817 
818 	default:
819 		csio_ln_dbg(ln,
820 			"ssni:x%x unexp event %d recv from did x%x"
821 			"in rn state[disappeared]\n", csio_rn_flowid(rn),
822 			evt, rn->nport_id);
823 		CSIO_INC_STATS(rn, n_evt_unexp);
824 		break;
825 	}
826 }
827 
828 /*****************************************************************************/
829 /* END: Rnode SM                                                             */
830 /*****************************************************************************/
831 
832 /*
833  * csio_rnode_devloss_handler - Device loss event handler
834  * @rn: rnode
835  *
836  * Post event to close rnode SM and free rnode.
837  */
838 void
839 csio_rnode_devloss_handler(struct csio_rnode *rn)
840 {
841 	struct csio_lnode *ln = csio_rnode_to_lnode(rn);
842 
843 	/* ignore if same rnode came back as online */
844 	if (csio_is_rnode_ready(rn))
845 		return;
846 
847 	csio_post_event(&rn->sm, CSIO_RNFE_CLOSE);
848 
849 	/* Free rn if in uninit state */
850 	if (csio_is_rnode_uninit(rn))
851 		csio_put_rnode(ln, rn);
852 }
853 
/**
 * csio_rnode_fwevt_handler - Event handler for firmware rnode events.
 * @rn:		rnode
 * @fwevt:	firmware rdev event code
 *
 * Translates the firmware event to an rnode SM event, posts it, and
 * frees the rnode if the SM ended up in the UNINIT state.
 */
void
csio_rnode_fwevt_handler(struct csio_rnode *rn, uint8_t fwevt)
{
	struct csio_lnode *ln = csio_rnode_to_lnode(rn);
	enum csio_rn_ev evt;

	evt = CSIO_FWE_TO_RNFE(fwevt);
	/* NOTE(review): relies on CSIO_RNFE_NONE evaluating to 0 so that
	 * unmapped events fail this check — confirm against the enum.
	 */
	if (!evt) {
		csio_ln_err(ln, "ssni:x%x Unhandled FW Rdev event: %d\n",
			    csio_rn_flowid(rn), fwevt);
		CSIO_INC_STATS(rn, n_evt_unexp);
		return;
	}
	CSIO_INC_STATS(rn, n_evt_fw[fwevt]);

	/* Track previous & current events for debugging */
	rn->prev_evt = rn->cur_evt;
	rn->cur_evt = fwevt;

	/* Post event to rnode SM */
	csio_post_event(&rn->sm, evt);

	/* Free rn if in uninit state */
	if (csio_is_rnode_uninit(rn))
		csio_put_rnode(ln, rn);
}
885 
/*
 * csio_rnode_init - Initialize rnode.
 * @rn: RNode
 * @ln: Associated lnode
 *
 * Sets the back-pointer to the lnode, starts the SM in the UNINIT
 * state, and links the rnode onto the lnode's rnhead list.  Always
 * returns 0 (the int return keeps the alloc path's error-check shape).
 *
 * Caller is responsible for holding the lock. The lock is required
 * to be held for inserting the rnode in ln->rnhead list.
 */
static int
csio_rnode_init(struct csio_rnode *rn, struct csio_lnode *ln)
{
	csio_rnode_to_lnode(rn) = ln;
	csio_init_state(&rn->sm, csio_rns_uninit);
	INIT_LIST_HEAD(&rn->host_cmpl_q);
	/* No firmware flowid assigned yet */
	csio_rn_flowid(rn) = CSIO_INVALID_IDX;

	/* Add rnode to list of lnodes->rnhead */
	list_add_tail(&rn->sm.sm_list, &ln->rnhead);

	return 0;
}
907 
/*
 * csio_rnode_exit - Tear-down counterpart of csio_rnode_init.
 * @rn: rnode being released.
 *
 * Unlinks the rnode from its lnode list; the completion queue must
 * already have been drained (asserted in debug builds).
 */
static void
csio_rnode_exit(struct csio_rnode *rn)
{
	list_del_init(&rn->sm.sm_list);
	CSIO_DB_ASSERT(list_empty(&rn->host_cmpl_q));
}
914