xref: /linux/drivers/scsi/csiostor/csio_attr.c (revision bfd5bb6f90af092aa345b15cd78143956a13c2a8)
1 /*
2  * This file is part of the Chelsio FCoE driver for Linux.
3  *
4  * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
5  *
6  * This software is available to you under a choice of one of two
7  * licenses.  You may choose to be licensed under the terms of the GNU
8  * General Public License (GPL) Version 2, available from the file
9  * COPYING in the main directory of this source tree, or the
10  * OpenIB.org BSD license below:
11  *
12  *     Redistribution and use in source and binary forms, with or
13  *     without modification, are permitted provided that the following
14  *     conditions are met:
15  *
16  *      - Redistributions of source code must retain the above
17  *        copyright notice, this list of conditions and the following
18  *        disclaimer.
19  *
20  *      - Redistributions in binary form must reproduce the above
21  *        copyright notice, this list of conditions and the following
22  *        disclaimer in the documentation and/or other materials
23  *        provided with the distribution.
24  *
25  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32  * SOFTWARE.
33  */
34 
35 #include <linux/kernel.h>
36 #include <linux/string.h>
37 #include <linux/delay.h>
38 #include <linux/module.h>
39 #include <linux/init.h>
40 #include <linux/pci.h>
41 #include <linux/mm.h>
42 #include <linux/jiffies.h>
43 #include <scsi/fc/fc_fs.h>
44 
45 #include "csio_init.h"
46 
47 static void
48 csio_vport_set_state(struct csio_lnode *ln);
49 
50 /*
51  * csio_reg_rnode - Register a remote port with the FC transport.
52  * @rn: Rnode representing remote port.
53  *
54  * Call fc_remote_port_add() to register this remote port with the FC transport.
55  * If the remote port is an initiator, a target, or both, update its role accordingly.
56  *
57  */
58 void
59 csio_reg_rnode(struct csio_rnode *rn)
60 {
61 	struct csio_lnode *ln		= csio_rnode_to_lnode(rn);
62 	struct Scsi_Host *shost		= csio_ln_to_shost(ln);
63 	struct fc_rport_identifiers ids;
64 	struct fc_rport  *rport;
65 	struct csio_service_parms *sp;
66 
67 	ids.node_name	= wwn_to_u64(csio_rn_wwnn(rn));
68 	ids.port_name	= wwn_to_u64(csio_rn_wwpn(rn));
69 	ids.port_id	= rn->nport_id;
70 	ids.roles	= FC_RPORT_ROLE_UNKNOWN;
71 
72 	if (rn->role & CSIO_RNFR_INITIATOR || rn->role & CSIO_RNFR_TARGET) {
73 		rport = rn->rport;
74 		CSIO_ASSERT(rport != NULL);
75 		goto update_role;
76 	}
77 
78 	rn->rport = fc_remote_port_add(shost, 0, &ids);
79 	if (!rn->rport) {
80 		csio_ln_err(ln, "Failed to register rport = 0x%x.\n",
81 					rn->nport_id);
82 		return;
83 	}
84 
85 	ln->num_reg_rnodes++;
86 	rport = rn->rport;
87 	spin_lock_irq(shost->host_lock);
88 	*((struct csio_rnode **)rport->dd_data) = rn;
89 	spin_unlock_irq(shost->host_lock);
90 
91 	sp = &rn->rn_sparm;
92 	rport->maxframe_size = ntohs(sp->csp.sp_bb_data);
93 	if (ntohs(sp->clsp[2].cp_class) & FC_CPC_VALID)
94 		rport->supported_classes = FC_COS_CLASS3;
95 	else
96 		rport->supported_classes = FC_COS_UNSPECIFIED;
97 update_role:
98 	if (rn->role & CSIO_RNFR_INITIATOR)
99 		ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;
100 	if (rn->role & CSIO_RNFR_TARGET)
101 		ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
102 
103 	if (ids.roles != FC_RPORT_ROLE_UNKNOWN)
104 		fc_remote_port_rolechg(rport, ids.roles);
105 
106 	rn->scsi_id = rport->scsi_target_id;
107 
108 	csio_ln_dbg(ln, "Remote port x%x role 0x%x registered\n",
109 		rn->nport_id, ids.roles);
110 }
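/*
 * Illustrative sketch (not part of the driver) of the dd_data round trip
 * used above. Because the templates at the bottom of this file set
 * dd_fcrport_size to sizeof(struct csio_rnode *), the FC transport
 * allocates that much private space per rport; the driver parks its
 * rnode pointer there after fc_remote_port_add(), and callbacks such as
 * csio_dev_loss_tmo_callbk() recover it the same way. The example_*
 * names are hypothetical.
 */
static inline void example_store_rnode(struct fc_rport *rport,
				       struct csio_rnode *rn)
{
	*((struct csio_rnode **)rport->dd_data) = rn;	/* as done above */
}

static inline struct csio_rnode *example_fetch_rnode(struct fc_rport *rport)
{
	/* as done in csio_dev_loss_tmo_callbk() below */
	return *((struct csio_rnode **)rport->dd_data);
}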
111 
112 /*
113  * csio_unreg_rnode - Unregister a remote port from the FC transport.
114  * @rn: Rnode representing remote port.
115  *
116  * Call fc_remote_port_delete() to unregister this remote port from the
117  * FC transport.
118  *
119  */
120 void
121 csio_unreg_rnode(struct csio_rnode *rn)
122 {
123 	struct csio_lnode *ln = csio_rnode_to_lnode(rn);
124 	struct fc_rport *rport = rn->rport;
125 
126 	rn->role &= ~(CSIO_RNFR_INITIATOR | CSIO_RNFR_TARGET);
127 	fc_remote_port_delete(rport);
128 	ln->num_reg_rnodes--;
129 
130 	csio_ln_dbg(ln, "Remote port x%x un-registered\n", rn->nport_id);
131 }
132 
133 /*
134  * csio_lnode_async_event - Async events from local port.
135  * @ln: lnode representing local port.
136  *
137  * Async events from the local port that the FC transport/SCSI midlayer
138  * should be made aware of (e.g. RSCN).
139  */
140 void
141 csio_lnode_async_event(struct csio_lnode *ln, enum csio_ln_fc_evt fc_evt)
142 {
143 	switch (fc_evt) {
144 	case CSIO_LN_FC_RSCN:
145 		/* Get payload of rscn from ln */
146 		/* For each RSCN entry */
147 			/*
148 			 * fc_host_post_event(shost,
149 			 *		      fc_get_event_number(),
150 			 *		      FCH_EVT_RSCN,
151 			 *		      rscn_entry);
152 			 */
153 		break;
154 	case CSIO_LN_FC_LINKUP:
155 		/* send fc_host_post_event */
156 		/* set vport state */
157 		if (csio_is_npiv_ln(ln))
158 			csio_vport_set_state(ln);
159 
160 		break;
161 	case CSIO_LN_FC_LINKDOWN:
162 		/* send fc_host_post_event */
163 		/* set vport state */
164 		if (csio_is_npiv_ln(ln))
165 			csio_vport_set_state(ln);
166 
167 		break;
168 	case CSIO_LN_FC_ATTRIB_UPDATE:
169 		csio_fchost_attr_init(ln);
170 		break;
171 	default:
172 		break;
173 	}
174 }
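/*
 * Hedged sketch of what the commented-out RSCN handling above could
 * look like once the RSCN payload has been extracted from the lnode.
 * The example_* name and the flat array of affected port IDs are
 * assumptions for illustration, not the driver's actual data layout;
 * only fc_host_post_event()/fc_get_event_number()/FCH_EVT_RSCN are
 * real FC transport interfaces.
 */
static void example_post_rscn_events(struct csio_lnode *ln,
				     const u32 *rscn_port_ids,
				     unsigned int nr_entries)
{
	struct Scsi_Host *shost = csio_ln_to_shost(ln);
	unsigned int i;

	for (i = 0; i < nr_entries; i++)
		fc_host_post_event(shost, fc_get_event_number(),
				   FCH_EVT_RSCN, rscn_port_ids[i]);
}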
175 
176 /*
177  * csio_fchost_attr_init - Initialize FC transport attributes
178  * @ln: Lnode.
179  *
180  */
181 void
182 csio_fchost_attr_init(struct csio_lnode *ln)
183 {
184 	struct Scsi_Host  *shost = csio_ln_to_shost(ln);
185 
186 	fc_host_node_name(shost) = wwn_to_u64(csio_ln_wwnn(ln));
187 	fc_host_port_name(shost) = wwn_to_u64(csio_ln_wwpn(ln));
188 
189 	fc_host_supported_classes(shost) = FC_COS_CLASS3;
190 	fc_host_max_npiv_vports(shost) =
191 			(csio_lnode_to_hw(ln))->fres_info.max_vnps;
192 	fc_host_supported_speeds(shost) = FC_PORTSPEED_10GBIT |
193 		FC_PORTSPEED_1GBIT;
194 
195 	fc_host_maxframe_size(shost) = ntohs(ln->ln_sparm.csp.sp_bb_data);
196 	memset(fc_host_supported_fc4s(shost), 0,
197 		sizeof(fc_host_supported_fc4s(shost)));
198 	fc_host_supported_fc4s(shost)[7] = 1;
199 
200 	memset(fc_host_active_fc4s(shost), 0,
201 		sizeof(fc_host_active_fc4s(shost)));
202 	fc_host_active_fc4s(shost)[7] = 1;
203 }
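/*
 * Note (hedged): the fc_host_*() macros above write into the FC
 * transport's per-host attribute cache, so the matching
 * show_host_* = 1 entries in the templates at the bottom of this file
 * expose these values through sysfs without needing a get_* callback.
 * Attributes that can change at runtime (port_id, port_type,
 * port_state, speed, fabric_name) instead get the explicit
 * get_host_*() callbacks below, which refresh the cached value under
 * hw->lock before each read.
 */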
204 
205 /*
206  * csio_get_host_port_id - The sysfs entry for nport_id is
207  * populated/cached from this function.
208  */
209 static void
210 csio_get_host_port_id(struct Scsi_Host *shost)
211 {
212 	struct csio_lnode *ln	= shost_priv(shost);
213 	struct csio_hw *hw = csio_lnode_to_hw(ln);
214 
215 	spin_lock_irq(&hw->lock);
216 	fc_host_port_id(shost) = ln->nport_id;
217 	spin_unlock_irq(&hw->lock);
218 }
219 
220 /*
221  * csio_get_host_port_type - Return FC local port type.
222  * @shost: scsi host.
223  *
224  */
225 static void
226 csio_get_host_port_type(struct Scsi_Host *shost)
227 {
228 	struct csio_lnode *ln = shost_priv(shost);
229 	struct csio_hw *hw = csio_lnode_to_hw(ln);
230 
231 	spin_lock_irq(&hw->lock);
232 	if (csio_is_npiv_ln(ln))
233 		fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
234 	else
235 		fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
236 	spin_unlock_irq(&hw->lock);
237 }
238 
239 /*
240  * csio_get_host_port_state - Return FC local port state.
241  * @shost: scsi host.
242  *
243  */
244 static void
245 csio_get_host_port_state(struct Scsi_Host *shost)
246 {
247 	struct csio_lnode *ln = shost_priv(shost);
248 	struct csio_hw *hw = csio_lnode_to_hw(ln);
249 	char state[16];
250 
251 	spin_lock_irq(&hw->lock);
252 
253 	csio_lnode_state_to_str(ln, state);
254 	if (!strcmp(state, "READY"))
255 		fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
256 	else if (!strcmp(state, "OFFLINE"))
257 		fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
258 	else
259 		fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
260 
261 	spin_unlock_irq(&hw->lock);
262 }
263 
264 /*
265  * csio_get_host_speed - Return link speed to FC transport.
266  * @shost: scsi host.
267  *
268  */
269 static void
270 csio_get_host_speed(struct Scsi_Host *shost)
271 {
272 	struct csio_lnode *ln = shost_priv(shost);
273 	struct csio_hw *hw = csio_lnode_to_hw(ln);
274 
275 	spin_lock_irq(&hw->lock);
276 	switch (hw->pport[ln->portid].link_speed) {
277 	case FW_PORT_CAP32_SPEED_1G:
278 		fc_host_speed(shost) = FC_PORTSPEED_1GBIT;
279 		break;
280 	case FW_PORT_CAP32_SPEED_10G:
281 		fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
282 		break;
283 	case FW_PORT_CAP32_SPEED_25G:
284 		fc_host_speed(shost) = FC_PORTSPEED_25GBIT;
285 		break;
286 	case FW_PORT_CAP32_SPEED_40G:
287 		fc_host_speed(shost) = FC_PORTSPEED_40GBIT;
288 		break;
289 	case FW_PORT_CAP32_SPEED_50G:
290 		fc_host_speed(shost) = FC_PORTSPEED_50GBIT;
291 		break;
292 	case FW_PORT_CAP32_SPEED_100G:
293 		fc_host_speed(shost) = FC_PORTSPEED_100GBIT;
294 		break;
295 	default:
296 		fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
297 		break;
298 	}
299 	spin_unlock_irq(&hw->lock);
300 }
301 
302 /*
303  * csio_get_host_fabric_name - Return fabric name
304  * @shost: scsi host.
305  *
306  */
307 static void
308 csio_get_host_fabric_name(struct Scsi_Host *shost)
309 {
310 	struct csio_lnode *ln = shost_priv(shost);
311 	struct csio_rnode *rn = NULL;
312 	struct csio_hw *hw = csio_lnode_to_hw(ln);
313 
314 	spin_lock_irq(&hw->lock);
315 	rn = csio_rnode_lookup_portid(ln, FC_FID_FLOGI);
316 	if (rn)
317 		fc_host_fabric_name(shost) = wwn_to_u64(csio_rn_wwnn(rn));
318 	else
319 		fc_host_fabric_name(shost) = 0;
320 	spin_unlock_irq(&hw->lock);
321 }
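/*
 * Note (hedged): FC_FID_FLOGI is the well-known fabric login address
 * (0xFFFFFE), so the lookup above finds the rnode that represents the
 * fabric itself; its WWNN is what the transport reports as the fabric
 * name, and 0 is reported when no such rnode exists (e.g. link down).
 */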
322 
323 /*
324  * csio_get_stats - Return FC transport statistics.
325  * @shost: scsi host.
326  *
327  */
328 static struct fc_host_statistics *
329 csio_get_stats(struct Scsi_Host *shost)
330 {
331 	struct csio_lnode *ln = shost_priv(shost);
332 	struct csio_hw *hw = csio_lnode_to_hw(ln);
333 	struct fc_host_statistics *fhs = &ln->fch_stats;
334 	struct fw_fcoe_port_stats fcoe_port_stats;
335 	uint64_t seconds;
336 
337 	memset(&fcoe_port_stats, 0, sizeof(struct fw_fcoe_port_stats));
338 	csio_get_phy_port_stats(hw, ln->portid, &fcoe_port_stats);
339 
340 	fhs->tx_frames  += (be64_to_cpu(fcoe_port_stats.tx_bcast_frames) +
341 			    be64_to_cpu(fcoe_port_stats.tx_mcast_frames) +
342 			    be64_to_cpu(fcoe_port_stats.tx_ucast_frames) +
343 			    be64_to_cpu(fcoe_port_stats.tx_offload_frames));
344 	fhs->tx_words  += (be64_to_cpu(fcoe_port_stats.tx_bcast_bytes) +
345 			   be64_to_cpu(fcoe_port_stats.tx_mcast_bytes) +
346 			   be64_to_cpu(fcoe_port_stats.tx_ucast_bytes) +
347 			   be64_to_cpu(fcoe_port_stats.tx_offload_bytes)) /
348 							CSIO_WORD_TO_BYTE;
349 	fhs->rx_frames += (be64_to_cpu(fcoe_port_stats.rx_bcast_frames) +
350 			   be64_to_cpu(fcoe_port_stats.rx_mcast_frames) +
351 			   be64_to_cpu(fcoe_port_stats.rx_ucast_frames));
352 	fhs->rx_words += (be64_to_cpu(fcoe_port_stats.rx_bcast_bytes) +
353 			  be64_to_cpu(fcoe_port_stats.rx_mcast_bytes) +
354 			  be64_to_cpu(fcoe_port_stats.rx_ucast_bytes)) /
355 							CSIO_WORD_TO_BYTE;
356 	fhs->error_frames += be64_to_cpu(fcoe_port_stats.rx_err_frames);
357 	fhs->fcp_input_requests +=  ln->stats.n_input_requests;
358 	fhs->fcp_output_requests +=  ln->stats.n_output_requests;
359 	fhs->fcp_control_requests +=  ln->stats.n_control_requests;
360 	fhs->fcp_input_megabytes +=  ln->stats.n_input_bytes >> 20;
361 	fhs->fcp_output_megabytes +=  ln->stats.n_output_bytes >> 20;
362 	fhs->link_failure_count = ln->stats.n_link_down;
363 	/* Compute seconds elapsed since the last stats reset */
364 	seconds = jiffies_to_msecs(jiffies) - hw->stats.n_reset_start;
365 	do_div(seconds, 1000);
366 	fhs->seconds_since_last_reset = seconds;
367 
368 	return fhs;
369 }
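/*
 * Minimal sketch (assumed helper, not part of the driver) of the
 * do_div() idiom used above: do_div() divides its 64-bit lvalue in
 * place and returns the 32-bit remainder, which is why "seconds"
 * holds milliseconds going in and whole seconds coming out.
 */
static inline u64 example_msecs_to_secs(u64 msecs)
{
	u32 rem = do_div(msecs, 1000);	/* msecs now holds the quotient */

	(void)rem;			/* leftover milliseconds, unused */
	return msecs;
}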
370 
371 /*
372  * csio_set_rport_loss_tmo - Set the rport dev loss timeout
373  * @rport: fc rport.
374  * @timeout: new value for dev loss tmo.
375  *
376  * If timeout is non-zero, set dev_loss_tmo to timeout; otherwise set
377  * dev_loss_tmo to one.
378  */
379 static void
380 csio_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout)
381 {
382 	if (timeout)
383 		rport->dev_loss_tmo = timeout;
384 	else
385 		rport->dev_loss_tmo = 1;
386 }
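/*
 * Note (hedged): this callback is normally driven by the FC transport
 * when userspace writes the rport's dev_loss_tmo sysfs attribute;
 * clamping zero to one keeps a minimal, non-zero timeout in place
 * rather than allowing an immediate device loss.
 */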
387 
388 static void
389 csio_vport_set_state(struct csio_lnode *ln)
390 {
391 	struct fc_vport *fc_vport = ln->fc_vport;
392 	struct csio_lnode  *pln = ln->pln;
393 	char state[16];
394 
395 	/* Set fc vport state based on physical lnode */
396 	csio_lnode_state_to_str(pln, state);
397 	if (strcmp(state, "READY")) {
398 		fc_vport_set_state(fc_vport, FC_VPORT_LINKDOWN);
399 		return;
400 	}
401 
402 	if (!(pln->flags & CSIO_LNF_NPIVSUPP)) {
403 		fc_vport_set_state(fc_vport, FC_VPORT_NO_FABRIC_SUPP);
404 		return;
405 	}
406 
407 	/* Set fc vport state based on virtual lnode */
408 	csio_lnode_state_to_str(ln, state);
409 	if (strcmp(state, "READY")) {
410 		fc_vport_set_state(fc_vport, FC_VPORT_LINKDOWN);
411 		return;
412 	}
413 	fc_vport_set_state(fc_vport, FC_VPORT_ACTIVE);
414 }
415 
416 static int
417 csio_fcoe_alloc_vnp(struct csio_hw *hw, struct csio_lnode *ln)
418 {
419 	struct csio_lnode *pln;
420 	struct csio_mb  *mbp;
421 	struct fw_fcoe_vnp_cmd *rsp;
422 	int ret = 0;
423 	int retry = 0;
424 
425 	/* Issue VNP cmd to alloc vport */
426 	/* Allocate Mbox request */
427 	spin_lock_irq(&hw->lock);
428 	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
429 	if (!mbp) {
430 		CSIO_INC_STATS(hw, n_err_nomem);
431 		ret = -ENOMEM;
432 		goto out;
433 	}
434 
435 	pln = ln->pln;
436 	ln->fcf_flowid = pln->fcf_flowid;
437 	ln->portid = pln->portid;
438 
439 	csio_fcoe_vnp_alloc_init_mb(ln, mbp, CSIO_MB_DEFAULT_TMO,
440 				    pln->fcf_flowid, pln->vnp_flowid, 0,
441 				    csio_ln_wwnn(ln), csio_ln_wwpn(ln), NULL);
442 
443 	for (retry = 0; retry < 3; retry++) {
444 		/* FW is expected to complete the VNP command in immediate
445 		 * mode without much delay. Otherwise, I/O latency will
446 		 * increase since the HW lock is held until the VNP mailbox
447 		 * command completes.
448 		 */
449 		ret = csio_mb_issue(hw, mbp);
450 		if (ret != -EBUSY)
451 			break;
452 
453 		/* Mbox busy: drop the HW lock (msleep() sleeps) and retry */
454 		spin_unlock_irq(&hw->lock);
455 		msleep(2000);
456 		spin_lock_irq(&hw->lock);
457 	}
458 
459 	if (ret) {
460 		csio_ln_err(ln, "Failed to issue mbox FCoE VNP command\n");
461 		goto out_free;
462 	}
463 
464 	/* Process Mbox response of VNP command */
465 	rsp = (struct fw_fcoe_vnp_cmd *)(mbp->mb);
466 	if (FW_CMD_RETVAL_G(ntohl(rsp->alloc_to_len16)) != FW_SUCCESS) {
467 		csio_ln_err(ln, "FCOE VNP ALLOC cmd returned 0x%x!\n",
468 			    FW_CMD_RETVAL_G(ntohl(rsp->alloc_to_len16)));
469 		ret = -EINVAL;
470 		goto out_free;
471 	}
472 
473 	ln->vnp_flowid = FW_FCOE_VNP_CMD_VNPI_GET(
474 				ntohl(rsp->gen_wwn_to_vnpi));
475 	memcpy(csio_ln_wwnn(ln), rsp->vnport_wwnn, 8);
476 	memcpy(csio_ln_wwpn(ln), rsp->vnport_wwpn, 8);
477 
478 	csio_ln_dbg(ln, "FCOE VNPI: 0x%x\n", ln->vnp_flowid);
479 	csio_ln_dbg(ln, "\tWWNN: %x%x%x%x%x%x%x%x\n",
480 		    ln->ln_sparm.wwnn[0], ln->ln_sparm.wwnn[1],
481 		    ln->ln_sparm.wwnn[2], ln->ln_sparm.wwnn[3],
482 		    ln->ln_sparm.wwnn[4], ln->ln_sparm.wwnn[5],
483 		    ln->ln_sparm.wwnn[6], ln->ln_sparm.wwnn[7]);
484 	csio_ln_dbg(ln, "\tWWPN: %x%x%x%x%x%x%x%x\n",
485 		    ln->ln_sparm.wwpn[0], ln->ln_sparm.wwpn[1],
486 		    ln->ln_sparm.wwpn[2], ln->ln_sparm.wwpn[3],
487 		    ln->ln_sparm.wwpn[4], ln->ln_sparm.wwpn[5],
488 		    ln->ln_sparm.wwpn[6], ln->ln_sparm.wwpn[7]);
489 
490 out_free:
491 	mempool_free(mbp, hw->mb_mempool);
492 out:
493 	spin_unlock_irq(&hw->lock);
494 	return ret;
495 }
496 
497 static int
498 csio_fcoe_free_vnp(struct csio_hw *hw, struct csio_lnode *ln)
499 {
500 	struct csio_lnode *pln;
501 	struct csio_mb  *mbp;
502 	struct fw_fcoe_vnp_cmd *rsp;
503 	int ret = 0;
504 	int retry = 0;
505 
506 	/* Issue VNP cmd to free vport */
507 	/* Allocate Mbox request */
508 
509 	spin_lock_irq(&hw->lock);
510 	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
511 	if (!mbp) {
512 		CSIO_INC_STATS(hw, n_err_nomem);
513 		ret = -ENOMEM;
514 		goto out;
515 	}
516 
517 	pln = ln->pln;
518 
519 	csio_fcoe_vnp_free_init_mb(ln, mbp, CSIO_MB_DEFAULT_TMO,
520 				   ln->fcf_flowid, ln->vnp_flowid,
521 				   NULL);
522 
523 	for (retry = 0; retry < 3; retry++) {
524 		ret = csio_mb_issue(hw, mbp);
525 		if (ret != -EBUSY)
526 			break;
527 
528 		/* Mbox busy: drop the HW lock (msleep() sleeps) and retry */
529 		spin_unlock_irq(&hw->lock);
530 		msleep(2000);
531 		spin_lock_irq(&hw->lock);
532 	}
533 
534 	if (ret) {
535 		csio_ln_err(ln, "Failed to issue mbox FCoE VNP command\n");
536 		goto out_free;
537 	}
538 
539 	/* Process Mbox response of VNP command */
540 	rsp = (struct fw_fcoe_vnp_cmd *)(mbp->mb);
541 	if (FW_CMD_RETVAL_G(ntohl(rsp->alloc_to_len16)) != FW_SUCCESS) {
542 		csio_ln_err(ln, "FCOE VNP FREE cmd returned 0x%x!\n",
543 			    FW_CMD_RETVAL_G(ntohl(rsp->alloc_to_len16)));
544 		ret = -EINVAL;
545 	}
546 
547 out_free:
548 	mempool_free(mbp, hw->mb_mempool);
549 out:
550 	spin_unlock_irq(&hw->lock);
551 	return ret;
552 }
553 
554 static int
555 csio_vport_create(struct fc_vport *fc_vport, bool disable)
556 {
557 	struct Scsi_Host *shost = fc_vport->shost;
558 	struct csio_lnode *pln = shost_priv(shost);
559 	struct csio_lnode *ln = NULL;
560 	struct csio_hw *hw = csio_lnode_to_hw(pln);
561 	uint8_t wwn[8];
562 	int ret = -1;
563 
564 	ln = csio_shost_init(hw, &fc_vport->dev, false, pln);
565 	if (!ln)
566 		goto error;
567 
568 	if (fc_vport->node_name != 0) {
569 		u64_to_wwn(fc_vport->node_name, wwn);
570 
571 		if (!CSIO_VALID_WWN(wwn)) {
572 			csio_ln_err(ln,
573 				    "vport create failed. Invalid wwnn\n");
574 			goto error;
575 		}
576 		memcpy(csio_ln_wwnn(ln), wwn, 8);
577 	}
578 
579 	if (fc_vport->port_name != 0) {
580 		u64_to_wwn(fc_vport->port_name, wwn);
581 
582 		if (!CSIO_VALID_WWN(wwn)) {
583 			csio_ln_err(ln,
584 				    "vport create failed. Invalid wwpn\n");
585 			goto error;
586 		}
587 
588 		if (csio_lnode_lookup_by_wwpn(hw, wwn)) {
589 			csio_ln_err(ln,
590 			    "vport create failed. wwpn already exists\n");
591 			goto error;
592 		}
593 		memcpy(csio_ln_wwpn(ln), wwn, 8);
594 	}
595 
596 	fc_vport_set_state(fc_vport, FC_VPORT_INITIALIZING);
597 
598 	if (csio_fcoe_alloc_vnp(hw, ln))
599 		goto error;
600 
601 	*(struct csio_lnode **)fc_vport->dd_data = ln;
602 	ln->fc_vport = fc_vport;
603 	if (!fc_vport->node_name)
604 		fc_vport->node_name = wwn_to_u64(csio_ln_wwnn(ln));
605 	if (!fc_vport->port_name)
606 		fc_vport->port_name = wwn_to_u64(csio_ln_wwpn(ln));
607 	csio_fchost_attr_init(ln);
608 	return 0;
609 error:
610 	if (ln)
611 		csio_shost_exit(ln);
612 
613 	return ret;
614 }
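/*
 * Note (hedged): this callback is invoked by the FC transport when an
 * NPIV vport is requested, typically by writing "<wwpn>:<wwnn>" to the
 * fc_host vport_create sysfs attribute. The transport allocates the
 * fc_vport, including dd_fcvport_size bytes of dd_data, and the driver
 * stores its child lnode pointer there so that csio_vport_delete() and
 * csio_vport_disable() can recover it later.
 */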
615 
616 static int
617 csio_vport_delete(struct fc_vport *fc_vport)
618 {
619 	struct csio_lnode *ln = *(struct csio_lnode **)fc_vport->dd_data;
620 	struct Scsi_Host *shost = csio_ln_to_shost(ln);
621 	struct csio_hw *hw = csio_lnode_to_hw(ln);
622 	int rmv;
623 
624 	spin_lock_irq(&hw->lock);
625 	rmv = csio_is_hw_removing(hw);
626 	spin_unlock_irq(&hw->lock);
627 
628 	if (rmv) {
629 		csio_shost_exit(ln);
630 		return 0;
631 	}
632 
633 	/* Quiesce ios and send remove event to lnode */
634 	/* Quiesce I/Os and send remove event to lnode */
635 	spin_lock_irq(&hw->lock);
636 	csio_scsim_cleanup_io_lnode(csio_hw_to_scsim(hw), ln);
637 	csio_lnode_close(ln);
638 	spin_unlock_irq(&hw->lock);
639 	scsi_unblock_requests(shost);
640 
641 	/* Free vnp */
642 	if (fc_vport->vport_state !=  FC_VPORT_DISABLED)
643 		csio_fcoe_free_vnp(hw, ln);
644 
645 	csio_shost_exit(ln);
646 	return 0;
647 }
648 
649 static int
650 csio_vport_disable(struct fc_vport *fc_vport, bool disable)
651 {
652 	struct csio_lnode *ln = *(struct csio_lnode **)fc_vport->dd_data;
653 	struct Scsi_Host *shost = csio_ln_to_shost(ln);
654 	struct csio_hw *hw = csio_lnode_to_hw(ln);
655 
656 	/* disable vport */
657 	if (disable) {
658 		/* Quiesce I/Os and send stop event to lnode */
659 		scsi_block_requests(shost);
660 		spin_lock_irq(&hw->lock);
661 		csio_scsim_cleanup_io_lnode(csio_hw_to_scsim(hw), ln);
662 		csio_lnode_stop(ln);
663 		spin_unlock_irq(&hw->lock);
664 		scsi_unblock_requests(shost);
665 
666 		/* Free vnp */
667 		csio_fcoe_free_vnp(hw, ln);
668 		fc_vport_set_state(fc_vport, FC_VPORT_DISABLED);
669 		csio_ln_err(ln, "vport disabled\n");
670 		return 0;
671 	} else {
672 		/* enable vport */
673 		fc_vport_set_state(fc_vport, FC_VPORT_INITIALIZING);
674 		if (csio_fcoe_alloc_vnp(hw, ln)) {
675 			csio_ln_err(ln, "vport enable failed.\n");
676 			return -1;
677 		}
678 		csio_ln_err(ln, "vport enabled\n");
679 		return 0;
680 	}
681 }
682 
683 static void
684 csio_dev_loss_tmo_callbk(struct fc_rport *rport)
685 {
686 	struct csio_rnode *rn;
687 	struct csio_hw *hw;
688 	struct csio_lnode *ln;
689 
690 	rn = *((struct csio_rnode **)rport->dd_data);
691 	ln = csio_rnode_to_lnode(rn);
692 	hw = csio_lnode_to_hw(ln);
693 
694 	spin_lock_irq(&hw->lock);
695 
696 	/* return if driver is being removed or same rnode comes back online */
697 	if (csio_is_hw_removing(hw) || csio_is_rnode_ready(rn))
698 		goto out;
699 
700 	csio_ln_dbg(ln, "devloss timeout on rnode:%p portid:x%x flowid:x%x\n",
701 		    rn, rn->nport_id, csio_rn_flowid(rn));
702 
703 	CSIO_INC_STATS(ln, n_dev_loss_tmo);
704 
705 	/*
706 	 * enqueue devloss event to event worker thread to serialize all
707 	 * rnode events.
708 	 */
709 	if (csio_enqueue_evt(hw, CSIO_EVT_DEV_LOSS, &rn, sizeof(rn))) {
710 		CSIO_INC_STATS(hw, n_evt_drop);
711 		goto out;
712 	}
713 
714 	if (!(hw->flags & CSIO_HWF_FWEVT_PENDING)) {
715 		hw->flags |= CSIO_HWF_FWEVT_PENDING;
716 		spin_unlock_irq(&hw->lock);
717 		schedule_work(&hw->evtq_work);
718 		return;
719 	}
720 
721 out:
722 	spin_unlock_irq(&hw->lock);
723 }
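/*
 * Hedged sketch of the producer/consumer contract implied above. The
 * driver's real event worker lives outside this file, so the types and
 * names here (example_hw, example_evt, ...) are illustrative only and
 * assume the usual list/workqueue/slab headers: the producer queues an
 * event and schedules the work item at most once via a "pending" flag;
 * the worker clears the flag and drains the queue under the same lock.
 */
struct example_evt {
	struct list_head list;
	int type;
};

struct example_hw {
	spinlock_t lock;
	struct list_head evtq;
	bool evt_pending;
	struct work_struct evtq_work;
};

static void example_evtq_worker(struct work_struct *work)
{
	struct example_hw *hw = container_of(work, struct example_hw,
					     evtq_work);
	struct example_evt *evt, *tmp;
	LIST_HEAD(drain);

	spin_lock_irq(&hw->lock);
	list_splice_init(&hw->evtq, &drain);	/* take the whole queue */
	hw->evt_pending = false;		/* allow the next schedule_work() */
	spin_unlock_irq(&hw->lock);

	list_for_each_entry_safe(evt, tmp, &drain, list) {
		/* handle evt->type here, e.g. a dev-loss event */
		list_del(&evt->list);
		kfree(evt);
	}
}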
724 
725 /* FC transport functions template - Physical port */
726 struct fc_function_template csio_fc_transport_funcs = {
727 	.show_host_node_name = 1,
728 	.show_host_port_name = 1,
729 	.show_host_supported_classes = 1,
730 	.show_host_supported_fc4s = 1,
731 	.show_host_maxframe_size = 1,
732 
733 	.get_host_port_id = csio_get_host_port_id,
734 	.show_host_port_id = 1,
735 
736 	.get_host_port_type = csio_get_host_port_type,
737 	.show_host_port_type = 1,
738 
739 	.get_host_port_state = csio_get_host_port_state,
740 	.show_host_port_state = 1,
741 
742 	.show_host_active_fc4s = 1,
743 	.get_host_speed = csio_get_host_speed,
744 	.show_host_speed = 1,
745 	.get_host_fabric_name = csio_get_host_fabric_name,
746 	.show_host_fabric_name = 1,
747 
748 	.get_fc_host_stats = csio_get_stats,
749 
750 	.dd_fcrport_size = sizeof(struct csio_rnode *),
751 	.show_rport_maxframe_size = 1,
752 	.show_rport_supported_classes = 1,
753 
754 	.set_rport_dev_loss_tmo = csio_set_rport_loss_tmo,
755 	.show_rport_dev_loss_tmo = 1,
756 
757 	.show_starget_port_id = 1,
758 	.show_starget_node_name = 1,
759 	.show_starget_port_name = 1,
760 
761 	.dev_loss_tmo_callbk = csio_dev_loss_tmo_callbk,
762 	.dd_fcvport_size = sizeof(struct csio_lnode *),
763 
764 	.vport_create = csio_vport_create,
765 	.vport_disable = csio_vport_disable,
766 	.vport_delete = csio_vport_delete,
767 };
768 
769 /* FC transport functions template - Virtual  port */
770 struct fc_function_template csio_fc_transport_vport_funcs = {
771 	.show_host_node_name = 1,
772 	.show_host_port_name = 1,
773 	.show_host_supported_classes = 1,
774 	.show_host_supported_fc4s = 1,
775 	.show_host_maxframe_size = 1,
776 
777 	.get_host_port_id = csio_get_host_port_id,
778 	.show_host_port_id = 1,
779 
780 	.get_host_port_type = csio_get_host_port_type,
781 	.show_host_port_type = 1,
782 
783 	.get_host_port_state = csio_get_host_port_state,
784 	.show_host_port_state = 1,
785 	.show_host_active_fc4s = 1,
786 
787 	.get_host_speed = csio_get_host_speed,
788 	.show_host_speed = 1,
789 
790 	.get_host_fabric_name = csio_get_host_fabric_name,
791 	.show_host_fabric_name = 1,
792 
793 	.get_fc_host_stats = csio_get_stats,
794 
795 	.dd_fcrport_size = sizeof(struct csio_rnode *),
796 	.show_rport_maxframe_size = 1,
797 	.show_rport_supported_classes = 1,
798 
799 	.set_rport_dev_loss_tmo = csio_set_rport_loss_tmo,
800 	.show_rport_dev_loss_tmo = 1,
801 
802 	.show_starget_port_id = 1,
803 	.show_starget_node_name = 1,
804 	.show_starget_port_name = 1,
805 
806 	.dev_loss_tmo_callbk = csio_dev_loss_tmo_callbk,
807 
808 };
809
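/*
 * Hedged sketch of how templates like the two above are typically
 * wired up at module init; the csiostor driver does this outside this
 * file, so the example_* names are illustrative. Each template is
 * handed to fc_attach_transport(), and the returned
 * scsi_transport_template is what gets plugged into the Scsi_Host
 * before scsi_add_host(); fc_release_transport() undoes it on exit.
 */
static struct scsi_transport_template *example_fc_transport;
static struct scsi_transport_template *example_fc_vport_transport;

static int example_attach_fc_transports(void)
{
	example_fc_transport =
		fc_attach_transport(&csio_fc_transport_funcs);
	if (!example_fc_transport)
		return -ENODEV;

	example_fc_vport_transport =
		fc_attach_transport(&csio_fc_transport_vport_funcs);
	if (!example_fc_vport_transport) {
		fc_release_transport(example_fc_transport);
		return -ENODEV;
	}
	return 0;
}

static void example_release_fc_transports(void)
{
	fc_release_transport(example_fc_vport_transport);
	fc_release_transport(example_fc_transport);
}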