xref: /linux/drivers/scsi/fcoe/fcoe.c (revision 273b281fa22c293963ee3e6eec418f5dda2dbc83)
1 /*
2  * Copyright(c) 2007 - 2009 Intel Corporation. All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or modify it
5  * under the terms and conditions of the GNU General Public License,
6  * version 2, as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope it will be useful, but WITHOUT
9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
11  * more details.
12  *
13  * You should have received a copy of the GNU General Public License along with
14  * this program; if not, write to the Free Software Foundation, Inc.,
15  * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16  *
17  * Maintained at www.Open-FCoE.org
18  */
19 
20 #include <linux/module.h>
21 #include <linux/version.h>
22 #include <linux/spinlock.h>
23 #include <linux/netdevice.h>
24 #include <linux/etherdevice.h>
25 #include <linux/ethtool.h>
26 #include <linux/if_ether.h>
27 #include <linux/if_vlan.h>
28 #include <linux/crc32.h>
29 #include <linux/cpu.h>
30 #include <linux/fs.h>
31 #include <linux/sysfs.h>
32 #include <linux/ctype.h>
33 #include <scsi/scsi_tcq.h>
34 #include <scsi/scsicam.h>
35 #include <scsi/scsi_transport.h>
36 #include <scsi/scsi_transport_fc.h>
37 #include <net/rtnetlink.h>
38 
39 #include <scsi/fc/fc_encaps.h>
40 #include <scsi/fc/fc_fip.h>
41 
42 #include <scsi/libfc.h>
43 #include <scsi/fc_frame.h>
44 #include <scsi/libfcoe.h>
45 
46 #include "fcoe.h"
47 
48 MODULE_AUTHOR("Open-FCoE.org");
49 MODULE_DESCRIPTION("FCoE");
50 MODULE_LICENSE("GPL v2");
51 
52 /* Performance tuning parameters for fcoe */
53 static unsigned int fcoe_ddp_min;
54 module_param_named(ddp_min, fcoe_ddp_min, uint, S_IRUGO | S_IWUSR);
55 MODULE_PARM_DESC(ddp_min, "Minimum I/O size in bytes for "	\
56 		 "Direct Data Placement (DDP).");
57 
58 DEFINE_MUTEX(fcoe_config_mutex);
59 
60 /* fcoe_percpu_clean completion.  Waiter protected by fcoe_config_mutex */
61 static DECLARE_COMPLETION(fcoe_flush_completion);
62 
63 /* fcoe host list */
64 /* must only be accessed under the RTNL mutex */
65 LIST_HEAD(fcoe_hostlist);
66 DEFINE_PER_CPU(struct fcoe_percpu_s, fcoe_percpu);
67 
68 /* Function Prototypes */
69 static int fcoe_reset(struct Scsi_Host *);
70 static int fcoe_xmit(struct fc_lport *, struct fc_frame *);
71 static int fcoe_rcv(struct sk_buff *, struct net_device *,
72 		    struct packet_type *, struct net_device *);
73 static int fcoe_percpu_receive_thread(void *);
74 static void fcoe_clean_pending_queue(struct fc_lport *);
75 static void fcoe_percpu_clean(struct fc_lport *);
76 static int fcoe_link_ok(struct fc_lport *);
77 
78 static struct fc_lport *fcoe_hostlist_lookup(const struct net_device *);
79 static int fcoe_hostlist_add(const struct fc_lport *);
80 
81 static void fcoe_check_wait_queue(struct fc_lport *, struct sk_buff *);
82 static int fcoe_device_notification(struct notifier_block *, ulong, void *);
83 static void fcoe_dev_setup(void);
84 static void fcoe_dev_cleanup(void);
85 static struct fcoe_interface
86 *fcoe_hostlist_lookup_port(const struct net_device *);
87 
88 static int fcoe_fip_recv(struct sk_buff *, struct net_device *,
89 			 struct packet_type *, struct net_device *);
90 
91 static void fcoe_fip_send(struct fcoe_ctlr *, struct sk_buff *);
92 static void fcoe_update_src_mac(struct fc_lport *, u8 *);
93 static u8 *fcoe_get_src_mac(struct fc_lport *);
94 static void fcoe_destroy_work(struct work_struct *);
95 
96 static int fcoe_ddp_setup(struct fc_lport *, u16, struct scatterlist *,
97 			  unsigned int);
98 static int fcoe_ddp_done(struct fc_lport *, u16);
99 
100 static int fcoe_cpu_callback(struct notifier_block *, unsigned long, void *);
101 
102 static int fcoe_create(const char *, struct kernel_param *);
103 static int fcoe_destroy(const char *, struct kernel_param *);
104 
105 static struct fc_seq *fcoe_elsct_send(struct fc_lport *,
106 				      u32 did, struct fc_frame *,
107 				      unsigned int op,
108 				      void (*resp)(struct fc_seq *,
109 						   struct fc_frame *,
110 						   void *),
111 				      void *, u32 timeout);
112 static void fcoe_recv_frame(struct sk_buff *skb);
113 
114 static void fcoe_get_lesb(struct fc_lport *, struct fc_els_lesb *);
115 
116 module_param_call(create, fcoe_create, NULL, NULL, S_IWUSR);
117 __MODULE_PARM_TYPE(create, "string");
118 MODULE_PARM_DESC(create, "Create an FCoE instance on the net device passed in.");
119 module_param_call(destroy, fcoe_destroy, NULL, NULL, S_IWUSR);
120 __MODULE_PARM_TYPE(destroy, "string");
121 MODULE_PARM_DESC(destroy, "Destroy an FCoE instance on the net device passed in.");
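
/*
 * Illustrative usage (not part of the driver): the create/destroy module
 * parameters above are normally written from user space, e.g. by the
 * Open-FCoE management tools, to bring an FCoE instance up or down on an
 * Ethernet interface:
 *
 *	echo eth2 > /sys/module/fcoe/parameters/create
 *	echo eth2 > /sys/module/fcoe/parameters/destroy
 *
 * The interface name is resolved to a net_device by fcoe_if_to_netdev()
 * further below.
 */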
122 
123 /* notification function for packets from net device */
124 static struct notifier_block fcoe_notifier = {
125 	.notifier_call = fcoe_device_notification,
126 };
127 
128 /* notification function for CPU hotplug events */
129 static struct notifier_block fcoe_cpu_notifier = {
130 	.notifier_call = fcoe_cpu_callback,
131 };
132 
133 static struct scsi_transport_template *fcoe_transport_template;
134 static struct scsi_transport_template *fcoe_vport_transport_template;
135 
136 static int fcoe_vport_destroy(struct fc_vport *);
137 static int fcoe_vport_create(struct fc_vport *, bool disabled);
138 static int fcoe_vport_disable(struct fc_vport *, bool disable);
139 static void fcoe_set_vport_symbolic_name(struct fc_vport *);
140 
141 static struct libfc_function_template fcoe_libfc_fcn_templ = {
142 	.frame_send = fcoe_xmit,
143 	.ddp_setup = fcoe_ddp_setup,
144 	.ddp_done = fcoe_ddp_done,
145 	.elsct_send = fcoe_elsct_send,
146 	.get_lesb = fcoe_get_lesb,
147 };
148 
149 struct fc_function_template fcoe_transport_function = {
150 	.show_host_node_name = 1,
151 	.show_host_port_name = 1,
152 	.show_host_supported_classes = 1,
153 	.show_host_supported_fc4s = 1,
154 	.show_host_active_fc4s = 1,
155 	.show_host_maxframe_size = 1,
156 
157 	.show_host_port_id = 1,
158 	.show_host_supported_speeds = 1,
159 	.get_host_speed = fc_get_host_speed,
160 	.show_host_speed = 1,
161 	.show_host_port_type = 1,
162 	.get_host_port_state = fc_get_host_port_state,
163 	.show_host_port_state = 1,
164 	.show_host_symbolic_name = 1,
165 
166 	.dd_fcrport_size = sizeof(struct fc_rport_libfc_priv),
167 	.show_rport_maxframe_size = 1,
168 	.show_rport_supported_classes = 1,
169 
170 	.show_host_fabric_name = 1,
171 	.show_starget_node_name = 1,
172 	.show_starget_port_name = 1,
173 	.show_starget_port_id = 1,
174 	.set_rport_dev_loss_tmo = fc_set_rport_loss_tmo,
175 	.show_rport_dev_loss_tmo = 1,
176 	.get_fc_host_stats = fc_get_host_stats,
177 	.issue_fc_host_lip = fcoe_reset,
178 
179 	.terminate_rport_io = fc_rport_terminate_io,
180 
181 	.vport_create = fcoe_vport_create,
182 	.vport_delete = fcoe_vport_destroy,
183 	.vport_disable = fcoe_vport_disable,
184 	.set_vport_symbolic_name = fcoe_set_vport_symbolic_name,
185 
186 	.bsg_request = fc_lport_bsg_request,
187 };
188 
189 struct fc_function_template fcoe_vport_transport_function = {
190 	.show_host_node_name = 1,
191 	.show_host_port_name = 1,
192 	.show_host_supported_classes = 1,
193 	.show_host_supported_fc4s = 1,
194 	.show_host_active_fc4s = 1,
195 	.show_host_maxframe_size = 1,
196 
197 	.show_host_port_id = 1,
198 	.show_host_supported_speeds = 1,
199 	.get_host_speed = fc_get_host_speed,
200 	.show_host_speed = 1,
201 	.show_host_port_type = 1,
202 	.get_host_port_state = fc_get_host_port_state,
203 	.show_host_port_state = 1,
204 	.show_host_symbolic_name = 1,
205 
206 	.dd_fcrport_size = sizeof(struct fc_rport_libfc_priv),
207 	.show_rport_maxframe_size = 1,
208 	.show_rport_supported_classes = 1,
209 
210 	.show_host_fabric_name = 1,
211 	.show_starget_node_name = 1,
212 	.show_starget_port_name = 1,
213 	.show_starget_port_id = 1,
214 	.set_rport_dev_loss_tmo = fc_set_rport_loss_tmo,
215 	.show_rport_dev_loss_tmo = 1,
216 	.get_fc_host_stats = fc_get_host_stats,
217 	.issue_fc_host_lip = fcoe_reset,
218 
219 	.terminate_rport_io = fc_rport_terminate_io,
220 
221 	.bsg_request = fc_lport_bsg_request,
222 };
223 
224 static struct scsi_host_template fcoe_shost_template = {
225 	.module = THIS_MODULE,
226 	.name = "FCoE Driver",
227 	.proc_name = FCOE_NAME,
228 	.queuecommand = fc_queuecommand,
229 	.eh_abort_handler = fc_eh_abort,
230 	.eh_device_reset_handler = fc_eh_device_reset,
231 	.eh_host_reset_handler = fc_eh_host_reset,
232 	.slave_alloc = fc_slave_alloc,
233 	.change_queue_depth = fc_change_queue_depth,
234 	.change_queue_type = fc_change_queue_type,
235 	.this_id = -1,
236 	.cmd_per_lun = 3,
237 	.can_queue = FCOE_MAX_OUTSTANDING_COMMANDS,
238 	.use_clustering = ENABLE_CLUSTERING,
239 	.sg_tablesize = SG_ALL,
240 	.max_sectors = 0xffff,
241 };
242 
243 /**
244  * fcoe_interface_setup() - Setup a FCoE interface
245  * @fcoe:   The new FCoE interface
246  * @netdev: The net device that the fcoe interface is on
247  *
248  * Returns: 0 for success
249  * Locking: must be called with the RTNL mutex held
250  */
251 static int fcoe_interface_setup(struct fcoe_interface *fcoe,
252 				struct net_device *netdev)
253 {
254 	struct fcoe_ctlr *fip = &fcoe->ctlr;
255 	struct netdev_hw_addr *ha;
256 	struct net_device *real_dev;
257 	u8 flogi_maddr[ETH_ALEN];
258 	const struct net_device_ops *ops;
259 
260 	fcoe->netdev = netdev;
261 
262 	/* Let LLD initialize for FCoE */
263 	ops = netdev->netdev_ops;
264 	if (ops->ndo_fcoe_enable) {
265 		if (ops->ndo_fcoe_enable(netdev))
266 			FCOE_NETDEV_DBG(netdev, "Failed to enable FCoE"
267 					" specific feature for LLD.\n");
268 	}
269 
270 	/* Bonding devices are not supported */
271 	if ((netdev->priv_flags & IFF_MASTER_ALB) ||
272 	    (netdev->priv_flags & IFF_SLAVE_INACTIVE) ||
273 	    (netdev->priv_flags & IFF_MASTER_8023AD)) {
274 		FCOE_NETDEV_DBG(netdev, "Bonded interfaces not supported\n");
275 		return -EOPNOTSUPP;
276 	}
277 
278 	/* Look for a SAN MAC address; if multiple SAN MACs exist, only
279 	 * use the first one for SPMA */
280 	real_dev = (netdev->priv_flags & IFF_802_1Q_VLAN) ?
281 		vlan_dev_real_dev(netdev) : netdev;
282 	rcu_read_lock();
283 	for_each_dev_addr(real_dev, ha) {
284 		if ((ha->type == NETDEV_HW_ADDR_T_SAN) &&
285 		    (is_valid_ether_addr(ha->addr))) {
286 			memcpy(fip->ctl_src_addr, ha->addr, ETH_ALEN);
287 			fip->spma = 1;
288 			break;
289 		}
290 	}
291 	rcu_read_unlock();
292 
293 	/* Setup the source MAC address */
294 	if (!fip->spma)
295 		memcpy(fip->ctl_src_addr, netdev->dev_addr, netdev->addr_len);
296 
297 	/*
298 	 * Add FCoE MAC address as second unicast MAC address
299 	 * or enter promiscuous mode if not capable of listening
300 	 * for multiple unicast MACs.
301 	 */
302 	memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN);
303 	dev_unicast_add(netdev, flogi_maddr);
304 	if (fip->spma)
305 		dev_unicast_add(netdev, fip->ctl_src_addr);
306 	dev_mc_add(netdev, FIP_ALL_ENODE_MACS, ETH_ALEN, 0);
307 
308 	/*
309 	 * setup the receive function from ethernet driver
310 	 * on the ethertype for the given device
311 	 */
312 	fcoe->fcoe_packet_type.func = fcoe_rcv;
313 	fcoe->fcoe_packet_type.type = __constant_htons(ETH_P_FCOE);
314 	fcoe->fcoe_packet_type.dev = netdev;
315 	dev_add_pack(&fcoe->fcoe_packet_type);
316 
317 	fcoe->fip_packet_type.func = fcoe_fip_recv;
318 	fcoe->fip_packet_type.type = htons(ETH_P_FIP);
319 	fcoe->fip_packet_type.dev = netdev;
320 	dev_add_pack(&fcoe->fip_packet_type);
321 
322 	return 0;
323 }
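
/*
 * For reference (values as defined in <scsi/fc/fc_fcoe.h> and
 * <scsi/fc/fc_fip.h>): the FLOGI MAC added above is the well-known FCoE
 * address 0E:FC:00:00:00:00, and FIP_ALL_ENODE_MACS is the All-ENode-MACs
 * multicast group 01:10:18:01:00:01 on which FCF advertisements arrive.
 * With SPMA the adapter-supplied SAN MAC (ctl_src_addr) is additionally
 * programmed as a unicast filter so frames addressed to it are accepted.
 */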
324 
325 /**
326  * fcoe_interface_create() - Create a FCoE interface on a net device
327  * @netdev: The net device to create the FCoE interface on
328  *
329  * Returns: pointer to a struct fcoe_interface or NULL on error
330  */
331 static struct fcoe_interface *fcoe_interface_create(struct net_device *netdev)
332 {
333 	struct fcoe_interface *fcoe;
334 	int err;
335 
336 	fcoe = kzalloc(sizeof(*fcoe), GFP_KERNEL);
337 	if (!fcoe) {
338 		FCOE_NETDEV_DBG(netdev, "Could not allocate fcoe structure\n");
339 		return NULL;
340 	}
341 
342 	dev_hold(netdev);
343 	kref_init(&fcoe->kref);
344 
345 	/*
346 	 * Initialize FIP.
347 	 */
348 	fcoe_ctlr_init(&fcoe->ctlr);
349 	fcoe->ctlr.send = fcoe_fip_send;
350 	fcoe->ctlr.update_mac = fcoe_update_src_mac;
351 	fcoe->ctlr.get_src_addr = fcoe_get_src_mac;
352 
353 	err = fcoe_interface_setup(fcoe, netdev);
354 	if (err) {
355 		fcoe_ctlr_destroy(&fcoe->ctlr);
356 		kfree(fcoe);
357 		dev_put(netdev);
358 		return NULL;
359 	}
360 
361 	return fcoe;
362 }
363 
364 /**
365  * fcoe_interface_cleanup() - Clean up a FCoE interface
366  * @fcoe: The FCoE interface to be cleaned up
367  *
368  * Caller must be holding the RTNL mutex
369  */
370 void fcoe_interface_cleanup(struct fcoe_interface *fcoe)
371 {
372 	struct net_device *netdev = fcoe->netdev;
373 	struct fcoe_ctlr *fip = &fcoe->ctlr;
374 	u8 flogi_maddr[ETH_ALEN];
375 	const struct net_device_ops *ops;
376 
377 	/*
378 	 * Don't listen for Ethernet packets anymore.
379 	 * synchronize_net() ensures that the packet handlers are not running
380 	 * on another CPU. dev_remove_pack() would do that too, but this calls
381 	 * the unsynchronized __dev_remove_pack() to avoid multiple delays.
382 	 */
383 	__dev_remove_pack(&fcoe->fcoe_packet_type);
384 	__dev_remove_pack(&fcoe->fip_packet_type);
385 	synchronize_net();
386 
387 	/* Delete secondary MAC addresses */
388 	memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN);
389 	dev_unicast_delete(netdev, flogi_maddr);
390 	if (fip->spma)
391 		dev_unicast_delete(netdev, fip->ctl_src_addr);
392 	dev_mc_delete(netdev, FIP_ALL_ENODE_MACS, ETH_ALEN, 0);
393 
394 	/* Tell the LLD we are done w/ FCoE */
395 	ops = netdev->netdev_ops;
396 	if (ops->ndo_fcoe_disable) {
397 		if (ops->ndo_fcoe_disable(netdev))
398 			FCOE_NETDEV_DBG(netdev, "Failed to disable FCoE"
399 					" specific feature for LLD.\n");
400 	}
401 }
402 
403 /**
404  * fcoe_interface_release() - fcoe_interface kref release function
405  * @kref: Embedded reference count in an fcoe_interface struct
406  */
407 static void fcoe_interface_release(struct kref *kref)
408 {
409 	struct fcoe_interface *fcoe;
410 	struct net_device *netdev;
411 
412 	fcoe = container_of(kref, struct fcoe_interface, kref);
413 	netdev = fcoe->netdev;
414 	/* tear-down the FCoE controller */
415 	fcoe_ctlr_destroy(&fcoe->ctlr);
416 	kfree(fcoe);
417 	dev_put(netdev);
418 }
419 
420 /**
421  * fcoe_interface_get() - Get a reference to a FCoE interface
422  * @fcoe: The FCoE interface to be held
423  */
424 static inline void fcoe_interface_get(struct fcoe_interface *fcoe)
425 {
426 	kref_get(&fcoe->kref);
427 }
428 
429 /**
430  * fcoe_interface_put() - Put a reference to a FCoE interface
431  * @fcoe: The FCoE interface to be released
432  */
433 static inline void fcoe_interface_put(struct fcoe_interface *fcoe)
434 {
435 	kref_put(&fcoe->kref, fcoe_interface_release);
436 }
437 
438 /**
439  * fcoe_fip_recv() - Handler for received FIP frames
440  * @skb:      The receive skb
441  * @netdev:   The associated net device
442  * @ptype:    The packet_type structure which was used to register this handler
443  * @orig_dev: The original net_device that the skb was received on.
444  *	      (in case dev is a bond)
445  *
446  * Returns: 0 for success
447  */
448 static int fcoe_fip_recv(struct sk_buff *skb, struct net_device *netdev,
449 			 struct packet_type *ptype,
450 			 struct net_device *orig_dev)
451 {
452 	struct fcoe_interface *fcoe;
453 
454 	fcoe = container_of(ptype, struct fcoe_interface, fip_packet_type);
455 	fcoe_ctlr_recv(&fcoe->ctlr, skb);
456 	return 0;
457 }
458 
459 /**
460  * fcoe_fip_send() - Send an Ethernet-encapsulated FIP frame
461  * @fip: The FCoE controller
462  * @skb: The FIP packet to be sent
463  */
464 static void fcoe_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
465 {
466 	skb->dev = fcoe_from_ctlr(fip)->netdev;
467 	dev_queue_xmit(skb);
468 }
469 
470 /**
471  * fcoe_update_src_mac() - Update the Ethernet MAC filters
472  * @lport: The local port to update the source MAC on
473  * @addr:  Unicast MAC address to add
474  *
475  * Remove any previously-set unicast MAC filter.
476  * Add secondary FCoE MAC address filter for our OUI.
477  */
478 static void fcoe_update_src_mac(struct fc_lport *lport, u8 *addr)
479 {
480 	struct fcoe_port *port = lport_priv(lport);
481 	struct fcoe_interface *fcoe = port->fcoe;
482 
483 	rtnl_lock();
484 	if (!is_zero_ether_addr(port->data_src_addr))
485 		dev_unicast_delete(fcoe->netdev, port->data_src_addr);
486 	if (!is_zero_ether_addr(addr))
487 		dev_unicast_add(fcoe->netdev, addr);
488 	memcpy(port->data_src_addr, addr, ETH_ALEN);
489 	rtnl_unlock();
490 }
491 
492 /**
493  * fcoe_get_src_mac() - return the Ethernet source address for an lport
494  * @lport: libfc lport
495  */
496 static u8 *fcoe_get_src_mac(struct fc_lport *lport)
497 {
498 	struct fcoe_port *port = lport_priv(lport);
499 
500 	return port->data_src_addr;
501 }
502 
503 /**
504  * fcoe_lport_config() - Set up a local port
505  * @lport: The local port to be setup
506  *
507  * Returns: 0 for success
508  */
509 static int fcoe_lport_config(struct fc_lport *lport)
510 {
511 	lport->link_up = 0;
512 	lport->qfull = 0;
513 	lport->max_retry_count = 3;
514 	lport->max_rport_retry_count = 3;
515 	lport->e_d_tov = 2 * 1000;	/* FC-FS default */
516 	lport->r_a_tov = 2 * 2 * 1000;
517 	lport->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
518 				 FCP_SPPF_RETRY | FCP_SPPF_CONF_COMPL);
519 	lport->does_npiv = 1;
520 
521 	fc_lport_init_stats(lport);
522 
523 	/* lport fc_lport related configuration */
524 	fc_lport_config(lport);
525 
526 	/* offload related configuration */
527 	lport->crc_offload = 0;
528 	lport->seq_offload = 0;
529 	lport->lro_enabled = 0;
530 	lport->lro_xid = 0;
531 	lport->lso_max = 0;
532 
533 	return 0;
534 }
535 
536 /**
537  * fcoe_queue_timer() - The fcoe queue timer
538  * @lport: The local port
539  *
540  * Calls fcoe_check_wait_queue on timeout
541  */
542 static void fcoe_queue_timer(ulong lport)
543 {
544 	fcoe_check_wait_queue((struct fc_lport *)lport, NULL);
545 }
546 
547 /**
548  * fcoe_netdev_config() - Set up a net device for SW FCoE
549  * @lport:  The local port that is associated with the net device
550  * @netdev: The associated net device
551  *
552  * Must be called after fcoe_lport_config() as it will use the local port mutex
553  *
554  * Returns: 0 for success
555  */
556 static int fcoe_netdev_config(struct fc_lport *lport, struct net_device *netdev)
557 {
558 	u32 mfs;
559 	u64 wwnn, wwpn;
560 	struct fcoe_interface *fcoe;
561 	struct fcoe_port *port;
562 	int vid = 0;
563 
564 	/* Setup lport private data to point to fcoe softc */
565 	port = lport_priv(lport);
566 	fcoe = port->fcoe;
567 
568 	/*
569 	 * Determine max frame size based on underlying device and optional
570 	 * user-configured limit.  If the MFS is too low, fcoe_link_ok()
571 	 * will return 0, so do this first.
572 	 */
573 	mfs = netdev->mtu;
574 	if (netdev->features & NETIF_F_FCOE_MTU) {
575 		mfs = FCOE_MTU;
576 		FCOE_NETDEV_DBG(netdev, "Supports FCOE_MTU of %d bytes\n", mfs);
577 	}
578 	mfs -= (sizeof(struct fcoe_hdr) + sizeof(struct fcoe_crc_eof));
579 	if (fc_set_mfs(lport, mfs))
580 		return -EINVAL;
581 
582 	/* offload features support */
583 	if (netdev->features & NETIF_F_SG)
584 		lport->sg_supp = 1;
585 
586 	if (netdev->features & NETIF_F_FCOE_CRC) {
587 		lport->crc_offload = 1;
588 		FCOE_NETDEV_DBG(netdev, "Supports FCCRC offload\n");
589 	}
590 	if (netdev->features & NETIF_F_FSO) {
591 		lport->seq_offload = 1;
592 		lport->lso_max = netdev->gso_max_size;
593 		FCOE_NETDEV_DBG(netdev, "Supports LSO for max len 0x%x\n",
594 				lport->lso_max);
595 	}
596 	if (netdev->fcoe_ddp_xid) {
597 		lport->lro_enabled = 1;
598 		lport->lro_xid = netdev->fcoe_ddp_xid;
599 		FCOE_NETDEV_DBG(netdev, "Supports LRO for max xid 0x%x\n",
600 				lport->lro_xid);
601 	}
602 	skb_queue_head_init(&port->fcoe_pending_queue);
603 	port->fcoe_pending_queue_active = 0;
604 	setup_timer(&port->timer, fcoe_queue_timer, (unsigned long)lport);
605 
606 	if (!lport->vport) {
607 		/*
608 		 * Use NAA 1&2 (FC-FS Rev. 2.0, Sec. 15) to generate WWNN/WWPN:
609 		 * For WWNN, we use NAA 1 w/ bit 27-16 of word 0 as 0.
610 		 * For WWPN, we use NAA 2 w/ bit 27-16 of word 0 from VLAN ID
611 		 */
612 		if (netdev->priv_flags & IFF_802_1Q_VLAN)
613 			vid = vlan_dev_vlan_id(netdev);
614 		wwnn = fcoe_wwn_from_mac(fcoe->ctlr.ctl_src_addr, 1, 0);
615 		fc_set_wwnn(lport, wwnn);
616 		wwpn = fcoe_wwn_from_mac(fcoe->ctlr.ctl_src_addr, 2, vid);
617 		fc_set_wwpn(lport, wwpn);
618 	}
619 
620 	return 0;
621 }
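
/*
 * Worked example (illustrative only; assumes libfcoe's fcoe_wwn_from_mac()
 * puts the NAA scheme in bits 63-60 and the 12-bit port/VLAN value in bits
 * 59-48 above the 48-bit MAC): with a hypothetical ctl_src_addr of
 * 00:1b:21:aa:bb:cc on VLAN 101 (0x065) the generated names would be
 *
 *	WWNN = 0x1000001b21aabbcc	(NAA 1, port field 0)
 *	WWPN = 0x2065001b21aabbcc	(NAA 2, port field = VLAN ID)
 *
 * NPIV vports skip this block; their WWNN/WWPN are set in fcoe_if_create()
 * from the values supplied through the fc_vport interface.
 */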
622 
623 /**
624  * fcoe_shost_config() - Set up the SCSI host associated with a local port
625  * @lport: The local port
626  * @shost: The SCSI host to associate with the local port
627  * @dev:   The device associated with the SCSI host
628  *
629  * Must be called after fcoe_lport_config() and fcoe_netdev_config()
630  *
631  * Returns: 0 for success
632  */
633 static int fcoe_shost_config(struct fc_lport *lport, struct Scsi_Host *shost,
634 			     struct device *dev)
635 {
636 	int rc = 0;
637 
638 	/* lport scsi host config */
639 	lport->host->max_lun = FCOE_MAX_LUN;
640 	lport->host->max_id = FCOE_MAX_FCP_TARGET;
641 	lport->host->max_channel = 0;
642 	if (lport->vport)
643 		lport->host->transportt = fcoe_vport_transport_template;
644 	else
645 		lport->host->transportt = fcoe_transport_template;
646 
647 	/* add the new host to the SCSI-ml */
648 	rc = scsi_add_host(lport->host, dev);
649 	if (rc) {
650 		FCOE_NETDEV_DBG(fcoe_netdev(lport), "fcoe_shost_config: "
651 				"error on scsi_add_host\n");
652 		return rc;
653 	}
654 
655 	if (!lport->vport)
656 		fc_host_max_npiv_vports(lport->host) = USHORT_MAX;
657 
658 	snprintf(fc_host_symbolic_name(lport->host), FC_SYMBOLIC_NAME_SIZE,
659 		 "%s v%s over %s", FCOE_NAME, FCOE_VERSION,
660 		 fcoe_netdev(lport)->name);
661 
662 	return 0;
663 }
664 
665 /**
666  * fcoe_oem_match() - The match routine for the offloaded exchange manager
667  * @fp: The I/O frame
668  *
669  * This routine will be associated with an exchange manager (EM). When
670  * the libfc exchange handling code is looking for an EM to use it will
671  * call this routine and pass it the frame that it wishes to send. This
672  * routine will return true if the associated EM is to be used and false
673  * if the exchange code should continue looking for an EM.
674  *
675  * The offload EM that this routine is associated with will handle any
676  * packets that are for SCSI read requests.
677  *
678  * Returns: true for read I/O, otherwise false.
679  */
680 bool fcoe_oem_match(struct fc_frame *fp)
681 {
682 	return fc_fcp_is_read(fr_fsp(fp)) &&
683 		(fr_fsp(fp)->data_len > fcoe_ddp_min);
684 }
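
/*
 * Example (illustrative): with the default ddp_min of 0, every SCSI read
 * with a non-zero data length matches and is allocated from the offload
 * EM, so its exchange ID falls in the DDP-capable range. Writing, say,
 * 4096 to /sys/module/fcoe/parameters/ddp_min restricts Direct Data
 * Placement to reads larger than 4 KiB; smaller reads then use the
 * regular exchange manager set up in fcoe_em_config() below.
 */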
685 
686 /**
687  * fcoe_em_config() - Allocate and configure an exchange manager
688  * @lport: The local port that the new EM will be associated with
689  *
690  * Returns: 0 on success
691  */
692 static inline int fcoe_em_config(struct fc_lport *lport)
693 {
694 	struct fcoe_port *port = lport_priv(lport);
695 	struct fcoe_interface *fcoe = port->fcoe;
696 	struct fcoe_interface *oldfcoe = NULL;
697 	struct net_device *old_real_dev, *cur_real_dev;
698 	u16 min_xid = FCOE_MIN_XID;
699 	u16 max_xid = FCOE_MAX_XID;
700 
701 	/*
702 	 * Check if we need to allocate an EM instance for
703 	 * offload exchange IDs to be shared across all VN_PORTs/lport.
704 	 */
705 	if (!lport->lro_enabled || !lport->lro_xid ||
706 	    (lport->lro_xid >= max_xid)) {
707 		lport->lro_xid = 0;
708 		goto skip_oem;
709 	}
710 
711 	/*
712 	 * Reuse existing offload em instance in case
713 	 * it is already allocated on real eth device
714 	 */
715 	if (fcoe->netdev->priv_flags & IFF_802_1Q_VLAN)
716 		cur_real_dev = vlan_dev_real_dev(fcoe->netdev);
717 	else
718 		cur_real_dev = fcoe->netdev;
719 
720 	list_for_each_entry(oldfcoe, &fcoe_hostlist, list) {
721 		if (oldfcoe->netdev->priv_flags & IFF_802_1Q_VLAN)
722 			old_real_dev = vlan_dev_real_dev(oldfcoe->netdev);
723 		else
724 			old_real_dev = oldfcoe->netdev;
725 
726 		if (cur_real_dev == old_real_dev) {
727 			fcoe->oem = oldfcoe->oem;
728 			break;
729 		}
730 	}
731 
732 	if (fcoe->oem) {
733 		if (!fc_exch_mgr_add(lport, fcoe->oem, fcoe_oem_match)) {
734 			printk(KERN_ERR "fcoe_em_config: failed to add "
735 			       "offload em:%p on interface:%s\n",
736 			       fcoe->oem, fcoe->netdev->name);
737 			return -ENOMEM;
738 		}
739 	} else {
740 		fcoe->oem = fc_exch_mgr_alloc(lport, FC_CLASS_3,
741 					      FCOE_MIN_XID, lport->lro_xid,
742 					      fcoe_oem_match);
743 		if (!fcoe->oem) {
744 			printk(KERN_ERR "fcoe_em_config: failed to allocate "
745 			       "em for offload exches on interface:%s\n",
746 			       fcoe->netdev->name);
747 			return -ENOMEM;
748 		}
749 	}
750 
751 	/*
752 	 * Exclude offload EM xid range from next EM xid range.
753 	 */
754 	min_xid += lport->lro_xid + 1;
755 
756 skip_oem:
757 	if (!fc_exch_mgr_alloc(lport, FC_CLASS_3, min_xid, max_xid, NULL)) {
758 		printk(KERN_ERR "fcoe_em_config: failed to "
759 		       "allocate em on interface %s\n", fcoe->netdev->name);
760 		return -ENOMEM;
761 	}
762 
763 	return 0;
764 }
765 
766 /**
767  * fcoe_if_destroy() - Tear down a SW FCoE instance
768  * @lport: The local port to be destroyed
769  */
770 static void fcoe_if_destroy(struct fc_lport *lport)
771 {
772 	struct fcoe_port *port = lport_priv(lport);
773 	struct fcoe_interface *fcoe = port->fcoe;
774 	struct net_device *netdev = fcoe->netdev;
775 
776 	FCOE_NETDEV_DBG(netdev, "Destroying interface\n");
777 
778 	/* Logout of the fabric */
779 	fc_fabric_logoff(lport);
780 
781 	/* Cleanup the fc_lport */
782 	fc_lport_destroy(lport);
783 	fc_fcp_destroy(lport);
784 
785 	/* Stop the transmit retry timer */
786 	del_timer_sync(&port->timer);
787 
788 	/* Free existing transmit skbs */
789 	fcoe_clean_pending_queue(lport);
790 
791 	rtnl_lock();
792 	if (!is_zero_ether_addr(port->data_src_addr))
793 		dev_unicast_delete(netdev, port->data_src_addr);
794 	rtnl_unlock();
795 
796 	/* receives may not be stopped until after this */
797 	fcoe_interface_put(fcoe);
798 
799 	/* Free queued packets for the per-CPU receive threads */
800 	fcoe_percpu_clean(lport);
801 
802 	/* Detach from the scsi-ml */
803 	fc_remove_host(lport->host);
804 	scsi_remove_host(lport->host);
805 
806 	/* There are no more rports or I/O, free the EM */
807 	fc_exch_mgr_free(lport);
808 
809 	/* Free memory used by statistical counters */
810 	fc_lport_free_stats(lport);
811 
812 	/* Release the Scsi_Host */
813 	scsi_host_put(lport->host);
814 }
815 
816 /**
817  * fcoe_ddp_setup() - Call a LLD's ddp_setup through the net device
818  * @lport: The local port to setup DDP for
819  * @xid:   The exchange ID for this DDP transfer
820  * @sgl:   The scatterlist describing this transfer
821  * @sgc:   The number of sg items
822  *
823  * Returns: 0 if the DDP context was not configured
824  */
825 static int fcoe_ddp_setup(struct fc_lport *lport, u16 xid,
826 			  struct scatterlist *sgl, unsigned int sgc)
827 {
828 	struct net_device *netdev = fcoe_netdev(lport);
829 
830 	if (netdev->netdev_ops->ndo_fcoe_ddp_setup)
831 		return netdev->netdev_ops->ndo_fcoe_ddp_setup(netdev,
832 							      xid, sgl,
833 							      sgc);
834 
835 	return 0;
836 }
837 
838 /**
839  * fcoe_ddp_done() - Call a LLD's ddp_done through the net device
840  * @lport: The local port to complete DDP on
841  * @xid:   The exchange ID for this DDP transfer
842  *
843  * Returns: the length of data that have been completed by DDP
844  */
845 static int fcoe_ddp_done(struct fc_lport *lport, u16 xid)
846 {
847 	struct net_device *netdev = fcoe_netdev(lport);
848 
849 	if (netdev->netdev_ops->ndo_fcoe_ddp_done)
850 		return netdev->netdev_ops->ndo_fcoe_ddp_done(netdev, xid);
851 	return 0;
852 }
853 
854 /**
855  * fcoe_if_create() - Create a FCoE instance on an interface
856  * @fcoe:   The FCoE interface to create a local port on
857  * @parent: The device pointer to be the parent in sysfs for the SCSI host
858  * @npiv:   Indicates if the port is a vport or not
859  *
860  * Creates an fc_lport instance and a Scsi_Host instance and configures them.
861  *
862  * Returns: The allocated fc_lport or an error pointer
863  */
864 static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe,
865 				       struct device *parent, int npiv)
866 {
867 	struct net_device *netdev = fcoe->netdev;
868 	struct fc_lport *lport = NULL;
869 	struct fcoe_port *port;
870 	struct Scsi_Host *shost;
871 	int rc;
872 	/*
873 	 * parent is only a vport if npiv is 1,
874 	 * but we'll only use vport in that case so go ahead and set it
875 	 */
876 	struct fc_vport *vport = dev_to_vport(parent);
877 
878 	FCOE_NETDEV_DBG(netdev, "Create Interface\n");
879 
880 	if (!npiv) {
881 		lport = libfc_host_alloc(&fcoe_shost_template,
882 					 sizeof(struct fcoe_port));
883 	} else	{
884 		lport = libfc_vport_create(vport,
885 					   sizeof(struct fcoe_port));
886 	}
887 	if (!lport) {
888 		FCOE_NETDEV_DBG(netdev, "Could not allocate host structure\n");
889 		rc = -ENOMEM;
890 		goto out;
891 	}
892 	shost = lport->host;
893 	port = lport_priv(lport);
894 	port->lport = lport;
895 	port->fcoe = fcoe;
896 	INIT_WORK(&port->destroy_work, fcoe_destroy_work);
897 
898 	/* configure a fc_lport including the exchange manager */
899 	rc = fcoe_lport_config(lport);
900 	if (rc) {
901 		FCOE_NETDEV_DBG(netdev, "Could not configure lport for the "
902 				"interface\n");
903 		goto out_host_put;
904 	}
905 
906 	if (npiv) {
907 		FCOE_NETDEV_DBG(netdev, "Setting vport names, 0x%llX 0x%llX\n",
908 				vport->node_name, vport->port_name);
909 		fc_set_wwnn(lport, vport->node_name);
910 		fc_set_wwpn(lport, vport->port_name);
911 	}
912 
913 	/* configure lport network properties */
914 	rc = fcoe_netdev_config(lport, netdev);
915 	if (rc) {
916 		FCOE_NETDEV_DBG(netdev, "Could not configure netdev for the "
917 				"interface\n");
918 		goto out_lp_destroy;
919 	}
920 
921 	/* configure lport scsi host properties */
922 	rc = fcoe_shost_config(lport, shost, parent);
923 	if (rc) {
924 		FCOE_NETDEV_DBG(netdev, "Could not configure shost for the "
925 				"interface\n");
926 		goto out_lp_destroy;
927 	}
928 
929 	/* Initialize the library */
930 	rc = fcoe_libfc_config(lport, &fcoe_libfc_fcn_templ);
931 	if (rc) {
932 		FCOE_NETDEV_DBG(netdev, "Could not configure libfc for the "
933 				"interface\n");
934 		goto out_lp_destroy;
935 	}
936 
937 	if (!npiv) {
938 		/*
939 		 * fcoe_em_alloc() and fcoe_hostlist_add() both
940 		 * need to be atomic with respect to other changes to the
941 		 * hostlist since fcoe_em_alloc() looks for an existing EM
942 		 * instance on host list updated by fcoe_hostlist_add().
943 		 *
944 		 * This is currently handled through the fcoe_config_mutex
945 		 * being held.
946 		 */
947 
948 		/* lport exch manager allocation */
949 		rc = fcoe_em_config(lport);
950 		if (rc) {
951 			FCOE_NETDEV_DBG(netdev, "Could not configure the EM "
952 					"for the interface\n");
953 			goto out_lp_destroy;
954 		}
955 	}
956 
957 	fcoe_interface_get(fcoe);
958 	return lport;
959 
960 out_lp_destroy:
961 	fc_exch_mgr_free(lport);
962 out_host_put:
963 	scsi_host_put(lport->host);
964 out:
965 	return ERR_PTR(rc);
966 }
967 
968 /**
969  * fcoe_if_init() - Initialization routine for fcoe.ko
970  *
971  * Attaches the SW FCoE transport to the FC transport
972  *
973  * Returns: 0 on success
974  */
975 static int __init fcoe_if_init(void)
976 {
977 	/* attach to scsi transport */
978 	fcoe_transport_template = fc_attach_transport(&fcoe_transport_function);
979 	fcoe_vport_transport_template =
980 		fc_attach_transport(&fcoe_vport_transport_function);
981 
982 	if (!fcoe_transport_template) {
983 		printk(KERN_ERR "fcoe: Failed to attach to the FC transport\n");
984 		return -ENODEV;
985 	}
986 
987 	return 0;
988 }
989 
990 /**
991  * fcoe_if_exit() - Tear down fcoe.ko
992  *
993  * Detaches the SW FCoE transport from the FC transport
994  *
995  * Returns: 0 on success
996  */
997 int __exit fcoe_if_exit(void)
998 {
999 	fc_release_transport(fcoe_transport_template);
1000 	fc_release_transport(fcoe_vport_transport_template);
1001 	fcoe_transport_template = NULL;
1002 	fcoe_vport_transport_template = NULL;
1003 	return 0;
1004 }
1005 
1006 /**
1007  * fcoe_percpu_thread_create() - Create a receive thread for an online CPU
1008  * @cpu: The CPU index of the CPU to create a receive thread for
1009  */
1010 static void fcoe_percpu_thread_create(unsigned int cpu)
1011 {
1012 	struct fcoe_percpu_s *p;
1013 	struct task_struct *thread;
1014 
1015 	p = &per_cpu(fcoe_percpu, cpu);
1016 
1017 	thread = kthread_create(fcoe_percpu_receive_thread,
1018 				(void *)p, "fcoethread/%d", cpu);
1019 
1020 	if (likely(!IS_ERR(thread))) {
1021 		kthread_bind(thread, cpu);
1022 		wake_up_process(thread);
1023 
1024 		spin_lock_bh(&p->fcoe_rx_list.lock);
1025 		p->thread = thread;
1026 		spin_unlock_bh(&p->fcoe_rx_list.lock);
1027 	}
1028 }
1029 
1030 /**
1031  * fcoe_percpu_thread_destroy() - Remove the receive thread of a CPU
1032  * @cpu: The CPU index of the CPU whose receive thread is to be destroyed
1033  *
1034  * Destroys a per-CPU Rx thread. Any pending skbs are moved to the
1035  * current CPU's Rx thread. If the thread being destroyed is bound to
1036  * the CPU processing this context, the skbs will be freed.
1037  */
1038 static void fcoe_percpu_thread_destroy(unsigned int cpu)
1039 {
1040 	struct fcoe_percpu_s *p;
1041 	struct task_struct *thread;
1042 	struct page *crc_eof;
1043 	struct sk_buff *skb;
1044 #ifdef CONFIG_SMP
1045 	struct fcoe_percpu_s *p0;
1046 	unsigned targ_cpu = smp_processor_id();
1047 #endif /* CONFIG_SMP */
1048 
1049 	FCOE_DBG("Destroying receive thread for CPU %d\n", cpu);
1050 
1051 	/* Prevent any new skbs from being queued for this CPU. */
1052 	p = &per_cpu(fcoe_percpu, cpu);
1053 	spin_lock_bh(&p->fcoe_rx_list.lock);
1054 	thread = p->thread;
1055 	p->thread = NULL;
1056 	crc_eof = p->crc_eof_page;
1057 	p->crc_eof_page = NULL;
1058 	p->crc_eof_offset = 0;
1059 	spin_unlock_bh(&p->fcoe_rx_list.lock);
1060 
1061 #ifdef CONFIG_SMP
1062 	/*
1063 	 * Don't bother moving the skb's if this context is running
1064 	 * on the same CPU that is having its thread destroyed. This
1065 	 * can easily happen when the module is removed.
1066 	 */
1067 	if (cpu != targ_cpu) {
1068 		p0 = &per_cpu(fcoe_percpu, targ_cpu);
1069 		spin_lock_bh(&p0->fcoe_rx_list.lock);
1070 		if (p0->thread) {
1071 			FCOE_DBG("Moving frames from CPU %d to CPU %d\n",
1072 				 cpu, targ_cpu);
1073 
1074 			while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL)
1075 				__skb_queue_tail(&p0->fcoe_rx_list, skb);
1076 			spin_unlock_bh(&p0->fcoe_rx_list.lock);
1077 		} else {
1078 			/*
1079 			 * The targeted CPU is not initialized and cannot accept
1080 			 * new	skbs. Unlock the targeted CPU and drop the skbs
1081 			 * on the CPU that is going offline.
1082 			 */
1083 			while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL)
1084 				kfree_skb(skb);
1085 			spin_unlock_bh(&p0->fcoe_rx_list.lock);
1086 		}
1087 	} else {
1088 		/*
1089 		 * This scenario occurs when the module is being removed
1090 		 * and all threads are being destroyed. skbs will continue
1091 		 * to be shifted from the CPU thread that is being removed
1092 		 * to the CPU thread associated with the CPU that is processing
1093 		 * the module removal. Once there is only one CPU Rx thread it
1094 		 * will reach this case and we will drop all skbs and later
1095 		 * stop the thread.
1096 		 */
1097 		spin_lock_bh(&p->fcoe_rx_list.lock);
1098 		while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL)
1099 			kfree_skb(skb);
1100 		spin_unlock_bh(&p->fcoe_rx_list.lock);
1101 	}
1102 #else
1103 	/*
1104 	 * This is a non-SMP scenario where the single Rx thread is
1105 	 * being removed. Free all skbs and stop the thread.
1106 	 */
1107 	spin_lock_bh(&p->fcoe_rx_list.lock);
1108 	while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL)
1109 		kfree_skb(skb);
1110 	spin_unlock_bh(&p->fcoe_rx_list.lock);
1111 #endif
1112 
1113 	if (thread)
1114 		kthread_stop(thread);
1115 
1116 	if (crc_eof)
1117 		put_page(crc_eof);
1118 }
1119 
1120 /**
1121  * fcoe_cpu_callback() - Handler for CPU hotplug events
1122  * @nfb:    The callback data block
1123  * @action: The event triggering the callback
1124  * @hcpu:   The index of the CPU that the event is for
1125  *
1126  * This creates or destroys per-CPU data for fcoe
1127  *
1128  * Returns NOTIFY_OK always.
1129  */
1130 static int fcoe_cpu_callback(struct notifier_block *nfb,
1131 			     unsigned long action, void *hcpu)
1132 {
1133 	unsigned cpu = (unsigned long)hcpu;
1134 
1135 	switch (action) {
1136 	case CPU_ONLINE:
1137 	case CPU_ONLINE_FROZEN:
1138 		FCOE_DBG("CPU %x online: Create Rx thread\n", cpu);
1139 		fcoe_percpu_thread_create(cpu);
1140 		break;
1141 	case CPU_DEAD:
1142 	case CPU_DEAD_FROZEN:
1143 		FCOE_DBG("CPU %x offline: Remove Rx thread\n", cpu);
1144 		fcoe_percpu_thread_destroy(cpu);
1145 		break;
1146 	default:
1147 		break;
1148 	}
1149 	return NOTIFY_OK;
1150 }
1151 
1152 /**
1153  * fcoe_rcv() - Receive packets from a net device
1154  * @skb:    The received packet
1155  * @netdev: The net device that the packet was received on
1156  * @ptype:  The packet type context
1157  * @olddev: The last device net device
1158  *
1159  * This routine is called by NET_RX_SOFTIRQ. It receives a packet, builds a
1160  * FC frame and passes the frame to libfc.
1161  *
1162  * Returns: 0 for success
1163  */
1164 int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
1165 	     struct packet_type *ptype, struct net_device *olddev)
1166 {
1167 	struct fc_lport *lport;
1168 	struct fcoe_rcv_info *fr;
1169 	struct fcoe_interface *fcoe;
1170 	struct fc_frame_header *fh;
1171 	struct fcoe_percpu_s *fps;
1172 	unsigned int cpu;
1173 
1174 	fcoe = container_of(ptype, struct fcoe_interface, fcoe_packet_type);
1175 	lport = fcoe->ctlr.lp;
1176 	if (unlikely(!lport)) {
1177 		FCOE_NETDEV_DBG(netdev, "Cannot find hba structure");
1178 		goto err2;
1179 	}
1180 	if (!lport->link_up)
1181 		goto err2;
1182 
1183 	FCOE_NETDEV_DBG(netdev, "skb_info: len:%d data_len:%d head:%p "
1184 			"data:%p tail:%p end:%p sum:%d dev:%s",
1185 			skb->len, skb->data_len, skb->head, skb->data,
1186 			skb_tail_pointer(skb), skb_end_pointer(skb),
1187 			skb->csum, skb->dev ? skb->dev->name : "<NULL>");
1188 
1189 	/* check for FCOE packet type */
1190 	if (unlikely(eth_hdr(skb)->h_proto != htons(ETH_P_FCOE))) {
1191 		FCOE_NETDEV_DBG(netdev, "Wrong FC type frame");
1192 		goto err;
1193 	}
1194 
1195 	/*
1196 	 * Check for minimum frame length, and make sure required FCoE
1197 	 * and FC headers are pulled into the linear data area.
1198 	 */
1199 	if (unlikely((skb->len < FCOE_MIN_FRAME) ||
1200 		     !pskb_may_pull(skb, FCOE_HEADER_LEN)))
1201 		goto err;
1202 
1203 	skb_set_transport_header(skb, sizeof(struct fcoe_hdr));
1204 	fh = (struct fc_frame_header *) skb_transport_header(skb);
1205 
1206 	fr = fcoe_dev_from_skb(skb);
1207 	fr->fr_dev = lport;
1208 	fr->ptype = ptype;
1209 
1210 	/*
1211 	 * If the incoming frame's exchange was originated by this
1212 	 * initiator, the received frame's exchange ID is ANDed with the
1213 	 * fc_cpu_mask bits to select the same CPU on which the exchange
1214 	 * was originated; otherwise just use the current CPU.
1215 	 */
1216 	if (ntoh24(fh->fh_f_ctl) & FC_FC_EX_CTX)
1217 		cpu = ntohs(fh->fh_ox_id) & fc_cpu_mask;
1218 	else
1219 		cpu = smp_processor_id();
1220 
1221 	fps = &per_cpu(fcoe_percpu, cpu);
1222 	spin_lock_bh(&fps->fcoe_rx_list.lock);
1223 	if (unlikely(!fps->thread)) {
1224 		/*
1225 		 * The targeted CPU is not ready, let's target
1226 		 * the first CPU now. For non-SMP systems this
1227 		 * will check the same CPU twice.
1228 		 */
1229 		FCOE_NETDEV_DBG(netdev, "CPU is online, but no receive thread "
1230 				"ready for incoming skb- using first online "
1231 				"CPU.\n");
1232 
1233 		spin_unlock_bh(&fps->fcoe_rx_list.lock);
1234 		cpu = first_cpu(cpu_online_map);
1235 		fps = &per_cpu(fcoe_percpu, cpu);
1236 		spin_lock_bh(&fps->fcoe_rx_list.lock);
1237 		if (!fps->thread) {
1238 			spin_unlock_bh(&fps->fcoe_rx_list.lock);
1239 			goto err;
1240 		}
1241 	}
1242 
1243 	/*
1244 	 * We now have a valid CPU that we're targeting for
1245 	 * this skb. We also have this receive thread locked,
1246 	 * so we're free to queue skbs into its queue.
1247 	 */
1248 
1249 	/* If this is a SCSI-FCP frame, and this is already executing on the
1250 	 * correct CPU, and the queue for this CPU is empty, then go ahead
1251 	 * and process the frame directly in the softirq context.
1252 	 * This lets us process completions without context switching from the
1253 	 * NET_RX softirq, to our receive processing thread, and then back to
1254 	 * BLOCK softirq context.
1255 	 */
1256 	if (fh->fh_type == FC_TYPE_FCP &&
1257 	    cpu == smp_processor_id() &&
1258 	    skb_queue_empty(&fps->fcoe_rx_list)) {
1259 		spin_unlock_bh(&fps->fcoe_rx_list.lock);
1260 		fcoe_recv_frame(skb);
1261 	} else {
1262 		__skb_queue_tail(&fps->fcoe_rx_list, skb);
1263 		if (fps->fcoe_rx_list.qlen == 1)
1264 			wake_up_process(fps->thread);
1265 		spin_unlock_bh(&fps->fcoe_rx_list.lock);
1266 	}
1267 
1268 	return 0;
1269 err:
1270 	fc_lport_get_stats(lport)->ErrorFrames++;
1271 
1272 err2:
1273 	kfree_skb(skb);
1274 	return -1;
1275 }
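
/*
 * Example of the receive CPU selection above (a sketch; it assumes libfc
 * encodes the originating CPU in the exchange ID bits covered by
 * fc_cpu_mask): on a 4-CPU system fc_cpu_mask is 0x3, so a response frame
 * carrying OX_ID 0x1012 with EX_CTX set in F_CTL is queued to CPU
 * 0x1012 & 0x3 = 2, the CPU that originated the exchange. Frames for
 * exchanges we did not originate simply stay on the receiving CPU.
 */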
1276 
1277 /**
1278  * fcoe_start_io() - Start FCoE I/O
1279  * @skb: The packet to be transmitted
1280  *
1281  * This routine clones the skb and hands the clone to the net device for
1282  * transmission; the original skb is freed only if the transmit succeeds.
1283  *
1284  * Returns: 0 for success
1285  */
1286 static inline int fcoe_start_io(struct sk_buff *skb)
1287 {
1288 	struct sk_buff *nskb;
1289 	int rc;
1290 
1291 	nskb = skb_clone(skb, GFP_ATOMIC);
1292 	rc = dev_queue_xmit(nskb);
1293 	if (rc != 0)
1294 		return rc;
1295 	kfree_skb(skb);
1296 	return 0;
1297 }
1298 
1299 /**
1300  * fcoe_get_paged_crc_eof() - Allocate a page to be used for the trailer CRC
1301  * @skb:  The packet to be transmitted
1302  * @tlen: The total length of the trailer
1303  *
1304  * This routine allocates a page for frame trailers. The page is re-used if
1305  * there is enough room left on it for the current trailer. If there isn't
1306  * enough room left, a new page is allocated for the trailer. References to
1307  * the page from this function as well as from the skbs using the page
1308  * fragments ensure that the page is freed at the appropriate time.
1309  *
1310  * Returns: 0 for success
1311  */
1312 static int fcoe_get_paged_crc_eof(struct sk_buff *skb, int tlen)
1313 {
1314 	struct fcoe_percpu_s *fps;
1315 	struct page *page;
1316 
1317 	fps = &get_cpu_var(fcoe_percpu);
1318 	page = fps->crc_eof_page;
1319 	if (!page) {
1320 		page = alloc_page(GFP_ATOMIC);
1321 		if (!page) {
1322 			put_cpu_var(fcoe_percpu);
1323 			return -ENOMEM;
1324 		}
1325 		fps->crc_eof_page = page;
1326 		fps->crc_eof_offset = 0;
1327 	}
1328 
1329 	get_page(page);
1330 	skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, page,
1331 			   fps->crc_eof_offset, tlen);
1332 	skb->len += tlen;
1333 	skb->data_len += tlen;
1334 	skb->truesize += tlen;
1335 	fps->crc_eof_offset += sizeof(struct fcoe_crc_eof);
1336 
1337 	if (fps->crc_eof_offset >= PAGE_SIZE) {
1338 		fps->crc_eof_page = NULL;
1339 		fps->crc_eof_offset = 0;
1340 		put_page(page);
1341 	}
1342 	put_cpu_var(fcoe_percpu);
1343 	return 0;
1344 }
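
/*
 * Illustrative numbers: struct fcoe_crc_eof is 8 bytes (CRC-32, EOF byte
 * and 3 reserved bytes), so with 4 KiB pages one crc_eof page serves
 * roughly 512 outgoing frames before crc_eof_offset reaches PAGE_SIZE and
 * a fresh page is allocated. The get_page() taken per frame keeps the
 * page alive until every skb referencing it has been freed.
 */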
1345 
1346 /**
1347  * fcoe_fc_crc() - Calculates the CRC for a given frame
1348  * @fp: The frame to be checksummed
1349  *
1350  * This uses the crc32() routine to calculate the CRC for a frame
1351  *
1352  * Return: The 32 bit CRC value
1353  */
1354 u32 fcoe_fc_crc(struct fc_frame *fp)
1355 {
1356 	struct sk_buff *skb = fp_skb(fp);
1357 	struct skb_frag_struct *frag;
1358 	unsigned char *data;
1359 	unsigned long off, len, clen;
1360 	u32 crc;
1361 	unsigned i;
1362 
1363 	crc = crc32(~0, skb->data, skb_headlen(skb));
1364 
1365 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1366 		frag = &skb_shinfo(skb)->frags[i];
1367 		off = frag->page_offset;
1368 		len = frag->size;
1369 		while (len > 0) {
1370 			clen = min(len, PAGE_SIZE - (off & ~PAGE_MASK));
1371 			data = kmap_atomic(frag->page + (off >> PAGE_SHIFT),
1372 					   KM_SKB_DATA_SOFTIRQ);
1373 			crc = crc32(crc, data + (off & ~PAGE_MASK), clen);
1374 			kunmap_atomic(data, KM_SKB_DATA_SOFTIRQ);
1375 			off += clen;
1376 			len -= clen;
1377 		}
1378 	}
1379 	return crc;
1380 }
1381 
1382 /**
1383  * fcoe_xmit() - Transmit a FCoE frame
1384  * @lport: The local port that the frame is to be transmitted for
1385  * @fp:	   The frame to be transmitted
1386  *
1387  * Return: 0 for success
1388  */
1389 int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
1390 {
1391 	int wlen;
1392 	u32 crc;
1393 	struct ethhdr *eh;
1394 	struct fcoe_crc_eof *cp;
1395 	struct sk_buff *skb;
1396 	struct fcoe_dev_stats *stats;
1397 	struct fc_frame_header *fh;
1398 	unsigned int hlen;		/* header length implies the version */
1399 	unsigned int tlen;		/* trailer length */
1400 	unsigned int elen;		/* eth header, may include vlan */
1401 	struct fcoe_port *port = lport_priv(lport);
1402 	struct fcoe_interface *fcoe = port->fcoe;
1403 	u8 sof, eof;
1404 	struct fcoe_hdr *hp;
1405 
1406 	WARN_ON((fr_len(fp) % sizeof(u32)) != 0);
1407 
1408 	fh = fc_frame_header_get(fp);
1409 	skb = fp_skb(fp);
1410 	wlen = skb->len / FCOE_WORD_TO_BYTE;
1411 
1412 	if (!lport->link_up) {
1413 		kfree_skb(skb);
1414 		return 0;
1415 	}
1416 
1417 	if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ) &&
1418 	    fcoe_ctlr_els_send(&fcoe->ctlr, lport, skb))
1419 		return 0;
1420 
1421 	sof = fr_sof(fp);
1422 	eof = fr_eof(fp);
1423 
1424 	elen = sizeof(struct ethhdr);
1425 	hlen = sizeof(struct fcoe_hdr);
1426 	tlen = sizeof(struct fcoe_crc_eof);
1427 	wlen = (skb->len - tlen + sizeof(crc)) / FCOE_WORD_TO_BYTE;
1428 
1429 	/* crc offload */
1430 	if (likely(lport->crc_offload)) {
1431 		skb->ip_summed = CHECKSUM_PARTIAL;
1432 		skb->csum_start = skb_headroom(skb);
1433 		skb->csum_offset = skb->len;
1434 		crc = 0;
1435 	} else {
1436 		skb->ip_summed = CHECKSUM_NONE;
1437 		crc = fcoe_fc_crc(fp);
1438 	}
1439 
1440 	/* copy port crc and eof to the skb buff */
1441 	if (skb_is_nonlinear(skb)) {
1442 		skb_frag_t *frag;
1443 		if (fcoe_get_paged_crc_eof(skb, tlen)) {
1444 			kfree_skb(skb);
1445 			return -ENOMEM;
1446 		}
1447 		frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1];
1448 		cp = kmap_atomic(frag->page, KM_SKB_DATA_SOFTIRQ)
1449 			+ frag->page_offset;
1450 	} else {
1451 		cp = (struct fcoe_crc_eof *)skb_put(skb, tlen);
1452 	}
1453 
1454 	memset(cp, 0, sizeof(*cp));
1455 	cp->fcoe_eof = eof;
1456 	cp->fcoe_crc32 = cpu_to_le32(~crc);
1457 
1458 	if (skb_is_nonlinear(skb)) {
1459 		kunmap_atomic(cp, KM_SKB_DATA_SOFTIRQ);
1460 		cp = NULL;
1461 	}
1462 
1463 	/* adjust skb network/transport offsets to match mac/fcoe/port */
1464 	skb_push(skb, elen + hlen);
1465 	skb_reset_mac_header(skb);
1466 	skb_reset_network_header(skb);
1467 	skb->mac_len = elen;
1468 	skb->protocol = htons(ETH_P_FCOE);
1469 	skb->dev = fcoe->netdev;
1470 
1471 	/* fill up mac and fcoe headers */
1472 	eh = eth_hdr(skb);
1473 	eh->h_proto = htons(ETH_P_FCOE);
1474 	if (fcoe->ctlr.map_dest)
1475 		fc_fcoe_set_mac(eh->h_dest, fh->fh_d_id);
1476 	else
1477 		/* insert GW address */
1478 		memcpy(eh->h_dest, fcoe->ctlr.dest_addr, ETH_ALEN);
1479 
1480 	if (unlikely(fcoe->ctlr.flogi_oxid != FC_XID_UNKNOWN))
1481 		memcpy(eh->h_source, fcoe->ctlr.ctl_src_addr, ETH_ALEN);
1482 	else
1483 		memcpy(eh->h_source, port->data_src_addr, ETH_ALEN);
1484 
1485 	hp = (struct fcoe_hdr *)(eh + 1);
1486 	memset(hp, 0, sizeof(*hp));
1487 	if (FC_FCOE_VER)
1488 		FC_FCOE_ENCAPS_VER(hp, FC_FCOE_VER);
1489 	hp->fcoe_sof = sof;
1490 
1491 	/* fcoe lso, mss is in max_payload which is non-zero for FCP data */
1492 	if (lport->seq_offload && fr_max_payload(fp)) {
1493 		skb_shinfo(skb)->gso_type = SKB_GSO_FCOE;
1494 		skb_shinfo(skb)->gso_size = fr_max_payload(fp);
1495 	} else {
1496 		skb_shinfo(skb)->gso_type = 0;
1497 		skb_shinfo(skb)->gso_size = 0;
1498 	}
1499 	/* update tx stats: regardless if LLD fails */
1500 	stats = fc_lport_get_stats(lport);
1501 	stats->TxFrames++;
1502 	stats->TxWords += wlen;
1503 
1504 	/* send down to lld */
1505 	fr_dev(fp) = lport;
1506 	if (port->fcoe_pending_queue.qlen)
1507 		fcoe_check_wait_queue(lport, skb);
1508 	else if (fcoe_start_io(skb))
1509 		fcoe_check_wait_queue(lport, skb);
1510 
1511 	return 0;
1512 }
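
/*
 * Resulting on-wire layout built by fcoe_xmit() (a sketch):
 *
 *	| ethhdr (14) | fcoe_hdr (14: ver + reserved + SOF) |
 *	| FC frame (24-byte header + payload) |
 *	| fcoe_crc_eof (8: CRC-32 + EOF + padding) |
 *
 * With CRC offload the CRC field is written as ~0 and the LLD is expected
 * to fill it in, guided by the CHECKSUM_PARTIAL marking and the
 * csum_start/csum_offset set above; otherwise fcoe_fc_crc() has already
 * computed it in software.
 */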
1513 
1514 /**
1515  * fcoe_percpu_flush_done() - Indicate per-CPU queue flush completion
1516  * @skb: The completed skb (argument required by destructor)
1517  */
1518 static void fcoe_percpu_flush_done(struct sk_buff *skb)
1519 {
1520 	complete(&fcoe_flush_completion);
1521 }
1522 
1523 /**
1524  * fcoe_recv_frame() - process a single received frame
1525  * @skb: frame to process
1526  */
1527 static void fcoe_recv_frame(struct sk_buff *skb)
1528 {
1529 	u32 fr_len;
1530 	struct fc_lport *lport;
1531 	struct fcoe_rcv_info *fr;
1532 	struct fcoe_dev_stats *stats;
1533 	struct fc_frame_header *fh;
1534 	struct fcoe_crc_eof crc_eof;
1535 	struct fc_frame *fp;
1536 	u8 *mac = NULL;
1537 	struct fcoe_port *port;
1538 	struct fcoe_hdr *hp;
1539 
1540 	fr = fcoe_dev_from_skb(skb);
1541 	lport = fr->fr_dev;
1542 	if (unlikely(!lport)) {
1543 		if (skb->destructor != fcoe_percpu_flush_done)
1544 			FCOE_NETDEV_DBG(skb->dev, "NULL lport in skb");
1545 		kfree_skb(skb);
1546 		return;
1547 	}
1548 
1549 	FCOE_NETDEV_DBG(skb->dev, "skb_info: len:%d data_len:%d "
1550 			"head:%p data:%p tail:%p end:%p sum:%d dev:%s",
1551 			skb->len, skb->data_len,
1552 			skb->head, skb->data, skb_tail_pointer(skb),
1553 			skb_end_pointer(skb), skb->csum,
1554 			skb->dev ? skb->dev->name : "<NULL>");
1555 
1556 	/*
1557 	 * Save source MAC address before discarding header.
1558 	 */
1559 	port = lport_priv(lport);
1560 	if (skb_is_nonlinear(skb))
1561 		skb_linearize(skb);	/* not ideal */
1562 	mac = eth_hdr(skb)->h_source;
1563 
1564 	/*
1565 	 * Frame length checks and setting up the header pointers
1566 	 * was done in fcoe_rcv already.
1567 	 */
1568 	hp = (struct fcoe_hdr *) skb_network_header(skb);
1569 	fh = (struct fc_frame_header *) skb_transport_header(skb);
1570 
1571 	stats = fc_lport_get_stats(lport);
1572 	if (unlikely(FC_FCOE_DECAPS_VER(hp) != FC_FCOE_VER)) {
1573 		if (stats->ErrorFrames < 5)
1574 			printk(KERN_WARNING "fcoe: FCoE version "
1575 			       "mismatch: The frame has "
1576 			       "version %x, but the "
1577 			       "initiator supports version "
1578 			       "%x\n", FC_FCOE_DECAPS_VER(hp),
1579 			       FC_FCOE_VER);
1580 		stats->ErrorFrames++;
1581 		kfree_skb(skb);
1582 		return;
1583 	}
1584 
1585 	skb_pull(skb, sizeof(struct fcoe_hdr));
1586 	fr_len = skb->len - sizeof(struct fcoe_crc_eof);
1587 
1588 	stats->RxFrames++;
1589 	stats->RxWords += fr_len / FCOE_WORD_TO_BYTE;
1590 
1591 	fp = (struct fc_frame *)skb;
1592 	fc_frame_init(fp);
1593 	fr_dev(fp) = lport;
1594 	fr_sof(fp) = hp->fcoe_sof;
1595 
1596 	/* Copy out the CRC and EOF trailer for access */
1597 	if (skb_copy_bits(skb, fr_len, &crc_eof, sizeof(crc_eof))) {
1598 		kfree_skb(skb);
1599 		return;
1600 	}
1601 	fr_eof(fp) = crc_eof.fcoe_eof;
1602 	fr_crc(fp) = crc_eof.fcoe_crc32;
1603 	if (pskb_trim(skb, fr_len)) {
1604 		kfree_skb(skb);
1605 		return;
1606 	}
1607 
1608 	/*
1609 	 * We only check the CRC here if no offload is available and the
1610 	 * frame is not solicited FCP data; for solicited data the FCP
1611 	 * layer checks the CRC during the copy.
1612 	 */
1613 	if (lport->crc_offload &&
1614 	    skb->ip_summed == CHECKSUM_UNNECESSARY)
1615 		fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED;
1616 	else
1617 		fr_flags(fp) |= FCPHF_CRC_UNCHECKED;
1618 
1619 	fh = fc_frame_header_get(fp);
1620 	if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA &&
1621 	    fh->fh_type == FC_TYPE_FCP) {
1622 		fc_exch_recv(lport, fp);
1623 		return;
1624 	}
1625 	if (fr_flags(fp) & FCPHF_CRC_UNCHECKED) {
1626 		if (le32_to_cpu(fr_crc(fp)) !=
1627 		    ~crc32(~0, skb->data, fr_len)) {
1628 			if (stats->InvalidCRCCount < 5)
1629 				printk(KERN_WARNING "fcoe: dropping "
1630 				       "frame with CRC error\n");
1631 			stats->InvalidCRCCount++;
1632 			stats->ErrorFrames++;
1633 			fc_frame_free(fp);
1634 			return;
1635 		}
1636 		fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED;
1637 	}
1638 	fc_exch_recv(lport, fp);
1639 }
1640 
1641 /**
1642  * fcoe_percpu_receive_thread() - The per-CPU packet receive thread
1643  * @arg: The per-CPU context
1644  *
1645  * Return: 0 for success
1646  */
1647 int fcoe_percpu_receive_thread(void *arg)
1648 {
1649 	struct fcoe_percpu_s *p = arg;
1650 	struct sk_buff *skb;
1651 
1652 	set_user_nice(current, -20);
1653 
1654 	while (!kthread_should_stop()) {
1655 
1656 		spin_lock_bh(&p->fcoe_rx_list.lock);
1657 		while ((skb = __skb_dequeue(&p->fcoe_rx_list)) == NULL) {
1658 			set_current_state(TASK_INTERRUPTIBLE);
1659 			spin_unlock_bh(&p->fcoe_rx_list.lock);
1660 			schedule();
1661 			set_current_state(TASK_RUNNING);
1662 			if (kthread_should_stop())
1663 				return 0;
1664 			spin_lock_bh(&p->fcoe_rx_list.lock);
1665 		}
1666 		spin_unlock_bh(&p->fcoe_rx_list.lock);
1667 		fcoe_recv_frame(skb);
1668 	}
1669 	return 0;
1670 }
1671 
1672 /**
1673  * fcoe_check_wait_queue() - Attempt to clear the transmit backlog
1674  * @lport: The local port whose backlog is to be cleared
1675  * @skb:   An optional new skb to queue before draining the backlog
1676  *
1677  * This drains the wait_queue by dequeuing skbs from its head and calling
1678  * fcoe_start_io() for each one. If a transmit fails, the skb is put back
1679  * at the head of the wait_queue to be retried later.
1680  *
1681  * The wait_queue is used when the skb transmit fails. The failed skb
1682  * will go in the wait_queue which will be emptied by the timer function or
1683  * by the next skb transmit.
1684  */
1685 static void fcoe_check_wait_queue(struct fc_lport *lport, struct sk_buff *skb)
1686 {
1687 	struct fcoe_port *port = lport_priv(lport);
1688 	int rc;
1689 
1690 	spin_lock_bh(&port->fcoe_pending_queue.lock);
1691 
1692 	if (skb)
1693 		__skb_queue_tail(&port->fcoe_pending_queue, skb);
1694 
1695 	if (port->fcoe_pending_queue_active)
1696 		goto out;
1697 	port->fcoe_pending_queue_active = 1;
1698 
1699 	while (port->fcoe_pending_queue.qlen) {
1700 		/* keep qlen > 0 until fcoe_start_io succeeds */
1701 		port->fcoe_pending_queue.qlen++;
1702 		skb = __skb_dequeue(&port->fcoe_pending_queue);
1703 
1704 		spin_unlock_bh(&port->fcoe_pending_queue.lock);
1705 		rc = fcoe_start_io(skb);
1706 		spin_lock_bh(&port->fcoe_pending_queue.lock);
1707 
1708 		if (rc) {
1709 			__skb_queue_head(&port->fcoe_pending_queue, skb);
1710 			/* undo temporary increment above */
1711 			port->fcoe_pending_queue.qlen--;
1712 			break;
1713 		}
1714 		/* undo temporary increment above */
1715 		port->fcoe_pending_queue.qlen--;
1716 	}
1717 
1718 	if (port->fcoe_pending_queue.qlen < FCOE_LOW_QUEUE_DEPTH)
1719 		lport->qfull = 0;
1720 	if (port->fcoe_pending_queue.qlen && !timer_pending(&port->timer))
1721 		mod_timer(&port->timer, jiffies + 2);
1722 	port->fcoe_pending_queue_active = 0;
1723 out:
1724 	if (port->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH)
1725 		lport->qfull = 1;
1726 	spin_unlock_bh(&port->fcoe_pending_queue.lock);
1727 	return;
1728 }
1729 
1730 /**
1731  * fcoe_dev_setup() - Setup the link change notification interface
1732  */
1733 static void fcoe_dev_setup(void)
1734 {
1735 	register_netdevice_notifier(&fcoe_notifier);
1736 }
1737 
1738 /**
1739  * fcoe_dev_cleanup() - Cleanup the link change notification interface
1740  */
1741 static void fcoe_dev_cleanup(void)
1742 {
1743 	unregister_netdevice_notifier(&fcoe_notifier);
1744 }
1745 
1746 /**
1747  * fcoe_device_notification() - Handler for net device events
1748  * @notifier: The context of the notification
1749  * @event:    The type of event
1750  * @ptr:      The net device that the event was on
1751  *
1752  * This function is called from the netdevice notifier chain in case of a link change event.
1753  *
1754  * Returns: 0 for success
1755  */
1756 static int fcoe_device_notification(struct notifier_block *notifier,
1757 				    ulong event, void *ptr)
1758 {
1759 	struct fc_lport *lport = NULL;
1760 	struct net_device *netdev = ptr;
1761 	struct fcoe_interface *fcoe;
1762 	struct fcoe_port *port;
1763 	struct fcoe_dev_stats *stats;
1764 	u32 link_possible = 1;
1765 	u32 mfs;
1766 	int rc = NOTIFY_OK;
1767 
1768 	list_for_each_entry(fcoe, &fcoe_hostlist, list) {
1769 		if (fcoe->netdev == netdev) {
1770 			lport = fcoe->ctlr.lp;
1771 			break;
1772 		}
1773 	}
1774 	if (!lport) {
1775 		rc = NOTIFY_DONE;
1776 		goto out;
1777 	}
1778 
1779 	switch (event) {
1780 	case NETDEV_DOWN:
1781 	case NETDEV_GOING_DOWN:
1782 		link_possible = 0;
1783 		break;
1784 	case NETDEV_UP:
1785 	case NETDEV_CHANGE:
1786 		break;
1787 	case NETDEV_CHANGEMTU:
1788 		if (netdev->features & NETIF_F_FCOE_MTU)
1789 			break;
1790 		mfs = netdev->mtu - (sizeof(struct fcoe_hdr) +
1791 				     sizeof(struct fcoe_crc_eof));
1792 		if (mfs >= FC_MIN_MAX_FRAME)
1793 			fc_set_mfs(lport, mfs);
1794 		break;
1795 	case NETDEV_REGISTER:
1796 		break;
1797 	case NETDEV_UNREGISTER:
1798 		list_del(&fcoe->list);
1799 		port = lport_priv(fcoe->ctlr.lp);
1800 		fcoe_interface_cleanup(fcoe);
1801 		schedule_work(&port->destroy_work);
1802 		goto out;
1804 	default:
1805 		FCOE_NETDEV_DBG(netdev, "Unknown event %ld "
1806 				"from netdev netlink\n", event);
1807 	}
1808 	if (link_possible && !fcoe_link_ok(lport))
1809 		fcoe_ctlr_link_up(&fcoe->ctlr);
1810 	else if (fcoe_ctlr_link_down(&fcoe->ctlr)) {
1811 		stats = fc_lport_get_stats(lport);
1812 		stats->LinkFailureCount++;
1813 		fcoe_clean_pending_queue(lport);
1814 	}
1815 out:
1816 	return rc;
1817 }
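/*
 * Worked example for the NETDEV_CHANGEMTU case above (assuming the usual
 * 14-byte struct fcoe_hdr and 8-byte struct fcoe_crc_eof): a 2500-byte
 * "baby jumbo" MTU leaves mfs = 2500 - 22 = 2478 bytes for the FC frame,
 * which is passed to fc_set_mfs() as long as it is at least
 * FC_MIN_MAX_FRAME.  Devices advertising NETIF_F_FCOE_MTU manage the FCoE
 * MTU themselves, so their MTU changes are ignored here.
 */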
1818 
1819 /**
1820  * fcoe_if_to_netdev() - Parse a name buffer to get a net device
1821  * @buffer: The name of the net device
1822  *
1823  * Returns: NULL or a pointer to the net_device (with a reference held)
1824  */
1825 static struct net_device *fcoe_if_to_netdev(const char *buffer)
1826 {
1827 	char *cp;
1828 	char ifname[IFNAMSIZ + 2];
1829 
1830 	if (buffer) {
1831 		strlcpy(ifname, buffer, IFNAMSIZ);
1832 		cp = ifname + strlen(ifname);
1833 		while (--cp >= ifname && *cp == '\n')
1834 			*cp = '\0';
1835 		return dev_get_by_name(&init_net, ifname);
1836 	}
1837 	return NULL;
1838 }
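/*
 * Illustrative caller pattern (hypothetical): dev_get_by_name() takes a
 * reference on the net_device it returns, so every successful lookup must
 * be balanced with dev_put(), as fcoe_create()/fcoe_destroy() below do:
 *
 *	netdev = fcoe_if_to_netdev("eth2\n");	// trailing '\n' is stripped
 *	if (netdev) {
 *		// ... use the device ...
 *		dev_put(netdev);
 *	}
 */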
1839 
1840 /**
1841  * fcoe_destroy() - Destroy a FCoE interface
1842  * @buffer: The name of the Ethernet interface to be destroyed
1843  * @kp:	    The associated kernel parameter
1844  *
1845  * Called from sysfs.
1846  *
1847  * Returns: 0 for success
1848  */
1849 static int fcoe_destroy(const char *buffer, struct kernel_param *kp)
1850 {
1851 	struct fcoe_interface *fcoe;
1852 	struct net_device *netdev;
1853 	int rc = 0;
1854 
1855 	mutex_lock(&fcoe_config_mutex);
1856 #ifdef CONFIG_FCOE_MODULE
1857 	/*
1858 	 * Make sure the module has been initialized, and is not about to be
1859 	 * removed.  Module parameter sysfs files are writable before the
1860 	 * module_init function is called and after module_exit.
1861 	 */
1862 	if (THIS_MODULE->state != MODULE_STATE_LIVE) {
1863 		rc = -ENODEV;
1864 		goto out_nodev;
1865 	}
1866 #endif
1867 
1868 	netdev = fcoe_if_to_netdev(buffer);
1869 	if (!netdev) {
1870 		rc = -ENODEV;
1871 		goto out_nodev;
1872 	}
1873 
1874 	rtnl_lock();
1875 	fcoe = fcoe_hostlist_lookup_port(netdev);
1876 	if (!fcoe) {
1877 		rtnl_unlock();
1878 		rc = -ENODEV;
1879 		goto out_putdev;
1880 	}
1881 	list_del(&fcoe->list);
1882 	fcoe_interface_cleanup(fcoe);
1883 	rtnl_unlock();
1884 	fcoe_if_destroy(fcoe->ctlr.lp);
1885 out_putdev:
1886 	dev_put(netdev);
1887 out_nodev:
1888 	mutex_unlock(&fcoe_config_mutex);
1889 	return rc;
1890 }
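/*
 * Usage note (assuming the module_param_call() hookups for "create" and
 * "destroy" elsewhere in this file): the destroy path is driven from user
 * space roughly as
 *
 *	echo eth2 > /sys/module/fcoe/parameters/destroy
 *
 * which arrives here with buffer == "eth2\n".
 */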
1891 
1892 /**
1893  * fcoe_destroy_work() - Destroy a FCoE port in a deferred work context
1894  * @work: Handle to the FCoE port to be destroyed
1895  */
1896 static void fcoe_destroy_work(struct work_struct *work)
1897 {
1898 	struct fcoe_port *port;
1899 
1900 	port = container_of(work, struct fcoe_port, destroy_work);
1901 	mutex_lock(&fcoe_config_mutex);
1902 	fcoe_if_destroy(port->lport);
1903 	mutex_unlock(&fcoe_config_mutex);
1904 }
1905 
1906 /**
1907  * fcoe_create() - Create a fcoe interface
1908  * @buffer: The name of the Ethernet interface to create on
1909  * @kp:	    The associated kernel param
1910  *
1911  * Called from sysfs.
1912  *
1913  * Returns: 0 for success
1914  */
1915 static int fcoe_create(const char *buffer, struct kernel_param *kp)
1916 {
1917 	int rc;
1918 	struct fcoe_interface *fcoe;
1919 	struct fc_lport *lport;
1920 	struct net_device *netdev;
1921 
1922 	mutex_lock(&fcoe_config_mutex);
1923 #ifdef CONFIG_FCOE_MODULE
1924 	/*
1925 	 * Make sure the module has been initialized, and is not about to be
1926 	 * removed.  Module parameter sysfs files are writable before the
1927 	 * module_init function is called and after module_exit.
1928 	 */
1929 	if (THIS_MODULE->state != MODULE_STATE_LIVE) {
1930 		rc = -ENODEV;
1931 		goto out_nodev;
1932 	}
1933 #endif
1934 
1935 	rtnl_lock();
1936 	netdev = fcoe_if_to_netdev(buffer);
1937 	if (!netdev) {
1938 		rc = -ENODEV;
1939 		goto out_nodev;
1940 	}
1941 
1942 	/* look for existing lport */
1943 	if (fcoe_hostlist_lookup(netdev)) {
1944 		rc = -EEXIST;
1945 		goto out_putdev;
1946 	}
1947 
1948 	fcoe = fcoe_interface_create(netdev);
1949 	if (!fcoe) {
1950 		rc = -ENOMEM;
1951 		goto out_putdev;
1952 	}
1953 
1954 	lport = fcoe_if_create(fcoe, &netdev->dev, 0);
1955 	if (IS_ERR(lport)) {
1956 		printk(KERN_ERR "fcoe: Failed to create interface (%s)\n",
1957 		       netdev->name);
1958 		rc = -EIO;
1959 		fcoe_interface_cleanup(fcoe);
1960 		goto out_free;
1961 	}
1962 
1963 	/* Make this the "master" N_Port */
1964 	fcoe->ctlr.lp = lport;
1965 
1966 	/* add to lports list */
1967 	fcoe_hostlist_add(lport);
1968 
1969 	/* start FIP Discovery and FLOGI */
1970 	lport->boot_time = jiffies;
1971 	fc_fabric_login(lport);
1972 	if (!fcoe_link_ok(lport))
1973 		fcoe_ctlr_link_up(&fcoe->ctlr);
1974 
1975 	rc = 0;
1976 out_free:
1977 	/*
1978 	 * Release from init in fcoe_interface_create(), on success lport
1979 	 * should be holding a reference taken in fcoe_if_create().
1980 	 */
1981 	fcoe_interface_put(fcoe);
1982 out_putdev:
1983 	dev_put(netdev);
1984 out_nodev:
1985 	rtnl_unlock();
1986 	mutex_unlock(&fcoe_config_mutex);
1987 	return rc;
1988 }
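/*
 * Usage note (assuming the matching module_param_call() hookup): creation
 * is the mirror image of destroy,
 *
 *	echo eth2 > /sys/module/fcoe/parameters/create
 *
 * The reference dropped at out_free is the one fcoe_interface_create()
 * returned with; after a successful fcoe_if_create() the lport holds its
 * own reference on the fcoe_interface, so the interface stays alive.
 */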
1989 
1990 /**
1991  * fcoe_link_ok() - Check if the link is OK for a local port
1992  * @lport: The local port to check link on
1993  *
1994  * Any permanently-disqualifying conditions have been previously checked.
1995  * This also updates the speed settings, which may change as the link comes
 *      up at 1 Gbit or 10 Gbit.
1996  *
1997  * This function should probably be checking for PAUSE support at some point
1998  * in the future. Currently Per-priority-pause is not determinable using
1999  * ethtool, so we shouldn't be restrictive until that problem is resolved.
2000  *
2001  * Returns: 0 if link is OK for use by FCoE.
2002  *
2003  */
2004 int fcoe_link_ok(struct fc_lport *lport)
2005 {
2006 	struct fcoe_port *port = lport_priv(lport);
2007 	struct net_device *netdev = port->fcoe->netdev;
2008 	struct ethtool_cmd ecmd = { ETHTOOL_GSET };
2009 
2010 	if ((netdev->flags & IFF_UP) && netif_carrier_ok(netdev) &&
2011 	    (!dev_ethtool_get_settings(netdev, &ecmd))) {
2012 		lport->link_supported_speeds &=
2013 			~(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT);
2014 		if (ecmd.supported & (SUPPORTED_1000baseT_Half |
2015 				      SUPPORTED_1000baseT_Full))
2016 			lport->link_supported_speeds |= FC_PORTSPEED_1GBIT;
2017 		if (ecmd.supported & SUPPORTED_10000baseT_Full)
2018 			lport->link_supported_speeds |=
2019 				FC_PORTSPEED_10GBIT;
2020 		if (ecmd.speed == SPEED_1000)
2021 			lport->link_speed = FC_PORTSPEED_1GBIT;
2022 		if (ecmd.speed == SPEED_10000)
2023 			lport->link_speed = FC_PORTSPEED_10GBIT;
2024 
2025 		return 0;
2026 	}
2027 	return -1;
2028 }
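/*
 * Note: unlike most kernel predicates, fcoe_link_ok() returns 0 when the
 * link *is* usable and -1 otherwise, so callers gate link-up processing as
 *
 *	if (!fcoe_link_ok(lport))
 *		fcoe_ctlr_link_up(&fcoe->ctlr);
 *
 * (see fcoe_create() and fcoe_device_notification() above).
 */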
2029 
2030 /**
2031  * fcoe_percpu_clean() - Clear all pending skbs for a local port
2032  * @lport: The local port whose skbs are to be cleared
2033  *
2034  * Must be called with fcoe_create_mutex held to single-thread completion.
2035  *
2036  * This flushes the pending skbs by adding a new skb to each queue and
2037  * waiting until they are all freed.  This assures us that not only are
2038  * there no packets that will be handled by the lport, but also that any
2039  * threads already handling packets have returned.
2040  */
2041 void fcoe_percpu_clean(struct fc_lport *lport)
2042 {
2043 	struct fcoe_percpu_s *pp;
2044 	struct fcoe_rcv_info *fr;
2045 	struct sk_buff_head *list;
2046 	struct sk_buff *skb, *next;
2047 	struct sk_buff *head;
2048 	unsigned int cpu;
2049 
2050 	for_each_possible_cpu(cpu) {
2051 		pp = &per_cpu(fcoe_percpu, cpu);
2052 		spin_lock_bh(&pp->fcoe_rx_list.lock);
2053 		list = &pp->fcoe_rx_list;
2054 		head = list->next;
2055 		for (skb = head; skb != (struct sk_buff *)list;
2056 		     skb = next) {
2057 			next = skb->next;
2058 			fr = fcoe_dev_from_skb(skb);
2059 			if (fr->fr_dev == lport) {
2060 				__skb_unlink(skb, list);
2061 				kfree_skb(skb);
2062 			}
2063 		}
2064 
2065 		if (!pp->thread || !cpu_online(cpu)) {
2066 			spin_unlock_bh(&pp->fcoe_rx_list.lock);
2067 			continue;
2068 		}
2069 
2070 		skb = dev_alloc_skb(0);
2071 		if (!skb) {
2072 			spin_unlock_bh(&pp->fcoe_rx_list.lock);
2073 			continue;
2074 		}
2075 		skb->destructor = fcoe_percpu_flush_done;
2076 
2077 		__skb_queue_tail(&pp->fcoe_rx_list, skb);
2078 		if (pp->fcoe_rx_list.qlen == 1)
2079 			wake_up_process(pp->thread);
2080 		spin_unlock_bh(&pp->fcoe_rx_list.lock);
2081 
2082 		wait_for_completion(&fcoe_flush_completion);
2083 	}
2084 }
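/*
 * The flush above relies on a sentinel skb: a zero-length skb whose
 * destructor is fcoe_percpu_flush_done() is queued behind any remaining
 * work, and freeing it completes fcoe_flush_completion.  The destructor is
 * defined earlier in this file (not shown here); in essence it is just:
 *
 *	static void fcoe_percpu_flush_done(struct sk_buff *skb)
 *	{
 *		complete(&fcoe_flush_completion);
 *	}
 *
 * so wait_for_completion() returns only after the per-CPU thread has
 * drained everything queued ahead of the sentinel.
 */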
2085 
2086 /**
2087  * fcoe_clean_pending_queue() - Dequeue and free all skbs on a port's pending queue
2088  * @lport: The local port whose pending queue is to be emptied
2089  */
2090 void fcoe_clean_pending_queue(struct fc_lport *lport)
2091 {
2092 	struct fcoe_port  *port = lport_priv(lport);
2093 	struct sk_buff *skb;
2094 
2095 	spin_lock_bh(&port->fcoe_pending_queue.lock);
2096 	while ((skb = __skb_dequeue(&port->fcoe_pending_queue)) != NULL) {
2097 		spin_unlock_bh(&port->fcoe_pending_queue.lock);
2098 		kfree_skb(skb);
2099 		spin_lock_bh(&port->fcoe_pending_queue.lock);
2100 	}
2101 	spin_unlock_bh(&port->fcoe_pending_queue.lock);
2102 }
2103 
2104 /**
2105  * fcoe_reset() - Reset a local port
2106  * @shost: The SCSI host associated with the local port to be reset
2107  *
2108  * Returns: Always 0 (return value required by FC transport template)
2109  */
2110 int fcoe_reset(struct Scsi_Host *shost)
2111 {
2112 	struct fc_lport *lport = shost_priv(shost);
2113 	fc_lport_reset(lport);
2114 	return 0;
2115 }
2116 
2117 /**
2118  * fcoe_hostlist_lookup_port() - Find the FCoE interface associated with a net device
2119  * @netdev: The net device used as a key
2120  *
2121  * Locking: Must be called with the RTNL mutex held.
2122  *
2123  * Returns: NULL or the FCoE interface
2124  */
2125 static struct fcoe_interface *
2126 fcoe_hostlist_lookup_port(const struct net_device *netdev)
2127 {
2128 	struct fcoe_interface *fcoe;
2129 
2130 	list_for_each_entry(fcoe, &fcoe_hostlist, list) {
2131 		if (fcoe->netdev == netdev)
2132 			return fcoe;
2133 	}
2134 	return NULL;
2135 }
2136 
2137 /**
2138  * fcoe_hostlist_lookup() - Find the local port associated with a
2139  *			    given net device
2140  * @netdev: The netdevice used as a key
2141  *
2142  * Locking: Must be called with the RTNL mutex held
2143  *
2144  * Returns: NULL or the local port
2145  */
2146 static struct fc_lport *fcoe_hostlist_lookup(const struct net_device *netdev)
2147 {
2148 	struct fcoe_interface *fcoe;
2149 
2150 	fcoe = fcoe_hostlist_lookup_port(netdev);
2151 	return (fcoe) ? fcoe->ctlr.lp : NULL;
2152 }
2153 
2154 /**
2155  * fcoe_hostlist_add() - Add the FCoE interface identified by a local
2156  *			 port to the hostlist
2157  * @lport: The local port that identifies the FCoE interface to be added
2158  *
2159  * Locking: must be called with the RTNL mutex held
2160  *
2161  * Returns: 0 for success
2162  */
2163 static int fcoe_hostlist_add(const struct fc_lport *lport)
2164 {
2165 	struct fcoe_interface *fcoe;
2166 	struct fcoe_port *port;
2167 
2168 	fcoe = fcoe_hostlist_lookup_port(fcoe_netdev(lport));
2169 	if (!fcoe) {
2170 		port = lport_priv(lport);
2171 		fcoe = port->fcoe;
2172 		list_add_tail(&fcoe->list, &fcoe_hostlist);
2173 	}
2174 	return 0;
2175 }
2176 
2177 /**
2178  * fcoe_init() - Initialize fcoe.ko
2179  *
2180  * Returns: 0 on success, or a negative value on failure
2181  */
2182 static int __init fcoe_init(void)
2183 {
2184 	struct fcoe_percpu_s *p;
2185 	unsigned int cpu;
2186 	int rc = 0;
2187 
2188 	mutex_lock(&fcoe_config_mutex);
2189 
2190 	for_each_possible_cpu(cpu) {
2191 		p = &per_cpu(fcoe_percpu, cpu);
2192 		skb_queue_head_init(&p->fcoe_rx_list);
2193 	}
2194 
2195 	for_each_online_cpu(cpu)
2196 		fcoe_percpu_thread_create(cpu);
2197 
2198 	/* Initialize per CPU interrupt thread */
2199 	rc = register_hotcpu_notifier(&fcoe_cpu_notifier);
2200 	if (rc)
2201 		goto out_free;
2202 
2203 	/* Setup link change notification */
2204 	fcoe_dev_setup();
2205 
2206 	rc = fcoe_if_init();
2207 	if (rc)
2208 		goto out_free;
2209 
2210 	mutex_unlock(&fcoe_config_mutex);
2211 	return 0;
2212 
2213 out_free:
2214 	for_each_online_cpu(cpu) {
2215 		fcoe_percpu_thread_destroy(cpu);
2216 	}
2217 	mutex_unlock(&fcoe_config_mutex);
2218 	return rc;
2219 }
2220 module_init(fcoe_init);
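/*
 * Initialization ordering note: the receive lists are initialized for all
 * possible CPUs, but receive threads are only created for CPUs that are
 * online at load time; the CPU hotplug notifier registered above creates
 * and destroys threads as CPUs come and go.  The netdevice notifier
 * (fcoe_dev_setup()) and the transport registration in fcoe_if_init() are
 * set up last, once the per-CPU infrastructure is ready.
 */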
2221 
2222 /**
2223  * fcoe_exit() - Clean up fcoe.ko
2224  *
2225  * Tears down all FCoE interfaces and per-CPU receive threads on module removal.
2226  */
2227 static void __exit fcoe_exit(void)
2228 {
2229 	struct fcoe_interface *fcoe, *tmp;
2230 	struct fcoe_port *port;
2231 	unsigned int cpu;
2232 
2233 	mutex_lock(&fcoe_config_mutex);
2234 
2235 	fcoe_dev_cleanup();
2236 
2237 	/* releases the associated fcoe hosts */
2238 	rtnl_lock();
2239 	list_for_each_entry_safe(fcoe, tmp, &fcoe_hostlist, list) {
2240 		list_del(&fcoe->list);
2241 		port = lport_priv(fcoe->ctlr.lp);
2242 		fcoe_interface_cleanup(fcoe);
2243 		schedule_work(&port->destroy_work);
2244 	}
2245 	rtnl_unlock();
2246 
2247 	unregister_hotcpu_notifier(&fcoe_cpu_notifier);
2248 
2249 	for_each_online_cpu(cpu)
2250 		fcoe_percpu_thread_destroy(cpu);
2251 
2252 	mutex_unlock(&fcoe_config_mutex);
2253 
2254 	/* flush any asynchronous interface destroys,
2255 	 * this should happen after the netdev notifier is unregistered */
2256 	flush_scheduled_work();
2257 	/* That will flush out all the N_Ports on the hostlist, but now we
2258 	 * may have NPIV VN_Ports scheduled for destruction */
2259 	flush_scheduled_work();
2260 
2261 	/* detach from scsi transport
2262 	 * must happen after all destroys are done, therefore after the flush */
2263 	fcoe_if_exit();
2264 }
2265 module_exit(fcoe_exit);
2266 
2267 /**
2268  * fcoe_flogi_resp() - FCoE specific FLOGI and FDISC response handler
2269  * @seq: active sequence in the FLOGI or FDISC exchange
2270  * @fp: response frame, or error encoded in a pointer (timeout)
2271  * @arg: pointer to the fcoe_ctlr structure
2272  *
2273  * This handles MAC address management for FCoE, then passes control on to
2274  * the libfc FLOGI response handler.
2275  */
2276 static void fcoe_flogi_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
2277 {
2278 	struct fcoe_ctlr *fip = arg;
2279 	struct fc_exch *exch = fc_seq_exch(seq);
2280 	struct fc_lport *lport = exch->lp;
2281 	u8 *mac;
2282 
2283 	if (IS_ERR(fp))
2284 		goto done;
2285 
2286 	mac = fr_cb(fp)->granted_mac;
2287 	if (is_zero_ether_addr(mac)) {
2288 		/* pre-FIP */
2289 		if (fcoe_ctlr_recv_flogi(fip, lport, fp)) {
2290 			fc_frame_free(fp);
2291 			return;
2292 		}
2293 	}
2294 	fcoe_update_src_mac(lport, mac);
2295 done:
2296 	fc_lport_flogi_resp(seq, fp, lport);
2297 }
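/*
 * MAC selection in the FLOGI/FDISC response: with FIP, the FCF grants a
 * fabric-provided MAC address (FPMA) which the FIP code records in
 * fr_cb(fp)->granted_mac before this handler runs.  A zero granted_mac
 * means the response arrived without FIP (pre-FIP/legacy mode), in which
 * case fcoe_ctlr_recv_flogi() derives the address from the frame itself;
 * if it consumes the frame, the response is not passed on to libfc.
 */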
2298 
2299 /**
2300  * fcoe_logo_resp() - FCoE specific LOGO response handler
2301  * @seq: active sequence in the LOGO exchange
2302  * @fp: response frame, or error encoded in a pointer (timeout)
2303  * @arg: pointer to the fc_lport structure
2304  *
2305  * This handles MAC address management for FCoE, then passes control on to
2306  * the libfc LOGO response handler.
2307  */
2308 static void fcoe_logo_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
2309 {
2310 	struct fc_lport *lport = arg;
2311 	static u8 zero_mac[ETH_ALEN] = { 0 };
2312 
2313 	if (!IS_ERR(fp))
2314 		fcoe_update_src_mac(lport, zero_mac);
2315 	fc_lport_logo_resp(seq, fp, lport);
2316 }
2317 
2318 /**
2319  * fcoe_elsct_send() - FCoE specific ELS handler
2320  *
2321  * This does special case handling of FIP encapsulated ELS exchanges for FCoE,
2322  * using FCoE specific response handlers and passing the FIP controller as
2323  * the argument (the lport is still available from the exchange).
2324  *
2325  * Most of the work here is just handed off to the libfc routine.
2326  */
2327 static struct fc_seq *fcoe_elsct_send(struct fc_lport *lport, u32 did,
2328 				      struct fc_frame *fp, unsigned int op,
2329 				      void (*resp)(struct fc_seq *,
2330 						   struct fc_frame *,
2331 						   void *),
2332 				      void *arg, u32 timeout)
2333 {
2334 	struct fcoe_port *port = lport_priv(lport);
2335 	struct fcoe_interface *fcoe = port->fcoe;
2336 	struct fcoe_ctlr *fip = &fcoe->ctlr;
2337 	struct fc_frame_header *fh = fc_frame_header_get(fp);
2338 
2339 	switch (op) {
2340 	case ELS_FLOGI:
2341 	case ELS_FDISC:
2342 		return fc_elsct_send(lport, did, fp, op, fcoe_flogi_resp,
2343 				     fip, timeout);
2344 	case ELS_LOGO:
2345 		/* only hook onto fabric logouts, not port logouts */
2346 		if (ntoh24(fh->fh_d_id) != FC_FID_FLOGI)
2347 			break;
2348 		return fc_elsct_send(lport, did, fp, op, fcoe_logo_resp,
2349 				     lport, timeout);
2350 	}
2351 	return fc_elsct_send(lport, did, fp, op, resp, arg, timeout);
2352 }
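/*
 * Note on the LOGO case above: FC_FID_FLOGI is the well-known fabric login
 * address (0xFFFFFE), so only logouts directed at the fabric go through
 * fcoe_logo_resp() and clear the FCoE source MAC; port-to-port logouts
 * fall through to the normal libfc handling.
 */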
2353 
2354 /**
2355  * fcoe_vport_create() - create an fc_host/scsi_host for a vport
2356  * @vport: fc_vport object to create a new fc_host for
2357  * @disabled: start the new fc_host in a disabled state by default?
2358  *
2359  * Returns: 0 for success
2360  */
2361 static int fcoe_vport_create(struct fc_vport *vport, bool disabled)
2362 {
2363 	struct Scsi_Host *shost = vport_to_shost(vport);
2364 	struct fc_lport *n_port = shost_priv(shost);
2365 	struct fcoe_port *port = lport_priv(n_port);
2366 	struct fcoe_interface *fcoe = port->fcoe;
2367 	struct net_device *netdev = fcoe->netdev;
2368 	struct fc_lport *vn_port;
2369 
2370 	mutex_lock(&fcoe_config_mutex);
2371 	vn_port = fcoe_if_create(fcoe, &vport->dev, 1);
2372 	mutex_unlock(&fcoe_config_mutex);
2373 
2374 	if (IS_ERR(vn_port)) {
2375 		printk(KERN_ERR "fcoe: fcoe_vport_create(%s) failed\n",
2376 		       netdev->name);
2377 		return -EIO;
2378 	}
2379 
2380 	if (disabled) {
2381 		fc_vport_set_state(vport, FC_VPORT_DISABLED);
2382 	} else {
2383 		vn_port->boot_time = jiffies;
2384 		fc_fabric_login(vn_port);
2385 		fc_vport_setlink(vn_port);
2386 	}
2387 	return 0;
2388 }
2389 
2390 /**
2391  * fcoe_vport_destroy() - destroy the fc_host/scsi_host for a vport
2392  * @vport: fc_vport object that is being destroyed
2393  *
2394  * Returns: 0 for success
2395  */
2396 static int fcoe_vport_destroy(struct fc_vport *vport)
2397 {
2398 	struct Scsi_Host *shost = vport_to_shost(vport);
2399 	struct fc_lport *n_port = shost_priv(shost);
2400 	struct fc_lport *vn_port = vport->dd_data;
2401 	struct fcoe_port *port = lport_priv(vn_port);
2402 
2403 	mutex_lock(&n_port->lp_mutex);
2404 	list_del(&vn_port->list);
2405 	mutex_unlock(&n_port->lp_mutex);
2406 	schedule_work(&port->destroy_work);
2407 	return 0;
2408 }
2409 
2410 /**
2411  * fcoe_vport_disable() - change vport state
2412  * @vport: vport to bring online/offline
2413  * @disable: should the vport be disabled?
2414  */
2415 static int fcoe_vport_disable(struct fc_vport *vport, bool disable)
2416 {
2417 	struct fc_lport *lport = vport->dd_data;
2418 
2419 	if (disable) {
2420 		fc_vport_set_state(vport, FC_VPORT_DISABLED);
2421 		fc_fabric_logoff(lport);
2422 	} else {
2423 		lport->boot_time = jiffies;
2424 		fc_fabric_login(lport);
2425 		fc_vport_setlink(lport);
2426 	}
2427 
2428 	return 0;
2429 }
2430 
2431 /**
2432  * fcoe_set_vport_symbolic_name() - append vport string to symbolic name
2433  * @vport: fc_vport with a new symbolic name string
2434  *
2435  * After generating a new symbolic name string, a new RSPN_ID request is
2436  * sent to the name server.  There is no response handler, so if it fails
2437  * for some reason it will not be retried.
2438  */
2439 static void fcoe_set_vport_symbolic_name(struct fc_vport *vport)
2440 {
2441 	struct fc_lport *lport = vport->dd_data;
2442 	struct fc_frame *fp;
2443 	size_t len;
2444 
2445 	snprintf(fc_host_symbolic_name(lport->host), FC_SYMBOLIC_NAME_SIZE,
2446 		 "%s v%s over %s : %s", FCOE_NAME, FCOE_VERSION,
2447 		 fcoe_netdev(lport)->name, vport->symbolic_name);
2448 
2449 	if (lport->state != LPORT_ST_READY)
2450 		return;
2451 
2452 	len = strnlen(fc_host_symbolic_name(lport->host), 255);
2453 	fp = fc_frame_alloc(lport,
2454 			    sizeof(struct fc_ct_hdr) +
2455 			    sizeof(struct fc_ns_rspn) + len);
2456 	if (!fp)
2457 		return;
2458 	lport->tt.elsct_send(lport, FC_FID_DIR_SERV, fp, FC_NS_RSPN_ID,
2459 			     NULL, NULL, 3 * lport->r_a_tov);
2460 }
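/*
 * Example of the resulting symbolic name (assuming FCOE_NAME and
 * FCOE_VERSION are defined in fcoe.h as, e.g., "fcoe" and "0.1"):
 *
 *	fcoe v0.1 over eth3 : <vport symbolic name>
 *
 * The RSPN_ID registration is only attempted once the lport is READY.
 */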
2461 
2462 /**
2463  * fcoe_get_lesb() - Fill the FCoE Link Error Status Block
2464  * @lport: the local port
2465  * @fc_lesb: the link error status block
2466  */
2467 static void fcoe_get_lesb(struct fc_lport *lport,
2468 			 struct fc_els_lesb *fc_lesb)
2469 {
2470 	unsigned int cpu;
2471 	u32 lfc, vlfc, mdac;
2472 	struct fcoe_dev_stats *devst;
2473 	struct fcoe_fc_els_lesb *lesb;
2474 	struct net_device *netdev = fcoe_netdev(lport);
2475 
2476 	lfc = 0;
2477 	vlfc = 0;
2478 	mdac = 0;
2479 	lesb = (struct fcoe_fc_els_lesb *)fc_lesb;
2480 	memset(lesb, 0, sizeof(*lesb));
2481 	for_each_possible_cpu(cpu) {
2482 		devst = per_cpu_ptr(lport->dev_stats, cpu);
2483 		lfc += devst->LinkFailureCount;
2484 		vlfc += devst->VLinkFailureCount;
2485 		mdac += devst->MissDiscAdvCount;
2486 	}
2487 	lesb->lesb_link_fail = htonl(lfc);
2488 	lesb->lesb_vlink_fail = htonl(vlfc);
2489 	lesb->lesb_miss_fka = htonl(mdac);
2490 	lesb->lesb_fcs_error = htonl(dev_get_stats(netdev)->rx_crc_errors);
2491 }
2492