xref: /linux/drivers/scsi/fcoe/fcoe.c (revision f2ee442115c9b6219083c019939a9cc0c9abb2f8)
1 /*
2  * Copyright(c) 2007 - 2009 Intel Corporation. All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or modify it
5  * under the terms and conditions of the GNU General Public License,
6  * version 2, as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope it will be useful, but WITHOUT
9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
11  * more details.
12  *
13  * You should have received a copy of the GNU General Public License along with
14  * this program; if not, write to the Free Software Foundation, Inc.,
15  * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16  *
17  * Maintained at www.Open-FCoE.org
18  */
19 
20 #include <linux/module.h>
21 #include <linux/spinlock.h>
22 #include <linux/netdevice.h>
23 #include <linux/etherdevice.h>
24 #include <linux/ethtool.h>
25 #include <linux/if_ether.h>
26 #include <linux/if_vlan.h>
27 #include <linux/crc32.h>
28 #include <linux/slab.h>
29 #include <linux/cpu.h>
30 #include <linux/fs.h>
31 #include <linux/sysfs.h>
32 #include <linux/ctype.h>
33 #include <linux/workqueue.h>
34 #include <scsi/scsi_tcq.h>
35 #include <scsi/scsicam.h>
36 #include <scsi/scsi_transport.h>
37 #include <scsi/scsi_transport_fc.h>
38 #include <net/rtnetlink.h>
39 
40 #include <scsi/fc/fc_encaps.h>
41 #include <scsi/fc/fc_fip.h>
42 
43 #include <scsi/libfc.h>
44 #include <scsi/fc_frame.h>
45 #include <scsi/libfcoe.h>
46 
47 #include "fcoe.h"
48 
49 MODULE_AUTHOR("Open-FCoE.org");
50 MODULE_DESCRIPTION("FCoE");
51 MODULE_LICENSE("GPL v2");
52 
53 /* Performance tuning parameters for fcoe */
54 static unsigned int fcoe_ddp_min = 4096;
55 module_param_named(ddp_min, fcoe_ddp_min, uint, S_IRUGO | S_IWUSR);
56 MODULE_PARM_DESC(ddp_min, "Minimum I/O size in bytes for "	\
57 		 "Direct Data Placement (DDP).");
58 
59 DEFINE_MUTEX(fcoe_config_mutex);
60 
61 static struct workqueue_struct *fcoe_wq;
62 
63 /* fcoe_percpu_clean completion.  Waiter protected by fcoe_config_mutex */
64 static DECLARE_COMPLETION(fcoe_flush_completion);
65 
66 /* fcoe host list */
67 /* must only be accessed under the RTNL mutex */
68 LIST_HEAD(fcoe_hostlist);
69 DEFINE_PER_CPU(struct fcoe_percpu_s, fcoe_percpu);
70 
71 /* Function Prototypes */
72 static int fcoe_reset(struct Scsi_Host *);
73 static int fcoe_xmit(struct fc_lport *, struct fc_frame *);
74 static int fcoe_rcv(struct sk_buff *, struct net_device *,
75 		    struct packet_type *, struct net_device *);
76 static int fcoe_percpu_receive_thread(void *);
77 static void fcoe_percpu_clean(struct fc_lport *);
78 static int fcoe_link_speed_update(struct fc_lport *);
79 static int fcoe_link_ok(struct fc_lport *);
80 
81 static struct fc_lport *fcoe_hostlist_lookup(const struct net_device *);
82 static int fcoe_hostlist_add(const struct fc_lport *);
83 
84 static int fcoe_device_notification(struct notifier_block *, ulong, void *);
85 static void fcoe_dev_setup(void);
86 static void fcoe_dev_cleanup(void);
87 static struct fcoe_interface
88 *fcoe_hostlist_lookup_port(const struct net_device *);
89 
90 static int fcoe_fip_recv(struct sk_buff *, struct net_device *,
91 			 struct packet_type *, struct net_device *);
92 
93 static void fcoe_fip_send(struct fcoe_ctlr *, struct sk_buff *);
94 static void fcoe_update_src_mac(struct fc_lport *, u8 *);
95 static u8 *fcoe_get_src_mac(struct fc_lport *);
96 static void fcoe_destroy_work(struct work_struct *);
97 
98 static int fcoe_ddp_setup(struct fc_lport *, u16, struct scatterlist *,
99 			  unsigned int);
100 static int fcoe_ddp_done(struct fc_lport *, u16);
101 static int fcoe_ddp_target(struct fc_lport *, u16, struct scatterlist *,
102 			   unsigned int);
103 static int fcoe_cpu_callback(struct notifier_block *, unsigned long, void *);
104 
105 static bool fcoe_match(struct net_device *netdev);
106 static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode);
107 static int fcoe_destroy(struct net_device *netdev);
108 static int fcoe_enable(struct net_device *netdev);
109 static int fcoe_disable(struct net_device *netdev);
110 
111 static struct fc_seq *fcoe_elsct_send(struct fc_lport *,
112 				      u32 did, struct fc_frame *,
113 				      unsigned int op,
114 				      void (*resp)(struct fc_seq *,
115 						   struct fc_frame *,
116 						   void *),
117 				      void *, u32 timeout);
118 static void fcoe_recv_frame(struct sk_buff *skb);
119 
120 static void fcoe_get_lesb(struct fc_lport *, struct fc_els_lesb *);
121 
122 /* notification function for packets from net device */
123 static struct notifier_block fcoe_notifier = {
124 	.notifier_call = fcoe_device_notification,
125 };
126 
127 /* notification function for CPU hotplug events */
128 static struct notifier_block fcoe_cpu_notifier = {
129 	.notifier_call = fcoe_cpu_callback,
130 };
131 
132 static struct scsi_transport_template *fcoe_nport_scsi_transport;
133 static struct scsi_transport_template *fcoe_vport_scsi_transport;
134 
135 static int fcoe_vport_destroy(struct fc_vport *);
136 static int fcoe_vport_create(struct fc_vport *, bool disabled);
137 static int fcoe_vport_disable(struct fc_vport *, bool disable);
138 static void fcoe_set_vport_symbolic_name(struct fc_vport *);
139 static void fcoe_set_port_id(struct fc_lport *, u32, struct fc_frame *);
140 
141 static struct libfc_function_template fcoe_libfc_fcn_templ = {
142 	.frame_send = fcoe_xmit,
143 	.ddp_setup = fcoe_ddp_setup,
144 	.ddp_done = fcoe_ddp_done,
145 	.ddp_target = fcoe_ddp_target,
146 	.elsct_send = fcoe_elsct_send,
147 	.get_lesb = fcoe_get_lesb,
148 	.lport_set_port_id = fcoe_set_port_id,
149 };
150 
151 struct fc_function_template fcoe_nport_fc_functions = {
152 	.show_host_node_name = 1,
153 	.show_host_port_name = 1,
154 	.show_host_supported_classes = 1,
155 	.show_host_supported_fc4s = 1,
156 	.show_host_active_fc4s = 1,
157 	.show_host_maxframe_size = 1,
158 
159 	.show_host_port_id = 1,
160 	.show_host_supported_speeds = 1,
161 	.get_host_speed = fc_get_host_speed,
162 	.show_host_speed = 1,
163 	.show_host_port_type = 1,
164 	.get_host_port_state = fc_get_host_port_state,
165 	.show_host_port_state = 1,
166 	.show_host_symbolic_name = 1,
167 
168 	.dd_fcrport_size = sizeof(struct fc_rport_libfc_priv),
169 	.show_rport_maxframe_size = 1,
170 	.show_rport_supported_classes = 1,
171 
172 	.show_host_fabric_name = 1,
173 	.show_starget_node_name = 1,
174 	.show_starget_port_name = 1,
175 	.show_starget_port_id = 1,
176 	.set_rport_dev_loss_tmo = fc_set_rport_loss_tmo,
177 	.show_rport_dev_loss_tmo = 1,
178 	.get_fc_host_stats = fc_get_host_stats,
179 	.issue_fc_host_lip = fcoe_reset,
180 
181 	.terminate_rport_io = fc_rport_terminate_io,
182 
183 	.vport_create = fcoe_vport_create,
184 	.vport_delete = fcoe_vport_destroy,
185 	.vport_disable = fcoe_vport_disable,
186 	.set_vport_symbolic_name = fcoe_set_vport_symbolic_name,
187 
188 	.bsg_request = fc_lport_bsg_request,
189 };
190 
191 struct fc_function_template fcoe_vport_fc_functions = {
192 	.show_host_node_name = 1,
193 	.show_host_port_name = 1,
194 	.show_host_supported_classes = 1,
195 	.show_host_supported_fc4s = 1,
196 	.show_host_active_fc4s = 1,
197 	.show_host_maxframe_size = 1,
198 
199 	.show_host_port_id = 1,
200 	.show_host_supported_speeds = 1,
201 	.get_host_speed = fc_get_host_speed,
202 	.show_host_speed = 1,
203 	.show_host_port_type = 1,
204 	.get_host_port_state = fc_get_host_port_state,
205 	.show_host_port_state = 1,
206 	.show_host_symbolic_name = 1,
207 
208 	.dd_fcrport_size = sizeof(struct fc_rport_libfc_priv),
209 	.show_rport_maxframe_size = 1,
210 	.show_rport_supported_classes = 1,
211 
212 	.show_host_fabric_name = 1,
213 	.show_starget_node_name = 1,
214 	.show_starget_port_name = 1,
215 	.show_starget_port_id = 1,
216 	.set_rport_dev_loss_tmo = fc_set_rport_loss_tmo,
217 	.show_rport_dev_loss_tmo = 1,
218 	.get_fc_host_stats = fc_get_host_stats,
219 	.issue_fc_host_lip = fcoe_reset,
220 
221 	.terminate_rport_io = fc_rport_terminate_io,
222 
223 	.bsg_request = fc_lport_bsg_request,
224 };
225 
226 static struct scsi_host_template fcoe_shost_template = {
227 	.module = THIS_MODULE,
228 	.name = "FCoE Driver",
229 	.proc_name = FCOE_NAME,
230 	.queuecommand = fc_queuecommand,
231 	.eh_abort_handler = fc_eh_abort,
232 	.eh_device_reset_handler = fc_eh_device_reset,
233 	.eh_host_reset_handler = fc_eh_host_reset,
234 	.slave_alloc = fc_slave_alloc,
235 	.change_queue_depth = fc_change_queue_depth,
236 	.change_queue_type = fc_change_queue_type,
237 	.this_id = -1,
238 	.cmd_per_lun = 3,
239 	.can_queue = FCOE_MAX_OUTSTANDING_COMMANDS,
240 	.use_clustering = ENABLE_CLUSTERING,
241 	.sg_tablesize = SG_ALL,
242 	.max_sectors = 0xffff,
243 };
244 
245 /**
246  * fcoe_interface_setup() - Setup a FCoE interface
247  * @fcoe:   The new FCoE interface
248  * @netdev: The net device that the fcoe interface is on
249  *
250  * Returns : 0 for success
251  * Locking: must be called with the RTNL mutex held
252  */
253 static int fcoe_interface_setup(struct fcoe_interface *fcoe,
254 				struct net_device *netdev)
255 {
256 	struct fcoe_ctlr *fip = &fcoe->ctlr;
257 	struct netdev_hw_addr *ha;
258 	struct net_device *real_dev;
259 	u8 flogi_maddr[ETH_ALEN];
260 	const struct net_device_ops *ops;
261 
262 	fcoe->netdev = netdev;
263 
264 	/* Let LLD initialize for FCoE */
265 	ops = netdev->netdev_ops;
266 	if (ops->ndo_fcoe_enable) {
267 		if (ops->ndo_fcoe_enable(netdev))
268 			FCOE_NETDEV_DBG(netdev, "Failed to enable FCoE"
269 					" specific feature for LLD.\n");
270 	}
271 
272 	/* Bonding devices are not supported */
273 	if (netdev->priv_flags & IFF_BONDING && netdev->flags & IFF_MASTER) {
274 		FCOE_NETDEV_DBG(netdev, "Bonded interfaces not supported\n");
275 		return -EOPNOTSUPP;
276 	}
277 
278 	/* Look for a SAN MAC address; if multiple SAN MACs exist, only
279 	 * use the first one for SPMA */
280 	real_dev = (netdev->priv_flags & IFF_802_1Q_VLAN) ?
281 		vlan_dev_real_dev(netdev) : netdev;
282 	fcoe->realdev = real_dev;
283 	rcu_read_lock();
284 	for_each_dev_addr(real_dev, ha) {
285 		if ((ha->type == NETDEV_HW_ADDR_T_SAN) &&
286 		    (is_valid_ether_addr(ha->addr))) {
287 			memcpy(fip->ctl_src_addr, ha->addr, ETH_ALEN);
288 			fip->spma = 1;
289 			break;
290 		}
291 	}
292 	rcu_read_unlock();
293 
294 	/* setup Source Mac Address */
295 	if (!fip->spma)
296 		memcpy(fip->ctl_src_addr, netdev->dev_addr, netdev->addr_len);
297 
298 	/*
299 	 * Add FCoE MAC address as second unicast MAC address
300 	 * or enter promiscuous mode if not capable of listening
301 	 * for multiple unicast MACs.
302 	 */
303 	memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN);
304 	dev_uc_add(netdev, flogi_maddr);
305 	if (fip->spma)
306 		dev_uc_add(netdev, fip->ctl_src_addr);
307 	if (fip->mode == FIP_MODE_VN2VN) {
308 		dev_mc_add(netdev, FIP_ALL_VN2VN_MACS);
309 		dev_mc_add(netdev, FIP_ALL_P2P_MACS);
310 	} else
311 		dev_mc_add(netdev, FIP_ALL_ENODE_MACS);
312 
313 	/*
314 	 * setup the receive function from ethernet driver
315 	 * on the ethertype for the given device
316 	 */
317 	fcoe->fcoe_packet_type.func = fcoe_rcv;
318 	fcoe->fcoe_packet_type.type = __constant_htons(ETH_P_FCOE);
319 	fcoe->fcoe_packet_type.dev = netdev;
320 	dev_add_pack(&fcoe->fcoe_packet_type);
321 
322 	fcoe->fip_packet_type.func = fcoe_fip_recv;
323 	fcoe->fip_packet_type.type = htons(ETH_P_FIP);
324 	fcoe->fip_packet_type.dev = netdev;
325 	dev_add_pack(&fcoe->fip_packet_type);
326 
327 	return 0;
328 }
329 
330 /**
331  * fcoe_interface_create() - Create a FCoE interface on a net device
332  * @netdev: The net device to create the FCoE interface on
333  * @fip_mode: The mode to use for FIP
334  *
335  * Returns: pointer to a struct fcoe_interface or NULL on error
336  */
337 static struct fcoe_interface *fcoe_interface_create(struct net_device *netdev,
338 						    enum fip_state fip_mode)
339 {
340 	struct fcoe_interface *fcoe;
341 	int err;
342 
343 	if (!try_module_get(THIS_MODULE)) {
344 		FCOE_NETDEV_DBG(netdev,
345 				"Could not get a reference to the module\n");
346 		fcoe = ERR_PTR(-EBUSY);
347 		goto out;
348 	}
349 
350 	fcoe = kzalloc(sizeof(*fcoe), GFP_KERNEL);
351 	if (!fcoe) {
352 		FCOE_NETDEV_DBG(netdev, "Could not allocate fcoe structure\n");
353 		fcoe = ERR_PTR(-ENOMEM);
354 		goto out_nomod;
355 	}
356 
357 	dev_hold(netdev);
358 	kref_init(&fcoe->kref);
359 
360 	/*
361 	 * Initialize FIP.
362 	 */
363 	fcoe_ctlr_init(&fcoe->ctlr, fip_mode);
364 	fcoe->ctlr.send = fcoe_fip_send;
365 	fcoe->ctlr.update_mac = fcoe_update_src_mac;
366 	fcoe->ctlr.get_src_addr = fcoe_get_src_mac;
367 
368 	err = fcoe_interface_setup(fcoe, netdev);
369 	if (err) {
370 		fcoe_ctlr_destroy(&fcoe->ctlr);
371 		kfree(fcoe);
372 		dev_put(netdev);
373 		fcoe = ERR_PTR(err);
374 		goto out_nomod;
375 	}
376 
377 	goto out;
378 
379 out_nomod:
380 	module_put(THIS_MODULE);
381 out:
382 	return fcoe;
383 }
384 
385 /**
386  * fcoe_interface_release() - fcoe_interface kref release function
387  * @kref: Embedded reference count in an fcoe_interface struct
388  */
389 static void fcoe_interface_release(struct kref *kref)
390 {
391 	struct fcoe_interface *fcoe;
392 	struct net_device *netdev;
393 
394 	fcoe = container_of(kref, struct fcoe_interface, kref);
395 	netdev = fcoe->netdev;
396 	/* tear-down the FCoE controller */
397 	fcoe_ctlr_destroy(&fcoe->ctlr);
398 	kfree(fcoe);
399 	dev_put(netdev);
400 	module_put(THIS_MODULE);
401 }
402 
403 /**
404  * fcoe_interface_get() - Get a reference to a FCoE interface
405  * @fcoe: The FCoE interface to be held
406  */
407 static inline void fcoe_interface_get(struct fcoe_interface *fcoe)
408 {
409 	kref_get(&fcoe->kref);
410 }
411 
412 /**
413  * fcoe_interface_put() - Put a reference to a FCoE interface
414  * @fcoe: The FCoE interface to be released
415  */
416 static inline void fcoe_interface_put(struct fcoe_interface *fcoe)
417 {
418 	kref_put(&fcoe->kref, fcoe_interface_release);
419 }
420 
421 /**
422  * fcoe_interface_cleanup() - Clean up a FCoE interface
423  * @fcoe: The FCoE interface to be cleaned up
424  *
425  * Caller must be holding the RTNL mutex
426  */
427 void fcoe_interface_cleanup(struct fcoe_interface *fcoe)
428 {
429 	struct net_device *netdev = fcoe->netdev;
430 	struct fcoe_ctlr *fip = &fcoe->ctlr;
431 	u8 flogi_maddr[ETH_ALEN];
432 	const struct net_device_ops *ops;
433 
434 	rtnl_lock();
435 
436 	/*
437 	 * Don't listen for Ethernet packets anymore.
438 	 * synchronize_net() ensures that the packet handlers are not running
439 	 * on another CPU. dev_remove_pack() would do that, but this calls the
440 	 * unsynchronized version __dev_remove_pack() to avoid multiple delays.
441 	 */
442 	__dev_remove_pack(&fcoe->fcoe_packet_type);
443 	__dev_remove_pack(&fcoe->fip_packet_type);
444 	synchronize_net();
445 
446 	/* Delete secondary MAC addresses */
447 	memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN);
448 	dev_uc_del(netdev, flogi_maddr);
449 	if (fip->spma)
450 		dev_uc_del(netdev, fip->ctl_src_addr);
451 	if (fip->mode == FIP_MODE_VN2VN) {
452 		dev_mc_del(netdev, FIP_ALL_VN2VN_MACS);
453 		dev_mc_del(netdev, FIP_ALL_P2P_MACS);
454 	} else
455 		dev_mc_del(netdev, FIP_ALL_ENODE_MACS);
456 
457 	/* Tell the LLD we are done w/ FCoE */
458 	ops = netdev->netdev_ops;
459 	if (ops->ndo_fcoe_disable) {
460 		if (ops->ndo_fcoe_disable(netdev))
461 			FCOE_NETDEV_DBG(netdev, "Failed to disable FCoE"
462 					" specific feature for LLD.\n");
463 	}
464 
465 	rtnl_unlock();
466 
467 	/* Release the self-reference taken during fcoe_interface_create() */
468 	fcoe_interface_put(fcoe);
469 }
470 
471 /**
472  * fcoe_fip_recv() - Handler for received FIP frames
473  * @skb:      The receive skb
474  * @netdev:   The associated net device
475  * @ptype:    The packet_type structure which was used to register this handler
476  * @orig_dev: The original net_device that the skb was received on.
477  *	      (in case dev is a bond)
478  *
479  * Returns: 0 for success
480  */
481 static int fcoe_fip_recv(struct sk_buff *skb, struct net_device *netdev,
482 			 struct packet_type *ptype,
483 			 struct net_device *orig_dev)
484 {
485 	struct fcoe_interface *fcoe;
486 
487 	fcoe = container_of(ptype, struct fcoe_interface, fip_packet_type);
488 	fcoe_ctlr_recv(&fcoe->ctlr, skb);
489 	return 0;
490 }
491 
492 /**
493  * fcoe_port_send() - Send an Ethernet-encapsulated FIP/FCoE frame
494  * @port: The FCoE port
495  * @skb: The FIP/FCoE packet to be sent
496  */
497 static void fcoe_port_send(struct fcoe_port *port, struct sk_buff *skb)
498 {
499 	if (port->fcoe_pending_queue.qlen)
500 		fcoe_check_wait_queue(port->lport, skb);
501 	else if (fcoe_start_io(skb))
502 		fcoe_check_wait_queue(port->lport, skb);
503 }
504 
505 /**
506  * fcoe_fip_send() - Send an Ethernet-encapsulated FIP frame
507  * @fip: The FCoE controller
508  * @skb: The FIP packet to be sent
509  */
510 static void fcoe_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
511 {
512 	skb->dev = fcoe_from_ctlr(fip)->netdev;
513 	fcoe_port_send(lport_priv(fip->lp), skb);
514 }
515 
516 /**
517  * fcoe_update_src_mac() - Update the Ethernet MAC filters
518  * @lport: The local port to update the source MAC on
519  * @addr:  Unicast MAC address to add
520  *
521  * Remove any previously-set unicast MAC filter.
522  * Add secondary FCoE MAC address filter for our OUI.
523  */
524 static void fcoe_update_src_mac(struct fc_lport *lport, u8 *addr)
525 {
526 	struct fcoe_port *port = lport_priv(lport);
527 	struct fcoe_interface *fcoe = port->priv;
528 
529 	rtnl_lock();
530 	if (!is_zero_ether_addr(port->data_src_addr))
531 		dev_uc_del(fcoe->netdev, port->data_src_addr);
532 	if (!is_zero_ether_addr(addr))
533 		dev_uc_add(fcoe->netdev, addr);
534 	memcpy(port->data_src_addr, addr, ETH_ALEN);
535 	rtnl_unlock();
536 }
537 
538 /**
539  * fcoe_get_src_mac() - return the Ethernet source address for an lport
540  * @lport: libfc lport
541  */
542 static u8 *fcoe_get_src_mac(struct fc_lport *lport)
543 {
544 	struct fcoe_port *port = lport_priv(lport);
545 
546 	return port->data_src_addr;
547 }
548 
549 /**
550  * fcoe_lport_config() - Set up a local port
551  * @lport: The local port to be setup
552  *
553  * Returns: 0 for success
554  */
555 static int fcoe_lport_config(struct fc_lport *lport)
556 {
557 	lport->link_up = 0;
558 	lport->qfull = 0;
559 	lport->max_retry_count = 3;
560 	lport->max_rport_retry_count = 3;
561 	lport->e_d_tov = 2 * 1000;	/* FC-FS default */
562 	lport->r_a_tov = 2 * 2 * 1000;
563 	lport->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
564 				 FCP_SPPF_RETRY | FCP_SPPF_CONF_COMPL);
565 	lport->does_npiv = 1;
566 
567 	fc_lport_init_stats(lport);
568 
569 	/* lport fc_lport related configuration */
570 	fc_lport_config(lport);
571 
572 	/* offload related configuration */
573 	lport->crc_offload = 0;
574 	lport->seq_offload = 0;
575 	lport->lro_enabled = 0;
576 	lport->lro_xid = 0;
577 	lport->lso_max = 0;
578 
579 	return 0;
580 }
581 
582 /**
583  * fcoe_netdev_features_change() - Updates the lport's offload flags based
584  * on the LLD netdev's FCoE feature flags
585  */
586 static void fcoe_netdev_features_change(struct fc_lport *lport,
587 					struct net_device *netdev)
588 {
589 	mutex_lock(&lport->lp_mutex);
590 
591 	if (netdev->features & NETIF_F_SG)
592 		lport->sg_supp = 1;
593 	else
594 		lport->sg_supp = 0;
595 
596 	if (netdev->features & NETIF_F_FCOE_CRC) {
597 		lport->crc_offload = 1;
598 		FCOE_NETDEV_DBG(netdev, "Supports FCCRC offload\n");
599 	} else {
600 		lport->crc_offload = 0;
601 	}
602 
603 	if (netdev->features & NETIF_F_FSO) {
604 		lport->seq_offload = 1;
605 		lport->lso_max = netdev->gso_max_size;
606 		FCOE_NETDEV_DBG(netdev, "Supports LSO for max len 0x%x\n",
607 				lport->lso_max);
608 	} else {
609 		lport->seq_offload = 0;
610 		lport->lso_max = 0;
611 	}
612 
613 	if (netdev->fcoe_ddp_xid) {
614 		lport->lro_enabled = 1;
615 		lport->lro_xid = netdev->fcoe_ddp_xid;
616 		FCOE_NETDEV_DBG(netdev, "Supports LRO for max xid 0x%x\n",
617 				lport->lro_xid);
618 	} else {
619 		lport->lro_enabled = 0;
620 		lport->lro_xid = 0;
621 	}
622 
623 	mutex_unlock(&lport->lp_mutex);
624 }
625 
626 /**
627  * fcoe_netdev_config() - Set up a net device for SW FCoE
628  * @lport:  The local port that is associated with the net device
629  * @netdev: The associated net device
630  *
631  * Must be called after fcoe_lport_config() as it will use local port mutex
632  *
633  * Returns: 0 for success
634  */
635 static int fcoe_netdev_config(struct fc_lport *lport, struct net_device *netdev)
636 {
637 	u32 mfs;
638 	u64 wwnn, wwpn;
639 	struct fcoe_interface *fcoe;
640 	struct fcoe_port *port;
641 
642 	/* Setup lport private data to point to fcoe softc */
643 	port = lport_priv(lport);
644 	fcoe = port->priv;
645 
646 	/*
647 	 * Determine max frame size based on underlying device and optional
648 	 * user-configured limit.  If the MFS is too low, fcoe_link_ok()
649 	 * will return 0, so do this first.
650 	 */
651 	mfs = netdev->mtu;
652 	if (netdev->features & NETIF_F_FCOE_MTU) {
653 		mfs = FCOE_MTU;
654 		FCOE_NETDEV_DBG(netdev, "Supports FCOE_MTU of %d bytes\n", mfs);
655 	}
656 	mfs -= (sizeof(struct fcoe_hdr) + sizeof(struct fcoe_crc_eof));
657 	if (fc_set_mfs(lport, mfs))
658 		return -EINVAL;
659 
660 	/* offload features support */
661 	fcoe_netdev_features_change(lport, netdev);
662 
663 	skb_queue_head_init(&port->fcoe_pending_queue);
664 	port->fcoe_pending_queue_active = 0;
665 	setup_timer(&port->timer, fcoe_queue_timer, (unsigned long)lport);
666 
667 	fcoe_link_speed_update(lport);
668 
669 	if (!lport->vport) {
670 		if (fcoe_get_wwn(netdev, &wwnn, NETDEV_FCOE_WWNN))
671 			wwnn = fcoe_wwn_from_mac(fcoe->ctlr.ctl_src_addr, 1, 0);
672 		fc_set_wwnn(lport, wwnn);
673 		if (fcoe_get_wwn(netdev, &wwpn, NETDEV_FCOE_WWPN))
674 			wwpn = fcoe_wwn_from_mac(fcoe->ctlr.ctl_src_addr,
675 						 2, 0);
676 		fc_set_wwpn(lport, wwpn);
677 	}
678 
679 	return 0;
680 }
681 
682 /**
683  * fcoe_shost_config() - Set up the SCSI host associated with a local port
684  * @lport: The local port
685  * @dev:   The device associated with the SCSI host
686  *
687  * Must be called after fcoe_lport_config() and fcoe_netdev_config()
688  *
689  * Returns: 0 for success
690  */
691 static int fcoe_shost_config(struct fc_lport *lport, struct device *dev)
692 {
693 	int rc = 0;
694 
695 	/* lport scsi host config */
696 	lport->host->max_lun = FCOE_MAX_LUN;
697 	lport->host->max_id = FCOE_MAX_FCP_TARGET;
698 	lport->host->max_channel = 0;
699 	lport->host->max_cmd_len = FCOE_MAX_CMD_LEN;
700 
701 	if (lport->vport)
702 		lport->host->transportt = fcoe_vport_scsi_transport;
703 	else
704 		lport->host->transportt = fcoe_nport_scsi_transport;
705 
706 	/* add the new host to the SCSI-ml */
707 	rc = scsi_add_host(lport->host, dev);
708 	if (rc) {
709 		FCOE_NETDEV_DBG(fcoe_netdev(lport), "fcoe_shost_config: "
710 				"error on scsi_add_host\n");
711 		return rc;
712 	}
713 
714 	if (!lport->vport)
715 		fc_host_max_npiv_vports(lport->host) = USHRT_MAX;
716 
717 	snprintf(fc_host_symbolic_name(lport->host), FC_SYMBOLIC_NAME_SIZE,
718 		 "%s v%s over %s", FCOE_NAME, FCOE_VERSION,
719 		 fcoe_netdev(lport)->name);
720 
721 	return 0;
722 }
723 
724 /**
725  * fcoe_oem_match() - The match routine for the offloaded exchange manager
726  * @fp: The I/O frame
727  *
728  * This routine will be associated with an exchange manager (EM). When
729  * the libfc exchange handling code is looking for an EM to use it will
730  * call this routine and pass it the frame that it wishes to send. This
731  * routine will return True if the associated EM is to be used and False
732  * if the exchange code should continue looking for an EM.
733  *
734  * The offload EM that this routine is associated with will handle any
735  * packets that are for SCSI read requests.
736  *
737  * This has been enhanced to work when FCoE stack is operating in target
738  * mode.
739  *
740  * Returns: True for read-type I/O, otherwise returns false.
741  */
742 bool fcoe_oem_match(struct fc_frame *fp)
743 {
744 	struct fc_frame_header *fh = fc_frame_header_get(fp);
745 	struct fcp_cmnd *fcp;
746 
747 	if (fc_fcp_is_read(fr_fsp(fp)) &&
748 	    (fr_fsp(fp)->data_len > fcoe_ddp_min))
749 		return true;
750 	else if (!(ntoh24(fh->fh_f_ctl) & FC_FC_EX_CTX)) {
751 		fcp = fc_frame_payload_get(fp, sizeof(*fcp));
752 		if (ntohs(fh->fh_rx_id) == FC_XID_UNKNOWN &&
753 		    fcp && (ntohl(fcp->fc_dl) > fcoe_ddp_min) &&
754 		    (fcp->fc_flags & FCP_CFL_WRDATA))
755 			return true;
756 	}
757 	return false;
758 }
759 
760 /**
761  * fcoe_em_config() - Allocate and configure an exchange manager
762  * @lport: The local port that the new EM will be associated with
763  *
764  * Returns: 0 on success
765  */
766 static inline int fcoe_em_config(struct fc_lport *lport)
767 {
768 	struct fcoe_port *port = lport_priv(lport);
769 	struct fcoe_interface *fcoe = port->priv;
770 	struct fcoe_interface *oldfcoe = NULL;
771 	struct net_device *old_real_dev, *cur_real_dev;
772 	u16 min_xid = FCOE_MIN_XID;
773 	u16 max_xid = FCOE_MAX_XID;
774 
775 	/*
776 	 * Check if we need to allocate an em instance for
777 	 * offload exchange ids to be shared across all VN_PORTs/lport.
778 	 */
779 	if (!lport->lro_enabled || !lport->lro_xid ||
780 	    (lport->lro_xid >= max_xid)) {
781 		lport->lro_xid = 0;
782 		goto skip_oem;
783 	}
784 
785 	/*
786 	 * Reuse existing offload em instance in case
787 	 * it is already allocated on real eth device
788 	 */
789 	if (fcoe->netdev->priv_flags & IFF_802_1Q_VLAN)
790 		cur_real_dev = vlan_dev_real_dev(fcoe->netdev);
791 	else
792 		cur_real_dev = fcoe->netdev;
793 
794 	list_for_each_entry(oldfcoe, &fcoe_hostlist, list) {
795 		if (oldfcoe->netdev->priv_flags & IFF_802_1Q_VLAN)
796 			old_real_dev = vlan_dev_real_dev(oldfcoe->netdev);
797 		else
798 			old_real_dev = oldfcoe->netdev;
799 
800 		if (cur_real_dev == old_real_dev) {
801 			fcoe->oem = oldfcoe->oem;
802 			break;
803 		}
804 	}
805 
806 	if (fcoe->oem) {
807 		if (!fc_exch_mgr_add(lport, fcoe->oem, fcoe_oem_match)) {
808 			printk(KERN_ERR "fcoe_em_config: failed to add "
809 			       "offload em:%p on interface:%s\n",
810 			       fcoe->oem, fcoe->netdev->name);
811 			return -ENOMEM;
812 		}
813 	} else {
814 		fcoe->oem = fc_exch_mgr_alloc(lport, FC_CLASS_3,
815 					      FCOE_MIN_XID, lport->lro_xid,
816 					      fcoe_oem_match);
817 		if (!fcoe->oem) {
818 			printk(KERN_ERR "fcoe_em_config: failed to allocate "
819 			       "em for offload exches on interface:%s\n",
820 			       fcoe->netdev->name);
821 			return -ENOMEM;
822 		}
823 	}
824 
825 	/*
826 	 * Exclude offload EM xid range from next EM xid range.
827 	 */
828 	min_xid += lport->lro_xid + 1;
829 
830 skip_oem:
831 	if (!fc_exch_mgr_alloc(lport, FC_CLASS_3, min_xid, max_xid, NULL)) {
832 		printk(KERN_ERR "fcoe_em_config: failed to "
833 		       "allocate em on interface %s\n", fcoe->netdev->name);
834 		return -ENOMEM;
835 	}
836 
837 	return 0;
838 }
839 
840 /**
841  * fcoe_if_destroy() - Tear down a SW FCoE instance
842  * @lport: The local port to be destroyed
843  *
844  */
845 static void fcoe_if_destroy(struct fc_lport *lport)
846 {
847 	struct fcoe_port *port = lport_priv(lport);
848 	struct fcoe_interface *fcoe = port->priv;
849 	struct net_device *netdev = fcoe->netdev;
850 
851 	FCOE_NETDEV_DBG(netdev, "Destroying interface\n");
852 
853 	/* Logout of the fabric */
854 	fc_fabric_logoff(lport);
855 
856 	/* Cleanup the fc_lport */
857 	fc_lport_destroy(lport);
858 
859 	/* Stop the transmit retry timer */
860 	del_timer_sync(&port->timer);
861 
862 	/* Free existing transmit skbs */
863 	fcoe_clean_pending_queue(lport);
864 
865 	rtnl_lock();
866 	if (!is_zero_ether_addr(port->data_src_addr))
867 		dev_uc_del(netdev, port->data_src_addr);
868 	rtnl_unlock();
869 
870 	/* Release reference held in fcoe_if_create() */
871 	fcoe_interface_put(fcoe);
872 
873 	/* Free queued packets for the per-CPU receive threads */
874 	fcoe_percpu_clean(lport);
875 
876 	/* Detach from the scsi-ml */
877 	fc_remove_host(lport->host);
878 	scsi_remove_host(lport->host);
879 
880 	/* Destroy lport scsi_priv */
881 	fc_fcp_destroy(lport);
882 
883 	/* There are no more rports or I/O, free the EM */
884 	fc_exch_mgr_free(lport);
885 
886 	/* Free memory used by statistical counters */
887 	fc_lport_free_stats(lport);
888 
889 	/* Release the Scsi_Host */
890 	scsi_host_put(lport->host);
891 }
892 
893 /**
894  * fcoe_ddp_setup() - Call a LLD's ddp_setup through the net device
895  * @lport: The local port to setup DDP for
896  * @xid:   The exchange ID for this DDP transfer
897  * @sgl:   The scatterlist describing this transfer
898  * @sgc:   The number of sg items
899  *
900  * Returns: 0 if the DDP context was not configured
901  */
902 static int fcoe_ddp_setup(struct fc_lport *lport, u16 xid,
903 			  struct scatterlist *sgl, unsigned int sgc)
904 {
905 	struct net_device *netdev = fcoe_netdev(lport);
906 
907 	if (netdev->netdev_ops->ndo_fcoe_ddp_setup)
908 		return netdev->netdev_ops->ndo_fcoe_ddp_setup(netdev,
909 							      xid, sgl,
910 							      sgc);
911 
912 	return 0;
913 }
914 
915 /**
916  * fcoe_ddp_target() - Call a LLD's ddp_target through the net device
917  * @lport: The local port to setup DDP for
918  * @xid:   The exchange ID for this DDP transfer
919  * @sgl:   The scatterlist describing this transfer
920  * @sgc:   The number of sg items
921  *
922  * Returns: 0 if the DDP context was not configured
923  */
924 static int fcoe_ddp_target(struct fc_lport *lport, u16 xid,
925 			   struct scatterlist *sgl, unsigned int sgc)
926 {
927 	struct net_device *netdev = fcoe_netdev(lport);
928 
929 	if (netdev->netdev_ops->ndo_fcoe_ddp_target)
930 		return netdev->netdev_ops->ndo_fcoe_ddp_target(netdev, xid,
931 							       sgl, sgc);
932 
933 	return 0;
934 }
935 
936 
937 /**
938  * fcoe_ddp_done() - Call a LLD's ddp_done through the net device
939  * @lport: The local port to complete DDP on
940  * @xid:   The exchange ID for this DDP transfer
941  *
942  * Returns: the length of data that has been completed by DDP
943  */
944 static int fcoe_ddp_done(struct fc_lport *lport, u16 xid)
945 {
946 	struct net_device *netdev = fcoe_netdev(lport);
947 
948 	if (netdev->netdev_ops->ndo_fcoe_ddp_done)
949 		return netdev->netdev_ops->ndo_fcoe_ddp_done(netdev, xid);
950 	return 0;
951 }
952 
953 /**
954  * fcoe_if_create() - Create a FCoE instance on an interface
955  * @fcoe:   The FCoE interface to create a local port on
956  * @parent: The device pointer to be the parent in sysfs for the SCSI host
957  * @npiv:   Indicates if the port is a vport or not
958  *
959  * Creates a fc_lport instance and a Scsi_Host instance and configures them.
960  *
961  * Returns: The allocated fc_lport or an error pointer
962  */
963 static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe,
964 				       struct device *parent, int npiv)
965 {
966 	struct net_device *netdev = fcoe->netdev;
967 	struct fc_lport *lport, *n_port;
968 	struct fcoe_port *port;
969 	struct Scsi_Host *shost;
970 	int rc;
971 	/*
972 	 * parent is only a vport if npiv is 1,
973 	 * but we'll only use vport in that case so go ahead and set it
974 	 */
975 	struct fc_vport *vport = dev_to_vport(parent);
976 
977 	FCOE_NETDEV_DBG(netdev, "Create Interface\n");
978 
979 	if (!npiv)
980 		lport = libfc_host_alloc(&fcoe_shost_template, sizeof(*port));
981 	else
982 		lport = libfc_vport_create(vport, sizeof(*port));
983 
984 	if (!lport) {
985 		FCOE_NETDEV_DBG(netdev, "Could not allocate host structure\n");
986 		rc = -ENOMEM;
987 		goto out;
988 	}
989 	port = lport_priv(lport);
990 	port->lport = lport;
991 	port->priv = fcoe;
992 	port->max_queue_depth = FCOE_MAX_QUEUE_DEPTH;
993 	port->min_queue_depth = FCOE_MIN_QUEUE_DEPTH;
994 	INIT_WORK(&port->destroy_work, fcoe_destroy_work);
995 
996 	/* configure a fc_lport including the exchange manager */
997 	rc = fcoe_lport_config(lport);
998 	if (rc) {
999 		FCOE_NETDEV_DBG(netdev, "Could not configure lport for the "
1000 				"interface\n");
1001 		goto out_host_put;
1002 	}
1003 
1004 	if (npiv) {
1005 		FCOE_NETDEV_DBG(netdev, "Setting vport names, "
1006 				"%16.16llx %16.16llx\n",
1007 				vport->node_name, vport->port_name);
1008 		fc_set_wwnn(lport, vport->node_name);
1009 		fc_set_wwpn(lport, vport->port_name);
1010 	}
1011 
1012 	/* configure lport network properties */
1013 	rc = fcoe_netdev_config(lport, netdev);
1014 	if (rc) {
1015 		FCOE_NETDEV_DBG(netdev, "Could not configure netdev for the "
1016 				"interface\n");
1017 		goto out_lp_destroy;
1018 	}
1019 
1020 	/* configure lport scsi host properties */
1021 	rc = fcoe_shost_config(lport, parent);
1022 	if (rc) {
1023 		FCOE_NETDEV_DBG(netdev, "Could not configure shost for the "
1024 				"interface\n");
1025 		goto out_lp_destroy;
1026 	}
1027 
1028 	/* Initialize the library */
1029 	rc = fcoe_libfc_config(lport, &fcoe->ctlr, &fcoe_libfc_fcn_templ, 1);
1030 	if (rc) {
1031 		FCOE_NETDEV_DBG(netdev, "Could not configure libfc for the "
1032 				"interface\n");
1033 		goto out_lp_destroy;
1034 	}
1035 
1036 	/*
1037 	 * fcoe_em_config() and fcoe_hostlist_add() both
1038 	 * need to be atomic with respect to other changes to the
1039 	 * hostlist since fcoe_em_config() looks for an existing EM
1040 	 * instance on the host list updated by fcoe_hostlist_add().
1041 	 *
1042 	 * This is currently handled through the fcoe_config_mutex
1043 	 * being held.
1044 	 */
1045 	if (!npiv)
1046 		/* lport exch manager allocation */
1047 		rc = fcoe_em_config(lport);
1048 	else {
1049 		shost = vport_to_shost(vport);
1050 		n_port = shost_priv(shost);
1051 		rc = fc_exch_mgr_list_clone(n_port, lport);
1052 	}
1053 
1054 	if (rc) {
1055 		FCOE_NETDEV_DBG(netdev, "Could not configure the EM\n");
1056 		goto out_lp_destroy;
1057 	}
1058 
1059 	fcoe_interface_get(fcoe);
1060 	return lport;
1061 
1062 out_lp_destroy:
1063 	fc_exch_mgr_free(lport);
1064 out_host_put:
1065 	scsi_host_put(lport->host);
1066 out:
1067 	return ERR_PTR(rc);
1068 }
1069 
1070 /**
1071  * fcoe_if_init() - Initialization routine for fcoe.ko
1072  *
1073  * Attaches the SW FCoE transport to the FC transport
1074  *
1075  * Returns: 0 on success
1076  */
1077 static int __init fcoe_if_init(void)
1078 {
1079 	/* attach to scsi transport */
1080 	fcoe_nport_scsi_transport =
1081 		fc_attach_transport(&fcoe_nport_fc_functions);
1082 	fcoe_vport_scsi_transport =
1083 		fc_attach_transport(&fcoe_vport_fc_functions);
1084 
1085 	if (!fcoe_nport_scsi_transport) {
1086 		printk(KERN_ERR "fcoe: Failed to attach to the FC transport\n");
1087 		return -ENODEV;
1088 	}
1089 
1090 	return 0;
1091 }
1092 
1093 /**
1094  * fcoe_if_exit() - Tear down fcoe.ko
1095  *
1096  * Detaches the SW FCoE transport from the FC transport
1097  *
1098  * Returns: 0 on success
1099  */
1100 int __exit fcoe_if_exit(void)
1101 {
1102 	fc_release_transport(fcoe_nport_scsi_transport);
1103 	fc_release_transport(fcoe_vport_scsi_transport);
1104 	fcoe_nport_scsi_transport = NULL;
1105 	fcoe_vport_scsi_transport = NULL;
1106 	return 0;
1107 }
1108 
1109 /**
1110  * fcoe_percpu_thread_create() - Create a receive thread for an online CPU
1111  * @cpu: The CPU index of the CPU to create a receive thread for
1112  */
1113 static void fcoe_percpu_thread_create(unsigned int cpu)
1114 {
1115 	struct fcoe_percpu_s *p;
1116 	struct task_struct *thread;
1117 
1118 	p = &per_cpu(fcoe_percpu, cpu);
1119 
1120 	thread = kthread_create_on_node(fcoe_percpu_receive_thread,
1121 					(void *)p, cpu_to_node(cpu),
1122 					"fcoethread/%d", cpu);
1123 
1124 	if (likely(!IS_ERR(thread))) {
1125 		kthread_bind(thread, cpu);
1126 		wake_up_process(thread);
1127 
1128 		spin_lock_bh(&p->fcoe_rx_list.lock);
1129 		p->thread = thread;
1130 		spin_unlock_bh(&p->fcoe_rx_list.lock);
1131 	}
1132 }
1133 
1134 /**
1135  * fcoe_percpu_thread_destroy() - Remove the receive thread of a CPU
1136  * @cpu: The CPU index of the CPU whose receive thread is to be destroyed
1137  *
1138  * Destroys a per-CPU Rx thread. Any pending skbs are moved to the
1139  * current CPU's Rx thread. If the thread being destroyed is bound to
1140  * the CPU processing this context, the skbs will be freed.
1141  */
1142 static void fcoe_percpu_thread_destroy(unsigned int cpu)
1143 {
1144 	struct fcoe_percpu_s *p;
1145 	struct task_struct *thread;
1146 	struct page *crc_eof;
1147 	struct sk_buff *skb;
1148 #ifdef CONFIG_SMP
1149 	struct fcoe_percpu_s *p0;
1150 	unsigned targ_cpu = get_cpu();
1151 #endif /* CONFIG_SMP */
1152 
1153 	FCOE_DBG("Destroying receive thread for CPU %d\n", cpu);
1154 
1155 	/* Prevent any new skbs from being queued for this CPU. */
1156 	p = &per_cpu(fcoe_percpu, cpu);
1157 	spin_lock_bh(&p->fcoe_rx_list.lock);
1158 	thread = p->thread;
1159 	p->thread = NULL;
1160 	crc_eof = p->crc_eof_page;
1161 	p->crc_eof_page = NULL;
1162 	p->crc_eof_offset = 0;
1163 	spin_unlock_bh(&p->fcoe_rx_list.lock);
1164 
1165 #ifdef CONFIG_SMP
1166 	/*
1167 	 * Don't bother moving the skbs if this context is running
1168 	 * on the same CPU that is having its thread destroyed. This
1169 	 * can easily happen when the module is removed.
1170 	 */
1171 	if (cpu != targ_cpu) {
1172 		p0 = &per_cpu(fcoe_percpu, targ_cpu);
1173 		spin_lock_bh(&p0->fcoe_rx_list.lock);
1174 		if (p0->thread) {
1175 			FCOE_DBG("Moving frames from CPU %d to CPU %d\n",
1176 				 cpu, targ_cpu);
1177 
1178 			while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL)
1179 				__skb_queue_tail(&p0->fcoe_rx_list, skb);
1180 			spin_unlock_bh(&p0->fcoe_rx_list.lock);
1181 		} else {
1182 			/*
1183 			 * The targeted CPU is not initialized and cannot accept
1184 			 * new	skbs. Unlock the targeted CPU and drop the skbs
1185 			 * on the CPU that is going offline.
1186 			 */
1187 			while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL)
1188 				kfree_skb(skb);
1189 			spin_unlock_bh(&p0->fcoe_rx_list.lock);
1190 		}
1191 	} else {
1192 		/*
1193 		 * This scenario occurs when the module is being removed
1194 		 * and all threads are being destroyed. skbs will continue
1195 		 * to be shifted from the CPU thread that is being removed
1196 		 * to the CPU thread associated with the CPU that is processing
1197 		 * the module removal. Once there is only one CPU Rx thread it
1198 		 * will reach this case and we will drop all skbs and later
1199 		 * stop the thread.
1200 		 */
1201 		spin_lock_bh(&p->fcoe_rx_list.lock);
1202 		while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL)
1203 			kfree_skb(skb);
1204 		spin_unlock_bh(&p->fcoe_rx_list.lock);
1205 	}
1206 	put_cpu();
1207 #else
1208 	/*
1209 	 * This is a non-SMP scenario where the singular Rx thread is
1210 	 * being removed. Free all skbs and stop the thread.
1211 	 */
1212 	spin_lock_bh(&p->fcoe_rx_list.lock);
1213 	while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL)
1214 		kfree_skb(skb);
1215 	spin_unlock_bh(&p->fcoe_rx_list.lock);
1216 #endif
1217 
1218 	if (thread)
1219 		kthread_stop(thread);
1220 
1221 	if (crc_eof)
1222 		put_page(crc_eof);
1223 }
1224 
1225 /**
1226  * fcoe_cpu_callback() - Handler for CPU hotplug events
1227  * @nfb:    The callback data block
1228  * @action: The event triggering the callback
1229  * @hcpu:   The index of the CPU that the event is for
1230  *
1231  * This creates or destroys per-CPU data for fcoe
1232  *
1233  * Returns NOTIFY_OK always.
1234  */
1235 static int fcoe_cpu_callback(struct notifier_block *nfb,
1236 			     unsigned long action, void *hcpu)
1237 {
1238 	unsigned cpu = (unsigned long)hcpu;
1239 
1240 	switch (action) {
1241 	case CPU_ONLINE:
1242 	case CPU_ONLINE_FROZEN:
1243 		FCOE_DBG("CPU %x online: Create Rx thread\n", cpu);
1244 		fcoe_percpu_thread_create(cpu);
1245 		break;
1246 	case CPU_DEAD:
1247 	case CPU_DEAD_FROZEN:
1248 		FCOE_DBG("CPU %x offline: Remove Rx thread\n", cpu);
1249 		fcoe_percpu_thread_destroy(cpu);
1250 		break;
1251 	default:
1252 		break;
1253 	}
1254 	return NOTIFY_OK;
1255 }
1256 
1257 /**
1258  * fcoe_select_cpu() - Selects CPU to handle post-processing of incoming
1259  *			command.
1260  *
1261  * This routine selects the next CPU based on the online CPU mask to
1262  * distribute incoming requests in round-robin fashion.
1263  *
1264  * Returns: int CPU number
1265  */
1266 static inline unsigned int fcoe_select_cpu(void)
1267 {
1268 	static unsigned int selected_cpu;
1269 
1270 	selected_cpu = cpumask_next(selected_cpu, cpu_online_mask);
1271 	if (selected_cpu >= nr_cpu_ids)
1272 		selected_cpu = cpumask_first(cpu_online_mask);
1273 
1274 	return selected_cpu;
1275 }
1276 
1277 /**
1278  * fcoe_rcv() - Receive packets from a net device
1279  * @skb:    The received packet
1280  * @netdev: The net device that the packet was received on
1281  * @ptype:  The packet type context
1282  * @olddev: The original net device that the packet was received on
1283  *
1284  * This routine is called by NET_RX_SOFTIRQ. It receives a packet, builds a
1285  * FC frame and passes the frame to libfc.
1286  *
1287  * Returns: 0 for success
1288  */
1289 int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
1290 	     struct packet_type *ptype, struct net_device *olddev)
1291 {
1292 	struct fc_lport *lport;
1293 	struct fcoe_rcv_info *fr;
1294 	struct fcoe_interface *fcoe;
1295 	struct fc_frame_header *fh;
1296 	struct fcoe_percpu_s *fps;
1297 	struct ethhdr *eh;
1298 	unsigned int cpu;
1299 
1300 	fcoe = container_of(ptype, struct fcoe_interface, fcoe_packet_type);
1301 	lport = fcoe->ctlr.lp;
1302 	if (unlikely(!lport)) {
1303 		FCOE_NETDEV_DBG(netdev, "Cannot find hba structure");
1304 		goto err2;
1305 	}
1306 	if (!lport->link_up)
1307 		goto err2;
1308 
1309 	FCOE_NETDEV_DBG(netdev, "skb_info: len:%d data_len:%d head:%p "
1310 			"data:%p tail:%p end:%p sum:%d dev:%s",
1311 			skb->len, skb->data_len, skb->head, skb->data,
1312 			skb_tail_pointer(skb), skb_end_pointer(skb),
1313 			skb->csum, skb->dev ? skb->dev->name : "<NULL>");
1314 
1315 	eh = eth_hdr(skb);
1316 
1317 	if (is_fip_mode(&fcoe->ctlr) &&
1318 	    compare_ether_addr(eh->h_source, fcoe->ctlr.dest_addr)) {
1319 		FCOE_NETDEV_DBG(netdev, "wrong source mac address:%pM\n",
1320 				eh->h_source);
1321 		goto err;
1322 	}
1323 
1324 	/*
1325 	 * Check for minimum frame length, and make sure required FCoE
1326 	 * and FC headers are pulled into the linear data area.
1327 	 */
1328 	if (unlikely((skb->len < FCOE_MIN_FRAME) ||
1329 		     !pskb_may_pull(skb, FCOE_HEADER_LEN)))
1330 		goto err;
1331 
1332 	skb_set_transport_header(skb, sizeof(struct fcoe_hdr));
1333 	fh = (struct fc_frame_header *) skb_transport_header(skb);
1334 
1335 	if (ntoh24(&eh->h_dest[3]) != ntoh24(fh->fh_d_id)) {
1336 		FCOE_NETDEV_DBG(netdev, "FC frame d_id mismatch with MAC:%pM\n",
1337 				eh->h_dest);
1338 		goto err;
1339 	}
1340 
1341 	fr = fcoe_dev_from_skb(skb);
1342 	fr->fr_dev = lport;
1343 
1344 	/*
1345 	 * If the incoming frame's exchange was originated by the
1346 	 * initiator, the received frame's exchange ID is ANDed with
1347 	 * fc_cpu_mask to select the same CPU on which the exchange
1348 	 * was originated; otherwise the CPU is selected using the RX
1349 	 * exchange ID or fcoe_select_cpu().
1350 	 */
1351 	if (ntoh24(fh->fh_f_ctl) & FC_FC_EX_CTX)
1352 		cpu = ntohs(fh->fh_ox_id) & fc_cpu_mask;
1353 	else {
1354 		if (ntohs(fh->fh_rx_id) == FC_XID_UNKNOWN)
1355 			cpu = fcoe_select_cpu();
1356 		else
1357 			cpu = ntohs(fh->fh_rx_id) & fc_cpu_mask;
1358 	}
1359 
1360 	if (cpu >= nr_cpu_ids)
1361 		goto err;
1362 
1363 	fps = &per_cpu(fcoe_percpu, cpu);
1364 	spin_lock_bh(&fps->fcoe_rx_list.lock);
1365 	if (unlikely(!fps->thread)) {
1366 		/*
1367 		 * The targeted CPU is not ready, let's target
1368 		 * the first CPU now. For non-SMP systems this
1369 		 * will check the same CPU twice.
1370 		 */
1371 		FCOE_NETDEV_DBG(netdev, "CPU is online, but no receive thread "
1372 				"ready for incoming skb- using first online "
1373 				"CPU.\n");
1374 
1375 		spin_unlock_bh(&fps->fcoe_rx_list.lock);
1376 		cpu = cpumask_first(cpu_online_mask);
1377 		fps = &per_cpu(fcoe_percpu, cpu);
1378 		spin_lock_bh(&fps->fcoe_rx_list.lock);
1379 		if (!fps->thread) {
1380 			spin_unlock_bh(&fps->fcoe_rx_list.lock);
1381 			goto err;
1382 		}
1383 	}
1384 
1385 	/*
1386 	 * We now have a valid CPU that we're targeting for
1387 	 * this skb. We also have this receive thread locked,
1388 	 * so we're free to queue skbs into its queue.
1389 	 */
1390 
1391 	/* If this is a SCSI-FCP frame, and this is already executing on the
1392 	 * correct CPU, and the queue for this CPU is empty, then go ahead
1393 	 * and process the frame directly in the softirq context.
1394 	 * This lets us process completions without context switching from the
1395 	 * NET_RX softirq, to our receive processing thread, and then back to
1396 	 * BLOCK softirq context.
1397 	 */
1398 	if (fh->fh_type == FC_TYPE_FCP &&
1399 	    cpu == smp_processor_id() &&
1400 	    skb_queue_empty(&fps->fcoe_rx_list)) {
1401 		spin_unlock_bh(&fps->fcoe_rx_list.lock);
1402 		fcoe_recv_frame(skb);
1403 	} else {
1404 		__skb_queue_tail(&fps->fcoe_rx_list, skb);
1405 		if (fps->fcoe_rx_list.qlen == 1)
1406 			wake_up_process(fps->thread);
1407 		spin_unlock_bh(&fps->fcoe_rx_list.lock);
1408 	}
1409 
1410 	return 0;
1411 err:
1412 	per_cpu_ptr(lport->dev_stats, get_cpu())->ErrorFrames++;
1413 	put_cpu();
1414 err2:
1415 	kfree_skb(skb);
1416 	return -1;
1417 }
1418 
1419 /**
1420  * fcoe_alloc_paged_crc_eof() - Allocate a page to be used for the trailer CRC
1421  * @skb:  The packet to be transmitted
1422  * @tlen: The total length of the trailer
1423  *
1424  * Returns: 0 for success
1425  */
1426 static int fcoe_alloc_paged_crc_eof(struct sk_buff *skb, int tlen)
1427 {
1428 	struct fcoe_percpu_s *fps;
1429 	int rc;
1430 
1431 	fps = &get_cpu_var(fcoe_percpu);
1432 	rc = fcoe_get_paged_crc_eof(skb, tlen, fps);
1433 	put_cpu_var(fcoe_percpu);
1434 
1435 	return rc;
1436 }
1437 
1438 /**
1439  * fcoe_xmit() - Transmit a FCoE frame
1440  * @lport: The local port that the frame is to be transmitted for
1441  * @fp:	   The frame to be transmitted
1442  *
1443  * Return: 0 for success
1444  */
1445 int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
1446 {
1447 	int wlen;
1448 	u32 crc;
1449 	struct ethhdr *eh;
1450 	struct fcoe_crc_eof *cp;
1451 	struct sk_buff *skb;
1452 	struct fcoe_dev_stats *stats;
1453 	struct fc_frame_header *fh;
1454 	unsigned int hlen;		/* header length implies the version */
1455 	unsigned int tlen;		/* trailer length */
1456 	unsigned int elen;		/* eth header, may include vlan */
1457 	struct fcoe_port *port = lport_priv(lport);
1458 	struct fcoe_interface *fcoe = port->priv;
1459 	u8 sof, eof;
1460 	struct fcoe_hdr *hp;
1461 
1462 	WARN_ON((fr_len(fp) % sizeof(u32)) != 0);
1463 
1464 	fh = fc_frame_header_get(fp);
1465 	skb = fp_skb(fp);
1466 	wlen = skb->len / FCOE_WORD_TO_BYTE;
1467 
1468 	if (!lport->link_up) {
1469 		kfree_skb(skb);
1470 		return 0;
1471 	}
1472 
1473 	if (unlikely(fh->fh_type == FC_TYPE_ELS) &&
1474 	    fcoe_ctlr_els_send(&fcoe->ctlr, lport, skb))
1475 		return 0;
1476 
1477 	sof = fr_sof(fp);
1478 	eof = fr_eof(fp);
1479 
1480 	elen = sizeof(struct ethhdr);
1481 	hlen = sizeof(struct fcoe_hdr);
1482 	tlen = sizeof(struct fcoe_crc_eof);
1483 	wlen = (skb->len - tlen + sizeof(crc)) / FCOE_WORD_TO_BYTE;
1484 
1485 	/* crc offload */
1486 	if (likely(lport->crc_offload)) {
1487 		skb->ip_summed = CHECKSUM_PARTIAL;
1488 		skb->csum_start = skb_headroom(skb);
1489 		skb->csum_offset = skb->len;
1490 		crc = 0;
1491 	} else {
1492 		skb->ip_summed = CHECKSUM_NONE;
1493 		crc = fcoe_fc_crc(fp);
1494 	}
1495 
1496 	/* copy port crc and eof to the skb buff */
1497 	if (skb_is_nonlinear(skb)) {
1498 		skb_frag_t *frag;
1499 		if (fcoe_alloc_paged_crc_eof(skb, tlen)) {
1500 			kfree_skb(skb);
1501 			return -ENOMEM;
1502 		}
1503 		frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1];
1504 		cp = kmap_atomic(skb_frag_page(frag), KM_SKB_DATA_SOFTIRQ)
1505 			+ frag->page_offset;
1506 	} else {
1507 		cp = (struct fcoe_crc_eof *)skb_put(skb, tlen);
1508 	}
1509 
1510 	memset(cp, 0, sizeof(*cp));
1511 	cp->fcoe_eof = eof;
1512 	cp->fcoe_crc32 = cpu_to_le32(~crc);
1513 
1514 	if (skb_is_nonlinear(skb)) {
1515 		kunmap_atomic(cp, KM_SKB_DATA_SOFTIRQ);
1516 		cp = NULL;
1517 	}
1518 
1519 	/* adjust skb network/transport offsets to match mac/fcoe/port */
1520 	skb_push(skb, elen + hlen);
1521 	skb_reset_mac_header(skb);
1522 	skb_reset_network_header(skb);
1523 	skb->mac_len = elen;
1524 	skb->protocol = htons(ETH_P_FCOE);
1525 	if (fcoe->netdev->priv_flags & IFF_802_1Q_VLAN &&
1526 	    fcoe->realdev->features & NETIF_F_HW_VLAN_TX) {
1527 		skb->vlan_tci = VLAN_TAG_PRESENT |
1528 				vlan_dev_vlan_id(fcoe->netdev);
1529 		skb->dev = fcoe->realdev;
1530 	} else
1531 		skb->dev = fcoe->netdev;
1532 
1533 	/* fill up mac and fcoe headers */
1534 	eh = eth_hdr(skb);
1535 	eh->h_proto = htons(ETH_P_FCOE);
1536 	memcpy(eh->h_dest, fcoe->ctlr.dest_addr, ETH_ALEN);
1537 	if (fcoe->ctlr.map_dest)
1538 		memcpy(eh->h_dest + 3, fh->fh_d_id, 3);
1539 
1540 	if (unlikely(fcoe->ctlr.flogi_oxid != FC_XID_UNKNOWN))
1541 		memcpy(eh->h_source, fcoe->ctlr.ctl_src_addr, ETH_ALEN);
1542 	else
1543 		memcpy(eh->h_source, port->data_src_addr, ETH_ALEN);
1544 
1545 	hp = (struct fcoe_hdr *)(eh + 1);
1546 	memset(hp, 0, sizeof(*hp));
1547 	if (FC_FCOE_VER)
1548 		FC_FCOE_ENCAPS_VER(hp, FC_FCOE_VER);
1549 	hp->fcoe_sof = sof;
1550 
1551 	/* fcoe lso, mss is in max_payload which is non-zero for FCP data */
1552 	if (lport->seq_offload && fr_max_payload(fp)) {
1553 		skb_shinfo(skb)->gso_type = SKB_GSO_FCOE;
1554 		skb_shinfo(skb)->gso_size = fr_max_payload(fp);
1555 	} else {
1556 		skb_shinfo(skb)->gso_type = 0;
1557 		skb_shinfo(skb)->gso_size = 0;
1558 	}
1559 	/* update tx stats regardless of whether the LLD fails */
1560 	stats = per_cpu_ptr(lport->dev_stats, get_cpu());
1561 	stats->TxFrames++;
1562 	stats->TxWords += wlen;
1563 	put_cpu();
1564 
1565 	/* send down to lld */
1566 	fr_dev(fp) = lport;
1567 	fcoe_port_send(port, skb);
1568 	return 0;
1569 }
1570 
1571 /**
1572  * fcoe_percpu_flush_done() - Indicate per-CPU queue flush completion
1573  * @skb: The completed skb (argument required by destructor)
1574  */
1575 static void fcoe_percpu_flush_done(struct sk_buff *skb)
1576 {
1577 	complete(&fcoe_flush_completion);
1578 }
1579 
1580 /**
1581  * fcoe_filter_frames() - filter out bad fcoe frames, i.e. bad CRC
1582  * @lport: The local port the frame was received on
1583  * @fp:	   The received frame
1584  *
1585  * Return: 0 on passing filtering checks
1586  */
1587 static inline int fcoe_filter_frames(struct fc_lport *lport,
1588 				     struct fc_frame *fp)
1589 {
1590 	struct fcoe_interface *fcoe;
1591 	struct fc_frame_header *fh;
1592 	struct sk_buff *skb = (struct sk_buff *)fp;
1593 	struct fcoe_dev_stats *stats;
1594 
1595 	/*
1596 	 * We only check the CRC here if no offload is available. If it
1597 	 * is solicited data, the FCP layer will check it during the
1598 	 * copy.
1599 	 */
1600 	if (lport->crc_offload && skb->ip_summed == CHECKSUM_UNNECESSARY)
1601 		fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED;
1602 	else
1603 		fr_flags(fp) |= FCPHF_CRC_UNCHECKED;
1604 
1605 	fh = (struct fc_frame_header *) skb_transport_header(skb);
1606 	fh = fc_frame_header_get(fp);
1607 	if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA && fh->fh_type == FC_TYPE_FCP)
1608 		return 0;
1609 
1610 	fcoe = ((struct fcoe_port *)lport_priv(lport))->priv;
1611 	if (is_fip_mode(&fcoe->ctlr) && fc_frame_payload_op(fp) == ELS_LOGO &&
1612 	    ntoh24(fh->fh_s_id) == FC_FID_FLOGI) {
1613 		FCOE_DBG("fcoe: dropping FCoE lport LOGO in fip mode\n");
1614 		return -EINVAL;
1615 	}
1616 
1617 	if (!(fr_flags(fp) & FCPHF_CRC_UNCHECKED) ||
1618 	    le32_to_cpu(fr_crc(fp)) == ~crc32(~0, skb->data, skb->len)) {
1619 		fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED;
1620 		return 0;
1621 	}
1622 
1623 	stats = per_cpu_ptr(lport->dev_stats, get_cpu());
1624 	stats->InvalidCRCCount++;
1625 	if (stats->InvalidCRCCount < 5)
1626 		printk(KERN_WARNING "fcoe: dropping frame with CRC error\n");
1627 	return -EINVAL;
1628 }
1629 
1630 /**
1631  * fcoe_recv_frame() - process a single received frame
1632  * @skb: frame to process
1633  */
1634 static void fcoe_recv_frame(struct sk_buff *skb)
1635 {
1636 	u32 fr_len;
1637 	struct fc_lport *lport;
1638 	struct fcoe_rcv_info *fr;
1639 	struct fcoe_dev_stats *stats;
1640 	struct fcoe_crc_eof crc_eof;
1641 	struct fc_frame *fp;
1642 	struct fcoe_port *port;
1643 	struct fcoe_hdr *hp;
1644 
1645 	fr = fcoe_dev_from_skb(skb);
1646 	lport = fr->fr_dev;
1647 	if (unlikely(!lport)) {
1648 		if (skb->destructor != fcoe_percpu_flush_done)
1649 			FCOE_NETDEV_DBG(skb->dev, "NULL lport in skb");
1650 		kfree_skb(skb);
1651 		return;
1652 	}
1653 
1654 	FCOE_NETDEV_DBG(skb->dev, "skb_info: len:%d data_len:%d "
1655 			"head:%p data:%p tail:%p end:%p sum:%d dev:%s",
1656 			skb->len, skb->data_len,
1657 			skb->head, skb->data, skb_tail_pointer(skb),
1658 			skb_end_pointer(skb), skb->csum,
1659 			skb->dev ? skb->dev->name : "<NULL>");
1660 
1661 	port = lport_priv(lport);
1662 	if (skb_is_nonlinear(skb))
1663 		skb_linearize(skb);	/* not ideal */
1664 
1665 	/*
1666 	 * Frame length checks and setting up the header pointers
1667 	 * was done in fcoe_rcv already.
1668 	 */
1669 	hp = (struct fcoe_hdr *) skb_network_header(skb);
1670 
1671 	stats = per_cpu_ptr(lport->dev_stats, get_cpu());
1672 	if (unlikely(FC_FCOE_DECAPS_VER(hp) != FC_FCOE_VER)) {
1673 		if (stats->ErrorFrames < 5)
1674 			printk(KERN_WARNING "fcoe: FCoE version "
1675 			       "mismatch: The frame has "
1676 			       "version %x, but the "
1677 			       "initiator supports version "
1678 			       "%x\n", FC_FCOE_DECAPS_VER(hp),
1679 			       FC_FCOE_VER);
1680 		goto drop;
1681 	}
1682 
1683 	skb_pull(skb, sizeof(struct fcoe_hdr));
1684 	fr_len = skb->len - sizeof(struct fcoe_crc_eof);
1685 
1686 	stats->RxFrames++;
1687 	stats->RxWords += fr_len / FCOE_WORD_TO_BYTE;
1688 
1689 	fp = (struct fc_frame *)skb;
1690 	fc_frame_init(fp);
1691 	fr_dev(fp) = lport;
1692 	fr_sof(fp) = hp->fcoe_sof;
1693 
1694 	/* Copy out the CRC and EOF trailer for access */
1695 	if (skb_copy_bits(skb, fr_len, &crc_eof, sizeof(crc_eof)))
1696 		goto drop;
1697 	fr_eof(fp) = crc_eof.fcoe_eof;
1698 	fr_crc(fp) = crc_eof.fcoe_crc32;
1699 	if (pskb_trim(skb, fr_len))
1700 		goto drop;
1701 
1702 	if (!fcoe_filter_frames(lport, fp)) {
1703 		put_cpu();
1704 		fc_exch_recv(lport, fp);
1705 		return;
1706 	}
1707 drop:
1708 	stats->ErrorFrames++;
1709 	put_cpu();
1710 	kfree_skb(skb);
1711 }
1712 
1713 /**
1714  * fcoe_percpu_receive_thread() - The per-CPU packet receive thread
1715  * @arg: The per-CPU context
1716  *
1717  * Return: 0 for success
1718  */
1719 int fcoe_percpu_receive_thread(void *arg)
1720 {
1721 	struct fcoe_percpu_s *p = arg;
1722 	struct sk_buff *skb;
1723 
1724 	set_user_nice(current, -20);
1725 
1726 	while (!kthread_should_stop()) {
1727 
1728 		spin_lock_bh(&p->fcoe_rx_list.lock);
1729 		while ((skb = __skb_dequeue(&p->fcoe_rx_list)) == NULL) {
1730 			set_current_state(TASK_INTERRUPTIBLE);
1731 			spin_unlock_bh(&p->fcoe_rx_list.lock);
1732 			schedule();
1733 			set_current_state(TASK_RUNNING);
1734 			if (kthread_should_stop())
1735 				return 0;
1736 			spin_lock_bh(&p->fcoe_rx_list.lock);
1737 		}
1738 		spin_unlock_bh(&p->fcoe_rx_list.lock);
1739 		fcoe_recv_frame(skb);
1740 	}
1741 	return 0;
1742 }
1743 
1744 /**
1745  * fcoe_dev_setup() - Setup the link change notification interface
1746  */
1747 static void fcoe_dev_setup(void)
1748 {
1749 	register_netdevice_notifier(&fcoe_notifier);
1750 }
1751 
1752 /**
1753  * fcoe_dev_cleanup() - Cleanup the link change notification interface
1754  */
1755 static void fcoe_dev_cleanup(void)
1756 {
1757 	unregister_netdevice_notifier(&fcoe_notifier);
1758 }
1759 
1760 /**
1761  * fcoe_device_notification() - Handler for net device events
1762  * @notifier: The context of the notification
1763  * @event:    The type of event
1764  * @ptr:      The net device that the event was on
1765  *
1766  * This function is called by the Ethernet driver in case of a link change event.
1767  *
1768  * Returns: 0 for success
1769  */
1770 static int fcoe_device_notification(struct notifier_block *notifier,
1771 				    ulong event, void *ptr)
1772 {
1773 	struct fc_lport *lport = NULL;
1774 	struct net_device *netdev = ptr;
1775 	struct fcoe_interface *fcoe;
1776 	struct fcoe_port *port;
1777 	struct fcoe_dev_stats *stats;
1778 	u32 link_possible = 1;
1779 	u32 mfs;
1780 	int rc = NOTIFY_OK;
1781 
1782 	list_for_each_entry(fcoe, &fcoe_hostlist, list) {
1783 		if (fcoe->netdev == netdev) {
1784 			lport = fcoe->ctlr.lp;
1785 			break;
1786 		}
1787 	}
1788 	if (!lport) {
1789 		rc = NOTIFY_DONE;
1790 		goto out;
1791 	}
1792 
1793 	switch (event) {
1794 	case NETDEV_DOWN:
1795 	case NETDEV_GOING_DOWN:
1796 		link_possible = 0;
1797 		break;
1798 	case NETDEV_UP:
1799 	case NETDEV_CHANGE:
1800 		break;
1801 	case NETDEV_CHANGEMTU:
1802 		if (netdev->features & NETIF_F_FCOE_MTU)
1803 			break;
1804 		mfs = netdev->mtu - (sizeof(struct fcoe_hdr) +
1805 				     sizeof(struct fcoe_crc_eof));
1806 		if (mfs >= FC_MIN_MAX_FRAME)
1807 			fc_set_mfs(lport, mfs);
1808 		break;
1809 	case NETDEV_REGISTER:
1810 		break;
1811 	case NETDEV_UNREGISTER:
1812 		list_del(&fcoe->list);
1813 		port = lport_priv(fcoe->ctlr.lp);
1814 		queue_work(fcoe_wq, &port->destroy_work);
1815 		goto out;
1817 	case NETDEV_FEAT_CHANGE:
1818 		fcoe_netdev_features_change(lport, netdev);
1819 		break;
1820 	default:
1821 		FCOE_NETDEV_DBG(netdev, "Unknown event %ld "
1822 				"from netdev netlink\n", event);
1823 	}
1824 
1825 	fcoe_link_speed_update(lport);
1826 
1827 	if (link_possible && !fcoe_link_ok(lport))
1828 		fcoe_ctlr_link_up(&fcoe->ctlr);
1829 	else if (fcoe_ctlr_link_down(&fcoe->ctlr)) {
1830 		stats = per_cpu_ptr(lport->dev_stats, get_cpu());
1831 		stats->LinkFailureCount++;
1832 		put_cpu();
1833 		fcoe_clean_pending_queue(lport);
1834 	}
1835 out:
1836 	return rc;
1837 }
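
/*
 * Worked example for the NETDEV_CHANGEMTU case above: the FCoE encapsulation
 * overhead is sizeof(struct fcoe_hdr) + sizeof(struct fcoe_crc_eof), i.e.
 * 14 + 8 = 22 bytes with the standard FC-BB encapsulation.  With the commonly
 * recommended FCoE MTU of 2158 bytes that leaves mfs = 2136 bytes, which is
 * exactly a 24-byte FC header plus the full 2112-byte FC payload, so
 * fc_set_mfs() is called with a full-sized maximum frame.  Smaller MTUs
 * simply shrink the negotiated mfs as long as it stays at or above
 * FC_MIN_MAX_FRAME.
 */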
1838 
1839 /**
1840  * fcoe_disable() - Disables an FCoE interface
1841  * @netdev: The net_device object of the FCoE interface to disable
1842  *
1843  * Called from fcoe transport.
1844  *
1845  * Returns: 0 for success
1846  */
1847 static int fcoe_disable(struct net_device *netdev)
1848 {
1849 	struct fcoe_interface *fcoe;
1850 	int rc = 0;
1851 
1852 	mutex_lock(&fcoe_config_mutex);
1853 
1854 	rtnl_lock();
1855 	fcoe = fcoe_hostlist_lookup_port(netdev);
1856 	rtnl_unlock();
1857 
1858 	if (fcoe) {
1859 		fcoe_ctlr_link_down(&fcoe->ctlr);
1860 		fcoe_clean_pending_queue(fcoe->ctlr.lp);
1861 	} else
1862 		rc = -ENODEV;
1863 
1864 	mutex_unlock(&fcoe_config_mutex);
1865 	return rc;
1866 }
1867 
1868 /**
1869  * fcoe_enable() - Enables an FCoE interface
1870  * @netdev: The net_device object of the FCoE interface to enable
1871  *
1872  * Called from fcoe transport.
1873  *
1874  * Returns: 0 for success
1875  */
1876 static int fcoe_enable(struct net_device *netdev)
1877 {
1878 	struct fcoe_interface *fcoe;
1879 	int rc = 0;
1880 
1881 	mutex_lock(&fcoe_config_mutex);
1882 	rtnl_lock();
1883 	fcoe = fcoe_hostlist_lookup_port(netdev);
1884 	rtnl_unlock();
1885 
1886 	if (!fcoe)
1887 		rc = -ENODEV;
1888 	else if (!fcoe_link_ok(fcoe->ctlr.lp))
1889 		fcoe_ctlr_link_up(&fcoe->ctlr);
1890 
1891 	mutex_unlock(&fcoe_config_mutex);
1892 	return rc;
1893 }
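
/*
 * Usage note: fcoe_enable() and fcoe_disable() are not called directly from
 * within this module; they are invoked through the fcoe_sw_transport ops
 * registered with libfcoe (see fcoe_sw_transport below), typically on behalf
 * of user space requests such as those issued by the fcoeadm utility.  They
 * only toggle the FIP controller link state; the underlying net_device is
 * left untouched.
 */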
1894 
1895 /**
1896  * fcoe_destroy() - Destroy an FCoE interface
1897  * @netdev: The net_device object of the FCoE interface to destroy
1898  *
1899  * Called from fcoe transport
1900  *
1901  * Returns: 0 for success
1902  */
1903 static int fcoe_destroy(struct net_device *netdev)
1904 {
1905 	struct fcoe_interface *fcoe;
1906 	struct fc_lport *lport;
1907 	struct fcoe_port *port;
1908 	int rc = 0;
1909 
1910 	mutex_lock(&fcoe_config_mutex);
1911 	rtnl_lock();
1912 	fcoe = fcoe_hostlist_lookup_port(netdev);
1913 	if (!fcoe) {
1914 		rc = -ENODEV;
1915 		goto out_nodev;
1916 	}
1917 	lport = fcoe->ctlr.lp;
1918 	port = lport_priv(lport);
1919 	list_del(&fcoe->list);
1920 	queue_work(fcoe_wq, &port->destroy_work);
1921 out_nodev:
1922 	rtnl_unlock();
1923 	mutex_unlock(&fcoe_config_mutex);
1924 	return rc;
1925 }
1926 
1927 /**
1928  * fcoe_destroy_work() - Destroy a FCoE port in a deferred work context
1929  * @work: Handle to the FCoE port to be destroyed
1930  */
1931 static void fcoe_destroy_work(struct work_struct *work)
1932 {
1933 	struct fcoe_port *port;
1934 	struct fcoe_interface *fcoe;
1935 	int npiv = 0;
1936 
1937 	port = container_of(work, struct fcoe_port, destroy_work);
1938 	mutex_lock(&fcoe_config_mutex);
1939 
1940 	/* set if this is an NPIV port */
1941 	npiv = port->lport->vport ? 1 : 0;
1942 
1943 	fcoe = port->priv;
1944 	fcoe_if_destroy(port->lport);
1945 
1946 	/* Do not tear down the fcoe interface for NPIV port */
1947 	if (!npiv)
1948 		fcoe_interface_cleanup(fcoe);
1949 
1950 	mutex_unlock(&fcoe_config_mutex);
1951 }
1952 
1953 /**
1954  * fcoe_match() - Check if FCoE is supported on the given netdevice
1955  * @netdev: The net_device object of the Ethernet interface to create on
1956  *
1957  * Called from fcoe transport.
1958  *
1959  * Returns: always returns true as this is the default FCoE transport,
1960  * i.e., it supports all netdevs.
1961  */
1962 static bool fcoe_match(struct net_device *netdev)
1963 {
1964 	return true;
1965 }
1966 
1967 /**
1968  * fcoe_create() - Create a fcoe interface
1969  * @netdev  : The net_device object the Ethernet interface to create on
1970  * @fip_mode: The FIP mode for this creation
1971  *
1972  * Called from fcoe transport
1973  *
1974  * Returns: 0 for success
1975  */
1976 static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode)
1977 {
1978 	int rc = 0;
1979 	struct fcoe_interface *fcoe;
1980 	struct fc_lport *lport;
1981 
1982 	mutex_lock(&fcoe_config_mutex);
1983 	rtnl_lock();
1984 
1985 	/* look for existing lport */
1986 	if (fcoe_hostlist_lookup(netdev)) {
1987 		rc = -EEXIST;
1988 		goto out_nodev;
1989 	}
1990 
1991 	fcoe = fcoe_interface_create(netdev, fip_mode);
1992 	if (IS_ERR(fcoe)) {
1993 		rc = PTR_ERR(fcoe);
1994 		goto out_nodev;
1995 	}
1996 
1997 	lport = fcoe_if_create(fcoe, &netdev->dev, 0);
1998 	if (IS_ERR(lport)) {
1999 		printk(KERN_ERR "fcoe: Failed to create interface (%s)\n",
2000 		       netdev->name);
2001 		rc = -EIO;
2002 		rtnl_unlock();
2003 		fcoe_interface_cleanup(fcoe);
2004 		goto out_nortnl;
2005 	}
2006 
2007 	/* Make this the "master" N_Port */
2008 	fcoe->ctlr.lp = lport;
2009 
2010 	/* add to lports list */
2011 	fcoe_hostlist_add(lport);
2012 
2013 	/* start FIP Discovery and FLOGI */
2014 	lport->boot_time = jiffies;
2015 	fc_fabric_login(lport);
2016 	if (!fcoe_link_ok(lport))
2017 		fcoe_ctlr_link_up(&fcoe->ctlr);
2018 
2019 out_nodev:
2020 	rtnl_unlock();
2021 out_nortnl:
2022 	mutex_unlock(&fcoe_config_mutex);
2023 	return rc;
2024 }
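
/*
 * Usage note: fcoe_create() is reached through the fcoe transport layer in
 * libfcoe, which handles the user space create request (for example one
 * generated by the fcoeadm utility) and is assumed to pick this transport
 * via the fcoe_match() callback above.  On success the new lport starts FIP
 * discovery and fabric login immediately if the link is already up.
 */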
2025 
2026 /**
2027  * fcoe_link_speed_update() - Update the supported and actual link speeds
2028  * @lport: The local port to update speeds for
2029  *
2030  * Returns: 0 if the ethtool query was successful
2031  *          -1 if the ethtool query failed
2032  */
2033 int fcoe_link_speed_update(struct fc_lport *lport)
2034 {
2035 	struct net_device *netdev = fcoe_netdev(lport);
2036 	struct ethtool_cmd ecmd;
2037 
2038 	if (!__ethtool_get_settings(netdev, &ecmd)) {
2039 		lport->link_supported_speeds &=
2040 			~(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT);
2041 		if (ecmd.supported & (SUPPORTED_1000baseT_Half |
2042 				      SUPPORTED_1000baseT_Full))
2043 			lport->link_supported_speeds |= FC_PORTSPEED_1GBIT;
2044 		if (ecmd.supported & SUPPORTED_10000baseT_Full)
2045 			lport->link_supported_speeds |=
2046 				FC_PORTSPEED_10GBIT;
2047 		switch (ethtool_cmd_speed(&ecmd)) {
2048 		case SPEED_1000:
2049 			lport->link_speed = FC_PORTSPEED_1GBIT;
2050 			break;
2051 		case SPEED_10000:
2052 			lport->link_speed = FC_PORTSPEED_10GBIT;
2053 			break;
2054 		}
2055 		return 0;
2056 	}
2057 	return -1;
2058 }
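
/*
 * Note: only 1 Gbit and 10 Gbit speeds are mapped above; other ethtool
 * speeds leave lport->link_speed unchanged, and only the 1G/10G bits of
 * link_supported_speeds are refreshed on each call.
 */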
2059 
2060 /**
2061  * fcoe_link_ok() - Check if the link is OK for a local port
2062  * @lport: The local port to check link on
2063  *
2064  * Returns: 0 if link is UP and OK, -1 if not
2065  *
2066  */
2067 int fcoe_link_ok(struct fc_lport *lport)
2068 {
2069 	struct net_device *netdev = fcoe_netdev(lport);
2070 
2071 	if (netif_oper_up(netdev))
2072 		return 0;
2073 	return -1;
2074 }
2075 
2076 /**
2077  * fcoe_percpu_clean() - Clear all pending skbs for a local port
2078  * @lport: The local port whose skbs are to be cleared
2079  *
2080  * Must be called with fcoe_create_mutex held to single-thread completion.
2081  *
2082  * This flushes the pending skbs by adding a new skb to each queue and
2083  * waiting until they are all freed.  This assures us that not only are
2084  * there no packets that will be handled by the lport, but also that any
2085  * threads already handling packet have returned.
2086  */
2087 void fcoe_percpu_clean(struct fc_lport *lport)
2088 {
2089 	struct fcoe_percpu_s *pp;
2090 	struct fcoe_rcv_info *fr;
2091 	struct sk_buff_head *list;
2092 	struct sk_buff *skb, *next;
2093 	struct sk_buff *head;
2094 	unsigned int cpu;
2095 
2096 	for_each_possible_cpu(cpu) {
2097 		pp = &per_cpu(fcoe_percpu, cpu);
2098 		spin_lock_bh(&pp->fcoe_rx_list.lock);
2099 		list = &pp->fcoe_rx_list;
2100 		head = list->next;
2101 		for (skb = head; skb != (struct sk_buff *)list;
2102 		     skb = next) {
2103 			next = skb->next;
2104 			fr = fcoe_dev_from_skb(skb);
2105 			if (fr->fr_dev == lport) {
2106 				__skb_unlink(skb, list);
2107 				kfree_skb(skb);
2108 			}
2109 		}
2110 
2111 		if (!pp->thread || !cpu_online(cpu)) {
2112 			spin_unlock_bh(&pp->fcoe_rx_list.lock);
2113 			continue;
2114 		}
2115 
2116 		skb = dev_alloc_skb(0);
2117 		if (!skb) {
2118 			spin_unlock_bh(&pp->fcoe_rx_list.lock);
2119 			continue;
2120 		}
2121 		skb->destructor = fcoe_percpu_flush_done;
2122 
2123 		__skb_queue_tail(&pp->fcoe_rx_list, skb);
2124 		if (pp->fcoe_rx_list.qlen == 1)
2125 			wake_up_process(pp->thread);
2126 		spin_unlock_bh(&pp->fcoe_rx_list.lock);
2127 
2128 		wait_for_completion(&fcoe_flush_completion);
2129 	}
2130 }
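
/*
 * Editorial note on the flush handshake above: the zero-length skb queued to
 * each per-CPU thread has its destructor set to fcoe_percpu_flush_done(),
 * which is defined earlier in this file.  A minimal sketch, assuming the
 * destructor only needs to signal the waiter, would be:
 *
 *	static void fcoe_percpu_flush_done(struct sk_buff *skb)
 *	{
 *		complete(&fcoe_flush_completion);
 *	}
 *
 * Because the receive thread frees skbs in queue order, the completion
 * firing implies every skb queued ahead of the marker has been handled.
 */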
2131 
2132 /**
2133  * fcoe_reset() - Reset a local port
2134  * @shost: The SCSI host associated with the local port to be reset
2135  *
2136  * Returns: Always 0 (return value required by FC transport template)
2137  */
2138 int fcoe_reset(struct Scsi_Host *shost)
2139 {
2140 	struct fc_lport *lport = shost_priv(shost);
2141 	struct fcoe_port *port = lport_priv(lport);
2142 	struct fcoe_interface *fcoe = port->priv;
2143 
2144 	fcoe_ctlr_link_down(&fcoe->ctlr);
2145 	fcoe_clean_pending_queue(fcoe->ctlr.lp);
2146 	if (!fcoe_link_ok(fcoe->ctlr.lp))
2147 		fcoe_ctlr_link_up(&fcoe->ctlr);
2148 	return 0;
2149 }
2150 
2151 /**
2152  * fcoe_hostlist_lookup_port() - Find the FCoE interface associated with a net device
2153  * @netdev: The net device used as a key
2154  *
2155  * Locking: Must be called with the RNL mutex held.
2156  *
2157  * Returns: NULL or the FCoE interface
2158  */
2159 static struct fcoe_interface *
2160 fcoe_hostlist_lookup_port(const struct net_device *netdev)
2161 {
2162 	struct fcoe_interface *fcoe;
2163 
2164 	list_for_each_entry(fcoe, &fcoe_hostlist, list) {
2165 		if (fcoe->netdev == netdev)
2166 			return fcoe;
2167 	}
2168 	return NULL;
2169 }
2170 
2171 /**
2172  * fcoe_hostlist_lookup() - Find the local port associated with a
2173  *			    given net device
2174  * @netdev: The netdevice used as a key
2175  *
2176  * Locking: Must be called with the RTNL mutex held
2177  *
2178  * Returns: NULL or the local port
2179  */
2180 static struct fc_lport *fcoe_hostlist_lookup(const struct net_device *netdev)
2181 {
2182 	struct fcoe_interface *fcoe;
2183 
2184 	fcoe = fcoe_hostlist_lookup_port(netdev);
2185 	return (fcoe) ? fcoe->ctlr.lp : NULL;
2186 }
2187 
2188 /**
2189  * fcoe_hostlist_add() - Add the FCoE interface identified by a local
2190  *			 port to the hostlist
2191  * @lport: The local port that identifies the FCoE interface to be added
2192  *
2193  * Locking: must be called with the RTNL mutex held
2194  *
2195  * Returns: 0 for success
2196  */
2197 static int fcoe_hostlist_add(const struct fc_lport *lport)
2198 {
2199 	struct fcoe_interface *fcoe;
2200 	struct fcoe_port *port;
2201 
2202 	fcoe = fcoe_hostlist_lookup_port(fcoe_netdev(lport));
2203 	if (!fcoe) {
2204 		port = lport_priv(lport);
2205 		fcoe = port->priv;
2206 		list_add_tail(&fcoe->list, &fcoe_hostlist);
2207 	}
2208 	return 0;
2209 }
2210 
2211 
2212 static struct fcoe_transport fcoe_sw_transport = {
2213 	.name = {FCOE_TRANSPORT_DEFAULT},
2214 	.attached = false,
2215 	.list = LIST_HEAD_INIT(fcoe_sw_transport.list),
2216 	.match = fcoe_match,
2217 	.create = fcoe_create,
2218 	.destroy = fcoe_destroy,
2219 	.enable = fcoe_enable,
2220 	.disable = fcoe_disable,
2221 };
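
/*
 * fcoe_sw_transport is registered with libfcoe in fcoe_init() below via
 * fcoe_transport_attach().  libfcoe then dispatches create, destroy, enable
 * and disable requests to the transport whose .match callback accepts the
 * target netdev; since fcoe_match() always returns true, this acts as the
 * default software FCoE transport.
 */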
2222 
2223 /**
2224  * fcoe_init() - Initialize fcoe.ko
2225  *
2226  * Returns: 0 on success, or a negative value on failure
2227  */
2228 static int __init fcoe_init(void)
2229 {
2230 	struct fcoe_percpu_s *p;
2231 	unsigned int cpu;
2232 	int rc = 0;
2233 
2234 	fcoe_wq = alloc_workqueue("fcoe", 0, 0);
2235 	if (!fcoe_wq)
2236 		return -ENOMEM;
2237 
2238 	/* register as a fcoe transport */
2239 	rc = fcoe_transport_attach(&fcoe_sw_transport);
2240 	if (rc) {
2241 		printk(KERN_ERR "failed to register an fcoe transport, check "
2242 		printk(KERN_ERR "fcoe: failed to register an fcoe transport, check "
2243 			"if libfcoe is loaded\n");
		destroy_workqueue(fcoe_wq);
2244 		return rc;
2245 
2246 	mutex_lock(&fcoe_config_mutex);
2247 
2248 	for_each_possible_cpu(cpu) {
2249 		p = &per_cpu(fcoe_percpu, cpu);
2250 		skb_queue_head_init(&p->fcoe_rx_list);
2251 	}
2252 
2253 	for_each_online_cpu(cpu)
2254 		fcoe_percpu_thread_create(cpu);
2255 
2256 	/* Initialize per CPU interrupt thread */
2257 	rc = register_hotcpu_notifier(&fcoe_cpu_notifier);
2258 	if (rc)
2259 		goto out_free;
2260 
2261 	/* Setup link change notification */
2262 	fcoe_dev_setup();
2263 
2264 	rc = fcoe_if_init();
2265 	if (rc)
2266 		goto out_free;
2267 
2268 	mutex_unlock(&fcoe_config_mutex);
2269 	return 0;
2270 
2271 out_free:
2272 	for_each_online_cpu(cpu) {
2273 		fcoe_percpu_thread_destroy(cpu);
2274 	}
2275 	mutex_unlock(&fcoe_config_mutex);
2276 	destroy_workqueue(fcoe_wq);
2277 	return rc;
2278 }
2279 module_init(fcoe_init);
2280 
2281 /**
2282  * fcoe_exit() - Clean up fcoe.ko
2283  *
2284  * Tears down all FCoE interfaces and unregisters the transport.
2285  */
2286 static void __exit fcoe_exit(void)
2287 {
2288 	struct fcoe_interface *fcoe, *tmp;
2289 	struct fcoe_port *port;
2290 	unsigned int cpu;
2291 
2292 	mutex_lock(&fcoe_config_mutex);
2293 
2294 	fcoe_dev_cleanup();
2295 
2296 	/* releases the associated fcoe hosts */
2297 	rtnl_lock();
2298 	list_for_each_entry_safe(fcoe, tmp, &fcoe_hostlist, list) {
2299 		list_del(&fcoe->list);
2300 		port = lport_priv(fcoe->ctlr.lp);
2301 		queue_work(fcoe_wq, &port->destroy_work);
2302 	}
2303 	rtnl_unlock();
2304 
2305 	unregister_hotcpu_notifier(&fcoe_cpu_notifier);
2306 
2307 	for_each_online_cpu(cpu)
2308 		fcoe_percpu_thread_destroy(cpu);
2309 
2310 	mutex_unlock(&fcoe_config_mutex);
2311 
2312 	/*
2313 	 * destroy_work items may be chained, but destroy_workqueue()
2314 	 * can take care of them. Just kill the fcoe_wq.
2315 	 */
2316 	destroy_workqueue(fcoe_wq);
2317 
2318 	/*
2319 	 * Detaching from the scsi transport must happen after all
2320 	 * destroys are done on the fcoe_wq. destroy_workqueue() will
2321 	 * ensure the fcoe_wq is flushed.
2322 	 */
2323 	fcoe_if_exit();
2324 
2325 	/* detach from fcoe transport */
2326 	fcoe_transport_detach(&fcoe_sw_transport);
2327 }
2328 module_exit(fcoe_exit);
2329 
2330 /**
2331  * fcoe_flogi_resp() - FCoE specific FLOGI and FDISC response handler
2332  * @seq: active sequence in the FLOGI or FDISC exchange
2333  * @fp: response frame, or error encoded in a pointer (timeout)
2334  * @arg: pointer to the fcoe_ctlr structure
2335  *
2336  * This handles MAC address management for FCoE, then passes control on to
2337  * the libfc FLOGI response handler.
2338  */
2339 static void fcoe_flogi_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
2340 {
2341 	struct fcoe_ctlr *fip = arg;
2342 	struct fc_exch *exch = fc_seq_exch(seq);
2343 	struct fc_lport *lport = exch->lp;
2344 	u8 *mac;
2345 
2346 	if (IS_ERR(fp))
2347 		goto done;
2348 
2349 	mac = fr_cb(fp)->granted_mac;
2350 	/* pre-FIP */
2351 	if (is_zero_ether_addr(mac))
2352 		fcoe_ctlr_recv_flogi(fip, lport, fp);
2353 	if (!is_zero_ether_addr(mac))
2354 		fcoe_update_src_mac(lport, mac);
2355 done:
2356 	fc_lport_flogi_resp(seq, fp, lport);
2357 }
2358 
2359 /**
2360  * fcoe_logo_resp() - FCoE specific LOGO response handler
2361  * @seq: active sequence in the LOGO exchange
2362  * @fp: response frame, or error encoded in a pointer (timeout)
2363  * @arg: pointer to the fc_lport structure (the local port)
2364  *
2365  * This handles MAC address management for FCoE, then passes control on to
2366  * the libfc LOGO response handler.
2367  */
2368 static void fcoe_logo_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
2369 {
2370 	struct fc_lport *lport = arg;
2371 	static u8 zero_mac[ETH_ALEN] = { 0 };
2372 
2373 	if (!IS_ERR(fp))
2374 		fcoe_update_src_mac(lport, zero_mac);
2375 	fc_lport_logo_resp(seq, fp, lport);
2376 }
2377 
2378 /**
2379  * fcoe_elsct_send() - FCoE specific ELS handler
2380  *
2381  * This does special case handling of FIP encapsulated ELS exchanges for FCoE,
2382  * using FCoE specific response handlers and passing the FIP controller as
2383  * the argument (the lport is still available from the exchange).
2384  *
2385  * Most of the work here is just handed off to the libfc routine.
2386  */
2387 static struct fc_seq *fcoe_elsct_send(struct fc_lport *lport, u32 did,
2388 				      struct fc_frame *fp, unsigned int op,
2389 				      void (*resp)(struct fc_seq *,
2390 						   struct fc_frame *,
2391 						   void *),
2392 				      void *arg, u32 timeout)
2393 {
2394 	struct fcoe_port *port = lport_priv(lport);
2395 	struct fcoe_interface *fcoe = port->priv;
2396 	struct fcoe_ctlr *fip = &fcoe->ctlr;
2397 	struct fc_frame_header *fh = fc_frame_header_get(fp);
2398 
2399 	switch (op) {
2400 	case ELS_FLOGI:
2401 	case ELS_FDISC:
2402 		if (lport->point_to_multipoint)
2403 			break;
2404 		return fc_elsct_send(lport, did, fp, op, fcoe_flogi_resp,
2405 				     fip, timeout);
2406 	case ELS_LOGO:
2407 		/* only hook onto fabric logouts, not port logouts */
2408 		if (ntoh24(fh->fh_d_id) != FC_FID_FLOGI)
2409 			break;
2410 		return fc_elsct_send(lport, did, fp, op, fcoe_logo_resp,
2411 				     lport, timeout);
2412 	}
2413 	return fc_elsct_send(lport, did, fp, op, resp, arg, timeout);
2414 }
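
/*
 * Note: the LOGO special case above only applies to fabric logouts, i.e.
 * frames addressed to the F_Port well-known address FC_FID_FLOGI (0xfffffe).
 * Clearing the source MAC in fcoe_logo_resp() at that point lets a later
 * FLOGI start from a clean MAC state (for example a newly granted FPMA when
 * FIP is in use).
 */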
2415 
2416 /**
2417  * fcoe_vport_create() - create an fc_host/scsi_host for a vport
2418  * @vport: fc_vport object to create a new fc_host for
2419  * @disabled: start the new fc_host in a disabled state by default?
2420  *
2421  * Returns: 0 for success
2422  */
2423 static int fcoe_vport_create(struct fc_vport *vport, bool disabled)
2424 {
2425 	struct Scsi_Host *shost = vport_to_shost(vport);
2426 	struct fc_lport *n_port = shost_priv(shost);
2427 	struct fcoe_port *port = lport_priv(n_port);
2428 	struct fcoe_interface *fcoe = port->priv;
2429 	struct net_device *netdev = fcoe->netdev;
2430 	struct fc_lport *vn_port;
2431 	int rc;
2432 	char buf[32];
2433 
2434 	rc = fcoe_validate_vport_create(vport);
2435 	if (rc) {
2436 		fcoe_wwn_to_str(vport->port_name, buf, sizeof(buf));
2437 		printk(KERN_ERR "fcoe: Failed to create vport, "
2438 			"WWPN (0x%s) already exists\n",
2439 			buf);
2440 		return rc;
2441 	}
2442 
2443 	mutex_lock(&fcoe_config_mutex);
2444 	rtnl_lock();
2445 	vn_port = fcoe_if_create(fcoe, &vport->dev, 1);
2446 	rtnl_unlock();
2447 	mutex_unlock(&fcoe_config_mutex);
2448 
2449 	if (IS_ERR(vn_port)) {
2450 		printk(KERN_ERR "fcoe: fcoe_vport_create(%s) failed\n",
2451 		       netdev->name);
2452 		return -EIO;
2453 	}
2454 
2455 	if (disabled) {
2456 		fc_vport_set_state(vport, FC_VPORT_DISABLED);
2457 	} else {
2458 		vn_port->boot_time = jiffies;
2459 		fc_fabric_login(vn_port);
2460 		fc_vport_setlink(vn_port);
2461 	}
2462 	return 0;
2463 }
2464 
2465 /**
2466  * fcoe_vport_destroy() - destroy the fc_host/scsi_host for a vport
2467  * @vport: fc_vport object that is being destroyed
2468  *
2469  * Returns: 0 for success
2470  */
2471 static int fcoe_vport_destroy(struct fc_vport *vport)
2472 {
2473 	struct Scsi_Host *shost = vport_to_shost(vport);
2474 	struct fc_lport *n_port = shost_priv(shost);
2475 	struct fc_lport *vn_port = vport->dd_data;
2476 	struct fcoe_port *port = lport_priv(vn_port);
2477 
2478 	mutex_lock(&n_port->lp_mutex);
2479 	list_del(&vn_port->list);
2480 	mutex_unlock(&n_port->lp_mutex);
2481 	queue_work(fcoe_wq, &port->destroy_work);
2482 	return 0;
2483 }
2484 
2485 /**
2486  * fcoe_vport_disable() - change vport state
2487  * @vport: vport to bring online/offline
2488  * @disable: should the vport be disabled?
2489  */
2490 static int fcoe_vport_disable(struct fc_vport *vport, bool disable)
2491 {
2492 	struct fc_lport *lport = vport->dd_data;
2493 
2494 	if (disable) {
2495 		fc_vport_set_state(vport, FC_VPORT_DISABLED);
2496 		fc_fabric_logoff(lport);
2497 	} else {
2498 		lport->boot_time = jiffies;
2499 		fc_fabric_login(lport);
2500 		fc_vport_setlink(lport);
2501 	}
2502 
2503 	return 0;
2504 }
2505 
2506 /**
2507  * fcoe_vport_set_symbolic_name() - append vport string to symbolic name
2508  * @vport: fc_vport with a new symbolic name string
2509  *
2510  * After generating a new symbolic name string, a new RSPN_ID request is
2511  * sent to the name server.  There is no response handler, so if it fails
2512  * for some reason it will not be retried.
2513  */
2514 static void fcoe_set_vport_symbolic_name(struct fc_vport *vport)
2515 {
2516 	struct fc_lport *lport = vport->dd_data;
2517 	struct fc_frame *fp;
2518 	size_t len;
2519 
2520 	snprintf(fc_host_symbolic_name(lport->host), FC_SYMBOLIC_NAME_SIZE,
2521 		 "%s v%s over %s : %s", FCOE_NAME, FCOE_VERSION,
2522 		 fcoe_netdev(lport)->name, vport->symbolic_name);
2523 
2524 	if (lport->state != LPORT_ST_READY)
2525 		return;
2526 
2527 	len = strnlen(fc_host_symbolic_name(lport->host), 255);
2528 	fp = fc_frame_alloc(lport,
2529 			    sizeof(struct fc_ct_hdr) +
2530 			    sizeof(struct fc_ns_rspn) + len);
2531 	if (!fp)
2532 		return;
2533 	lport->tt.elsct_send(lport, FC_FID_DIR_SERV, fp, FC_NS_RSPN_ID,
2534 			     NULL, NULL, 3 * lport->r_a_tov);
2535 }
2536 
2537 /**
2538  * fcoe_get_lesb() - Fill the FCoE Link Error Status Block
2539  * @lport: the local port
2540  * @fc_lesb: the link error status block
2541  */
2542 static void fcoe_get_lesb(struct fc_lport *lport,
2543 			 struct fc_els_lesb *fc_lesb)
2544 {
2545 	struct net_device *netdev = fcoe_netdev(lport);
2546 
2547 	__fcoe_get_lesb(lport, fc_lesb, netdev);
2548 }
2549 
2550 /**
2551  * fcoe_set_port_id() - Callback from libfc when Port_ID is set.
2552  * @lport: the local port
2553  * @port_id: the port ID
2554  * @fp: the received frame, if any, that caused the port_id to be set.
2555  *
2556  * This routine handles the case where we received a FLOGI and are
2557  * entering point-to-point mode.  We need to call fcoe_ctlr_recv_flogi()
2558  * so it can set the non-mapped mode and gateway address.
2559  *
2560  * The FLOGI LS_ACC is handled by fcoe_flogi_resp().
2561  */
2562 static void fcoe_set_port_id(struct fc_lport *lport,
2563 			     u32 port_id, struct fc_frame *fp)
2564 {
2565 	struct fcoe_port *port = lport_priv(lport);
2566 	struct fcoe_interface *fcoe = port->priv;
2567 
2568 	if (fp && fc_frame_payload_op(fp) == ELS_FLOGI)
2569 		fcoe_ctlr_recv_flogi(&fcoe->ctlr, lport, fp);
2570 }
2571