xref: /linux/drivers/scsi/fcoe/fcoe.c (revision 7c43185138cf523b0810ffd2c9e18e2ecb356730)
1 /*
2  * Copyright(c) 2007 - 2009 Intel Corporation. All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or modify it
5  * under the terms and conditions of the GNU General Public License,
6  * version 2, as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope it will be useful, but WITHOUT
9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
11  * more details.
12  *
13  * You should have received a copy of the GNU General Public License along with
14  * this program; if not, write to the Free Software Foundation, Inc.,
15  * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16  *
17  * Maintained at www.Open-FCoE.org
18  */
19 
20 #include <linux/module.h>
21 #include <linux/version.h>
22 #include <linux/spinlock.h>
23 #include <linux/netdevice.h>
24 #include <linux/etherdevice.h>
25 #include <linux/ethtool.h>
26 #include <linux/if_ether.h>
27 #include <linux/if_vlan.h>
28 #include <linux/crc32.h>
29 #include <linux/slab.h>
30 #include <linux/cpu.h>
31 #include <linux/fs.h>
32 #include <linux/sysfs.h>
33 #include <linux/ctype.h>
34 #include <linux/workqueue.h>
35 #include <scsi/scsi_tcq.h>
36 #include <scsi/scsicam.h>
37 #include <scsi/scsi_transport.h>
38 #include <scsi/scsi_transport_fc.h>
39 #include <net/rtnetlink.h>
40 
41 #include <scsi/fc/fc_encaps.h>
42 #include <scsi/fc/fc_fip.h>
43 
44 #include <scsi/libfc.h>
45 #include <scsi/fc_frame.h>
46 #include <scsi/libfcoe.h>
47 
48 #include "fcoe.h"
49 
50 MODULE_AUTHOR("Open-FCoE.org");
51 MODULE_DESCRIPTION("FCoE");
52 MODULE_LICENSE("GPL v2");
53 
54 /* Performance tuning parameters for fcoe */
55 static unsigned int fcoe_ddp_min;
56 module_param_named(ddp_min, fcoe_ddp_min, uint, S_IRUGO | S_IWUSR);
57 MODULE_PARM_DESC(ddp_min, "Minimum I/O size in bytes for "	\
58 		 "Direct Data Placement (DDP).");
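/*
 * Illustrative usage (not part of the driver): since ddp_min is declared
 * writable above, it can be set at module load time or tuned later through
 * the standard module parameter sysfs file, e.g.:
 *
 *	modprobe fcoe ddp_min=4096
 *	echo 4096 > /sys/module/fcoe/parameters/ddp_min
 */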
59 
60 DEFINE_MUTEX(fcoe_config_mutex);
61 
62 static struct workqueue_struct *fcoe_wq;
63 
64 /* fcoe_percpu_clean completion.  Waiter protected by fcoe_create_mutex */
65 static DECLARE_COMPLETION(fcoe_flush_completion);
66 
67 /* fcoe host list */
68 /* must only be accessed under the RTNL mutex */
69 LIST_HEAD(fcoe_hostlist);
70 DEFINE_PER_CPU(struct fcoe_percpu_s, fcoe_percpu);
71 
72 /* Function Prototypes */
73 static int fcoe_reset(struct Scsi_Host *);
74 static int fcoe_xmit(struct fc_lport *, struct fc_frame *);
75 static int fcoe_rcv(struct sk_buff *, struct net_device *,
76 		    struct packet_type *, struct net_device *);
77 static int fcoe_percpu_receive_thread(void *);
78 static void fcoe_percpu_clean(struct fc_lport *);
79 static int fcoe_link_speed_update(struct fc_lport *);
80 static int fcoe_link_ok(struct fc_lport *);
81 
82 static struct fc_lport *fcoe_hostlist_lookup(const struct net_device *);
83 static int fcoe_hostlist_add(const struct fc_lport *);
84 
85 static int fcoe_device_notification(struct notifier_block *, ulong, void *);
86 static void fcoe_dev_setup(void);
87 static void fcoe_dev_cleanup(void);
88 static struct fcoe_interface
89 *fcoe_hostlist_lookup_port(const struct net_device *);
90 
91 static int fcoe_fip_recv(struct sk_buff *, struct net_device *,
92 			 struct packet_type *, struct net_device *);
93 
94 static void fcoe_fip_send(struct fcoe_ctlr *, struct sk_buff *);
95 static void fcoe_update_src_mac(struct fc_lport *, u8 *);
96 static u8 *fcoe_get_src_mac(struct fc_lport *);
97 static void fcoe_destroy_work(struct work_struct *);
98 
99 static int fcoe_ddp_setup(struct fc_lport *, u16, struct scatterlist *,
100 			  unsigned int);
101 static int fcoe_ddp_done(struct fc_lport *, u16);
102 static int fcoe_ddp_target(struct fc_lport *, u16, struct scatterlist *,
103 			   unsigned int);
104 static int fcoe_cpu_callback(struct notifier_block *, unsigned long, void *);
105 
106 static bool fcoe_match(struct net_device *netdev);
107 static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode);
108 static int fcoe_destroy(struct net_device *netdev);
109 static int fcoe_enable(struct net_device *netdev);
110 static int fcoe_disable(struct net_device *netdev);
111 
112 static struct fc_seq *fcoe_elsct_send(struct fc_lport *,
113 				      u32 did, struct fc_frame *,
114 				      unsigned int op,
115 				      void (*resp)(struct fc_seq *,
116 						   struct fc_frame *,
117 						   void *),
118 				      void *, u32 timeout);
119 static void fcoe_recv_frame(struct sk_buff *skb);
120 
121 static void fcoe_get_lesb(struct fc_lport *, struct fc_els_lesb *);
122 
123 /* notification function for packets from net device */
124 static struct notifier_block fcoe_notifier = {
125 	.notifier_call = fcoe_device_notification,
126 };
127 
128 /* notification function for CPU hotplug events */
129 static struct notifier_block fcoe_cpu_notifier = {
130 	.notifier_call = fcoe_cpu_callback,
131 };
132 
133 static struct scsi_transport_template *fcoe_nport_scsi_transport;
134 static struct scsi_transport_template *fcoe_vport_scsi_transport;
135 
136 static int fcoe_vport_destroy(struct fc_vport *);
137 static int fcoe_vport_create(struct fc_vport *, bool disabled);
138 static int fcoe_vport_disable(struct fc_vport *, bool disable);
139 static void fcoe_set_vport_symbolic_name(struct fc_vport *);
140 static void fcoe_set_port_id(struct fc_lport *, u32, struct fc_frame *);
141 static int fcoe_validate_vport_create(struct fc_vport *);
142 
143 static struct libfc_function_template fcoe_libfc_fcn_templ = {
144 	.frame_send = fcoe_xmit,
145 	.ddp_setup = fcoe_ddp_setup,
146 	.ddp_done = fcoe_ddp_done,
147 	.ddp_target = fcoe_ddp_target,
148 	.elsct_send = fcoe_elsct_send,
149 	.get_lesb = fcoe_get_lesb,
150 	.lport_set_port_id = fcoe_set_port_id,
151 };
152 
153 struct fc_function_template fcoe_nport_fc_functions = {
154 	.show_host_node_name = 1,
155 	.show_host_port_name = 1,
156 	.show_host_supported_classes = 1,
157 	.show_host_supported_fc4s = 1,
158 	.show_host_active_fc4s = 1,
159 	.show_host_maxframe_size = 1,
160 
161 	.show_host_port_id = 1,
162 	.show_host_supported_speeds = 1,
163 	.get_host_speed = fc_get_host_speed,
164 	.show_host_speed = 1,
165 	.show_host_port_type = 1,
166 	.get_host_port_state = fc_get_host_port_state,
167 	.show_host_port_state = 1,
168 	.show_host_symbolic_name = 1,
169 
170 	.dd_fcrport_size = sizeof(struct fc_rport_libfc_priv),
171 	.show_rport_maxframe_size = 1,
172 	.show_rport_supported_classes = 1,
173 
174 	.show_host_fabric_name = 1,
175 	.show_starget_node_name = 1,
176 	.show_starget_port_name = 1,
177 	.show_starget_port_id = 1,
178 	.set_rport_dev_loss_tmo = fc_set_rport_loss_tmo,
179 	.show_rport_dev_loss_tmo = 1,
180 	.get_fc_host_stats = fc_get_host_stats,
181 	.issue_fc_host_lip = fcoe_reset,
182 
183 	.terminate_rport_io = fc_rport_terminate_io,
184 
185 	.vport_create = fcoe_vport_create,
186 	.vport_delete = fcoe_vport_destroy,
187 	.vport_disable = fcoe_vport_disable,
188 	.set_vport_symbolic_name = fcoe_set_vport_symbolic_name,
189 
190 	.bsg_request = fc_lport_bsg_request,
191 };
192 
193 struct fc_function_template fcoe_vport_fc_functions = {
194 	.show_host_node_name = 1,
195 	.show_host_port_name = 1,
196 	.show_host_supported_classes = 1,
197 	.show_host_supported_fc4s = 1,
198 	.show_host_active_fc4s = 1,
199 	.show_host_maxframe_size = 1,
200 
201 	.show_host_port_id = 1,
202 	.show_host_supported_speeds = 1,
203 	.get_host_speed = fc_get_host_speed,
204 	.show_host_speed = 1,
205 	.show_host_port_type = 1,
206 	.get_host_port_state = fc_get_host_port_state,
207 	.show_host_port_state = 1,
208 	.show_host_symbolic_name = 1,
209 
210 	.dd_fcrport_size = sizeof(struct fc_rport_libfc_priv),
211 	.show_rport_maxframe_size = 1,
212 	.show_rport_supported_classes = 1,
213 
214 	.show_host_fabric_name = 1,
215 	.show_starget_node_name = 1,
216 	.show_starget_port_name = 1,
217 	.show_starget_port_id = 1,
218 	.set_rport_dev_loss_tmo = fc_set_rport_loss_tmo,
219 	.show_rport_dev_loss_tmo = 1,
220 	.get_fc_host_stats = fc_get_host_stats,
221 	.issue_fc_host_lip = fcoe_reset,
222 
223 	.terminate_rport_io = fc_rport_terminate_io,
224 
225 	.bsg_request = fc_lport_bsg_request,
226 };
227 
228 static struct scsi_host_template fcoe_shost_template = {
229 	.module = THIS_MODULE,
230 	.name = "FCoE Driver",
231 	.proc_name = FCOE_NAME,
232 	.queuecommand = fc_queuecommand,
233 	.eh_abort_handler = fc_eh_abort,
234 	.eh_device_reset_handler = fc_eh_device_reset,
235 	.eh_host_reset_handler = fc_eh_host_reset,
236 	.slave_alloc = fc_slave_alloc,
237 	.change_queue_depth = fc_change_queue_depth,
238 	.change_queue_type = fc_change_queue_type,
239 	.this_id = -1,
240 	.cmd_per_lun = 3,
241 	.can_queue = FCOE_MAX_OUTSTANDING_COMMANDS,
242 	.use_clustering = ENABLE_CLUSTERING,
243 	.sg_tablesize = SG_ALL,
244 	.max_sectors = 0xffff,
245 };
246 
247 /**
248  * fcoe_interface_setup() - Setup a FCoE interface
249  * @fcoe:   The new FCoE interface
250  * @netdev: The net device that the fcoe interface is on
251  *
252  * Returns : 0 for success
253  * Locking: must be called with the RTNL mutex held
254  */
255 static int fcoe_interface_setup(struct fcoe_interface *fcoe,
256 				struct net_device *netdev)
257 {
258 	struct fcoe_ctlr *fip = &fcoe->ctlr;
259 	struct netdev_hw_addr *ha;
260 	struct net_device *real_dev;
261 	u8 flogi_maddr[ETH_ALEN];
262 	const struct net_device_ops *ops;
263 
264 	fcoe->netdev = netdev;
265 
266 	/* Let LLD initialize for FCoE */
267 	ops = netdev->netdev_ops;
268 	if (ops->ndo_fcoe_enable) {
269 		if (ops->ndo_fcoe_enable(netdev))
270 			FCOE_NETDEV_DBG(netdev, "Failed to enable FCoE"
271 					" specific feature for LLD.\n");
272 	}
273 
274 	/* Bonding devices are not supported */
275 	if (netdev->priv_flags & IFF_BONDING && netdev->flags & IFF_MASTER) {
276 		FCOE_NETDEV_DBG(netdev, "Bonded interfaces not supported\n");
277 		return -EOPNOTSUPP;
278 	}
279 
280 	/* Look for a SAN MAC address; if multiple SAN MACs exist, only
281 	 * use the first one for SPMA */
282 	real_dev = (netdev->priv_flags & IFF_802_1Q_VLAN) ?
283 		vlan_dev_real_dev(netdev) : netdev;
284 	rcu_read_lock();
285 	for_each_dev_addr(real_dev, ha) {
286 		if ((ha->type == NETDEV_HW_ADDR_T_SAN) &&
287 		    (is_valid_ether_addr(ha->addr))) {
288 			memcpy(fip->ctl_src_addr, ha->addr, ETH_ALEN);
289 			fip->spma = 1;
290 			break;
291 		}
292 	}
293 	rcu_read_unlock();
294 
295 	/* set up the source MAC address */
296 	if (!fip->spma)
297 		memcpy(fip->ctl_src_addr, netdev->dev_addr, netdev->addr_len);
298 
299 	/*
300 	 * Add FCoE MAC address as second unicast MAC address
301 	 * or enter promiscuous mode if not capable of listening
302 	 * for multiple unicast MACs.
303 	 */
304 	memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN);
305 	dev_uc_add(netdev, flogi_maddr);
306 	if (fip->spma)
307 		dev_uc_add(netdev, fip->ctl_src_addr);
308 	if (fip->mode == FIP_MODE_VN2VN) {
309 		dev_mc_add(netdev, FIP_ALL_VN2VN_MACS);
310 		dev_mc_add(netdev, FIP_ALL_P2P_MACS);
311 	} else
312 		dev_mc_add(netdev, FIP_ALL_ENODE_MACS);
313 
314 	/*
315 	 * setup the receive function from ethernet driver
316 	 * on the ethertype for the given device
317 	 */
318 	fcoe->fcoe_packet_type.func = fcoe_rcv;
319 	fcoe->fcoe_packet_type.type = __constant_htons(ETH_P_FCOE);
320 	fcoe->fcoe_packet_type.dev = netdev;
321 	dev_add_pack(&fcoe->fcoe_packet_type);
322 
323 	fcoe->fip_packet_type.func = fcoe_fip_recv;
324 	fcoe->fip_packet_type.type = htons(ETH_P_FIP);
325 	fcoe->fip_packet_type.dev = netdev;
326 	dev_add_pack(&fcoe->fip_packet_type);
327 
328 	return 0;
329 }
330 
331 /**
332  * fcoe_interface_create() - Create a FCoE interface on a net device
333  * @netdev: The net device to create the FCoE interface on
334  * @fip_mode: The mode to use for FIP
335  *
336  * Returns: pointer to a struct fcoe_interface or NULL on error
337  */
338 static struct fcoe_interface *fcoe_interface_create(struct net_device *netdev,
339 						    enum fip_state fip_mode)
340 {
341 	struct fcoe_interface *fcoe;
342 	int err;
343 
344 	if (!try_module_get(THIS_MODULE)) {
345 		FCOE_NETDEV_DBG(netdev,
346 				"Could not get a reference to the module\n");
347 		fcoe = ERR_PTR(-EBUSY);
348 		goto out;
349 	}
350 
351 	fcoe = kzalloc(sizeof(*fcoe), GFP_KERNEL);
352 	if (!fcoe) {
353 		FCOE_NETDEV_DBG(netdev, "Could not allocate fcoe structure\n");
354 		fcoe = ERR_PTR(-ENOMEM);
355 		goto out_nomod;
356 	}
357 
358 	dev_hold(netdev);
359 	kref_init(&fcoe->kref);
360 
361 	/*
362 	 * Initialize FIP.
363 	 */
364 	fcoe_ctlr_init(&fcoe->ctlr, fip_mode);
365 	fcoe->ctlr.send = fcoe_fip_send;
366 	fcoe->ctlr.update_mac = fcoe_update_src_mac;
367 	fcoe->ctlr.get_src_addr = fcoe_get_src_mac;
368 
369 	err = fcoe_interface_setup(fcoe, netdev);
370 	if (err) {
371 		fcoe_ctlr_destroy(&fcoe->ctlr);
372 		kfree(fcoe);
373 		dev_put(netdev);
374 		fcoe = ERR_PTR(err);
375 		goto out_nomod;
376 	}
377 
378 	goto out;
379 
380 out_nomod:
381 	module_put(THIS_MODULE);
382 out:
383 	return fcoe;
384 }
385 
386 /**
387  * fcoe_interface_release() - fcoe_port kref release function
388  * @kref: Embedded reference count in an fcoe_interface struct
389  */
390 static void fcoe_interface_release(struct kref *kref)
391 {
392 	struct fcoe_interface *fcoe;
393 	struct net_device *netdev;
394 
395 	fcoe = container_of(kref, struct fcoe_interface, kref);
396 	netdev = fcoe->netdev;
397 	/* tear-down the FCoE controller */
398 	fcoe_ctlr_destroy(&fcoe->ctlr);
399 	kfree(fcoe);
400 	dev_put(netdev);
401 	module_put(THIS_MODULE);
402 }
403 
404 /**
405  * fcoe_interface_get() - Get a reference to a FCoE interface
406  * @fcoe: The FCoE interface to be held
407  */
408 static inline void fcoe_interface_get(struct fcoe_interface *fcoe)
409 {
410 	kref_get(&fcoe->kref);
411 }
412 
413 /**
414  * fcoe_interface_put() - Put a reference to a FCoE interface
415  * @fcoe: The FCoE interface to be released
416  */
417 static inline void fcoe_interface_put(struct fcoe_interface *fcoe)
418 {
419 	kref_put(&fcoe->kref, fcoe_interface_release);
420 }
421 
422 /**
423  * fcoe_interface_cleanup() - Clean up a FCoE interface
424  * @fcoe: The FCoE interface to be cleaned up
425  *
426  * Caller must be holding the RTNL mutex
427  */
428 void fcoe_interface_cleanup(struct fcoe_interface *fcoe)
429 {
430 	struct net_device *netdev = fcoe->netdev;
431 	struct fcoe_ctlr *fip = &fcoe->ctlr;
432 	u8 flogi_maddr[ETH_ALEN];
433 	const struct net_device_ops *ops;
434 
435 	/*
436 	 * Don't listen for Ethernet packets anymore.
437 	 * synchronize_net() ensures that the packet handlers are not running
438 	 * on another CPU. dev_remove_pack() would do that, but this calls the
439 	 * unsynchronized version __dev_remove_pack() to avoid multiple delays.
440 	 */
441 	__dev_remove_pack(&fcoe->fcoe_packet_type);
442 	__dev_remove_pack(&fcoe->fip_packet_type);
443 	synchronize_net();
444 
445 	/* Delete secondary MAC addresses */
446 	memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN);
447 	dev_uc_del(netdev, flogi_maddr);
448 	if (fip->spma)
449 		dev_uc_del(netdev, fip->ctl_src_addr);
450 	if (fip->mode == FIP_MODE_VN2VN) {
451 		dev_mc_del(netdev, FIP_ALL_VN2VN_MACS);
452 		dev_mc_del(netdev, FIP_ALL_P2P_MACS);
453 	} else
454 		dev_mc_del(netdev, FIP_ALL_ENODE_MACS);
455 
456 	/* Tell the LLD we are done w/ FCoE */
457 	ops = netdev->netdev_ops;
458 	if (ops->ndo_fcoe_disable) {
459 		if (ops->ndo_fcoe_disable(netdev))
460 			FCOE_NETDEV_DBG(netdev, "Failed to disable FCoE"
461 					" specific feature for LLD.\n");
462 	}
463 
464 	/* Release the self-reference taken during fcoe_interface_create() */
465 	fcoe_interface_put(fcoe);
466 }
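/*
 * Reference-count lifecycle, as a minimal sketch (illustrative only, and
 * assuming the RTNL and fcoe_config_mutex rules documented on the functions
 * involved are honoured by the caller):
 *
 *	fcoe = fcoe_interface_create(netdev, FIP_MODE_FABRIC);
 *	lport = fcoe_if_create(fcoe, &netdev->dev, 0);	 takes its own kref
 *	...
 *	fcoe_if_destroy(lport);		 drops the fcoe_if_create() reference
 *	fcoe_interface_cleanup(fcoe);	 drops the initial self-reference
 */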
467 
468 /**
469  * fcoe_fip_recv() - Handler for received FIP frames
470  * @skb:      The receive skb
471  * @netdev:   The associated net device
472  * @ptype:    The packet_type structure which was used to register this handler
473  * @orig_dev: The original net_device that the skb was received on.
474  *	      (in case dev is a bond)
475  *
476  * Returns: 0 for success
477  */
478 static int fcoe_fip_recv(struct sk_buff *skb, struct net_device *netdev,
479 			 struct packet_type *ptype,
480 			 struct net_device *orig_dev)
481 {
482 	struct fcoe_interface *fcoe;
483 
484 	fcoe = container_of(ptype, struct fcoe_interface, fip_packet_type);
485 	fcoe_ctlr_recv(&fcoe->ctlr, skb);
486 	return 0;
487 }
488 
489 /**
490  * fcoe_port_send() - Send an Ethernet-encapsulated FIP/FCoE frame
491  * @port: The FCoE port
492  * @skb: The FIP/FCoE packet to be sent
493  */
494 static void fcoe_port_send(struct fcoe_port *port, struct sk_buff *skb)
495 {
496 	if (port->fcoe_pending_queue.qlen)
497 		fcoe_check_wait_queue(port->lport, skb);
498 	else if (fcoe_start_io(skb))
499 		fcoe_check_wait_queue(port->lport, skb);
500 }
501 
502 /**
503  * fcoe_fip_send() - Send an Ethernet-encapsulated FIP frame
504  * @fip: The FCoE controller
505  * @skb: The FIP packet to be sent
506  */
507 static void fcoe_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
508 {
509 	skb->dev = fcoe_from_ctlr(fip)->netdev;
510 	fcoe_port_send(lport_priv(fip->lp), skb);
511 }
512 
513 /**
514  * fcoe_update_src_mac() - Update the Ethernet MAC filters
515  * @lport: The local port to update the source MAC on
516  * @addr:  Unicast MAC address to add
517  *
518  * Remove any previously-set unicast MAC filter.
519  * Add secondary FCoE MAC address filter for our OUI.
520  */
521 static void fcoe_update_src_mac(struct fc_lport *lport, u8 *addr)
522 {
523 	struct fcoe_port *port = lport_priv(lport);
524 	struct fcoe_interface *fcoe = port->priv;
525 
526 	rtnl_lock();
527 	if (!is_zero_ether_addr(port->data_src_addr))
528 		dev_uc_del(fcoe->netdev, port->data_src_addr);
529 	if (!is_zero_ether_addr(addr))
530 		dev_uc_add(fcoe->netdev, addr);
531 	memcpy(port->data_src_addr, addr, ETH_ALEN);
532 	rtnl_unlock();
533 }
534 
535 /**
536  * fcoe_get_src_mac() - return the Ethernet source address for an lport
537  * @lport: libfc lport
538  */
539 static u8 *fcoe_get_src_mac(struct fc_lport *lport)
540 {
541 	struct fcoe_port *port = lport_priv(lport);
542 
543 	return port->data_src_addr;
544 }
545 
546 /**
547  * fcoe_lport_config() - Set up a local port
548  * @lport: The local port to be setup
549  *
550  * Returns: 0 for success
551  */
552 static int fcoe_lport_config(struct fc_lport *lport)
553 {
554 	lport->link_up = 0;
555 	lport->qfull = 0;
556 	lport->max_retry_count = 3;
557 	lport->max_rport_retry_count = 3;
558 	lport->e_d_tov = 2 * 1000;	/* FC-FS default */
559 	lport->r_a_tov = 2 * 2 * 1000;
560 	lport->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
561 				 FCP_SPPF_RETRY | FCP_SPPF_CONF_COMPL);
562 	lport->does_npiv = 1;
563 
564 	fc_lport_init_stats(lport);
565 
566 	/* lport fc_lport related configuration */
567 	fc_lport_config(lport);
568 
569 	/* offload related configuration */
570 	lport->crc_offload = 0;
571 	lport->seq_offload = 0;
572 	lport->lro_enabled = 0;
573 	lport->lro_xid = 0;
574 	lport->lso_max = 0;
575 
576 	return 0;
577 }
578 
579 /**
580  * fcoe_get_wwn() - Get the world wide name from LLD if it supports it
581  * @netdev: the associated net device
582  * @wwn: the output WWN
583  * @type: the type of WWN (WWPN or WWNN)
584  *
585  * Returns: 0 for success
586  */
587 static int fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type)
588 {
589 	const struct net_device_ops *ops = netdev->netdev_ops;
590 
591 	if (ops->ndo_fcoe_get_wwn)
592 		return ops->ndo_fcoe_get_wwn(netdev, wwn, type);
593 	return -EINVAL;
594 }
595 
596 /**
597  * fcoe_netdev_features_change - Updates the lport's offload flags based
598  * on the LLD netdev's FCoE feature flags
599  */
600 static void fcoe_netdev_features_change(struct fc_lport *lport,
601 					struct net_device *netdev)
602 {
603 	mutex_lock(&lport->lp_mutex);
604 
605 	if (netdev->features & NETIF_F_SG)
606 		lport->sg_supp = 1;
607 	else
608 		lport->sg_supp = 0;
609 
610 	if (netdev->features & NETIF_F_FCOE_CRC) {
611 		lport->crc_offload = 1;
612 		FCOE_NETDEV_DBG(netdev, "Supports FCCRC offload\n");
613 	} else {
614 		lport->crc_offload = 0;
615 	}
616 
617 	if (netdev->features & NETIF_F_FSO) {
618 		lport->seq_offload = 1;
619 		lport->lso_max = netdev->gso_max_size;
620 		FCOE_NETDEV_DBG(netdev, "Supports LSO for max len 0x%x\n",
621 				lport->lso_max);
622 	} else {
623 		lport->seq_offload = 0;
624 		lport->lso_max = 0;
625 	}
626 
627 	if (netdev->fcoe_ddp_xid) {
628 		lport->lro_enabled = 1;
629 		lport->lro_xid = netdev->fcoe_ddp_xid;
630 		FCOE_NETDEV_DBG(netdev, "Supports LRO for max xid 0x%x\n",
631 				lport->lro_xid);
632 	} else {
633 		lport->lro_enabled = 0;
634 		lport->lro_xid = 0;
635 	}
636 
637 	mutex_unlock(&lport->lp_mutex);
638 }
639 
640 /**
641  * fcoe_netdev_config() - Set up a net device for SW FCoE
642  * @lport:  The local port that is associated with the net device
643  * @netdev: The associated net device
644  *
645  * Must be called after fcoe_lport_config() as it will use the local port mutex
646  *
647  * Returns: 0 for success
648  */
649 static int fcoe_netdev_config(struct fc_lport *lport, struct net_device *netdev)
650 {
651 	u32 mfs;
652 	u64 wwnn, wwpn;
653 	struct fcoe_interface *fcoe;
654 	struct fcoe_port *port;
655 
656 	/* Setup lport private data to point to fcoe softc */
657 	port = lport_priv(lport);
658 	fcoe = port->priv;
659 
660 	/*
661 	 * Determine max frame size based on underlying device and optional
662 	 * user-configured limit.  If the MFS is too low, fcoe_link_ok()
663 	 * will return 0, so do this first.
664 	 */
665 	mfs = netdev->mtu;
666 	if (netdev->features & NETIF_F_FCOE_MTU) {
667 		mfs = FCOE_MTU;
668 		FCOE_NETDEV_DBG(netdev, "Supports FCOE_MTU of %d bytes\n", mfs);
669 	}
670 	mfs -= (sizeof(struct fcoe_hdr) + sizeof(struct fcoe_crc_eof));
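	/*
	 * Worked example (illustrative): with FCOE_MTU of 2158 bytes, the
	 * 14-byte FCoE header and 8-byte CRC/EOF trailer leave an MFS of
	 * 2136 bytes, i.e. the 24-byte FC header plus a 2112-byte maximum
	 * FC payload.
	 */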
671 	if (fc_set_mfs(lport, mfs))
672 		return -EINVAL;
673 
674 	/* offload features support */
675 	fcoe_netdev_features_change(lport, netdev);
676 
677 	skb_queue_head_init(&port->fcoe_pending_queue);
678 	port->fcoe_pending_queue_active = 0;
679 	setup_timer(&port->timer, fcoe_queue_timer, (unsigned long)lport);
680 
681 	fcoe_link_speed_update(lport);
682 
683 	if (!lport->vport) {
684 		if (fcoe_get_wwn(netdev, &wwnn, NETDEV_FCOE_WWNN))
685 			wwnn = fcoe_wwn_from_mac(fcoe->ctlr.ctl_src_addr, 1, 0);
686 		fc_set_wwnn(lport, wwnn);
687 		if (fcoe_get_wwn(netdev, &wwpn, NETDEV_FCOE_WWPN))
688 			wwpn = fcoe_wwn_from_mac(fcoe->ctlr.ctl_src_addr,
689 						 2, 0);
690 		fc_set_wwpn(lport, wwpn);
691 	}
692 
693 	return 0;
694 }
695 
696 /**
697  * fcoe_shost_config() - Set up the SCSI host associated with a local port
698  * @lport: The local port
699  * @dev:   The device associated with the SCSI host
700  *
701  * Must be called after fcoe_lport_config() and fcoe_netdev_config()
702  *
703  * Returns: 0 for success
704  */
705 static int fcoe_shost_config(struct fc_lport *lport, struct device *dev)
706 {
707 	int rc = 0;
708 
709 	/* lport scsi host config */
710 	lport->host->max_lun = FCOE_MAX_LUN;
711 	lport->host->max_id = FCOE_MAX_FCP_TARGET;
712 	lport->host->max_channel = 0;
713 	lport->host->max_cmd_len = FCOE_MAX_CMD_LEN;
714 
715 	if (lport->vport)
716 		lport->host->transportt = fcoe_vport_scsi_transport;
717 	else
718 		lport->host->transportt = fcoe_nport_scsi_transport;
719 
720 	/* add the new host to the SCSI-ml */
721 	rc = scsi_add_host(lport->host, dev);
722 	if (rc) {
723 		FCOE_NETDEV_DBG(fcoe_netdev(lport), "fcoe_shost_config: "
724 				"error on scsi_add_host\n");
725 		return rc;
726 	}
727 
728 	if (!lport->vport)
729 		fc_host_max_npiv_vports(lport->host) = USHRT_MAX;
730 
731 	snprintf(fc_host_symbolic_name(lport->host), FC_SYMBOLIC_NAME_SIZE,
732 		 "%s v%s over %s", FCOE_NAME, FCOE_VERSION,
733 		 fcoe_netdev(lport)->name);
734 
735 	return 0;
736 }
737 
738 /**
739  * fcoe_oem_match() - The match routine for the offloaded exchange manager
740  * @fp: The I/O frame
741  *
742  * This routine will be associated with an exchange manager (EM). When
743  * the libfc exchange handling code is looking for an EM to use it will
744  * call this routine and pass it the frame that it wishes to send. This
745  * routine will return True if the associated EM is to be used and False
746  * if the exchange code should continue looking for an EM.
747  *
748  * The offload EM that this routine is associated with will handle any
749  * packets that are for SCSI read requests.
750  *
751  * This has been enhanced to work when FCoE stack is operating in target
752  * mode.
753  *
754  * Returns: true for read-type I/O (and, in target mode, qualifying write commands); otherwise false.
755  */
756 bool fcoe_oem_match(struct fc_frame *fp)
757 {
758 	struct fc_frame_header *fh = fc_frame_header_get(fp);
759 	struct fcp_cmnd *fcp;
760 
761 	if (fc_fcp_is_read(fr_fsp(fp)) &&
762 	    (fr_fsp(fp)->data_len > fcoe_ddp_min))
763 		return true;
764 	else if (!(ntoh24(fh->fh_f_ctl) & FC_FC_EX_CTX)) {
765 		fcp = fc_frame_payload_get(fp, sizeof(*fcp));
766 		if (ntohs(fh->fh_rx_id) == FC_XID_UNKNOWN &&
767 		    fcp && (ntohl(fcp->fc_dl) > fcoe_ddp_min) &&
768 		    (fcp->fc_flags & FCP_CFL_WRDATA))
769 			return true;
770 	}
771 	return false;
772 }
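/*
 * Illustrative note: fcoe_em_config() below is what attaches this match
 * routine to the offload EM, e.g.:
 *
 *	fcoe->oem = fc_exch_mgr_alloc(lport, FC_CLASS_3, FCOE_MIN_XID,
 *				      lport->lro_xid, fcoe_oem_match);
 */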
773 
774 /**
775  * fcoe_em_config() - Allocate and configure an exchange manager
776  * @lport: The local port that the new EM will be associated with
777  *
778  * Returns: 0 on success
779  */
780 static inline int fcoe_em_config(struct fc_lport *lport)
781 {
782 	struct fcoe_port *port = lport_priv(lport);
783 	struct fcoe_interface *fcoe = port->priv;
784 	struct fcoe_interface *oldfcoe = NULL;
785 	struct net_device *old_real_dev, *cur_real_dev;
786 	u16 min_xid = FCOE_MIN_XID;
787 	u16 max_xid = FCOE_MAX_XID;
788 
789 	/*
790 	 * Check if an EM instance needs to be allocated for
791 	 * offload exchange IDs to be shared across all VN_PORTs/lports.
792 	 */
793 	if (!lport->lro_enabled || !lport->lro_xid ||
794 	    (lport->lro_xid >= max_xid)) {
795 		lport->lro_xid = 0;
796 		goto skip_oem;
797 	}
798 
799 	/*
800 	 * Reuse an existing offload EM instance if one is
801 	 * already allocated on the real Ethernet device
802 	 */
803 	if (fcoe->netdev->priv_flags & IFF_802_1Q_VLAN)
804 		cur_real_dev = vlan_dev_real_dev(fcoe->netdev);
805 	else
806 		cur_real_dev = fcoe->netdev;
807 
808 	list_for_each_entry(oldfcoe, &fcoe_hostlist, list) {
809 		if (oldfcoe->netdev->priv_flags & IFF_802_1Q_VLAN)
810 			old_real_dev = vlan_dev_real_dev(oldfcoe->netdev);
811 		else
812 			old_real_dev = oldfcoe->netdev;
813 
814 		if (cur_real_dev == old_real_dev) {
815 			fcoe->oem = oldfcoe->oem;
816 			break;
817 		}
818 	}
819 
820 	if (fcoe->oem) {
821 		if (!fc_exch_mgr_add(lport, fcoe->oem, fcoe_oem_match)) {
822 			printk(KERN_ERR "fcoe_em_config: failed to add "
823 			       "offload em:%p on interface:%s\n",
824 			       fcoe->oem, fcoe->netdev->name);
825 			return -ENOMEM;
826 		}
827 	} else {
828 		fcoe->oem = fc_exch_mgr_alloc(lport, FC_CLASS_3,
829 					      FCOE_MIN_XID, lport->lro_xid,
830 					      fcoe_oem_match);
831 		if (!fcoe->oem) {
832 			printk(KERN_ERR "fcoe_em_config: failed to allocate "
833 			       "em for offload exches on interface:%s\n",
834 			       fcoe->netdev->name);
835 			return -ENOMEM;
836 		}
837 	}
838 
839 	/*
840 	 * Exclude offload EM xid range from next EM xid range.
841 	 */
842 	min_xid += lport->lro_xid + 1;
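	/*
	 * Illustrative example: with an lro_xid of N, the offload EM above
	 * owns XIDs [FCOE_MIN_XID, N] and the default EM allocated below
	 * starts at FCOE_MIN_XID + N + 1.
	 */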
843 
844 skip_oem:
845 	if (!fc_exch_mgr_alloc(lport, FC_CLASS_3, min_xid, max_xid, NULL)) {
846 		printk(KERN_ERR "fcoe_em_config: failed to "
847 		       "allocate em on interface %s\n", fcoe->netdev->name);
848 		return -ENOMEM;
849 	}
850 
851 	return 0;
852 }
853 
854 /**
855  * fcoe_if_destroy() - Tear down a SW FCoE instance
856  * @lport: The local port to be destroyed
857  *
858  */
859 static void fcoe_if_destroy(struct fc_lport *lport)
860 {
861 	struct fcoe_port *port = lport_priv(lport);
862 	struct fcoe_interface *fcoe = port->priv;
863 	struct net_device *netdev = fcoe->netdev;
864 
865 	FCOE_NETDEV_DBG(netdev, "Destroying interface\n");
866 
867 	/* Logout of the fabric */
868 	fc_fabric_logoff(lport);
869 
870 	/* Cleanup the fc_lport */
871 	fc_lport_destroy(lport);
872 
873 	/* Stop the transmit retry timer */
874 	del_timer_sync(&port->timer);
875 
876 	/* Free existing transmit skbs */
877 	fcoe_clean_pending_queue(lport);
878 
879 	rtnl_lock();
880 	if (!is_zero_ether_addr(port->data_src_addr))
881 		dev_uc_del(netdev, port->data_src_addr);
882 	rtnl_unlock();
883 
884 	/* Release reference held in fcoe_if_create() */
885 	fcoe_interface_put(fcoe);
886 
887 	/* Free queued packets for the per-CPU receive threads */
888 	fcoe_percpu_clean(lport);
889 
890 	/* Detach from the scsi-ml */
891 	fc_remove_host(lport->host);
892 	scsi_remove_host(lport->host);
893 
894 	/* Destroy lport scsi_priv */
895 	fc_fcp_destroy(lport);
896 
897 	/* There are no more rports or I/O, free the EM */
898 	fc_exch_mgr_free(lport);
899 
900 	/* Free memory used by statistical counters */
901 	fc_lport_free_stats(lport);
902 
903 	/* Release the Scsi_Host */
904 	scsi_host_put(lport->host);
905 }
906 
907 /**
908  * fcoe_ddp_setup() - Call a LLD's ddp_setup through the net device
909  * @lport: The local port to setup DDP for
910  * @xid:   The exchange ID for this DDP transfer
911  * @sgl:   The scatterlist describing this transfer
912  * @sgc:   The number of sg items
913  *
914  * Returns: 0 if the DDP context was not configured
915  */
916 static int fcoe_ddp_setup(struct fc_lport *lport, u16 xid,
917 			  struct scatterlist *sgl, unsigned int sgc)
918 {
919 	struct net_device *netdev = fcoe_netdev(lport);
920 
921 	if (netdev->netdev_ops->ndo_fcoe_ddp_setup)
922 		return netdev->netdev_ops->ndo_fcoe_ddp_setup(netdev,
923 							      xid, sgl,
924 							      sgc);
925 
926 	return 0;
927 }
928 
929 /**
930  * fcoe_ddp_target() - Call a LLD's ddp_target through the net device
931  * @lport: The local port to setup DDP for
932  * @xid:   The exchange ID for this DDP transfer
933  * @sgl:   The scatterlist describing this transfer
934  * @sgc:   The number of sg items
935  *
936  * Returns: 0 if the DDP context was not configured
937  */
938 static int fcoe_ddp_target(struct fc_lport *lport, u16 xid,
939 			   struct scatterlist *sgl, unsigned int sgc)
940 {
941 	struct net_device *netdev = fcoe_netdev(lport);
942 
943 	if (netdev->netdev_ops->ndo_fcoe_ddp_target)
944 		return netdev->netdev_ops->ndo_fcoe_ddp_target(netdev, xid,
945 							       sgl, sgc);
946 
947 	return 0;
948 }
949 
950 
951 /**
952  * fcoe_ddp_done() - Call a LLD's ddp_done through the net device
953  * @lport: The local port to complete DDP on
954  * @xid:   The exchange ID for this DDP transfer
955  *
956  * Returns: the length of data that have been completed by DDP
957  */
958 static int fcoe_ddp_done(struct fc_lport *lport, u16 xid)
959 {
960 	struct net_device *netdev = fcoe_netdev(lport);
961 
962 	if (netdev->netdev_ops->ndo_fcoe_ddp_done)
963 		return netdev->netdev_ops->ndo_fcoe_ddp_done(netdev, xid);
964 	return 0;
965 }
966 
967 /**
968  * fcoe_if_create() - Create a FCoE instance on an interface
969  * @fcoe:   The FCoE interface to create a local port on
970  * @parent: The device pointer to be the parent in sysfs for the SCSI host
971  * @npiv:   Indicates if the port is a vport or not
972  *
973  * Creates a fc_lport instance and a Scsi_Host instance and configure them.
974  *
975  * Returns: The allocated fc_lport or an error pointer
976  */
977 static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe,
978 				       struct device *parent, int npiv)
979 {
980 	struct net_device *netdev = fcoe->netdev;
981 	struct fc_lport *lport, *n_port;
982 	struct fcoe_port *port;
983 	struct Scsi_Host *shost;
984 	int rc;
985 	/*
986 	 * parent is only a vport if npiv is 1,
987 	 * but we'll only use vport in that case so go ahead and set it
988 	 */
989 	struct fc_vport *vport = dev_to_vport(parent);
990 
991 	FCOE_NETDEV_DBG(netdev, "Create Interface\n");
992 
993 	if (!npiv)
994 		lport = libfc_host_alloc(&fcoe_shost_template, sizeof(*port));
995 	else
996 		lport = libfc_vport_create(vport, sizeof(*port));
997 
998 	if (!lport) {
999 		FCOE_NETDEV_DBG(netdev, "Could not allocate host structure\n");
1000 		rc = -ENOMEM;
1001 		goto out;
1002 	}
1003 	port = lport_priv(lport);
1004 	port->lport = lport;
1005 	port->priv = fcoe;
1006 	port->max_queue_depth = FCOE_MAX_QUEUE_DEPTH;
1007 	port->min_queue_depth = FCOE_MIN_QUEUE_DEPTH;
1008 	INIT_WORK(&port->destroy_work, fcoe_destroy_work);
1009 
1010 	/* configure a fc_lport including the exchange manager */
1011 	rc = fcoe_lport_config(lport);
1012 	if (rc) {
1013 		FCOE_NETDEV_DBG(netdev, "Could not configure lport for the "
1014 				"interface\n");
1015 		goto out_host_put;
1016 	}
1017 
1018 	if (npiv) {
1019 		FCOE_NETDEV_DBG(netdev, "Setting vport names, "
1020 				"%16.16llx %16.16llx\n",
1021 				vport->node_name, vport->port_name);
1022 		fc_set_wwnn(lport, vport->node_name);
1023 		fc_set_wwpn(lport, vport->port_name);
1024 	}
1025 
1026 	/* configure lport network properties */
1027 	rc = fcoe_netdev_config(lport, netdev);
1028 	if (rc) {
1029 		FCOE_NETDEV_DBG(netdev, "Could not configure netdev for the "
1030 				"interface\n");
1031 		goto out_lp_destroy;
1032 	}
1033 
1034 	/* configure lport scsi host properties */
1035 	rc = fcoe_shost_config(lport, parent);
1036 	if (rc) {
1037 		FCOE_NETDEV_DBG(netdev, "Could not configure shost for the "
1038 				"interface\n");
1039 		goto out_lp_destroy;
1040 	}
1041 
1042 	/* Initialize the library */
1043 	rc = fcoe_libfc_config(lport, &fcoe->ctlr, &fcoe_libfc_fcn_templ, 1);
1044 	if (rc) {
1045 		FCOE_NETDEV_DBG(netdev, "Could not configure libfc for the "
1046 				"interface\n");
1047 		goto out_lp_destroy;
1048 	}
1049 
1050 	/*
1051 	 * fcoe_em_config() and fcoe_hostlist_add() both
1052 	 * need to be atomic with respect to other changes to the
1053 	 * hostlist since fcoe_em_config() looks for an existing EM
1054 	 * instance on the host list updated by fcoe_hostlist_add().
1055 	 *
1056 	 * This is currently handled through the fcoe_config_mutex
1057 	 * being held.
1058 	 */
1059 	if (!npiv)
1060 		/* lport exch manager allocation */
1061 		rc = fcoe_em_config(lport);
1062 	else {
1063 		shost = vport_to_shost(vport);
1064 		n_port = shost_priv(shost);
1065 		rc = fc_exch_mgr_list_clone(n_port, lport);
1066 	}
1067 
1068 	if (rc) {
1069 		FCOE_NETDEV_DBG(netdev, "Could not configure the EM\n");
1070 		goto out_lp_destroy;
1071 	}
1072 
1073 	fcoe_interface_get(fcoe);
1074 	return lport;
1075 
1076 out_lp_destroy:
1077 	fc_exch_mgr_free(lport);
1078 out_host_put:
1079 	scsi_host_put(lport->host);
1080 out:
1081 	return ERR_PTR(rc);
1082 }
1083 
1084 /**
1085  * fcoe_if_init() - Initialization routine for fcoe.ko
1086  *
1087  * Attaches the SW FCoE transport to the FC transport
1088  *
1089  * Returns: 0 on success
1090  */
1091 static int __init fcoe_if_init(void)
1092 {
1093 	/* attach to scsi transport */
1094 	fcoe_nport_scsi_transport =
1095 		fc_attach_transport(&fcoe_nport_fc_functions);
1096 	fcoe_vport_scsi_transport =
1097 		fc_attach_transport(&fcoe_vport_fc_functions);
1098 
1099 	if (!fcoe_nport_scsi_transport) {
1100 		printk(KERN_ERR "fcoe: Failed to attach to the FC transport\n");
1101 		return -ENODEV;
1102 	}
1103 
1104 	return 0;
1105 }
1106 
1107 /**
1108  * fcoe_if_exit() - Tear down fcoe.ko
1109  *
1110  * Detaches the SW FCoE transport from the FC transport
1111  *
1112  * Returns: 0 on success
1113  */
1114 int __exit fcoe_if_exit(void)
1115 {
1116 	fc_release_transport(fcoe_nport_scsi_transport);
1117 	fc_release_transport(fcoe_vport_scsi_transport);
1118 	fcoe_nport_scsi_transport = NULL;
1119 	fcoe_vport_scsi_transport = NULL;
1120 	return 0;
1121 }
1122 
1123 /**
1124  * fcoe_percpu_thread_create() - Create a receive thread for an online CPU
1125  * @cpu: The CPU index of the CPU to create a receive thread for
1126  */
1127 static void fcoe_percpu_thread_create(unsigned int cpu)
1128 {
1129 	struct fcoe_percpu_s *p;
1130 	struct task_struct *thread;
1131 
1132 	p = &per_cpu(fcoe_percpu, cpu);
1133 
1134 	thread = kthread_create(fcoe_percpu_receive_thread,
1135 				(void *)p, "fcoethread/%d", cpu);
1136 
1137 	if (likely(!IS_ERR(thread))) {
1138 		kthread_bind(thread, cpu);
1139 		wake_up_process(thread);
1140 
1141 		spin_lock_bh(&p->fcoe_rx_list.lock);
1142 		p->thread = thread;
1143 		spin_unlock_bh(&p->fcoe_rx_list.lock);
1144 	}
1145 }
1146 
1147 /**
1148  * fcoe_percpu_thread_destroy() - Remove the receive thread of a CPU
1149  * @cpu: The CPU index of the CPU whose receive thread is to be destroyed
1150  *
1151  * Destroys a per-CPU Rx thread. Any pending skbs are moved to the
1152  * current CPU's Rx thread. If the thread being destroyed is bound to
1153  * the CPU processing this context the skbs will be freed.
1154  */
1155 static void fcoe_percpu_thread_destroy(unsigned int cpu)
1156 {
1157 	struct fcoe_percpu_s *p;
1158 	struct task_struct *thread;
1159 	struct page *crc_eof;
1160 	struct sk_buff *skb;
1161 #ifdef CONFIG_SMP
1162 	struct fcoe_percpu_s *p0;
1163 	unsigned targ_cpu = get_cpu();
1164 #endif /* CONFIG_SMP */
1165 
1166 	FCOE_DBG("Destroying receive thread for CPU %d\n", cpu);
1167 
1168 	/* Prevent any new skbs from being queued for this CPU. */
1169 	p = &per_cpu(fcoe_percpu, cpu);
1170 	spin_lock_bh(&p->fcoe_rx_list.lock);
1171 	thread = p->thread;
1172 	p->thread = NULL;
1173 	crc_eof = p->crc_eof_page;
1174 	p->crc_eof_page = NULL;
1175 	p->crc_eof_offset = 0;
1176 	spin_unlock_bh(&p->fcoe_rx_list.lock);
1177 
1178 #ifdef CONFIG_SMP
1179 	/*
1180 	 * Don't bother moving the skb's if this context is running
1181 	 * on the same CPU that is having its thread destroyed. This
1182 	 * can easily happen when the module is removed.
1183 	 */
1184 	if (cpu != targ_cpu) {
1185 		p0 = &per_cpu(fcoe_percpu, targ_cpu);
1186 		spin_lock_bh(&p0->fcoe_rx_list.lock);
1187 		if (p0->thread) {
1188 			FCOE_DBG("Moving frames from CPU %d to CPU %d\n",
1189 				 cpu, targ_cpu);
1190 
1191 			while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL)
1192 				__skb_queue_tail(&p0->fcoe_rx_list, skb);
1193 			spin_unlock_bh(&p0->fcoe_rx_list.lock);
1194 		} else {
1195 			/*
1196 			 * The targeted CPU is not initialized and cannot accept
1197 			 * new	skbs. Unlock the targeted CPU and drop the skbs
1198 			 * on the CPU that is going offline.
1199 			 */
1200 			while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL)
1201 				kfree_skb(skb);
1202 			spin_unlock_bh(&p0->fcoe_rx_list.lock);
1203 		}
1204 	} else {
1205 		/*
1206 		 * This scenario occurs when the module is being removed
1207 		 * and all threads are being destroyed. skbs will continue
1208 		 * to be shifted from the CPU thread that is being removed
1209 		 * to the CPU thread associated with the CPU that is processing
1210 		 * the module removal. Once there is only one CPU Rx thread it
1211 		 * will reach this case and we will drop all skbs and later
1212 		 * stop the thread.
1213 		 */
1214 		spin_lock_bh(&p->fcoe_rx_list.lock);
1215 		while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL)
1216 			kfree_skb(skb);
1217 		spin_unlock_bh(&p->fcoe_rx_list.lock);
1218 	}
1219 	put_cpu();
1220 #else
1221 	/*
1222 	 * This a non-SMP scenario where the singular Rx thread is
1223 	 * being removed. Free all skbs and stop the thread.
1224 	 */
1225 	spin_lock_bh(&p->fcoe_rx_list.lock);
1226 	while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL)
1227 		kfree_skb(skb);
1228 	spin_unlock_bh(&p->fcoe_rx_list.lock);
1229 #endif
1230 
1231 	if (thread)
1232 		kthread_stop(thread);
1233 
1234 	if (crc_eof)
1235 		put_page(crc_eof);
1236 }
1237 
1238 /**
1239  * fcoe_cpu_callback() - Handler for CPU hotplug events
1240  * @nfb:    The callback data block
1241  * @action: The event triggering the callback
1242  * @hcpu:   The index of the CPU that the event is for
1243  *
1244  * This creates or destroys per-CPU data for fcoe
1245  *
1246  * Returns NOTIFY_OK always.
1247  */
1248 static int fcoe_cpu_callback(struct notifier_block *nfb,
1249 			     unsigned long action, void *hcpu)
1250 {
1251 	unsigned cpu = (unsigned long)hcpu;
1252 
1253 	switch (action) {
1254 	case CPU_ONLINE:
1255 	case CPU_ONLINE_FROZEN:
1256 		FCOE_DBG("CPU %x online: Create Rx thread\n", cpu);
1257 		fcoe_percpu_thread_create(cpu);
1258 		break;
1259 	case CPU_DEAD:
1260 	case CPU_DEAD_FROZEN:
1261 		FCOE_DBG("CPU %x offline: Remove Rx thread\n", cpu);
1262 		fcoe_percpu_thread_destroy(cpu);
1263 		break;
1264 	default:
1265 		break;
1266 	}
1267 	return NOTIFY_OK;
1268 }
1269 
1270 /**
1271  * fcoe_select_cpu() - Selects CPU to handle post-processing of incoming
1272  *			command.
1273  *
1274  * This routine selects the next CPU from the online cpumask to distribute
1275  * incoming requests in round-robin fashion.
1276  *
1277  * Returns: int CPU number
1278  */
1279 static inline unsigned int fcoe_select_cpu(void)
1280 {
1281 	static unsigned int selected_cpu;
1282 
1283 	selected_cpu = cpumask_next(selected_cpu, cpu_online_mask);
1284 	if (selected_cpu >= nr_cpu_ids)
1285 		selected_cpu = cpumask_first(cpu_online_mask);
1286 
1287 	return selected_cpu;
1288 }
1289 
1290 /**
1291  * fcoe_rcv() - Receive packets from a net device
1292  * @skb:    The received packet
1293  * @netdev: The net device that the packet was received on
1294  * @ptype:  The packet type context
1295  * @olddev: The original net device that the skb was received on (in case @netdev is a bond)
1296  *
1297  * This routine is called by NET_RX_SOFTIRQ. It receives a packet, builds a
1298  * FC frame and passes the frame to libfc.
1299  *
1300  * Returns: 0 for success
1301  */
1302 int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
1303 	     struct packet_type *ptype, struct net_device *olddev)
1304 {
1305 	struct fc_lport *lport;
1306 	struct fcoe_rcv_info *fr;
1307 	struct fcoe_interface *fcoe;
1308 	struct fc_frame_header *fh;
1309 	struct fcoe_percpu_s *fps;
1310 	struct ethhdr *eh;
1311 	unsigned int cpu;
1312 
1313 	fcoe = container_of(ptype, struct fcoe_interface, fcoe_packet_type);
1314 	lport = fcoe->ctlr.lp;
1315 	if (unlikely(!lport)) {
1316 		FCOE_NETDEV_DBG(netdev, "Cannot find hba structure");
1317 		goto err2;
1318 	}
1319 	if (!lport->link_up)
1320 		goto err2;
1321 
1322 	FCOE_NETDEV_DBG(netdev, "skb_info: len:%d data_len:%d head:%p "
1323 			"data:%p tail:%p end:%p sum:%d dev:%s",
1324 			skb->len, skb->data_len, skb->head, skb->data,
1325 			skb_tail_pointer(skb), skb_end_pointer(skb),
1326 			skb->csum, skb->dev ? skb->dev->name : "<NULL>");
1327 
1328 	eh = eth_hdr(skb);
1329 
1330 	if (is_fip_mode(&fcoe->ctlr) &&
1331 	    compare_ether_addr(eh->h_source, fcoe->ctlr.dest_addr)) {
1332 		FCOE_NETDEV_DBG(netdev, "wrong source mac address:%pM\n",
1333 				eh->h_source);
1334 		goto err;
1335 	}
1336 
1337 	/*
1338 	 * Check for minimum frame length, and make sure required FCoE
1339 	 * and FC headers are pulled into the linear data area.
1340 	 */
1341 	if (unlikely((skb->len < FCOE_MIN_FRAME) ||
1342 		     !pskb_may_pull(skb, FCOE_HEADER_LEN)))
1343 		goto err;
1344 
1345 	skb_set_transport_header(skb, sizeof(struct fcoe_hdr));
1346 	fh = (struct fc_frame_header *) skb_transport_header(skb);
1347 
1348 	if (ntoh24(&eh->h_dest[3]) != ntoh24(fh->fh_d_id)) {
1349 		FCOE_NETDEV_DBG(netdev, "FC frame d_id mismatch with MAC:%pM\n",
1350 				eh->h_dest);
1351 		goto err;
1352 	}
1353 
1354 	fr = fcoe_dev_from_skb(skb);
1355 	fr->fr_dev = lport;
1356 
1357 	/*
1358 	 * In case the incoming frame's exchange is originated from
1359 	 * the initiator, then received frame's exchange id is ANDed
1360 	 * with fc_cpu_mask bits to get the same cpu on which exchange
1361 	 * was originated, otherwise select cpu using rx exchange id
1362 	 * or fcoe_select_cpu().
1363 	 */
1364 	if (ntoh24(fh->fh_f_ctl) & FC_FC_EX_CTX)
1365 		cpu = ntohs(fh->fh_ox_id) & fc_cpu_mask;
1366 	else {
1367 		if (ntohs(fh->fh_rx_id) == FC_XID_UNKNOWN)
1368 			cpu = fcoe_select_cpu();
1369 		else
1370 			cpu = ntohs(fh->fh_rx_id) & fc_cpu_mask;
1371 	}
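	/*
	 * Illustrative example: on a 4-CPU system libfc sets fc_cpu_mask to
	 * 0x3, so a response carrying OX_ID 0x0005 is steered to CPU 1
	 * (0x0005 & 0x3), the CPU on which the exchange was allocated.
	 */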
1372 
1373 	if (cpu >= nr_cpu_ids)
1374 		goto err;
1375 
1376 	fps = &per_cpu(fcoe_percpu, cpu);
1377 	spin_lock_bh(&fps->fcoe_rx_list.lock);
1378 	if (unlikely(!fps->thread)) {
1379 		/*
1380 		 * The targeted CPU is not ready, let's target
1381 		 * the first CPU now. For non-SMP systems this
1382 		 * will check the same CPU twice.
1383 		 */
1384 		FCOE_NETDEV_DBG(netdev, "CPU is online, but no receive thread "
1385 				"ready for incoming skb- using first online "
1386 				"CPU.\n");
1387 
1388 		spin_unlock_bh(&fps->fcoe_rx_list.lock);
1389 		cpu = cpumask_first(cpu_online_mask);
1390 		fps = &per_cpu(fcoe_percpu, cpu);
1391 		spin_lock_bh(&fps->fcoe_rx_list.lock);
1392 		if (!fps->thread) {
1393 			spin_unlock_bh(&fps->fcoe_rx_list.lock);
1394 			goto err;
1395 		}
1396 	}
1397 
1398 	/*
1399 	 * We now have a valid CPU that we're targeting for
1400 	 * this skb. We also have this receive thread locked,
1401 	 * so we're free to queue skbs into its queue.
1402 	 */
1403 
1404 	/* If this is a SCSI-FCP frame, and this is already executing on the
1405 	 * correct CPU, and the queue for this CPU is empty, then go ahead
1406 	 * and process the frame directly in the softirq context.
1407 	 * This lets us process completions without context switching from the
1408 	 * NET_RX softirq, to our receive processing thread, and then back to
1409 	 * BLOCK softirq context.
1410 	 */
1411 	if (fh->fh_type == FC_TYPE_FCP &&
1412 	    cpu == smp_processor_id() &&
1413 	    skb_queue_empty(&fps->fcoe_rx_list)) {
1414 		spin_unlock_bh(&fps->fcoe_rx_list.lock);
1415 		fcoe_recv_frame(skb);
1416 	} else {
1417 		__skb_queue_tail(&fps->fcoe_rx_list, skb);
1418 		if (fps->fcoe_rx_list.qlen == 1)
1419 			wake_up_process(fps->thread);
1420 		spin_unlock_bh(&fps->fcoe_rx_list.lock);
1421 	}
1422 
1423 	return 0;
1424 err:
1425 	per_cpu_ptr(lport->dev_stats, get_cpu())->ErrorFrames++;
1426 	put_cpu();
1427 err2:
1428 	kfree_skb(skb);
1429 	return -1;
1430 }
1431 
1432 /**
1433  * fcoe_alloc_paged_crc_eof() - Allocate a page to be used for the trailer CRC
1434  * @skb:  The packet to be transmitted
1435  * @tlen: The total length of the trailer
1436  *
1437  * Returns: 0 for success
1438  */
1439 static int fcoe_alloc_paged_crc_eof(struct sk_buff *skb, int tlen)
1440 {
1441 	struct fcoe_percpu_s *fps;
1442 	int rc;
1443 
1444 	fps = &get_cpu_var(fcoe_percpu);
1445 	rc = fcoe_get_paged_crc_eof(skb, tlen, fps);
1446 	put_cpu_var(fcoe_percpu);
1447 
1448 	return rc;
1449 }
1450 
1451 /**
1452  * fcoe_xmit() - Transmit a FCoE frame
1453  * @lport: The local port that the frame is to be transmitted for
1454  * @fp:	   The frame to be transmitted
1455  *
1456  * Return: 0 for success
1457  */
1458 int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
1459 {
1460 	int wlen;
1461 	u32 crc;
1462 	struct ethhdr *eh;
1463 	struct fcoe_crc_eof *cp;
1464 	struct sk_buff *skb;
1465 	struct fcoe_dev_stats *stats;
1466 	struct fc_frame_header *fh;
1467 	unsigned int hlen;		/* header length implies the version */
1468 	unsigned int tlen;		/* trailer length */
1469 	unsigned int elen;		/* eth header, may include vlan */
1470 	struct fcoe_port *port = lport_priv(lport);
1471 	struct fcoe_interface *fcoe = port->priv;
1472 	u8 sof, eof;
1473 	struct fcoe_hdr *hp;
1474 
1475 	WARN_ON((fr_len(fp) % sizeof(u32)) != 0);
1476 
1477 	fh = fc_frame_header_get(fp);
1478 	skb = fp_skb(fp);
1479 	wlen = skb->len / FCOE_WORD_TO_BYTE;
1480 
1481 	if (!lport->link_up) {
1482 		kfree_skb(skb);
1483 		return 0;
1484 	}
1485 
1486 	if (unlikely(fh->fh_type == FC_TYPE_ELS) &&
1487 	    fcoe_ctlr_els_send(&fcoe->ctlr, lport, skb))
1488 		return 0;
1489 
1490 	sof = fr_sof(fp);
1491 	eof = fr_eof(fp);
1492 
1493 	elen = sizeof(struct ethhdr);
1494 	hlen = sizeof(struct fcoe_hdr);
1495 	tlen = sizeof(struct fcoe_crc_eof);
1496 	wlen = (skb->len - tlen + sizeof(crc)) / FCOE_WORD_TO_BYTE;
1497 
1498 	/* crc offload */
1499 	if (likely(lport->crc_offload)) {
1500 		skb->ip_summed = CHECKSUM_PARTIAL;
1501 		skb->csum_start = skb_headroom(skb);
1502 		skb->csum_offset = skb->len;
1503 		crc = 0;
1504 	} else {
1505 		skb->ip_summed = CHECKSUM_NONE;
1506 		crc = fcoe_fc_crc(fp);
1507 	}
1508 
1509 	/* copy port crc and eof to the skb buff */
1510 	if (skb_is_nonlinear(skb)) {
1511 		skb_frag_t *frag;
1512 		if (fcoe_alloc_paged_crc_eof(skb, tlen)) {
1513 			kfree_skb(skb);
1514 			return -ENOMEM;
1515 		}
1516 		frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1];
1517 		cp = kmap_atomic(frag->page, KM_SKB_DATA_SOFTIRQ)
1518 			+ frag->page_offset;
1519 	} else {
1520 		cp = (struct fcoe_crc_eof *)skb_put(skb, tlen);
1521 	}
1522 
1523 	memset(cp, 0, sizeof(*cp));
1524 	cp->fcoe_eof = eof;
1525 	cp->fcoe_crc32 = cpu_to_le32(~crc);
1526 
1527 	if (skb_is_nonlinear(skb)) {
1528 		kunmap_atomic(cp, KM_SKB_DATA_SOFTIRQ);
1529 		cp = NULL;
1530 	}
1531 
1532 	/* adjust skb network/transport offsets to match mac/fcoe/port */
1533 	skb_push(skb, elen + hlen);
1534 	skb_reset_mac_header(skb);
1535 	skb_reset_network_header(skb);
1536 	skb->mac_len = elen;
1537 	skb->protocol = htons(ETH_P_FCOE);
1538 	skb->dev = fcoe->netdev;
1539 
1540 	/* fill up mac and fcoe headers */
1541 	eh = eth_hdr(skb);
1542 	eh->h_proto = htons(ETH_P_FCOE);
1543 	memcpy(eh->h_dest, fcoe->ctlr.dest_addr, ETH_ALEN);
1544 	if (fcoe->ctlr.map_dest)
1545 		memcpy(eh->h_dest + 3, fh->fh_d_id, 3);
1546 
1547 	if (unlikely(fcoe->ctlr.flogi_oxid != FC_XID_UNKNOWN))
1548 		memcpy(eh->h_source, fcoe->ctlr.ctl_src_addr, ETH_ALEN);
1549 	else
1550 		memcpy(eh->h_source, port->data_src_addr, ETH_ALEN);
1551 
1552 	hp = (struct fcoe_hdr *)(eh + 1);
1553 	memset(hp, 0, sizeof(*hp));
1554 	if (FC_FCOE_VER)
1555 		FC_FCOE_ENCAPS_VER(hp, FC_FCOE_VER);
1556 	hp->fcoe_sof = sof;
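	/*
	 * Resulting on-wire layout (illustrative):
	 *
	 *	[ 14 B ethhdr ][ 14 B fcoe_hdr ][ FC header + payload ][ 8 B fcoe_crc_eof ]
	 */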
1557 
1558 	/* fcoe lso, mss is in max_payload which is non-zero for FCP data */
1559 	if (lport->seq_offload && fr_max_payload(fp)) {
1560 		skb_shinfo(skb)->gso_type = SKB_GSO_FCOE;
1561 		skb_shinfo(skb)->gso_size = fr_max_payload(fp);
1562 	} else {
1563 		skb_shinfo(skb)->gso_type = 0;
1564 		skb_shinfo(skb)->gso_size = 0;
1565 	}
1566 	/* update tx stats regardless of whether the LLD fails */
1567 	stats = per_cpu_ptr(lport->dev_stats, get_cpu());
1568 	stats->TxFrames++;
1569 	stats->TxWords += wlen;
1570 	put_cpu();
1571 
1572 	/* send down to lld */
1573 	fr_dev(fp) = lport;
1574 	fcoe_port_send(port, skb);
1575 	return 0;
1576 }
1577 
1578 /**
1579  * fcoe_percpu_flush_done() - Indicate per-CPU queue flush completion
1580  * @skb: The completed skb (argument required by destructor)
1581  */
1582 static void fcoe_percpu_flush_done(struct sk_buff *skb)
1583 {
1584 	complete(&fcoe_flush_completion);
1585 }
1586 
1587 /**
1588  * fcoe_filter_frames() - filter out bad fcoe frames, i.e. bad CRC
1589  * @lport: The local port the frame was received on
1590  * @fp:	   The received frame
1591  *
1592  * Return: 0 on passing filtering checks
1593  */
1594 static inline int fcoe_filter_frames(struct fc_lport *lport,
1595 				     struct fc_frame *fp)
1596 {
1597 	struct fcoe_interface *fcoe;
1598 	struct fc_frame_header *fh;
1599 	struct sk_buff *skb = (struct sk_buff *)fp;
1600 	struct fcoe_dev_stats *stats;
1601 
1602 	/*
1603 	 * We only check the CRC here if no offload is available and the
1604 	 * frame is not solicited data; for solicited data the FCP layer
1605 	 * checks the CRC during the copy.
1606 	 */
1607 	if (lport->crc_offload && skb->ip_summed == CHECKSUM_UNNECESSARY)
1608 		fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED;
1609 	else
1610 		fr_flags(fp) |= FCPHF_CRC_UNCHECKED;
1611 
1612 	fh = (struct fc_frame_header *) skb_transport_header(skb);
1613 	fh = fc_frame_header_get(fp);
1614 	if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA && fh->fh_type == FC_TYPE_FCP)
1615 		return 0;
1616 
1617 	fcoe = ((struct fcoe_port *)lport_priv(lport))->priv;
1618 	if (is_fip_mode(&fcoe->ctlr) && fc_frame_payload_op(fp) == ELS_LOGO &&
1619 	    ntoh24(fh->fh_s_id) == FC_FID_FLOGI) {
1620 		FCOE_DBG("fcoe: dropping FCoE lport LOGO in fip mode\n");
1621 		return -EINVAL;
1622 	}
1623 
1624 	if (!(fr_flags(fp) & FCPHF_CRC_UNCHECKED) ||
1625 	    le32_to_cpu(fr_crc(fp)) == ~crc32(~0, skb->data, skb->len)) {
1626 		fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED;
1627 		return 0;
1628 	}
1629 
1630 	stats = per_cpu_ptr(lport->dev_stats, get_cpu());
1631 	stats->InvalidCRCCount++;
1632 	if (stats->InvalidCRCCount < 5)
1633 		printk(KERN_WARNING "fcoe: dropping frame with CRC error\n");
1634 	return -EINVAL;
1635 }
1636 
1637 /**
1638  * fcoe_recv_frame() - process a single received frame
1639  * @skb: frame to process
1640  */
1641 static void fcoe_recv_frame(struct sk_buff *skb)
1642 {
1643 	u32 fr_len;
1644 	struct fc_lport *lport;
1645 	struct fcoe_rcv_info *fr;
1646 	struct fcoe_dev_stats *stats;
1647 	struct fcoe_crc_eof crc_eof;
1648 	struct fc_frame *fp;
1649 	struct fcoe_port *port;
1650 	struct fcoe_hdr *hp;
1651 
1652 	fr = fcoe_dev_from_skb(skb);
1653 	lport = fr->fr_dev;
1654 	if (unlikely(!lport)) {
1655 		if (skb->destructor != fcoe_percpu_flush_done)
1656 			FCOE_NETDEV_DBG(skb->dev, "NULL lport in skb");
1657 		kfree_skb(skb);
1658 		return;
1659 	}
1660 
1661 	FCOE_NETDEV_DBG(skb->dev, "skb_info: len:%d data_len:%d "
1662 			"head:%p data:%p tail:%p end:%p sum:%d dev:%s",
1663 			skb->len, skb->data_len,
1664 			skb->head, skb->data, skb_tail_pointer(skb),
1665 			skb_end_pointer(skb), skb->csum,
1666 			skb->dev ? skb->dev->name : "<NULL>");
1667 
1668 	port = lport_priv(lport);
1669 	if (skb_is_nonlinear(skb))
1670 		skb_linearize(skb);	/* not ideal */
1671 
1672 	/*
1673 	 * Frame length checks and setting up the header pointers
1674 	 * was done in fcoe_rcv already.
1675 	 */
1676 	hp = (struct fcoe_hdr *) skb_network_header(skb);
1677 
1678 	stats = per_cpu_ptr(lport->dev_stats, get_cpu());
1679 	if (unlikely(FC_FCOE_DECAPS_VER(hp) != FC_FCOE_VER)) {
1680 		if (stats->ErrorFrames < 5)
1681 			printk(KERN_WARNING "fcoe: FCoE version "
1682 			       "mismatch: The frame has "
1683 			       "version %x, but the "
1684 			       "initiator supports version "
1685 			       "%x\n", FC_FCOE_DECAPS_VER(hp),
1686 			       FC_FCOE_VER);
1687 		goto drop;
1688 	}
1689 
1690 	skb_pull(skb, sizeof(struct fcoe_hdr));
1691 	fr_len = skb->len - sizeof(struct fcoe_crc_eof);
1692 
1693 	stats->RxFrames++;
1694 	stats->RxWords += fr_len / FCOE_WORD_TO_BYTE;
1695 
1696 	fp = (struct fc_frame *)skb;
1697 	fc_frame_init(fp);
1698 	fr_dev(fp) = lport;
1699 	fr_sof(fp) = hp->fcoe_sof;
1700 
1701 	/* Copy out the CRC and EOF trailer for access */
1702 	if (skb_copy_bits(skb, fr_len, &crc_eof, sizeof(crc_eof)))
1703 		goto drop;
1704 	fr_eof(fp) = crc_eof.fcoe_eof;
1705 	fr_crc(fp) = crc_eof.fcoe_crc32;
1706 	if (pskb_trim(skb, fr_len))
1707 		goto drop;
1708 
1709 	if (!fcoe_filter_frames(lport, fp)) {
1710 		put_cpu();
1711 		fc_exch_recv(lport, fp);
1712 		return;
1713 	}
1714 drop:
1715 	stats->ErrorFrames++;
1716 	put_cpu();
1717 	kfree_skb(skb);
1718 }
1719 
1720 /**
1721  * fcoe_percpu_receive_thread() - The per-CPU packet receive thread
1722  * @arg: The per-CPU context
1723  *
1724  * Return: 0 for success
1725  */
1726 int fcoe_percpu_receive_thread(void *arg)
1727 {
1728 	struct fcoe_percpu_s *p = arg;
1729 	struct sk_buff *skb;
1730 
1731 	set_user_nice(current, -20);
1732 
1733 	while (!kthread_should_stop()) {
1734 
1735 		spin_lock_bh(&p->fcoe_rx_list.lock);
1736 		while ((skb = __skb_dequeue(&p->fcoe_rx_list)) == NULL) {
1737 			set_current_state(TASK_INTERRUPTIBLE);
1738 			spin_unlock_bh(&p->fcoe_rx_list.lock);
1739 			schedule();
1740 			set_current_state(TASK_RUNNING);
1741 			if (kthread_should_stop())
1742 				return 0;
1743 			spin_lock_bh(&p->fcoe_rx_list.lock);
1744 		}
1745 		spin_unlock_bh(&p->fcoe_rx_list.lock);
1746 		fcoe_recv_frame(skb);
1747 	}
1748 	return 0;
1749 }
1750 
1751 /**
1752  * fcoe_dev_setup() - Setup the link change notification interface
1753  */
1754 static void fcoe_dev_setup(void)
1755 {
1756 	register_netdevice_notifier(&fcoe_notifier);
1757 }
1758 
1759 /**
1760  * fcoe_dev_cleanup() - Cleanup the link change notification interface
1761  */
1762 static void fcoe_dev_cleanup(void)
1763 {
1764 	unregister_netdevice_notifier(&fcoe_notifier);
1765 }
1766 
1767 /**
1768  * fcoe_device_notification() - Handler for net device events
1769  * @notifier: The context of the notification
1770  * @event:    The type of event
1771  * @ptr:      The net device that the event was on
1772  *
1773  * This function is called by the Ethernet driver in case of a link change event.
1774  *
1775  * Returns: 0 for success
1776  */
1777 static int fcoe_device_notification(struct notifier_block *notifier,
1778 				    ulong event, void *ptr)
1779 {
1780 	struct fc_lport *lport = NULL;
1781 	struct net_device *netdev = ptr;
1782 	struct fcoe_interface *fcoe;
1783 	struct fcoe_port *port;
1784 	struct fcoe_dev_stats *stats;
1785 	u32 link_possible = 1;
1786 	u32 mfs;
1787 	int rc = NOTIFY_OK;
1788 
1789 	list_for_each_entry(fcoe, &fcoe_hostlist, list) {
1790 		if (fcoe->netdev == netdev) {
1791 			lport = fcoe->ctlr.lp;
1792 			break;
1793 		}
1794 	}
1795 	if (!lport) {
1796 		rc = NOTIFY_DONE;
1797 		goto out;
1798 	}
1799 
1800 	switch (event) {
1801 	case NETDEV_DOWN:
1802 	case NETDEV_GOING_DOWN:
1803 		link_possible = 0;
1804 		break;
1805 	case NETDEV_UP:
1806 	case NETDEV_CHANGE:
1807 		break;
1808 	case NETDEV_CHANGEMTU:
1809 		if (netdev->features & NETIF_F_FCOE_MTU)
1810 			break;
1811 		mfs = netdev->mtu - (sizeof(struct fcoe_hdr) +
1812 				     sizeof(struct fcoe_crc_eof));
1813 		if (mfs >= FC_MIN_MAX_FRAME)
1814 			fc_set_mfs(lport, mfs);
1815 		break;
1816 	case NETDEV_REGISTER:
1817 		break;
1818 	case NETDEV_UNREGISTER:
1819 		list_del(&fcoe->list);
1820 		port = lport_priv(fcoe->ctlr.lp);
1821 		queue_work(fcoe_wq, &port->destroy_work);
1822 		goto out;
1824 	case NETDEV_FEAT_CHANGE:
1825 		fcoe_netdev_features_change(lport, netdev);
1826 		break;
1827 	default:
1828 		FCOE_NETDEV_DBG(netdev, "Unknown event %ld "
1829 				"from netdev netlink\n", event);
1830 	}
1831 
1832 	fcoe_link_speed_update(lport);
1833 
1834 	if (link_possible && !fcoe_link_ok(lport))
1835 		fcoe_ctlr_link_up(&fcoe->ctlr);
1836 	else if (fcoe_ctlr_link_down(&fcoe->ctlr)) {
1837 		stats = per_cpu_ptr(lport->dev_stats, get_cpu());
1838 		stats->LinkFailureCount++;
1839 		put_cpu();
1840 		fcoe_clean_pending_queue(lport);
1841 	}
1842 out:
1843 	return rc;
1844 }
1845 
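/*
 * Illustration of the NETDEV_CHANGEMTU handling above. The numbers assume the
 * standard 14-byte FCoE header and 8-byte CRC/EOF trailer and are examples,
 * not driver requirements:
 *
 *	MTU 2158 -> mfs = 2158 - 22 = 2136	24-byte FC header plus the full
 *						2112-byte FC payload
 *	MTU 1500 -> mfs = 1500 - 22 = 1478	reduced maximum FC frame
 *
 * Results below FC_MIN_MAX_FRAME are not propagated via fc_set_mfs(), and
 * interfaces advertising NETIF_F_FCOE_MTU skip the calculation entirely.
 */
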
1846 /**
1847  * fcoe_disable() - Disables a FCoE interface
1848  * @netdev: The net_device object of the Ethernet interface to disable
1849  *
1850  * Called from fcoe transport.
1851  *
1852  * Returns: 0 for success
1853  */
1854 static int fcoe_disable(struct net_device *netdev)
1855 {
1856 	struct fcoe_interface *fcoe;
1857 	int rc = 0;
1858 
1859 	mutex_lock(&fcoe_config_mutex);
1860 
1861 	rtnl_lock();
1862 	fcoe = fcoe_hostlist_lookup_port(netdev);
1863 	rtnl_unlock();
1864 
1865 	if (fcoe) {
1866 		fcoe_ctlr_link_down(&fcoe->ctlr);
1867 		fcoe_clean_pending_queue(fcoe->ctlr.lp);
1868 	} else
1869 		rc = -ENODEV;
1870 
1871 	mutex_unlock(&fcoe_config_mutex);
1872 	return rc;
1873 }
1874 
1875 /**
1876  * fcoe_enable() - Enables a FCoE interface
1877  * @netdev: The net_device object of the Ethernet interface to enable
1878  *
1879  * Called from fcoe transport.
1880  *
1881  * Returns: 0 for success
1882  */
1883 static int fcoe_enable(struct net_device *netdev)
1884 {
1885 	struct fcoe_interface *fcoe;
1886 	int rc = 0;
1887 
1888 	mutex_lock(&fcoe_config_mutex);
1889 	rtnl_lock();
1890 	fcoe = fcoe_hostlist_lookup_port(netdev);
1891 	rtnl_unlock();
1892 
1893 	if (!fcoe)
1894 		rc = -ENODEV;
1895 	else if (!fcoe_link_ok(fcoe->ctlr.lp))
1896 		fcoe_ctlr_link_up(&fcoe->ctlr);
1897 
1898 	mutex_unlock(&fcoe_config_mutex);
1899 	return rc;
1900 }
1901 
1902 /**
1903  * fcoe_destroy() - Destroy a FCoE interface
1904  * @netdev: The net_device object of the Ethernet interface to destroy
1905  *
1906  * Called from fcoe transport.
1907  *
1908  * Returns: 0 for success
1909  */
1910 static int fcoe_destroy(struct net_device *netdev)
1911 {
1912 	struct fcoe_interface *fcoe;
1913 	struct fc_lport *lport;
1914 	struct fcoe_port *port;
1915 	int rc = 0;
1916 
1917 	mutex_lock(&fcoe_config_mutex);
1918 	rtnl_lock();
1919 	fcoe = fcoe_hostlist_lookup_port(netdev);
1920 	if (!fcoe) {
1921 		rc = -ENODEV;
1922 		goto out_nodev;
1923 	}
1924 	lport = fcoe->ctlr.lp;
1925 	port = lport_priv(lport);
1926 	list_del(&fcoe->list);
1927 	queue_work(fcoe_wq, &port->destroy_work);
1928 out_nodev:
1929 	rtnl_unlock();
1930 	mutex_unlock(&fcoe_config_mutex);
1931 	return rc;
1932 }
1933 
1934 /**
1935  * fcoe_destroy_work() - Destroy a FCoE port in a deferred work context
1936  * @work: Handle to the FCoE port to be destroyed
1937  */
1938 static void fcoe_destroy_work(struct work_struct *work)
1939 {
1940 	struct fcoe_port *port;
1941 	struct fcoe_interface *fcoe;
1942 	int npiv = 0;
1943 
1944 	port = container_of(work, struct fcoe_port, destroy_work);
1945 	mutex_lock(&fcoe_config_mutex);
1946 
1947 	/* set if this is an NPIV port */
1948 	npiv = port->lport->vport ? 1 : 0;
1949 
1950 	fcoe = port->priv;
1951 	fcoe_if_destroy(port->lport);
1952 
1953 	/* Do not tear down the fcoe interface for NPIV port */
1954 	if (!npiv) {
1955 		rtnl_lock();
1956 		fcoe_interface_cleanup(fcoe);
1957 		rtnl_unlock();
1958 	}
1959 
1960 	mutex_unlock(&fcoe_config_mutex);
1961 }
1962 
1963 /**
1964  * fcoe_match() - Check if the FCoE is supported on the given netdevice
1965  * @netdev: The net_device object of the Ethernet interface to check
1966  *
1967  * Called from fcoe transport.
1968  *
1969  * Returns: always returns true as this is the default FCoE transport,
1970  * i.e., support all netdevs.
1971  */
1972 static bool fcoe_match(struct net_device *netdev)
1973 {
1974 	return true;
1975 }
1976 
1977 /**
1978  * fcoe_create() - Create a fcoe interface
1979  * @netdev:   The net_device object of the Ethernet interface to create on
1980  * @fip_mode: The FIP mode for this creation
1981  *
1982  * Called from fcoe transport.
1983  *
1984  * Returns: 0 for success
1985  */
1986 static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode)
1987 {
1988 	int rc = 0;
1989 	struct fcoe_interface *fcoe;
1990 	struct fc_lport *lport;
1991 
1992 	mutex_lock(&fcoe_config_mutex);
1993 	rtnl_lock();
1994 
1995 	/* look for existing lport */
1996 	if (fcoe_hostlist_lookup(netdev)) {
1997 		rc = -EEXIST;
1998 		goto out_nodev;
1999 	}
2000 
2001 	fcoe = fcoe_interface_create(netdev, fip_mode);
2002 	if (IS_ERR(fcoe)) {
2003 		rc = PTR_ERR(fcoe);
2004 		goto out_nodev;
2005 	}
2006 
2007 	lport = fcoe_if_create(fcoe, &netdev->dev, 0);
2008 	if (IS_ERR(lport)) {
2009 		printk(KERN_ERR "fcoe: Failed to create interface (%s)\n",
2010 		       netdev->name);
2011 		rc = -EIO;
2012 		fcoe_interface_cleanup(fcoe);
2013 		goto out_nodev;
2014 	}
2015 
2016 	/* Make this the "master" N_Port */
2017 	fcoe->ctlr.lp = lport;
2018 
2019 	/* add to lports list */
2020 	fcoe_hostlist_add(lport);
2021 
2022 	/* start FIP Discovery and FLOGI */
2023 	lport->boot_time = jiffies;
2024 	fc_fabric_login(lport);
2025 	if (!fcoe_link_ok(lport))
2026 		fcoe_ctlr_link_up(&fcoe->ctlr);
2027 
2028 out_nodev:
2029 	rtnl_unlock();
2030 	mutex_unlock(&fcoe_config_mutex);
2031 	return rc;
2032 }
2033 
2034 /**
2035  * fcoe_link_speed_update() - Update the supported and actual link speeds
2036  * @lport: The local port to update speeds for
2037  *
2038  * Returns: 0 if the ethtool query was successful
2039  *          -1 if the ethtool query failed
2040  */
2041 int fcoe_link_speed_update(struct fc_lport *lport)
2042 {
2043 	struct net_device *netdev = fcoe_netdev(lport);
2044 	struct ethtool_cmd ecmd;
2045 
2046 	if (!dev_ethtool_get_settings(netdev, &ecmd)) {
2047 		lport->link_supported_speeds &=
2048 			~(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT);
2049 		if (ecmd.supported & (SUPPORTED_1000baseT_Half |
2050 				      SUPPORTED_1000baseT_Full))
2051 			lport->link_supported_speeds |= FC_PORTSPEED_1GBIT;
2052 		if (ecmd.supported & SUPPORTED_10000baseT_Full)
2053 			lport->link_supported_speeds |=
2054 				FC_PORTSPEED_10GBIT;
2055 		switch (ethtool_cmd_speed(&ecmd)) {
2056 		case SPEED_1000:
2057 			lport->link_speed = FC_PORTSPEED_1GBIT;
2058 			break;
2059 		case SPEED_10000:
2060 			lport->link_speed = FC_PORTSPEED_10GBIT;
2061 			break;
2062 		}
2063 		return 0;
2064 	}
2065 	return -1;
2066 }
2067 
2068 /**
2069  * fcoe_link_ok() - Check if the link is OK for a local port
2070  * @lport: The local port to check link on
2071  *
2072  * Returns: 0 if link is UP and OK, -1 if not
2073  *
2074  */
2075 int fcoe_link_ok(struct fc_lport *lport)
2076 {
2077 	struct net_device *netdev = fcoe_netdev(lport);
2078 
2079 	if (netif_oper_up(netdev))
2080 		return 0;
2081 	return -1;
2082 }
2083 
2084 /**
2085  * fcoe_percpu_clean() - Clear all pending skbs for a local port
2086  * @lport: The local port whose skbs are to be cleared
2087  *
2088  * Must be called with fcoe_create_mutex held to single-thread completion.
2089  *
2090  * This flushes the pending skbs by adding a new skb to each queue and
2091  * waiting until they are all freed.  This assures us that not only are
2092  * there no packets that will be handled by the lport, but also that any
2093  * threads already handling packets have returned.
2094  */
2095 void fcoe_percpu_clean(struct fc_lport *lport)
2096 {
2097 	struct fcoe_percpu_s *pp;
2098 	struct fcoe_rcv_info *fr;
2099 	struct sk_buff_head *list;
2100 	struct sk_buff *skb, *next;
2101 	struct sk_buff *head;
2102 	unsigned int cpu;
2103 
2104 	for_each_possible_cpu(cpu) {
2105 		pp = &per_cpu(fcoe_percpu, cpu);
2106 		spin_lock_bh(&pp->fcoe_rx_list.lock);
2107 		list = &pp->fcoe_rx_list;
2108 		head = list->next;
2109 		for (skb = head; skb != (struct sk_buff *)list;
2110 		     skb = next) {
2111 			next = skb->next;
2112 			fr = fcoe_dev_from_skb(skb);
2113 			if (fr->fr_dev == lport) {
2114 				__skb_unlink(skb, list);
2115 				kfree_skb(skb);
2116 			}
2117 		}
2118 
2119 		if (!pp->thread || !cpu_online(cpu)) {
2120 			spin_unlock_bh(&pp->fcoe_rx_list.lock);
2121 			continue;
2122 		}
2123 
2124 		skb = dev_alloc_skb(0);
2125 		if (!skb) {
2126 			spin_unlock_bh(&pp->fcoe_rx_list.lock);
2127 			continue;
2128 		}
2129 		skb->destructor = fcoe_percpu_flush_done;
2130 
2131 		__skb_queue_tail(&pp->fcoe_rx_list, skb);
2132 		if (pp->fcoe_rx_list.qlen == 1)
2133 			wake_up_process(pp->thread);
2134 		spin_unlock_bh(&pp->fcoe_rx_list.lock);
2135 
2136 		wait_for_completion(&fcoe_flush_completion);
2137 	}
2138 }
2139 
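/*
 * The flush handshake above hinges on the destructor attached to the
 * zero-length marker skb. A minimal sketch of what fcoe_percpu_flush_done()
 * (defined earlier in this file) is expected to do:
 *
 *	static void fcoe_percpu_flush_done(struct sk_buff *skb)
 *	{
 *		complete(&fcoe_flush_completion);
 *	}
 *
 * Because the marker is queued behind any skbs destined for the dying lport,
 * its release signals that the per-CPU thread has drained everything queued
 * ahead of it.
 */
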
2140 /**
2141  * fcoe_reset() - Reset a local port
2142  * @shost: The SCSI host associated with the local port to be reset
2143  *
2144  * Returns: Always 0 (return value required by FC transport template)
2145  */
2146 int fcoe_reset(struct Scsi_Host *shost)
2147 {
2148 	struct fc_lport *lport = shost_priv(shost);
2149 	struct fcoe_port *port = lport_priv(lport);
2150 	struct fcoe_interface *fcoe = port->priv;
2151 
2152 	fcoe_ctlr_link_down(&fcoe->ctlr);
2153 	fcoe_clean_pending_queue(fcoe->ctlr.lp);
2154 	if (!fcoe_link_ok(fcoe->ctlr.lp))
2155 		fcoe_ctlr_link_up(&fcoe->ctlr);
2156 	return 0;
2157 }
2158 
2159 /**
2160  * fcoe_hostlist_lookup_port() - Find the FCoE interface associated with a net device
2161  * @netdev: The net device used as a key
2162  *
2163  * Locking: Must be called with the RTNL mutex held.
2164  *
2165  * Returns: NULL or the FCoE interface
2166  */
2167 static struct fcoe_interface *
2168 fcoe_hostlist_lookup_port(const struct net_device *netdev)
2169 {
2170 	struct fcoe_interface *fcoe;
2171 
2172 	list_for_each_entry(fcoe, &fcoe_hostlist, list) {
2173 		if (fcoe->netdev == netdev)
2174 			return fcoe;
2175 	}
2176 	return NULL;
2177 }
2178 
2179 /**
2180  * fcoe_hostlist_lookup() - Find the local port associated with a
2181  *			    given net device
2182  * @netdev: The netdevice used as a key
2183  *
2184  * Locking: Must be called with the RTNL mutex held
2185  *
2186  * Returns: NULL or the local port
2187  */
2188 static struct fc_lport *fcoe_hostlist_lookup(const struct net_device *netdev)
2189 {
2190 	struct fcoe_interface *fcoe;
2191 
2192 	fcoe = fcoe_hostlist_lookup_port(netdev);
2193 	return (fcoe) ? fcoe->ctlr.lp : NULL;
2194 }
2195 
2196 /**
2197  * fcoe_hostlist_add() - Add the FCoE interface identified by a local
2198  *			 port to the hostlist
2199  * @lport: The local port that identifies the FCoE interface to be added
2200  *
2201  * Locking: must be called with the RTNL mutex held
2202  *
2203  * Returns: 0 for success
2204  */
2205 static int fcoe_hostlist_add(const struct fc_lport *lport)
2206 {
2207 	struct fcoe_interface *fcoe;
2208 	struct fcoe_port *port;
2209 
2210 	fcoe = fcoe_hostlist_lookup_port(fcoe_netdev(lport));
2211 	if (!fcoe) {
2212 		port = lport_priv(lport);
2213 		fcoe = port->priv;
2214 		list_add_tail(&fcoe->list, &fcoe_hostlist);
2215 	}
2216 	return 0;
2217 }
2218 
2219 
2220 static struct fcoe_transport fcoe_sw_transport = {
2221 	.name = {FCOE_TRANSPORT_DEFAULT},
2222 	.attached = false,
2223 	.list = LIST_HEAD_INIT(fcoe_sw_transport.list),
2224 	.match = fcoe_match,
2225 	.create = fcoe_create,
2226 	.destroy = fcoe_destroy,
2227 	.enable = fcoe_enable,
2228 	.disable = fcoe_disable,
2229 };
2230 
2231 /**
2232  * fcoe_init() - Initialize fcoe.ko
2233  *
2234  * Returns: 0 on success, or a negative value on failure
2235  */
2236 static int __init fcoe_init(void)
2237 {
2238 	struct fcoe_percpu_s *p;
2239 	unsigned int cpu;
2240 	int rc = 0;
2241 
2242 	fcoe_wq = alloc_workqueue("fcoe", 0, 0);
2243 	if (!fcoe_wq)
2244 		return -ENOMEM;
2245 
2246 	/* register as a fcoe transport */
2247 	rc = fcoe_transport_attach(&fcoe_sw_transport);
2248 	if (rc) {
2249 		printk(KERN_ERR "failed to register an fcoe transport, check "
2250 			"if libfcoe is loaded\n");
		/* do not leak the workqueue if the transport attach failed */
		destroy_workqueue(fcoe_wq);
2251 		return rc;
2252 	}
2253 
2254 	mutex_lock(&fcoe_config_mutex);
2255 
2256 	for_each_possible_cpu(cpu) {
2257 		p = &per_cpu(fcoe_percpu, cpu);
2258 		skb_queue_head_init(&p->fcoe_rx_list);
2259 	}
2260 
2261 	for_each_online_cpu(cpu)
2262 		fcoe_percpu_thread_create(cpu);
2263 
2264 	/* Initialize per CPU interrupt thread */
2265 	rc = register_hotcpu_notifier(&fcoe_cpu_notifier);
2266 	if (rc)
2267 		goto out_free;
2268 
2269 	/* Setup link change notification */
2270 	fcoe_dev_setup();
2271 
2272 	rc = fcoe_if_init();
2273 	if (rc)
2274 		goto out_free;
2275 
2276 	mutex_unlock(&fcoe_config_mutex);
2277 	return 0;
2278 
2279 out_free:
2280 	for_each_online_cpu(cpu) {
2281 		fcoe_percpu_thread_destroy(cpu);
2282 	}
2283 	mutex_unlock(&fcoe_config_mutex);
2284 	destroy_workqueue(fcoe_wq);
2285 	return rc;
2286 }
2287 module_init(fcoe_init);
2288 
2289 /**
2290  * fcoe_exit() - Clean up fcoe.ko
2293  */
2294 static void __exit fcoe_exit(void)
2295 {
2296 	struct fcoe_interface *fcoe, *tmp;
2297 	struct fcoe_port *port;
2298 	unsigned int cpu;
2299 
2300 	mutex_lock(&fcoe_config_mutex);
2301 
2302 	fcoe_dev_cleanup();
2303 
2304 	/* releases the associated fcoe hosts */
2305 	rtnl_lock();
2306 	list_for_each_entry_safe(fcoe, tmp, &fcoe_hostlist, list) {
2307 		list_del(&fcoe->list);
2308 		port = lport_priv(fcoe->ctlr.lp);
2309 		queue_work(fcoe_wq, &port->destroy_work);
2310 	}
2311 	rtnl_unlock();
2312 
2313 	unregister_hotcpu_notifier(&fcoe_cpu_notifier);
2314 
2315 	for_each_online_cpu(cpu)
2316 		fcoe_percpu_thread_destroy(cpu);
2317 
2318 	mutex_unlock(&fcoe_config_mutex);
2319 
2320 	/*
2321 	 * destroy_work items may be chained, but destroy_workqueue()
2322 	 * can take care of them. Just kill the fcoe_wq.
2323 	 */
2324 	destroy_workqueue(fcoe_wq);
2325 
2326 	/*
2327 	 * Detaching from the scsi transport must happen after all
2328 	 * destroys are done on the fcoe_wq. destroy_workqueue will
2329 	 * ensure the fcoe_wq is flushed.
2330 	 */
2331 	fcoe_if_exit();
2332 
2333 	/* detach from fcoe transport */
2334 	fcoe_transport_detach(&fcoe_sw_transport);
2335 }
2336 module_exit(fcoe_exit);
2337 
2338 /**
2339  * fcoe_flogi_resp() - FCoE specific FLOGI and FDISC response handler
2340  * @seq: active sequence in the FLOGI or FDISC exchange
2341  * @fp: response frame, or error encoded in a pointer (timeout)
2342  * @arg: pointer to the fcoe_ctlr structure
2343  *
2344  * This handles MAC address management for FCoE, then passes control on to
2345  * the libfc FLOGI response handler.
2346  */
2347 static void fcoe_flogi_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
2348 {
2349 	struct fcoe_ctlr *fip = arg;
2350 	struct fc_exch *exch = fc_seq_exch(seq);
2351 	struct fc_lport *lport = exch->lp;
2352 	u8 *mac;
2353 
2354 	if (IS_ERR(fp))
2355 		goto done;
2356 
2357 	mac = fr_cb(fp)->granted_mac;
2358 	if (is_zero_ether_addr(mac)) {
2359 		/* pre-FIP */
2360 		if (fcoe_ctlr_recv_flogi(fip, lport, fp)) {
2361 			fc_frame_free(fp);
2362 			return;
2363 		}
2364 	}
2365 	fcoe_update_src_mac(lport, mac);
2366 done:
2367 	fc_lport_flogi_resp(seq, fp, lport);
2368 }
2369 
2370 /**
2371  * fcoe_logo_resp() - FCoE specific LOGO response handler
2372  * @seq: active sequence in the LOGO exchange
2373  * @fp: response frame, or error encoded in a pointer (timeout)
2374  * @arg: pointer to the fc_lport structure
2375  *
2376  * This handles MAC address management for FCoE, then passes control on to
2377  * the libfc LOGO response handler.
2378  */
2379 static void fcoe_logo_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
2380 {
2381 	struct fc_lport *lport = arg;
2382 	static u8 zero_mac[ETH_ALEN] = { 0 };
2383 
2384 	if (!IS_ERR(fp))
2385 		fcoe_update_src_mac(lport, zero_mac);
2386 	fc_lport_logo_resp(seq, fp, lport);
2387 }
2388 
2389 /**
2390  * fcoe_elsct_send() - FCoE specific ELS handler
2391  *
2392  * This does special case handling of FIP encapsulated ELS exchanges for FCoE,
2393  * using FCoE specific response handlers and passing the FIP controller as
2394  * the argument (the lport is still available from the exchange).
2395  *
2396  * Most of the work here is just handed off to the libfc routine.
2397  */
2398 static struct fc_seq *fcoe_elsct_send(struct fc_lport *lport, u32 did,
2399 				      struct fc_frame *fp, unsigned int op,
2400 				      void (*resp)(struct fc_seq *,
2401 						   struct fc_frame *,
2402 						   void *),
2403 				      void *arg, u32 timeout)
2404 {
2405 	struct fcoe_port *port = lport_priv(lport);
2406 	struct fcoe_interface *fcoe = port->priv;
2407 	struct fcoe_ctlr *fip = &fcoe->ctlr;
2408 	struct fc_frame_header *fh = fc_frame_header_get(fp);
2409 
2410 	switch (op) {
2411 	case ELS_FLOGI:
2412 	case ELS_FDISC:
2413 		if (lport->point_to_multipoint)
2414 			break;
2415 		return fc_elsct_send(lport, did, fp, op, fcoe_flogi_resp,
2416 				     fip, timeout);
2417 	case ELS_LOGO:
2418 		/* only hook onto fabric logouts, not port logouts */
2419 		if (ntoh24(fh->fh_d_id) != FC_FID_FLOGI)
2420 			break;
2421 		return fc_elsct_send(lport, did, fp, op, fcoe_logo_resp,
2422 				     lport, timeout);
2423 	}
2424 	return fc_elsct_send(lport, did, fp, op, resp, arg, timeout);
2425 }
2426 
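/*
 * Note on the ELS_LOGO hook above: FC_FID_FLOGI is the well-known fabric
 * F_Port address (0xFFFFFE), so only a logout sent to the fabric itself is
 * routed through fcoe_logo_resp() to clear the granted MAC; a LOGO to an
 * ordinary N_Port ID falls through to the caller-supplied response handler.
 */
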
2427 /**
2428  * fcoe_vport_create() - create an fc_host/scsi_host for a vport
2429  * @vport: fc_vport object to create a new fc_host for
2430  * @disabled: start the new fc_host in a disabled state by default?
2431  *
2432  * Returns: 0 for success
2433  */
2434 static int fcoe_vport_create(struct fc_vport *vport, bool disabled)
2435 {
2436 	struct Scsi_Host *shost = vport_to_shost(vport);
2437 	struct fc_lport *n_port = shost_priv(shost);
2438 	struct fcoe_port *port = lport_priv(n_port);
2439 	struct fcoe_interface *fcoe = port->priv;
2440 	struct net_device *netdev = fcoe->netdev;
2441 	struct fc_lport *vn_port;
2442 	int rc;
2443 	char buf[32];
2444 
2445 	rc = fcoe_validate_vport_create(vport);
2446 	if (rc) {
2447 		wwn_to_str(vport->port_name, buf, sizeof(buf));
2448 		printk(KERN_ERR "fcoe: Failed to create vport, "
2449 			"WWPN (0x%s) already exists\n",
2450 			buf);
2451 		return rc;
2452 	}
2453 
2454 	mutex_lock(&fcoe_config_mutex);
2455 	vn_port = fcoe_if_create(fcoe, &vport->dev, 1);
2456 	mutex_unlock(&fcoe_config_mutex);
2457 
2458 	if (IS_ERR(vn_port)) {
2459 		printk(KERN_ERR "fcoe: fcoe_vport_create(%s) failed\n",
2460 		       netdev->name);
2461 		return -EIO;
2462 	}
2463 
2464 	if (disabled) {
2465 		fc_vport_set_state(vport, FC_VPORT_DISABLED);
2466 	} else {
2467 		vn_port->boot_time = jiffies;
2468 		fc_fabric_login(vn_port);
2469 		fc_vport_setlink(vn_port);
2470 	}
2471 	return 0;
2472 }
2473 
2474 /**
2475  * fcoe_vport_destroy() - destroy the fc_host/scsi_host for a vport
2476  * @vport: fc_vport object that is being destroyed
2477  *
2478  * Returns: 0 for success
2479  */
2480 static int fcoe_vport_destroy(struct fc_vport *vport)
2481 {
2482 	struct Scsi_Host *shost = vport_to_shost(vport);
2483 	struct fc_lport *n_port = shost_priv(shost);
2484 	struct fc_lport *vn_port = vport->dd_data;
2485 	struct fcoe_port *port = lport_priv(vn_port);
2486 
2487 	mutex_lock(&n_port->lp_mutex);
2488 	list_del(&vn_port->list);
2489 	mutex_unlock(&n_port->lp_mutex);
2490 	queue_work(fcoe_wq, &port->destroy_work);
2491 	return 0;
2492 }
2493 
2494 /**
2495  * fcoe_vport_disable() - change vport state
2496  * @vport: vport to bring online/offline
2497  * @disable: should the vport be disabled?
2498  */
2499 static int fcoe_vport_disable(struct fc_vport *vport, bool disable)
2500 {
2501 	struct fc_lport *lport = vport->dd_data;
2502 
2503 	if (disable) {
2504 		fc_vport_set_state(vport, FC_VPORT_DISABLED);
2505 		fc_fabric_logoff(lport);
2506 	} else {
2507 		lport->boot_time = jiffies;
2508 		fc_fabric_login(lport);
2509 		fc_vport_setlink(lport);
2510 	}
2511 
2512 	return 0;
2513 }
2514 
2515 /**
2516  * fcoe_set_vport_symbolic_name() - append vport string to symbolic name
2517  * @vport: fc_vport with a new symbolic name string
2518  *
2519  * After generating a new symbolic name string, a new RSPN_ID request is
2520  * sent to the name server.  There is no response handler, so if it fails
2521  * for some reason it will not be retried.
2522  */
2523 static void fcoe_set_vport_symbolic_name(struct fc_vport *vport)
2524 {
2525 	struct fc_lport *lport = vport->dd_data;
2526 	struct fc_frame *fp;
2527 	size_t len;
2528 
2529 	snprintf(fc_host_symbolic_name(lport->host), FC_SYMBOLIC_NAME_SIZE,
2530 		 "%s v%s over %s : %s", FCOE_NAME, FCOE_VERSION,
2531 		 fcoe_netdev(lport)->name, vport->symbolic_name);
2532 
2533 	if (lport->state != LPORT_ST_READY)
2534 		return;
2535 
2536 	len = strnlen(fc_host_symbolic_name(lport->host), 255);
2537 	fp = fc_frame_alloc(lport,
2538 			    sizeof(struct fc_ct_hdr) +
2539 			    sizeof(struct fc_ns_rspn) + len);
2540 	if (!fp)
2541 		return;
2542 	lport->tt.elsct_send(lport, FC_FID_DIR_SERV, fp, FC_NS_RSPN_ID,
2543 			     NULL, NULL, 3 * lport->r_a_tov);
2544 }
2545 
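/*
 * Example of the symbolic name built above, assuming FCOE_NAME "fcoe",
 * FCOE_VERSION "0.1", a base interface of eth3 and a vport symbolic name of
 * "backup" (all of these values are illustrative):
 *
 *	"fcoe v0.1 over eth3 : backup"
 *
 * The RSPN_ID registration is fire-and-forget: elsct_send() is passed a NULL
 * response handler, so a failed registration is not retried.
 */
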
2546 /**
2547  * fcoe_get_lesb() - Fill the FCoE Link Error Status Block
2548  * @lport: the local port
2549  * @fc_lesb: the link error status block
2550  */
2551 static void fcoe_get_lesb(struct fc_lport *lport,
2552 			 struct fc_els_lesb *fc_lesb)
2553 {
2554 	unsigned int cpu;
2555 	u32 lfc, vlfc, mdac;
2556 	struct fcoe_dev_stats *devst;
2557 	struct fcoe_fc_els_lesb *lesb;
2558 	struct rtnl_link_stats64 temp;
2559 	struct net_device *netdev = fcoe_netdev(lport);
2560 
2561 	lfc = 0;
2562 	vlfc = 0;
2563 	mdac = 0;
2564 	lesb = (struct fcoe_fc_els_lesb *)fc_lesb;
2565 	memset(lesb, 0, sizeof(*lesb));
2566 	for_each_possible_cpu(cpu) {
2567 		devst = per_cpu_ptr(lport->dev_stats, cpu);
2568 		lfc += devst->LinkFailureCount;
2569 		vlfc += devst->VLinkFailureCount;
2570 		mdac += devst->MissDiscAdvCount;
2571 	}
2572 	lesb->lesb_link_fail = htonl(lfc);
2573 	lesb->lesb_vlink_fail = htonl(vlfc);
2574 	lesb->lesb_miss_fka = htonl(mdac);
2575 	lesb->lesb_fcs_error = htonl(dev_get_stats(netdev, &temp)->rx_crc_errors);
2576 }
2577 
2578 /**
2579  * fcoe_set_port_id() - Callback from libfc when Port_ID is set.
2580  * @lport: the local port
2581  * @port_id: the port ID
2582  * @fp: the received frame, if any, that caused the port_id to be set.
2583  *
2584  * This routine handles the case where we received a FLOGI and are
2585  * entering point-to-point mode.  We need to call fcoe_ctlr_recv_flogi()
2586  * so it can set the non-mapped mode and gateway address.
2587  *
2588  * The FLOGI LS_ACC is handled by fcoe_flogi_resp().
2589  */
2590 static void fcoe_set_port_id(struct fc_lport *lport,
2591 			     u32 port_id, struct fc_frame *fp)
2592 {
2593 	struct fcoe_port *port = lport_priv(lport);
2594 	struct fcoe_interface *fcoe = port->priv;
2595 
2596 	if (fp && fc_frame_payload_op(fp) == ELS_FLOGI)
2597 		fcoe_ctlr_recv_flogi(&fcoe->ctlr, lport, fp);
2598 }
2599 
2600 /**
2601  * fcoe_validate_vport_create() - Validate a vport before creating it
2602  * @vport: NPIV port to be created
2603  *
2604  * Validates a vport before it is created via fcoe_vport_create().
2605  * Current validations are:
2606  *      - WWPN supplied is unique for the given lport
2607  *
2608  * Returns: 0 on success, -EINVAL if validation fails
2609  */
2611 static int fcoe_validate_vport_create(struct fc_vport *vport)
2612 {
2613 	struct Scsi_Host *shost = vport_to_shost(vport);
2614 	struct fc_lport *n_port = shost_priv(shost);
2615 	struct fc_lport *vn_port;
2616 	int rc = 0;
2617 	char buf[32];
2618 
2619 	mutex_lock(&n_port->lp_mutex);
2620 
2621 	wwn_to_str(vport->port_name, buf, sizeof(buf));
2622 	/* Check that the WWPN is not the same as that of the lport */
2623 	if (!memcmp(&n_port->wwpn, &vport->port_name, sizeof(u64))) {
2624 		FCOE_DBG("vport WWPN 0x%s is the same as the "
2625 			"base port WWPN\n", buf);
2626 		rc = -EINVAL;
2627 		goto out;
2628 	}
2629 
2630 	/* Check if there is any existing vport with same wwpn */
2631 	list_for_each_entry(vn_port, &n_port->vports, list) {
2632 		if (!memcmp(&vn_port->wwpn, &vport->port_name, sizeof(u64))) {
2633 			FCOE_DBG("vport with given WWPN 0x%s already "
2634 			"exists\n", buf);
2635 			rc = -EINVAL;
2636 			break;
2637 		}
2638 	}
2639 
2640 out:
2641 	mutex_unlock(&n_port->lp_mutex);
2642 
2643 	return rc;
2644 }
2645