xref: /linux/drivers/scsi/fcoe/fcoe.c (revision e27ecdd94d81e5bc3d1f68591701db5adb342f0d)
1 /*
2  * Copyright(c) 2007 - 2008 Intel Corporation. All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or modify it
5  * under the terms and conditions of the GNU General Public License,
6  * version 2, as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope it will be useful, but WITHOUT
9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
11  * more details.
12  *
13  * You should have received a copy of the GNU General Public License along with
14  * this program; if not, write to the Free Software Foundation, Inc.,
15  * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16  *
17  * Maintained at www.Open-FCoE.org
18  */
19 
20 #include <linux/module.h>
21 #include <linux/version.h>
22 #include <linux/spinlock.h>
23 #include <linux/netdevice.h>
24 #include <linux/etherdevice.h>
25 #include <linux/ethtool.h>
26 #include <linux/if_ether.h>
27 #include <linux/if_vlan.h>
28 #include <linux/crc32.h>
29 #include <linux/cpu.h>
30 #include <linux/fs.h>
31 #include <linux/sysfs.h>
32 #include <linux/ctype.h>
33 #include <scsi/scsi_tcq.h>
34 #include <scsi/scsicam.h>
35 #include <scsi/scsi_transport.h>
36 #include <scsi/scsi_transport_fc.h>
37 #include <net/rtnetlink.h>
38 
39 #include <scsi/fc/fc_encaps.h>
40 #include <scsi/fc/fc_fip.h>
41 
42 #include <scsi/libfc.h>
43 #include <scsi/fc_frame.h>
44 #include <scsi/libfcoe.h>
45 
46 #include "fcoe.h"
47 
48 static int debug_fcoe;
49 
50 MODULE_AUTHOR("Open-FCoE.org");
51 MODULE_DESCRIPTION("FCoE");
52 MODULE_LICENSE("GPL v2");
53 
54 /* fcoe host list */
55 LIST_HEAD(fcoe_hostlist);
56 DEFINE_RWLOCK(fcoe_hostlist_lock);
57 DEFINE_PER_CPU(struct fcoe_percpu_s, fcoe_percpu);
58 
59 /* Function Prototypes */
60 static int fcoe_reset(struct Scsi_Host *shost);
61 static int fcoe_xmit(struct fc_lport *, struct fc_frame *);
62 static int fcoe_rcv(struct sk_buff *, struct net_device *,
63 		    struct packet_type *, struct net_device *);
64 static int fcoe_percpu_receive_thread(void *arg);
65 static void fcoe_clean_pending_queue(struct fc_lport *lp);
66 static void fcoe_percpu_clean(struct fc_lport *lp);
67 static int fcoe_link_ok(struct fc_lport *lp);
68 
69 static struct fc_lport *fcoe_hostlist_lookup(const struct net_device *);
70 static int fcoe_hostlist_add(const struct fc_lport *);
71 static int fcoe_hostlist_remove(const struct fc_lport *);
72 
73 static void fcoe_check_wait_queue(struct fc_lport *, struct sk_buff *);
74 static int fcoe_device_notification(struct notifier_block *, ulong, void *);
75 static void fcoe_dev_setup(void);
76 static void fcoe_dev_cleanup(void);
77 
78 /* notification function from net device */
79 static struct notifier_block fcoe_notifier = {
80 	.notifier_call = fcoe_device_notification,
81 };
82 
83 static struct scsi_transport_template *scsi_transport_fcoe_sw;
84 
85 struct fc_function_template fcoe_transport_function = {
86 	.show_host_node_name = 1,
87 	.show_host_port_name = 1,
88 	.show_host_supported_classes = 1,
89 	.show_host_supported_fc4s = 1,
90 	.show_host_active_fc4s = 1,
91 	.show_host_maxframe_size = 1,
92 
93 	.show_host_port_id = 1,
94 	.show_host_supported_speeds = 1,
95 	.get_host_speed = fc_get_host_speed,
96 	.show_host_speed = 1,
97 	.show_host_port_type = 1,
98 	.get_host_port_state = fc_get_host_port_state,
99 	.show_host_port_state = 1,
100 	.show_host_symbolic_name = 1,
101 
102 	.dd_fcrport_size = sizeof(struct fc_rport_libfc_priv),
103 	.show_rport_maxframe_size = 1,
104 	.show_rport_supported_classes = 1,
105 
106 	.show_host_fabric_name = 1,
107 	.show_starget_node_name = 1,
108 	.show_starget_port_name = 1,
109 	.show_starget_port_id = 1,
110 	.set_rport_dev_loss_tmo = fc_set_rport_loss_tmo,
111 	.show_rport_dev_loss_tmo = 1,
112 	.get_fc_host_stats = fc_get_host_stats,
113 	.issue_fc_host_lip = fcoe_reset,
114 
115 	.terminate_rport_io = fc_rport_terminate_io,
116 };
117 
118 static struct scsi_host_template fcoe_shost_template = {
119 	.module = THIS_MODULE,
120 	.name = "FCoE Driver",
121 	.proc_name = FCOE_NAME,
122 	.queuecommand = fc_queuecommand,
123 	.eh_abort_handler = fc_eh_abort,
124 	.eh_device_reset_handler = fc_eh_device_reset,
125 	.eh_host_reset_handler = fc_eh_host_reset,
126 	.slave_alloc = fc_slave_alloc,
127 	.change_queue_depth = fc_change_queue_depth,
128 	.change_queue_type = fc_change_queue_type,
129 	.this_id = -1,
130 	.cmd_per_lun = 32,
131 	.can_queue = FCOE_MAX_OUTSTANDING_COMMANDS,
132 	.use_clustering = ENABLE_CLUSTERING,
133 	.sg_tablesize = SG_ALL,
134 	.max_sectors = 0xffff,
135 };
136 
137 /**
138  * fcoe_fip_recv - handle a received FIP frame.
139  * @skb: the receive skb
140  * @dev: associated &net_device
141  * @ptype: the &packet_type structure which was used to register this handler.
142  * @orig_dev: original receive &net_device, in case @dev is a bond.
143  *
144  * Returns: 0 for success
145  */
146 static int fcoe_fip_recv(struct sk_buff *skb, struct net_device *dev,
147 			 struct packet_type *ptype,
148 			 struct net_device *orig_dev)
149 {
150 	struct fcoe_softc *fc;
151 
152 	fc = container_of(ptype, struct fcoe_softc, fip_packet_type);
153 	fcoe_ctlr_recv(&fc->ctlr, skb);
154 	return 0;
155 }
156 
157 /**
158  * fcoe_fip_send() - send an Ethernet-encapsulated FIP frame.
159  * @fip: FCoE controller.
160  * @skb: FIP Packet.
161  */
162 static void fcoe_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
163 {
164 	skb->dev = fcoe_from_ctlr(fip)->real_dev;
165 	dev_queue_xmit(skb);
166 }
167 
168 /**
169  * fcoe_update_src_mac() - Update Ethernet MAC filters.
170  * @fip: FCoE controller.
171  * @old: Unicast MAC address to delete if the MAC is non-zero.
172  * @new: Unicast MAC address to add.
173  *
174  * Remove any previously-set unicast MAC filter.
175  * Add secondary FCoE MAC address filter for our OUI.
176  */
177 static void fcoe_update_src_mac(struct fcoe_ctlr *fip, u8 *old, u8 *new)
178 {
179 	struct fcoe_softc *fc;
180 
181 	fc = fcoe_from_ctlr(fip);
182 	rtnl_lock();
183 	if (!is_zero_ether_addr(old))
184 		dev_unicast_delete(fc->real_dev, old);
185 	dev_unicast_add(fc->real_dev, new);
186 	rtnl_unlock();
187 }
188 
189 /**
190  * fcoe_lport_config() - sets up the fc_lport
191  * @lp: ptr to the fc_lport
192  *
193  * Returns: 0 for success
194  */
195 static int fcoe_lport_config(struct fc_lport *lp)
196 {
197 	lp->link_up = 0;
198 	lp->qfull = 0;
199 	lp->max_retry_count = 3;
200 	lp->max_rport_retry_count = 3;
201 	lp->e_d_tov = 2 * 1000;	/* FC-FS default */
202 	lp->r_a_tov = 2 * 2 * 1000;
203 	lp->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
204 			      FCP_SPPF_RETRY | FCP_SPPF_CONF_COMPL);
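	/*
	 * These FCP PRLI service-parameter page flags advertise: initiator
	 * function, read XFER_RDY disabled, retry support, and confirmed
	 * completion allowed.
	 */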
205 
206 	fc_lport_init_stats(lp);
207 
208 	/* lport fc_lport related configuration */
209 	fc_lport_config(lp);
210 
211 	/* offload related configuration */
212 	lp->crc_offload = 0;
213 	lp->seq_offload = 0;
214 	lp->lro_enabled = 0;
215 	lp->lro_xid = 0;
216 	lp->lso_max = 0;
217 
218 	return 0;
219 }
220 
221 /**
222  * fcoe_netdev_cleanup() - clean up netdev configurations
223  * @fc: ptr to the fcoe_softc
224  */
225 void fcoe_netdev_cleanup(struct fcoe_softc *fc)
226 {
227 	u8 flogi_maddr[ETH_ALEN];
228 
229 	/* Don't listen for Ethernet packets anymore */
230 	dev_remove_pack(&fc->fcoe_packet_type);
231 	dev_remove_pack(&fc->fip_packet_type);
232 
233 	/* Delete secondary MAC addresses */
234 	rtnl_lock();
235 	memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN);
236 	dev_unicast_delete(fc->real_dev, flogi_maddr);
237 	if (!is_zero_ether_addr(fc->ctlr.data_src_addr))
238 		dev_unicast_delete(fc->real_dev, fc->ctlr.data_src_addr);
239 	if (fc->ctlr.spma)
240 		dev_unicast_delete(fc->real_dev, fc->ctlr.ctl_src_addr);
241 	dev_mc_delete(fc->real_dev, FIP_ALL_ENODE_MACS, ETH_ALEN, 0);
242 	rtnl_unlock();
243 }
244 
245 /**
246  * fcoe_queue_timer() - fcoe queue timer
247  * @lp: the fc_lport pointer
248  *
249  * Calls fcoe_check_wait_queue on timeout
250  *
251  */
252 static void fcoe_queue_timer(ulong lp)
253 {
254 	fcoe_check_wait_queue((struct fc_lport *)lp, NULL);
255 }
256 
257 /**
258  * fcoe_netdev_config() - Set up netdev for SW FCoE
259  * @lp : ptr to the fc_lport
260  * @netdev : ptr to the associated netdevice struct
261  *
262  * Must be called after fcoe_lport_config() as it will use lport mutex
263  *
264  * Returns : 0 for success
265  */
266 static int fcoe_netdev_config(struct fc_lport *lp, struct net_device *netdev)
267 {
268 	u32 mfs;
269 	u64 wwnn, wwpn;
270 	struct fcoe_softc *fc;
271 	u8 flogi_maddr[ETH_ALEN];
272 	struct netdev_hw_addr *ha;
273 
274 	/* Setup lport private data to point to fcoe softc */
275 	fc = lport_priv(lp);
276 	fc->ctlr.lp = lp;
277 	fc->real_dev = netdev;
278 	fc->phys_dev = netdev;
279 
280 	/* Require support for get_pauseparam ethtool op. */
281 	if (netdev->priv_flags & IFF_802_1Q_VLAN)
282 		fc->phys_dev = vlan_dev_real_dev(netdev);
283 
284 	/* Bonding devices are not supported */
285 	if ((fc->real_dev->priv_flags & IFF_MASTER_ALB) ||
286 	    (fc->real_dev->priv_flags & IFF_SLAVE_INACTIVE) ||
287 	    (fc->real_dev->priv_flags & IFF_MASTER_8023AD)) {
288 		return -EOPNOTSUPP;
289 	}
290 
291 	/*
292 	 * Determine max frame size based on underlying device and optional
293 	 * user-configured limit.  If the MFS is too low, fcoe_link_ok()
294 	 * will return 0, so do this first.
295 	 */
296 	mfs = fc->real_dev->mtu - (sizeof(struct fcoe_hdr) +
297 				   sizeof(struct fcoe_crc_eof));
298 	if (fc_set_mfs(lp, mfs))
299 		return -EINVAL;
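	/*
	 * For example (with the 14-byte FCoE header and 8-byte CRC/EOF
	 * trailer): a standard 1500-byte MTU gives mfs = 1478, while a
	 * full-sized FC frame (2112-byte payload + 24-byte header = 2136)
	 * needs an MTU of at least 2136 + 22 = 2158, hence the usual
	 * "baby jumbo" or larger MTUs for FCoE.
	 */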
300 
301 	/* offload features support */
302 	if (fc->real_dev->features & NETIF_F_SG)
303 		lp->sg_supp = 1;
304 
305 #ifdef NETIF_F_FCOE_CRC
306 	if (netdev->features & NETIF_F_FCOE_CRC) {
307 		lp->crc_offload = 1;
308 		printk(KERN_DEBUG "fcoe:%s supports FCCRC offload\n",
309 		       netdev->name);
310 	}
311 #endif
312 #ifdef NETIF_F_FSO
313 	if (netdev->features & NETIF_F_FSO) {
314 		lp->seq_offload = 1;
315 		lp->lso_max = netdev->gso_max_size;
316 		printk(KERN_DEBUG "fcoe:%s supports LSO for max len 0x%x\n",
317 		       netdev->name, lp->lso_max);
318 	}
319 #endif
320 	if (netdev->fcoe_ddp_xid) {
321 		lp->lro_enabled = 1;
322 		lp->lro_xid = netdev->fcoe_ddp_xid;
323 		printk(KERN_DEBUG "fcoe:%s supports LRO for max xid 0x%x\n",
324 		       netdev->name, lp->lro_xid);
325 	}
326 	skb_queue_head_init(&fc->fcoe_pending_queue);
327 	fc->fcoe_pending_queue_active = 0;
328 	setup_timer(&fc->timer, fcoe_queue_timer, (unsigned long)lp);
329 
330 	/* Look for a SAN MAC address; if multiple SAN MACs exist, only
331 	 * the first one is used for SPMA. */
332 	rcu_read_lock();
333 	for_each_dev_addr(netdev, ha) {
334 		if ((ha->type == NETDEV_HW_ADDR_T_SAN) &&
335 		    (is_valid_ether_addr(ha->addr))) {
336 			memcpy(fc->ctlr.ctl_src_addr, ha->addr, ETH_ALEN);
337 			fc->ctlr.spma = 1;
338 			break;
339 		}
340 	}
341 	rcu_read_unlock();
342 
343 	/* Set up the source MAC address */
344 	if (!fc->ctlr.spma)
345 		memcpy(fc->ctlr.ctl_src_addr, fc->real_dev->dev_addr,
346 		       fc->real_dev->addr_len);
347 
348 	wwnn = fcoe_wwn_from_mac(fc->real_dev->dev_addr, 1, 0);
349 	fc_set_wwnn(lp, wwnn);
350 	/* XXX - 3rd arg needs to be vlan id */
351 	wwpn = fcoe_wwn_from_mac(fc->real_dev->dev_addr, 2, 0);
352 	fc_set_wwpn(lp, wwpn);
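	/*
	 * fcoe_wwn_from_mac() derives IEEE NAA names from the MAC address:
	 * scheme 1 (NAA 1) above yields the node name, scheme 2 (NAA 2,
	 * IEEE extended) yields the port name, whose 12-bit qualifier is
	 * meant to carry the VLAN id (see the XXX above; 0 for now).
	 */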
353 
354 	/*
355 	 * Add FCoE MAC address as second unicast MAC address
356 	 * or enter promiscuous mode if not capable of listening
357 	 * for multiple unicast MACs.
358 	 */
359 	rtnl_lock();
360 	memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN);
361 	dev_unicast_add(fc->real_dev, flogi_maddr);
362 	if (fc->ctlr.spma)
363 		dev_unicast_add(fc->real_dev, fc->ctlr.ctl_src_addr);
364 	dev_mc_add(fc->real_dev, FIP_ALL_ENODE_MACS, ETH_ALEN, 0);
365 	rtnl_unlock();
366 
367 	/*
368 	 * setup the receive function from ethernet driver
369 	 * on the ethertype for the given device
370 	 */
371 	fc->fcoe_packet_type.func = fcoe_rcv;
372 	fc->fcoe_packet_type.type = __constant_htons(ETH_P_FCOE);
373 	fc->fcoe_packet_type.dev = fc->real_dev;
374 	dev_add_pack(&fc->fcoe_packet_type);
375 
376 	fc->fip_packet_type.func = fcoe_fip_recv;
377 	fc->fip_packet_type.type = htons(ETH_P_FIP);
378 	fc->fip_packet_type.dev = fc->real_dev;
379 	dev_add_pack(&fc->fip_packet_type);
380 
381 	return 0;
382 }
383 
384 /**
385  * fcoe_shost_config() - Sets up fc_lport->host
386  * @lp : ptr to the fc_lport
387  * @shost : ptr to the associated scsi host
388  * @dev : device associated to scsi host
389  *
390  * Must be called after fcoe_lport_config() and fcoe_netdev_config()
391  *
392  * Returns : 0 for success
393  */
394 static int fcoe_shost_config(struct fc_lport *lp, struct Scsi_Host *shost,
395 				struct device *dev)
396 {
397 	int rc = 0;
398 
399 	/* lport scsi host config */
400 	lp->host = shost;
401 
402 	lp->host->max_lun = FCOE_MAX_LUN;
403 	lp->host->max_id = FCOE_MAX_FCP_TARGET;
404 	lp->host->max_channel = 0;
405 	lp->host->transportt = scsi_transport_fcoe_sw;
406 
407 	/* add the new host to the SCSI-ml */
408 	rc = scsi_add_host(lp->host, dev);
409 	if (rc) {
410 		FC_DBG("fcoe_shost_config:error on scsi_add_host\n");
411 		return rc;
412 	}
413 	sprintf(fc_host_symbolic_name(lp->host), "%s v%s over %s",
414 		FCOE_NAME, FCOE_VERSION,
415 		fcoe_netdev(lp)->name);
416 
417 	return 0;
418 }
419 
420 /**
421  * fcoe_em_config() - allocates em for this lport
422  * @lp: the port for which the exchange manager is to be allocated
423  *
424  * Returns : 0 on success
425  */
426 static inline int fcoe_em_config(struct fc_lport *lp)
427 {
428 	BUG_ON(lp->emp);
429 
430 	lp->emp = fc_exch_mgr_alloc(lp, FC_CLASS_3,
431 				    FCOE_MIN_XID, FCOE_MAX_XID);
432 	if (!lp->emp)
433 		return -ENOMEM;
434 
435 	return 0;
436 }
437 
438 /**
439  * fcoe_if_destroy() - FCoE software HBA tear-down function
440  * @netdev: ptr to the associated net_device
441  *
442  * Returns: 0 on success, -ENODEV if no matching lport is found
443  */
444 static int fcoe_if_destroy(struct net_device *netdev)
445 {
446 	struct fc_lport *lp = NULL;
447 	struct fcoe_softc *fc;
448 
449 	BUG_ON(!netdev);
450 
451 	printk(KERN_DEBUG "fcoe_if_destroy:interface on %s\n",
452 	       netdev->name);
453 
454 	lp = fcoe_hostlist_lookup(netdev);
455 	if (!lp)
456 		return -ENODEV;
457 
458 	fc = lport_priv(lp);
459 
460 	/* Logout of the fabric */
461 	fc_fabric_logoff(lp);
462 
463 	/* Remove the instance from fcoe's list */
464 	fcoe_hostlist_remove(lp);
465 
466 	/* clean up netdev configurations */
467 	fcoe_netdev_cleanup(fc);
468 
469 	/* tear-down the FCoE controller */
470 	fcoe_ctlr_destroy(&fc->ctlr);
471 
472 	/* Cleanup the fc_lport */
473 	fc_lport_destroy(lp);
474 	fc_fcp_destroy(lp);
475 
476 	/* Detach from the scsi-ml */
477 	fc_remove_host(lp->host);
478 	scsi_remove_host(lp->host);
479 
480 	/* There are no more rports or I/O, free the EM */
481 	if (lp->emp)
482 		fc_exch_mgr_free(lp->emp);
483 
484 	/* Free the per-CPU receive threads */
485 	fcoe_percpu_clean(lp);
486 
487 	/* Free existing skbs */
488 	fcoe_clean_pending_queue(lp);
489 
490 	/* Stop the timer */
491 	del_timer_sync(&fc->timer);
492 
493 	/* Free memory used by statistical counters */
494 	fc_lport_free_stats(lp);
495 
496 	/* Release the net_device and Scsi_Host */
497 	dev_put(fc->real_dev);
498 	scsi_host_put(lp->host);
499 
500 	return 0;
501 }
502 
503 /*
504  * fcoe_ddp_setup - calls LLD's ddp_setup through net_device
505  * @lp:	the corresponding fc_lport
506  * @xid: the exchange id for this ddp transfer
507  * @sgl: the scatterlist describing this transfer
508  * @sgc: number of sg items
509  *
510  * Returns : 0 if DDP is not supported by the netdev, otherwise the LLD's return value
511  */
512 static int fcoe_ddp_setup(struct fc_lport *lp, u16 xid,
513 			     struct scatterlist *sgl, unsigned int sgc)
514 {
515 	struct net_device *n = fcoe_netdev(lp);
516 
517 	if (n->netdev_ops && n->netdev_ops->ndo_fcoe_ddp_setup)
518 		return n->netdev_ops->ndo_fcoe_ddp_setup(n, xid, sgl, sgc);
519 
520 	return 0;
521 }
522 
523 /*
524  * fcoe_ddp_done - calls LLD's ddp_done through net_device
525  * @lp:	the corresponding fc_lport
526  * @xid: the exchange id for this ddp transfer
527  *
528  * Returns : the length of data that has been completed by DDP
529  */
530 static int fcoe_ddp_done(struct fc_lport *lp, u16 xid)
531 {
532 	struct net_device *n = fcoe_netdev(lp);
533 
534 	if (n->netdev_ops && n->netdev_ops->ndo_fcoe_ddp_done)
535 		return n->netdev_ops->ndo_fcoe_ddp_done(n, xid);
536 	return 0;
537 }
538 
539 static struct libfc_function_template fcoe_libfc_fcn_templ = {
540 	.frame_send = fcoe_xmit,
541 	.ddp_setup = fcoe_ddp_setup,
542 	.ddp_done = fcoe_ddp_done,
543 };
544 
545 /**
546  * fcoe_if_create() - this function creates the fcoe interface
547  * @netdev: pointer to the associated netdevice
548  *
549  * Creates fc_lport struct and scsi_host for lport, configures lport
550  * and starts fabric login.
551  *
552  * Returns : 0 on success
553  */
554 static int fcoe_if_create(struct net_device *netdev)
555 {
556 	int rc;
557 	struct fc_lport *lp = NULL;
558 	struct fcoe_softc *fc;
559 	struct Scsi_Host *shost;
560 
561 	BUG_ON(!netdev);
562 
563 	printk(KERN_DEBUG "fcoe_if_create:interface on %s\n",
564 	       netdev->name);
565 
566 	lp = fcoe_hostlist_lookup(netdev);
567 	if (lp)
568 		return -EEXIST;
569 
570 	shost = libfc_host_alloc(&fcoe_shost_template,
571 				 sizeof(struct fcoe_softc));
572 	if (!shost) {
573 		FC_DBG("Could not allocate host structure\n");
574 		return -ENOMEM;
575 	}
576 	lp = shost_priv(shost);
577 	fc = lport_priv(lp);
578 
579 	/* configure fc_lport, e.g., em */
580 	rc = fcoe_lport_config(lp);
581 	if (rc) {
582 		FC_DBG("Could not configure lport\n");
583 		goto out_host_put;
584 	}
585 
586 	/*
587 	 * Initialize FIP.
588 	 */
589 	fcoe_ctlr_init(&fc->ctlr);
590 	fc->ctlr.send = fcoe_fip_send;
591 	fc->ctlr.update_mac = fcoe_update_src_mac;
592 
593 	/* configure lport network properties */
594 	rc = fcoe_netdev_config(lp, netdev);
595 	if (rc) {
596 		FC_DBG("Could not configure netdev for the interface\n");
597 		goto out_netdev_cleanup;
598 	}
599 
600 	/* configure lport scsi host properties */
601 	rc = fcoe_shost_config(lp, shost, &netdev->dev);
602 	if (rc) {
603 		FC_DBG("Could not configure shost for lport\n");
604 		goto out_netdev_cleanup;
605 	}
606 
607 	/* lport exch manager allocation */
608 	rc = fcoe_em_config(lp);
609 	if (rc) {
610 		FC_DBG("Could not configure em for lport\n");
611 		goto out_netdev_cleanup;
612 	}
613 
614 	/* Initialize the library */
615 	rc = fcoe_libfc_config(lp, &fcoe_libfc_fcn_templ);
616 	if (rc) {
617 		FC_DBG("Could not configure libfc for lport!\n");
618 		goto out_lp_destroy;
619 	}
620 
621 	/* add to lports list */
622 	fcoe_hostlist_add(lp);
623 
624 	lp->boot_time = jiffies;
625 
626 	fc_fabric_login(lp);
627 
628 	if (!fcoe_link_ok(lp))
629 		fcoe_ctlr_link_up(&fc->ctlr);
630 
631 	dev_hold(netdev);
632 
633 	return rc;
634 
635 out_lp_destroy:
636 	fc_exch_mgr_free(lp->emp); /* Free the EM */
637 out_netdev_cleanup:
638 	fcoe_netdev_cleanup(fc);
639 out_host_put:
640 	scsi_host_put(lp->host);
641 	return rc;
642 }
643 
644 /**
645  * fcoe_if_init() - attach to scsi transport
646  *
647  * Returns : 0 on success
648  */
649 static int __init fcoe_if_init(void)
650 {
651 	/* attach to scsi transport */
652 	scsi_transport_fcoe_sw =
653 		fc_attach_transport(&fcoe_transport_function);
654 
655 	if (!scsi_transport_fcoe_sw) {
656 		printk(KERN_ERR "fcoe_init:fc_attach_transport() failed\n");
657 		return -ENODEV;
658 	}
659 
660 	return 0;
661 }
662 
663 /**
664  * fcoe_if_exit() - detach from scsi transport
665  *
666  * Returns : 0 on success
667  */
668 int __exit fcoe_if_exit(void)
669 {
670 	fc_release_transport(scsi_transport_fcoe_sw);
671 	return 0;
672 }
673 
674 /**
675  * fcoe_percpu_thread_create() - Create a receive thread for an online cpu
676  * @cpu: cpu index for the online cpu
677  */
678 static void fcoe_percpu_thread_create(unsigned int cpu)
679 {
680 	struct fcoe_percpu_s *p;
681 	struct task_struct *thread;
682 
683 	p = &per_cpu(fcoe_percpu, cpu);
684 
685 	thread = kthread_create(fcoe_percpu_receive_thread,
686 				(void *)p, "fcoethread/%d", cpu);
687 
688 	if (likely(!IS_ERR(thread))) {
689 		kthread_bind(thread, cpu);
690 		wake_up_process(thread);
691 
692 		spin_lock_bh(&p->fcoe_rx_list.lock);
693 		p->thread = thread;
694 		spin_unlock_bh(&p->fcoe_rx_list.lock);
695 	}
696 }
697 
698 /**
699  * fcoe_percpu_thread_destroy() - removes the rx thread for the given cpu
700  * @cpu: cpu index for which the rx thread is to be removed
701  *
702  * Destroys a per-CPU Rx thread. Any pending skbs are moved to the
703  * current CPU's Rx thread. If the thread being destroyed is bound to
704  * the CPU processing this context the skbs will be freed.
705  */
706 static void fcoe_percpu_thread_destroy(unsigned int cpu)
707 {
708 	struct fcoe_percpu_s *p;
709 	struct task_struct *thread;
710 	struct page *crc_eof;
711 	struct sk_buff *skb;
712 #ifdef CONFIG_SMP
713 	struct fcoe_percpu_s *p0;
714 	unsigned targ_cpu = smp_processor_id();
715 #endif /* CONFIG_SMP */
716 
717 	printk(KERN_DEBUG "fcoe: Destroying receive thread for CPU %d\n", cpu);
718 
719 	/* Prevent any new skbs from being queued for this CPU. */
720 	p = &per_cpu(fcoe_percpu, cpu);
721 	spin_lock_bh(&p->fcoe_rx_list.lock);
722 	thread = p->thread;
723 	p->thread = NULL;
724 	crc_eof = p->crc_eof_page;
725 	p->crc_eof_page = NULL;
726 	p->crc_eof_offset = 0;
727 	spin_unlock_bh(&p->fcoe_rx_list.lock);
728 
729 #ifdef CONFIG_SMP
730 	/*
731 	 * Don't bother moving the skb's if this context is running
732 	 * on the same CPU that is having its thread destroyed. This
733 	 * can easily happen when the module is removed.
734 	 */
735 	if (cpu != targ_cpu) {
736 		p0 = &per_cpu(fcoe_percpu, targ_cpu);
737 		spin_lock_bh(&p0->fcoe_rx_list.lock);
738 		if (p0->thread) {
739 			FC_DBG("Moving frames from CPU %d to CPU %d\n",
740 			       cpu, targ_cpu);
741 
742 			while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL)
743 				__skb_queue_tail(&p0->fcoe_rx_list, skb);
744 			spin_unlock_bh(&p0->fcoe_rx_list.lock);
745 		} else {
746 			/*
747 			 * The targeted CPU is not initialized and cannot accept
748 			 * new  skbs. Unlock the targeted CPU and drop the skbs
749 			 * on the CPU that is going offline.
750 			 */
751 			while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL)
752 				kfree_skb(skb);
753 			spin_unlock_bh(&p0->fcoe_rx_list.lock);
754 		}
755 	} else {
756 		/*
757 		 * This scenario occurs when the module is being removed
758 		 * and all threads are being destroyed. skbs will continue
759 		 * to be shifted from the CPU thread that is being removed
760 		 * to the CPU thread associated with the CPU that is processing
761 		 * the module removal. Once there is only one CPU Rx thread it
762 		 * will reach this case and we will drop all skbs and later
763 		 * stop the thread.
764 		 */
765 		spin_lock_bh(&p->fcoe_rx_list.lock);
766 		while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL)
767 			kfree_skb(skb);
768 		spin_unlock_bh(&p->fcoe_rx_list.lock);
769 	}
770 #else
771 	/*
772 	 * This is a non-SMP scenario where the singular Rx thread is
773 	 * being removed. Free all skbs and stop the thread.
774 	 */
775 	spin_lock_bh(&p->fcoe_rx_list.lock);
776 	while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL)
777 		kfree_skb(skb);
778 	spin_unlock_bh(&p->fcoe_rx_list.lock);
779 #endif
780 
781 	if (thread)
782 		kthread_stop(thread);
783 
784 	if (crc_eof)
785 		put_page(crc_eof);
786 }
787 
788 /**
789  * fcoe_cpu_callback() - fcoe cpu hotplug event callback
790  * @nfb: callback data block
791  * @action: event triggering the callback
792  * @hcpu: index for the cpu of this event
793  *
794  * This creates or destroys per cpu data for fcoe
795  *
796  * Returns NOTIFY_OK always.
797  */
798 static int fcoe_cpu_callback(struct notifier_block *nfb,
799 			     unsigned long action, void *hcpu)
800 {
801 	unsigned cpu = (unsigned long)hcpu;
802 
803 	switch (action) {
804 	case CPU_ONLINE:
805 	case CPU_ONLINE_FROZEN:
806 		FC_DBG("CPU %x online: Create Rx thread\n", cpu);
807 		fcoe_percpu_thread_create(cpu);
808 		break;
809 	case CPU_DEAD:
810 	case CPU_DEAD_FROZEN:
811 		FC_DBG("CPU %x offline: Remove Rx thread\n", cpu);
812 		fcoe_percpu_thread_destroy(cpu);
813 		break;
814 	default:
815 		break;
816 	}
817 	return NOTIFY_OK;
818 }
819 
820 static struct notifier_block fcoe_cpu_notifier = {
821 	.notifier_call = fcoe_cpu_callback,
822 };
823 
824 /**
825  * fcoe_rcv() - this is the fcoe receive function called by NET_RX_SOFTIRQ
826  * @skb: the receive skb
827  * @dev: associated net device
828  * @ptype: the &packet_type structure which was used to register this handler
829  * @olddev: the original receive &net_device
830  *
831  * This function receives the packet, builds an FC frame from it and queues it to a per-CPU receive thread
832  *
833  * Returns: 0 for success
834  */
835 int fcoe_rcv(struct sk_buff *skb, struct net_device *dev,
836 	     struct packet_type *ptype, struct net_device *olddev)
837 {
838 	struct fc_lport *lp;
839 	struct fcoe_rcv_info *fr;
840 	struct fcoe_softc *fc;
841 	struct fc_frame_header *fh;
842 	struct fcoe_percpu_s *fps;
843 	unsigned short oxid;
844 	unsigned int cpu = 0;
845 
846 	fc = container_of(ptype, struct fcoe_softc, fcoe_packet_type);
847 	lp = fc->ctlr.lp;
848 	if (unlikely(lp == NULL)) {
849 		FC_DBG("cannot find hba structure");
850 		goto err2;
851 	}
852 	if (!lp->link_up)
853 		goto err2;
854 
855 	if (unlikely(debug_fcoe)) {
856 		FC_DBG("skb_info: len:%d data_len:%d head:%p data:%p tail:%p "
857 		       "end:%p sum:%d dev:%s", skb->len, skb->data_len,
858 		       skb->head, skb->data, skb_tail_pointer(skb),
859 		       skb_end_pointer(skb), skb->csum,
860 		       skb->dev ? skb->dev->name : "<NULL>");
861 
862 	}
863 
864 	/* check for FCOE packet type */
865 	if (unlikely(eth_hdr(skb)->h_proto != htons(ETH_P_FCOE))) {
866 		FC_DBG("wrong FC type frame");
867 		goto err;
868 	}
869 
870 	/*
871 	 * Check for minimum frame length, and make sure required FCoE
872 	 * and FC headers are pulled into the linear data area.
873 	 */
874 	if (unlikely((skb->len < FCOE_MIN_FRAME) ||
875 	    !pskb_may_pull(skb, FCOE_HEADER_LEN)))
876 		goto err;
877 
878 	skb_set_transport_header(skb, sizeof(struct fcoe_hdr));
879 	fh = (struct fc_frame_header *) skb_transport_header(skb);
880 
881 	oxid = ntohs(fh->fh_ox_id);
882 
883 	fr = fcoe_dev_from_skb(skb);
884 	fr->fr_dev = lp;
885 	fr->ptype = ptype;
886 
887 #ifdef CONFIG_SMP
888 	/*
889 	 * The incoming frame's exchange id (oxid) is ANDed with
890 	 * (num_online_cpus() - 1) to select a cpu, and that cpu's per-cpu
891 	 * kernel thread from fcoe_percpu is used to process the frame.
892 	 */
893 	cpu = oxid & (num_online_cpus() - 1);
894 #endif
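	/*
	 * Note: this mask spreads frames evenly only when the online CPU
	 * count is a power of two, and the selected index may name a CPU
	 * without a running receive thread (e.g. after hotplug); the
	 * !fps->thread fallback below handles that case.
	 */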
895 
896 	fps = &per_cpu(fcoe_percpu, cpu);
897 	spin_lock_bh(&fps->fcoe_rx_list.lock);
898 	if (unlikely(!fps->thread)) {
899 		/*
900 		 * The targeted CPU is not ready, let's target
901 		 * the first CPU now. For non-SMP systems this
902 		 * will check the same CPU twice.
903 		 */
904 		FC_DBG("CPU is online, but no receive thread ready "
905 		       "for incoming skb- using first online CPU.\n");
906 
907 		spin_unlock_bh(&fps->fcoe_rx_list.lock);
908 		cpu = first_cpu(cpu_online_map);
909 		fps = &per_cpu(fcoe_percpu, cpu);
910 		spin_lock_bh(&fps->fcoe_rx_list.lock);
911 		if (!fps->thread) {
912 			spin_unlock_bh(&fps->fcoe_rx_list.lock);
913 			goto err;
914 		}
915 	}
916 
917 	/*
918 	 * We now have a valid CPU that we're targeting for
919 	 * this skb. We also have this receive thread locked,
920 	 * so we're free to queue skbs into its queue.
921 	 */
922 	__skb_queue_tail(&fps->fcoe_rx_list, skb);
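	/*
	 * Only wake the thread on the empty->non-empty transition; once
	 * running it drains the rest of the list itself.
	 */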
923 	if (fps->fcoe_rx_list.qlen == 1)
924 		wake_up_process(fps->thread);
925 
926 	spin_unlock_bh(&fps->fcoe_rx_list.lock);
927 
928 	return 0;
929 err:
930 	fc_lport_get_stats(lp)->ErrorFrames++;
931 
932 err2:
933 	kfree_skb(skb);
934 	return -1;
935 }
936 
937 /**
938  * fcoe_start_io() - pass to netdev to start xmit for fcoe
939  * @skb: the skb to be xmitted
940  *
941  * Returns: 0 for success
942  */
943 static inline int fcoe_start_io(struct sk_buff *skb)
944 {
945 	int rc;
946 
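	/*
	 * dev_queue_xmit() consumes its skb reference even on failure, so
	 * take an extra one first: on success it is dropped below, on
	 * failure the caller still holds a valid skb and may requeue it.
	 */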
947 	skb_get(skb);
948 	rc = dev_queue_xmit(skb);
949 	if (rc != 0)
950 		return rc;
951 	kfree_skb(skb);
952 	return 0;
953 }
954 
955 /**
956  * fcoe_get_paged_crc_eof() - append a page fragment for the CRC/EOF trailer
957  * @skb: the skb to be xmitted
958  * @tlen: trailer length in bytes
959  *
960  * Returns: 0 for success
961  */
962 static int fcoe_get_paged_crc_eof(struct sk_buff *skb, int tlen)
963 {
964 	struct fcoe_percpu_s *fps;
965 	struct page *page;
966 
967 	fps = &get_cpu_var(fcoe_percpu);
968 	page = fps->crc_eof_page;
969 	if (!page) {
970 		page = alloc_page(GFP_ATOMIC);
971 		if (!page) {
972 			put_cpu_var(fcoe_percpu);
973 			return -ENOMEM;
974 		}
975 		fps->crc_eof_page = page;
976 		fps->crc_eof_offset = 0;
977 	}
978 
979 	get_page(page);
980 	skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, page,
981 			   fps->crc_eof_offset, tlen);
982 	skb->len += tlen;
983 	skb->data_len += tlen;
984 	skb->truesize += tlen;
985 	fps->crc_eof_offset += sizeof(struct fcoe_crc_eof);
986 
987 	if (fps->crc_eof_offset >= PAGE_SIZE) {
988 		fps->crc_eof_page = NULL;
989 		fps->crc_eof_offset = 0;
990 		put_page(page);
991 	}
992 	put_cpu_var(fcoe_percpu);
993 	return 0;
994 }
995 
996 /**
997  * fcoe_fc_crc() - calculates FC CRC in this fcoe skb
998  * @fp: the fc_frame containing data to be checksummed
999  *
1000  * This uses crc32() to calculate the CRC for the FC frame
1001  * Return   : 32 bit crc
1002  */
1003 u32 fcoe_fc_crc(struct fc_frame *fp)
1004 {
1005 	struct sk_buff *skb = fp_skb(fp);
1006 	struct skb_frag_struct *frag;
1007 	unsigned char *data;
1008 	unsigned long off, len, clen;
1009 	u32 crc;
1010 	unsigned i;
1011 
1012 	crc = crc32(~0, skb->data, skb_headlen(skb));
1013 
1014 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1015 		frag = &skb_shinfo(skb)->frags[i];
1016 		off = frag->page_offset;
1017 		len = frag->size;
1018 		while (len > 0) {
1019 			clen = min(len, PAGE_SIZE - (off & ~PAGE_MASK));
1020 			data = kmap_atomic(frag->page + (off >> PAGE_SHIFT),
1021 					   KM_SKB_DATA_SOFTIRQ);
1022 			crc = crc32(crc, data + (off & ~PAGE_MASK), clen);
1023 			kunmap_atomic(data, KM_SKB_DATA_SOFTIRQ);
1024 			off += clen;
1025 			len -= clen;
1026 		}
1027 	}
1028 	return crc;
1029 }
1030 
1031 /**
1032  * fcoe_xmit() - FCoE frame transmit function
1033  * @lp:	the associated local port
1034  * @fp: the fc_frame to be transmitted
1035  *
1036  * Return   : 0 for success
1037  */
1038 int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp)
1039 {
1040 	int wlen;
1041 	u32 crc;
1042 	struct ethhdr *eh;
1043 	struct fcoe_crc_eof *cp;
1044 	struct sk_buff *skb;
1045 	struct fcoe_dev_stats *stats;
1046 	struct fc_frame_header *fh;
1047 	unsigned int hlen;		/* header length implies the version */
1048 	unsigned int tlen;		/* trailer length */
1049 	unsigned int elen;		/* eth header, may include vlan */
1050 	struct fcoe_softc *fc;
1051 	u8 sof, eof;
1052 	struct fcoe_hdr *hp;
1053 
1054 	WARN_ON((fr_len(fp) % sizeof(u32)) != 0);
1055 
1056 	fc = lport_priv(lp);
1057 	fh = fc_frame_header_get(fp);
1058 	skb = fp_skb(fp);
1059 	wlen = skb->len / FCOE_WORD_TO_BYTE;
1060 
1061 	if (!lp->link_up) {
1062 		kfree_skb(skb);
1063 		return 0;
1064 	}
1065 
1066 	if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ) &&
1067 	    fcoe_ctlr_els_send(&fc->ctlr, skb))
1068 		return 0;
1069 
1070 	sof = fr_sof(fp);
1071 	eof = fr_eof(fp);
1072 
1073 	elen = sizeof(struct ethhdr);
1074 	hlen = sizeof(struct fcoe_hdr);
1075 	tlen = sizeof(struct fcoe_crc_eof);
1076 	wlen = (skb->len - tlen + sizeof(crc)) / FCOE_WORD_TO_BYTE;
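	/*
	 * The frame built below is laid out on the wire as:
	 *   [ eth header (elen) ][ FCoE header (hlen) ][ FC frame ]
	 *   [ CRC/EOF trailer (tlen) ]
	 * wlen is the FC frame length in 32-bit words, used for the
	 * TxWords statistic below.
	 */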
1077 
1078 	/* crc offload */
1079 	if (likely(lp->crc_offload)) {
1080 		skb->ip_summed = CHECKSUM_PARTIAL;
1081 		skb->csum_start = skb_headroom(skb);
1082 		skb->csum_offset = skb->len;
1083 		crc = 0;
1084 	} else {
1085 		skb->ip_summed = CHECKSUM_NONE;
1086 		crc = fcoe_fc_crc(fp);
1087 	}
1088 
1089 	/* copy fc crc and eof to the skb buff */
1090 	if (skb_is_nonlinear(skb)) {
1091 		skb_frag_t *frag;
1092 		if (fcoe_get_paged_crc_eof(skb, tlen)) {
1093 			kfree_skb(skb);
1094 			return -ENOMEM;
1095 		}
1096 		frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1];
1097 		cp = kmap_atomic(frag->page, KM_SKB_DATA_SOFTIRQ)
1098 			+ frag->page_offset;
1099 	} else {
1100 		cp = (struct fcoe_crc_eof *)skb_put(skb, tlen);
1101 	}
1102 
1103 	memset(cp, 0, sizeof(*cp));
1104 	cp->fcoe_eof = eof;
1105 	cp->fcoe_crc32 = cpu_to_le32(~crc);
1106 
1107 	if (skb_is_nonlinear(skb)) {
1108 		kunmap_atomic(cp, KM_SKB_DATA_SOFTIRQ);
1109 		cp = NULL;
1110 	}
1111 
1112 	/* adjust skb network/transport offsets to match mac/fcoe/fc */
1113 	skb_push(skb, elen + hlen);
1114 	skb_reset_mac_header(skb);
1115 	skb_reset_network_header(skb);
1116 	skb->mac_len = elen;
1117 	skb->protocol = htons(ETH_P_FCOE);
1118 	skb->dev = fc->real_dev;
1119 
1120 	/* fill up mac and fcoe headers */
1121 	eh = eth_hdr(skb);
1122 	eh->h_proto = htons(ETH_P_FCOE);
1123 	if (fc->ctlr.map_dest)
1124 		fc_fcoe_set_mac(eh->h_dest, fh->fh_d_id);
1125 	else
1126 		/* insert GW address */
1127 		memcpy(eh->h_dest, fc->ctlr.dest_addr, ETH_ALEN);
1128 
1129 	if (unlikely(fc->ctlr.flogi_oxid != FC_XID_UNKNOWN))
1130 		memcpy(eh->h_source, fc->ctlr.ctl_src_addr, ETH_ALEN);
1131 	else
1132 		memcpy(eh->h_source, fc->ctlr.data_src_addr, ETH_ALEN);
1133 
1134 	hp = (struct fcoe_hdr *)(eh + 1);
1135 	memset(hp, 0, sizeof(*hp));
1136 	if (FC_FCOE_VER)
1137 		FC_FCOE_ENCAPS_VER(hp, FC_FCOE_VER);
1138 	hp->fcoe_sof = sof;
1139 
1140 #ifdef NETIF_F_FSO
1141 	/* fcoe lso, mss is in max_payload which is non-zero for FCP data */
1142 	if (lp->seq_offload && fr_max_payload(fp)) {
1143 		skb_shinfo(skb)->gso_type = SKB_GSO_FCOE;
1144 		skb_shinfo(skb)->gso_size = fr_max_payload(fp);
1145 	} else {
1146 		skb_shinfo(skb)->gso_type = 0;
1147 		skb_shinfo(skb)->gso_size = 0;
1148 	}
1149 #endif
1150 	/* update tx stats, regardless of whether the LLD transmit fails */
1151 	stats = fc_lport_get_stats(lp);
1152 	stats->TxFrames++;
1153 	stats->TxWords += wlen;
1154 
1155 	/* send down to lld */
1156 	fr_dev(fp) = lp;
1157 	if (fc->fcoe_pending_queue.qlen)
1158 		fcoe_check_wait_queue(lp, skb);
1159 	else if (fcoe_start_io(skb))
1160 		fcoe_check_wait_queue(lp, skb);
1161 
1162 	return 0;
1163 }
1164 
1165 /**
1166  * fcoe_percpu_receive_thread() - recv thread per cpu
1167  * @arg: ptr to the fcoe per cpu struct
1168  *
1169  * Return: 0 for success
1170  */
1171 int fcoe_percpu_receive_thread(void *arg)
1172 {
1173 	struct fcoe_percpu_s *p = arg;
1174 	u32 fr_len;
1175 	struct fc_lport *lp;
1176 	struct fcoe_rcv_info *fr;
1177 	struct fcoe_dev_stats *stats;
1178 	struct fc_frame_header *fh;
1179 	struct sk_buff *skb;
1180 	struct fcoe_crc_eof crc_eof;
1181 	struct fc_frame *fp;
1182 	u8 *mac = NULL;
1183 	struct fcoe_softc *fc;
1184 	struct fcoe_hdr *hp;
1185 
1186 	set_user_nice(current, -20);
1187 
1188 	while (!kthread_should_stop()) {
1189 
1190 		spin_lock_bh(&p->fcoe_rx_list.lock);
1191 		while ((skb = __skb_dequeue(&p->fcoe_rx_list)) == NULL) {
1192 			set_current_state(TASK_INTERRUPTIBLE);
1193 			spin_unlock_bh(&p->fcoe_rx_list.lock);
1194 			schedule();
1195 			set_current_state(TASK_RUNNING);
1196 			if (kthread_should_stop())
1197 				return 0;
1198 			spin_lock_bh(&p->fcoe_rx_list.lock);
1199 		}
1200 		spin_unlock_bh(&p->fcoe_rx_list.lock);
1201 		fr = fcoe_dev_from_skb(skb);
1202 		lp = fr->fr_dev;
1203 		if (unlikely(lp == NULL)) {
1204 			FC_DBG("invalid HBA Structure");
1205 			kfree_skb(skb);
1206 			continue;
1207 		}
1208 
1209 		if (unlikely(debug_fcoe)) {
1210 			FC_DBG("skb_info: len:%d data_len:%d head:%p data:%p "
1211 			       "tail:%p end:%p sum:%d dev:%s",
1212 			       skb->len, skb->data_len,
1213 			       skb->head, skb->data, skb_tail_pointer(skb),
1214 			       skb_end_pointer(skb), skb->csum,
1215 			       skb->dev ? skb->dev->name : "<NULL>");
1216 		}
1217 
1218 		/*
1219 		 * Save source MAC address before discarding header.
1220 		 */
1221 		fc = lport_priv(lp);
1222 		if (skb_is_nonlinear(skb))
1223 			skb_linearize(skb);	/* not ideal */
1224 		mac = eth_hdr(skb)->h_source;
1225 
1226 		/*
1227 		 * Frame length checks and setting up the header pointers
1228 		 * was done in fcoe_rcv already.
1229 		 */
1230 		hp = (struct fcoe_hdr *) skb_network_header(skb);
1231 		fh = (struct fc_frame_header *) skb_transport_header(skb);
1232 
1233 		stats = fc_lport_get_stats(lp);
1234 		if (unlikely(FC_FCOE_DECAPS_VER(hp) != FC_FCOE_VER)) {
1235 			if (stats->ErrorFrames < 5)
1236 				printk(KERN_WARNING "FCoE version "
1237 				       "mismatch: The frame has "
1238 				       "version %x, but the "
1239 				       "initiator supports version "
1240 				       "%x\n", FC_FCOE_DECAPS_VER(hp),
1241 				       FC_FCOE_VER);
1242 			stats->ErrorFrames++;
1243 			kfree_skb(skb);
1244 			continue;
1245 		}
1246 
1247 		skb_pull(skb, sizeof(struct fcoe_hdr));
1248 		fr_len = skb->len - sizeof(struct fcoe_crc_eof);
1249 
1250 		stats->RxFrames++;
1251 		stats->RxWords += fr_len / FCOE_WORD_TO_BYTE;
1252 
1253 		fp = (struct fc_frame *)skb;
1254 		fc_frame_init(fp);
1255 		fr_dev(fp) = lp;
1256 		fr_sof(fp) = hp->fcoe_sof;
1257 
1258 		/* Copy out the CRC and EOF trailer for access */
1259 		if (skb_copy_bits(skb, fr_len, &crc_eof, sizeof(crc_eof))) {
1260 			kfree_skb(skb);
1261 			continue;
1262 		}
1263 		fr_eof(fp) = crc_eof.fcoe_eof;
1264 		fr_crc(fp) = crc_eof.fcoe_crc32;
1265 		if (pskb_trim(skb, fr_len)) {
1266 			kfree_skb(skb);
1267 			continue;
1268 		}
1269 
1270 		/*
1271 		 * We only check the CRC here if no offload is available;
1272 		 * even then, solicited data is skipped since the FCP layer
1273 		 * checks the CRC during the copy.
1274 		 */
1275 		if (lp->crc_offload && skb->ip_summed == CHECKSUM_UNNECESSARY)
1276 			fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED;
1277 		else
1278 			fr_flags(fp) |= FCPHF_CRC_UNCHECKED;
1279 
1280 		fh = fc_frame_header_get(fp);
1281 		if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA &&
1282 		    fh->fh_type == FC_TYPE_FCP) {
1283 			fc_exch_recv(lp, lp->emp, fp);
1284 			continue;
1285 		}
1286 		if (fr_flags(fp) & FCPHF_CRC_UNCHECKED) {
1287 			if (le32_to_cpu(fr_crc(fp)) !=
1288 			    ~crc32(~0, skb->data, fr_len)) {
1289 				if (debug_fcoe || stats->InvalidCRCCount < 5)
1290 					printk(KERN_WARNING "fcoe: dropping "
1291 					       "frame with CRC error\n");
1292 				stats->InvalidCRCCount++;
1293 				stats->ErrorFrames++;
1294 				fc_frame_free(fp);
1295 				continue;
1296 			}
1297 			fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED;
1298 		}
1299 		if (unlikely(fc->ctlr.flogi_oxid != FC_XID_UNKNOWN) &&
1300 		    fcoe_ctlr_recv_flogi(&fc->ctlr, fp, mac)) {
1301 			fc_frame_free(fp);
1302 			continue;
1303 		}
1304 		fc_exch_recv(lp, lp->emp, fp);
1305 	}
1306 	return 0;
1307 }
1308 
1309 /**
1310  * fcoe_check_wait_queue() - attempt to clear the transmit backlog
1311  * @lp: the fc_lport
1312  * @skb: optional new skb to queue before draining (may be NULL)
1313  *
1314  * This drains the pending queue, dequeuing skbs from its head and
1315  * calling fcoe_start_io() for each; if a transmit fails, the skb is
1316  * put back at the head and the queue is retried later.
1317  *
1318  * The wait_queue is used when the skb transmit fails. skb will go
1319  * in the wait_queue which will be emptied by the timer function or
1320  * by the next skb transmit.
1321  */
1322 static void fcoe_check_wait_queue(struct fc_lport *lp, struct sk_buff *skb)
1323 {
1324 	struct fcoe_softc *fc = lport_priv(lp);
1325 	int rc;
1326 
1327 	spin_lock_bh(&fc->fcoe_pending_queue.lock);
1328 
1329 	if (skb)
1330 		__skb_queue_tail(&fc->fcoe_pending_queue, skb);
1331 
1332 	if (fc->fcoe_pending_queue_active)
1333 		goto out;
1334 	fc->fcoe_pending_queue_active = 1;
1335 
1336 	while (fc->fcoe_pending_queue.qlen) {
1337 		/* keep qlen > 0 while the lock is dropped so concurrent fcoe_xmit() callers queue behind us until fcoe_start_io() succeeds */
1338 		fc->fcoe_pending_queue.qlen++;
1339 		skb = __skb_dequeue(&fc->fcoe_pending_queue);
1340 
1341 		spin_unlock_bh(&fc->fcoe_pending_queue.lock);
1342 		rc = fcoe_start_io(skb);
1343 		spin_lock_bh(&fc->fcoe_pending_queue.lock);
1344 
1345 		if (rc) {
1346 			__skb_queue_head(&fc->fcoe_pending_queue, skb);
1347 			/* undo temporary increment above */
1348 			fc->fcoe_pending_queue.qlen--;
1349 			break;
1350 		}
1351 		/* undo temporary increment above */
1352 		fc->fcoe_pending_queue.qlen--;
1353 	}
1354 
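	/*
	 * Simple hysteresis for back-pressure: clear qfull once the backlog
	 * drains below FCOE_LOW_QUEUE_DEPTH; it is set again further below
	 * when the backlog exceeds FCOE_MAX_QUEUE_DEPTH.
	 */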
1355 	if (fc->fcoe_pending_queue.qlen < FCOE_LOW_QUEUE_DEPTH)
1356 		lp->qfull = 0;
1357 	if (fc->fcoe_pending_queue.qlen && !timer_pending(&fc->timer))
1358 		mod_timer(&fc->timer, jiffies + 2);
1359 	fc->fcoe_pending_queue_active = 0;
1360 out:
1361 	if (fc->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH)
1362 		lp->qfull = 1;
1363 	spin_unlock_bh(&fc->fcoe_pending_queue.lock);
1364 	return;
1365 }
1366 
1367 /**
1368  * fcoe_dev_setup() - setup link change notification interface
1369  */
1370 static void fcoe_dev_setup(void)
1371 {
1372 	register_netdevice_notifier(&fcoe_notifier);
1373 }
1374 
1375 /**
1376  * fcoe_dev_cleanup() - cleanup link change notification interface
1377  */
1378 static void fcoe_dev_cleanup(void)
1379 {
1380 	unregister_netdevice_notifier(&fcoe_notifier);
1381 }
1382 
1383 /**
1384  * fcoe_device_notification() - netdev event notification callback
1385  * @notifier: context of the notification
1386  * @event: type of event
1387  * @ptr: the net_device that the event concerns
1388  *
1389  * This function is called from the netdevice notifier chain on link
1390  * change and other device events.
1391  * Returns: NOTIFY_OK, or NOTIFY_DONE if the device has no FCoE instance
1392  */
1393 static int fcoe_device_notification(struct notifier_block *notifier,
1394 				    ulong event, void *ptr)
1395 {
1396 	struct fc_lport *lp = NULL;
1397 	struct net_device *real_dev = ptr;
1398 	struct fcoe_softc *fc;
1399 	struct fcoe_dev_stats *stats;
1400 	u32 link_possible = 1;
1401 	u32 mfs;
1402 	int rc = NOTIFY_OK;
1403 
1404 	read_lock(&fcoe_hostlist_lock);
1405 	list_for_each_entry(fc, &fcoe_hostlist, list) {
1406 		if (fc->real_dev == real_dev) {
1407 			lp = fc->ctlr.lp;
1408 			break;
1409 		}
1410 	}
1411 	read_unlock(&fcoe_hostlist_lock);
1412 	if (lp == NULL) {
1413 		rc = NOTIFY_DONE;
1414 		goto out;
1415 	}
1416 
1417 	switch (event) {
1418 	case NETDEV_DOWN:
1419 	case NETDEV_GOING_DOWN:
1420 		link_possible = 0;
1421 		break;
1422 	case NETDEV_UP:
1423 	case NETDEV_CHANGE:
1424 		break;
1425 	case NETDEV_CHANGEMTU:
1426 		mfs = fc->real_dev->mtu -
1427 			(sizeof(struct fcoe_hdr) +
1428 			 sizeof(struct fcoe_crc_eof));
1429 		if (mfs >= FC_MIN_MAX_FRAME)
1430 			fc_set_mfs(lp, mfs);
1431 		break;
1432 	case NETDEV_REGISTER:
1433 		break;
1434 	default:
1435 		FC_DBG("Unknown event %ld from netdev netlink\n", event);
1436 	}
1437 	if (link_possible && !fcoe_link_ok(lp))
1438 		fcoe_ctlr_link_up(&fc->ctlr);
1439 	else if (fcoe_ctlr_link_down(&fc->ctlr)) {
1440 		stats = fc_lport_get_stats(lp);
1441 		stats->LinkFailureCount++;
1442 		fcoe_clean_pending_queue(lp);
1443 	}
1444 out:
1445 	return rc;
1446 }
1447 
1448 /**
1449  * fcoe_if_to_netdev() - parse a name buffer to get netdev
1450  * @buffer: incoming buffer to be copied
1451  *
1452  * Returns: NULL or ptr to net_device
1453  */
1454 static struct net_device *fcoe_if_to_netdev(const char *buffer)
1455 {
1456 	char *cp;
1457 	char ifname[IFNAMSIZ + 2];
1458 
1459 	if (buffer) {
1460 		strlcpy(ifname, buffer, IFNAMSIZ);
1461 		cp = ifname + strlen(ifname);
1462 		while (--cp >= ifname && *cp == '\n')
1463 			*cp = '\0';
1464 		return dev_get_by_name(&init_net, ifname);
1465 	}
1466 	return NULL;
1467 }
1468 
1469 /**
1470  * fcoe_netdev_to_module_owner() - finds out the driver module of the netdev
1471  * @netdev: the target netdev
1472  *
1473  * Returns: ptr to the struct module, NULL for failure
1474  */
1475 static struct module *
1476 fcoe_netdev_to_module_owner(const struct net_device *netdev)
1477 {
1478 	struct device *dev;
1479 
1480 	if (!netdev)
1481 		return NULL;
1482 
1483 	dev = netdev->dev.parent;
1484 	if (!dev)
1485 		return NULL;
1486 
1487 	if (!dev->driver)
1488 		return NULL;
1489 
1490 	return dev->driver->owner;
1491 }
1492 
1493 /**
1494  * fcoe_ethdrv_get() - Hold the Ethernet driver
1495  * @netdev: the target netdev
1496  *
1497  * Holds the Ethernet driver module by try_module_get() for
1498  * the corresponding netdev.
1499  *
1500  * Returns: non-zero for success, 0 or -ENODEV on failure
1501  */
1502 static int fcoe_ethdrv_get(const struct net_device *netdev)
1503 {
1504 	struct module *owner;
1505 
1506 	owner = fcoe_netdev_to_module_owner(netdev);
1507 	if (owner) {
1508 		printk(KERN_DEBUG "fcoe:hold driver module %s for %s\n",
1509 		       module_name(owner), netdev->name);
1510 		return  try_module_get(owner);
1511 	}
1512 	return -ENODEV;
1513 }
1514 
1515 /**
1516  * fcoe_ethdrv_put() - Release the Ethernet driver
1517  * @netdev: the target netdev
1518  *
1519  * Releases the Ethernet driver module by module_put for
1520  * the corresponding netdev.
1521  *
1522  * Returns: 0 for success
1523  */
1524 static int fcoe_ethdrv_put(const struct net_device *netdev)
1525 {
1526 	struct module *owner;
1527 
1528 	owner = fcoe_netdev_to_module_owner(netdev);
1529 	if (owner) {
1530 		printk(KERN_DEBUG "fcoe:release driver module %s for %s\n",
1531 		       module_name(owner), netdev->name);
1532 		module_put(owner);
1533 		return 0;
1534 	}
1535 	return -ENODEV;
1536 }
1537 
1538 /**
1539  * fcoe_destroy() - handles the destroy from sysfs
1540  * @buffer: expected to be an eth if name
1541  * @kp: associated kernel param
1542  *
1543  * Returns: 0 for success
1544  */
1545 static int fcoe_destroy(const char *buffer, struct kernel_param *kp)
1546 {
1547 	int rc;
1548 	struct net_device *netdev;
1549 
1550 	netdev = fcoe_if_to_netdev(buffer);
1551 	if (!netdev) {
1552 		rc = -ENODEV;
1553 		goto out_nodev;
1554 	}
1555 	/* look for existing lport */
1556 	if (!fcoe_hostlist_lookup(netdev)) {
1557 		rc = -ENODEV;
1558 		goto out_putdev;
1559 	}
1560 	rc = fcoe_if_destroy(netdev);
1561 	if (rc) {
1562 		printk(KERN_ERR "fcoe: fcoe_if_destroy(%s) failed\n",
1563 		       netdev->name);
1564 		rc = -EIO;
1565 		goto out_putdev;
1566 	}
1567 	fcoe_ethdrv_put(netdev);
1568 	rc = 0;
1569 out_putdev:
1570 	dev_put(netdev);
1571 out_nodev:
1572 	return rc;
1573 }
1574 
1575 /**
1576  * fcoe_create() - Handles the create call from sysfs
1577  * @buffer: expected to be an eth if name
1578  * @kp: associated kernel param
1579  *
1580  * Returns: 0 for success
1581  */
1582 static int fcoe_create(const char *buffer, struct kernel_param *kp)
1583 {
1584 	int rc;
1585 	struct net_device *netdev;
1586 
1587 	netdev = fcoe_if_to_netdev(buffer);
1588 	if (!netdev) {
1589 		rc = -ENODEV;
1590 		goto out_nodev;
1591 	}
1592 	/* look for existing lport */
1593 	if (fcoe_hostlist_lookup(netdev)) {
1594 		rc = -EEXIST;
1595 		goto out_putdev;
1596 	}
1597 	fcoe_ethdrv_get(netdev);
1598 
1599 	rc = fcoe_if_create(netdev);
1600 	if (rc) {
1601 		printk(KERN_ERR "fcoe: fcoe_if_create(%s) failed\n",
1602 		       netdev->name);
1603 		fcoe_ethdrv_put(netdev);
1604 		rc = -EIO;
1605 		goto out_putdev;
1606 	}
1607 	rc = 0;
1608 out_putdev:
1609 	dev_put(netdev);
1610 out_nodev:
1611 	return rc;
1612 }
1613 
1614 module_param_call(create, fcoe_create, NULL, NULL, S_IWUSR);
1615 __MODULE_PARM_TYPE(create, "string");
1616 MODULE_PARM_DESC(create, "Create fcoe port using net device passed in.");
1617 module_param_call(destroy, fcoe_destroy, NULL, NULL, S_IWUSR);
1618 __MODULE_PARM_TYPE(destroy, "string");
1619 MODULE_PARM_DESC(destroy, "Destroy fcoe port");
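
/*
 * These writable parameters form the driver's control interface, e.g.
 * (run as root; "eth0" is just an example interface name):
 *   echo eth0 > /sys/module/fcoe/parameters/create
 *   echo eth0 > /sys/module/fcoe/parameters/destroy
 */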
1620 
1621 /**
1622  * fcoe_link_ok() - Check if link is ok for the fc_lport
1623  * @lp: ptr to the fc_lport
1624  *
1625  * Any permanently-disqualifying conditions have been previously checked.
1626  * This also updates the speed setting, which may change with link for 100/1000.
1627  *
1628  * This function should probably be checking for PAUSE support at some point
1629  * in the future. Currently Per-priority-pause is not determinable using
1630  * ethtool, so we shouldn't be restrictive until that problem is resolved.
1631  *
1632  * Returns: 0 if link is OK for use by FCoE.
1633  *
1634  */
1635 int fcoe_link_ok(struct fc_lport *lp)
1636 {
1637 	struct fcoe_softc *fc = lport_priv(lp);
1638 	struct net_device *dev = fc->real_dev;
1639 	struct ethtool_cmd ecmd = { ETHTOOL_GSET };
1640 	int rc = 0;
1641 
1642 	if ((dev->flags & IFF_UP) && netif_carrier_ok(dev)) {
1643 		dev = fc->phys_dev;
1644 		if (dev->ethtool_ops->get_settings) {
1645 			dev->ethtool_ops->get_settings(dev, &ecmd);
1646 			lp->link_supported_speeds &=
1647 				~(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT);
1648 			if (ecmd.supported & (SUPPORTED_1000baseT_Half |
1649 					      SUPPORTED_1000baseT_Full))
1650 				lp->link_supported_speeds |= FC_PORTSPEED_1GBIT;
1651 			if (ecmd.supported & SUPPORTED_10000baseT_Full)
1652 				lp->link_supported_speeds |=
1653 					FC_PORTSPEED_10GBIT;
1654 			if (ecmd.speed == SPEED_1000)
1655 				lp->link_speed = FC_PORTSPEED_1GBIT;
1656 			if (ecmd.speed == SPEED_10000)
1657 				lp->link_speed = FC_PORTSPEED_10GBIT;
1658 		}
1659 	} else
1660 		rc = -1;
1661 
1662 	return rc;
1663 }
1664 
1665 /**
1666  * fcoe_percpu_clean() - Clear the pending skbs for an lport
1667  * @lp: the fc_lport
1668  */
1669 void fcoe_percpu_clean(struct fc_lport *lp)
1670 {
1671 	struct fcoe_percpu_s *pp;
1672 	struct fcoe_rcv_info *fr;
1673 	struct sk_buff_head *list;
1674 	struct sk_buff *skb, *next;
1675 	struct sk_buff *head;
1676 	unsigned int cpu;
1677 
1678 	for_each_possible_cpu(cpu) {
1679 		pp = &per_cpu(fcoe_percpu, cpu);
1680 		spin_lock_bh(&pp->fcoe_rx_list.lock);
1681 		list = &pp->fcoe_rx_list;
1682 		head = list->next;
1683 		for (skb = head; skb != (struct sk_buff *)list;
1684 		     skb = next) {
1685 			next = skb->next;
1686 			fr = fcoe_dev_from_skb(skb);
1687 			if (fr->fr_dev == lp) {
1688 				__skb_unlink(skb, list);
1689 				kfree_skb(skb);
1690 			}
1691 		}
1692 		spin_unlock_bh(&pp->fcoe_rx_list.lock);
1693 	}
1694 }
1695 
1696 /**
1697  * fcoe_clean_pending_queue() - Dequeue and free all skbs on the pending queue
1698  * @lp: the corresponding fc_lport
1699  *
1700  * Returns: none
1701  */
1702 void fcoe_clean_pending_queue(struct fc_lport *lp)
1703 {
1704 	struct fcoe_softc  *fc = lport_priv(lp);
1705 	struct sk_buff *skb;
1706 
1707 	spin_lock_bh(&fc->fcoe_pending_queue.lock);
1708 	while ((skb = __skb_dequeue(&fc->fcoe_pending_queue)) != NULL) {
1709 		spin_unlock_bh(&fc->fcoe_pending_queue.lock);
1710 		kfree_skb(skb);
1711 		spin_lock_bh(&fc->fcoe_pending_queue.lock);
1712 	}
1713 	spin_unlock_bh(&fc->fcoe_pending_queue.lock);
1714 }
1715 
1716 /**
1717  * fcoe_reset() - Resets the lport associated with this SCSI host
1718  * @shost: shost the reset is from
1719  *
1720  * Returns: always 0
1721  */
1722 int fcoe_reset(struct Scsi_Host *shost)
1723 {
1724 	struct fc_lport *lport = shost_priv(shost);
1725 	fc_lport_reset(lport);
1726 	return 0;
1727 }
1728 
1729 /**
1730  * fcoe_hostlist_lookup_softc() - find the corresponding fcoe_softc for a given device
1731  * @dev: this is currently ptr to net_device
1732  *
1733  * Returns: NULL or the located fcoe_softc
1734  */
1735 static struct fcoe_softc *
1736 fcoe_hostlist_lookup_softc(const struct net_device *dev)
1737 {
1738 	struct fcoe_softc *fc;
1739 
1740 	read_lock(&fcoe_hostlist_lock);
1741 	list_for_each_entry(fc, &fcoe_hostlist, list) {
1742 		if (fc->real_dev == dev) {
1743 			read_unlock(&fcoe_hostlist_lock);
1744 			return fc;
1745 		}
1746 	}
1747 	read_unlock(&fcoe_hostlist_lock);
1748 	return NULL;
1749 }
1750 
1751 /**
1752  * fcoe_hostlist_lookup() - Find the corresponding lport by netdev
1753  * @netdev: ptr to net_device
1754  *
1755  * Returns: the matching fc_lport, or NULL if none is found
1756  */
1757 struct fc_lport *fcoe_hostlist_lookup(const struct net_device *netdev)
1758 {
1759 	struct fcoe_softc *fc;
1760 
1761 	fc = fcoe_hostlist_lookup_softc(netdev);
1762 
1763 	return (fc) ? fc->ctlr.lp : NULL;
1764 }
1765 
1766 /**
1767  * fcoe_hostlist_add() - Add a lport to lports list
1768  * @lp: ptr to the fc_lport to be added
1769  *
1770  * Returns: 0 for success
1771  */
1772 int fcoe_hostlist_add(const struct fc_lport *lp)
1773 {
1774 	struct fcoe_softc *fc;
1775 
1776 	fc = fcoe_hostlist_lookup_softc(fcoe_netdev(lp));
1777 	if (!fc) {
1778 		fc = lport_priv(lp);
1779 		write_lock_bh(&fcoe_hostlist_lock);
1780 		list_add_tail(&fc->list, &fcoe_hostlist);
1781 		write_unlock_bh(&fcoe_hostlist_lock);
1782 	}
1783 	return 0;
1784 }
1785 
1786 /**
1787  * fcoe_hostlist_remove() - remove a lport from lports list
1788  * @lp: ptr to the fc_lport to be removed
1789  *
1790  * Returns: 0 for success
1791  */
1792 int fcoe_hostlist_remove(const struct fc_lport *lp)
1793 {
1794 	struct fcoe_softc *fc;
1795 
1796 	fc = fcoe_hostlist_lookup_softc(fcoe_netdev(lp));
1797 	BUG_ON(!fc);
1798 	write_lock_bh(&fcoe_hostlist_lock);
1799 	list_del(&fc->list);
1800 	write_unlock_bh(&fcoe_hostlist_lock);
1801 
1802 	return 0;
1803 }
1804 
1805 /**
1806  * fcoe_init() - fcoe module loading initialization
1807  *
1808  * Returns 0 on success, negative on failure
1809  */
1810 static int __init fcoe_init(void)
1811 {
1812 	unsigned int cpu;
1813 	int rc = 0;
1814 	struct fcoe_percpu_s *p;
1815 
1816 	INIT_LIST_HEAD(&fcoe_hostlist);
1817 	rwlock_init(&fcoe_hostlist_lock);
1818 
1819 	for_each_possible_cpu(cpu) {
1820 		p = &per_cpu(fcoe_percpu, cpu);
1821 		skb_queue_head_init(&p->fcoe_rx_list);
1822 	}
1823 
1824 	for_each_online_cpu(cpu)
1825 		fcoe_percpu_thread_create(cpu);
1826 
1827 	/* Initialize per CPU interrupt thread */
1828 	rc = register_hotcpu_notifier(&fcoe_cpu_notifier);
1829 	if (rc)
1830 		goto out_free;
1831 
1832 	/* Setup link change notification */
1833 	fcoe_dev_setup();
1834 
1835 	fcoe_if_init();
1836 
1837 	return 0;
1838 
1839 out_free:
1840 	for_each_online_cpu(cpu) {
1841 		fcoe_percpu_thread_destroy(cpu);
1842 	}
1843 
1844 	return rc;
1845 }
1846 module_init(fcoe_init);
1847 
1848 /**
1849  * fcoe_exit() - fcoe module unloading cleanup
1852  */
1853 static void __exit fcoe_exit(void)
1854 {
1855 	unsigned int cpu;
1856 	struct fcoe_softc *fc, *tmp;
1857 
1858 	fcoe_dev_cleanup();
1859 
1860 	/* releases the associated fcoe hosts */
1861 	list_for_each_entry_safe(fc, tmp, &fcoe_hostlist, list)
1862 		fcoe_if_destroy(fc->real_dev);
1863 
1864 	unregister_hotcpu_notifier(&fcoe_cpu_notifier);
1865 
1866 	for_each_online_cpu(cpu) {
1867 		fcoe_percpu_thread_destroy(cpu);
1868 	}
1869 
1870 	/* detach from scsi transport */
1871 	fcoe_if_exit();
1872 }
1873 module_exit(fcoe_exit);
1874