xref: /linux/drivers/message/fusion/mptlan.c (revision b454cc6636d254fbf6049b73e9560aee76fb04a3)
1 /*
2  *  linux/drivers/message/fusion/mptlan.c
3  *      IP Over Fibre Channel device driver.
4  *      For use with LSI Logic Fibre Channel PCI chip/adapters
5  *      running LSI Logic Fusion MPT (Message Passing Technology) firmware.
6  *
7  *  Copyright (c) 2000-2007 LSI Logic Corporation
8  *
9  */
10 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
11 /*
12     This program is free software; you can redistribute it and/or modify
13     it under the terms of the GNU General Public License as published by
14     the Free Software Foundation; version 2 of the License.
15 
16     This program is distributed in the hope that it will be useful,
17     but WITHOUT ANY WARRANTY; without even the implied warranty of
18     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
19     GNU General Public License for more details.
20 
21     NO WARRANTY
22     THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
23     CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
24     LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
25     MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
26     solely responsible for determining the appropriateness of using and
27     distributing the Program and assumes all risks associated with its
28     exercise of rights under this Agreement, including but not limited to
29     the risks and costs of program errors, damage to or loss of data,
30     programs or equipment, and unavailability or interruption of operations.
31 
32     DISCLAIMER OF LIABILITY
33     NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
34     DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
35     DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
36     ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
37     TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
38     USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
39     HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
40 
41     You should have received a copy of the GNU General Public License
42     along with this program; if not, write to the Free Software
43     Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
44 */
45 
46 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
47 /*
48  * Define statements used for debugging
49  */
50 //#define MPT_LAN_IO_DEBUG
51 
52 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
53 
54 #include "mptlan.h"
55 #include <linux/init.h>
56 #include <linux/module.h>
57 #include <linux/fs.h>
58 
59 #define my_VERSION	MPT_LINUX_VERSION_COMMON
60 #define MYNAM		"mptlan"
61 
62 MODULE_LICENSE("GPL");
63 MODULE_VERSION(my_VERSION);
64 
65 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
66 /*
67  * MPT LAN message sizes without variable part.
68  */
69 #define MPT_LAN_RECEIVE_POST_REQUEST_SIZE \
70 	(sizeof(LANReceivePostRequest_t) - sizeof(SGE_MPI_UNION))
71 
72 #define MPT_LAN_TRANSACTION32_SIZE \
73 	(sizeof(SGETransaction32_t) - sizeof(u32))
74 
75 /*
76  *  Fusion MPT LAN private structures
77  */
78 
/*
 * One entry in a singly-linked list of peers whose FC NAA field is
 * broken (QLogic RFC 2625 workaround; see QLOGIC_NAA_WORKAROUND use
 * in mpt_lan_sdu_send).
 */
struct NAA_Hosed {
	u16 NAA;		/* NAA value to substitute for this peer */
	u8 ieee[FC_ALEN];	/* Peer's IEEE (MAC) address */
	struct NAA_Hosed *next;	/* Next entry; NULL terminates the list */
};
84 
/*
 * Bookkeeping for one posted Tx or Rx buffer: the skb itself plus the
 * single-entry DMA mapping (address and mapped length) needed to
 * unmap it again when the IOC hands the buffer back.
 */
struct BufferControl {
	struct sk_buff	*skb;
	dma_addr_t	dma;
	unsigned int	len;
};
90 
/*
 * Per-net_device private state for one MPT LAN port
 * (accessed via netdev_priv()).
 */
struct mpt_lan_priv {
	MPT_ADAPTER *mpt_dev;		/* Owning Fusion MPT adapter */
	u8 pnum; /* Port number in the IOC. This is not a Unix network port! */

	atomic_t buckets_out;		/* number of unused buckets on IOC */
	int bucketthresh;		/* Send more when this many left */

	int *mpt_txfidx; /* Free Tx Context list (used as a stack) */
	int mpt_txfidx_tail;		/* Top of stack; -1 when empty */
	spinlock_t txfidx_lock;		/* Protects mpt_txfidx/_tail */

	int *mpt_rxfidx; /* Free Rx Context list (used as a stack) */
	int mpt_rxfidx_tail;		/* Top of stack; -1 when empty */
	spinlock_t rxfidx_lock;		/* Protects mpt_rxfidx/_tail */

	struct BufferControl *RcvCtl;	/* Receive BufferControl structs */
	struct BufferControl *SendCtl;	/* Send BufferControl structs */

	int max_buckets_out;		/* Max buckets to send to IOC */
	int tx_max_out;			/* IOC's Tx queue len */

	u32 total_posted;		/* Buckets posted since driver load */
	u32 total_received;		/* Buckets returned since driver load */
	struct net_device_stats stats;	/* Per device statistics */

	struct delayed_work post_buckets_task;	/* Deferred bucket reposting */
	struct net_device *dev;		/* Back-pointer to our net_device */
	unsigned long post_buckets_active;	/* Bit 0 set => task queued */
};
120 
/*
 * On-the-wire LAN header used by this driver: destination type/address
 * followed by source type/address (FC_ALEN-byte addresses).
 */
struct mpt_lan_ohdr {
	u16	dtype;		/* Destination address type */
	u8	daddr[FC_ALEN];	/* Destination address */
	u16	stype;		/* Source address type */
	u8	saddr[FC_ALEN];	/* Source address */
};
127 
128 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
129 
130 /*
131  *  Forward protos...
132  */
133 static int  lan_reply (MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf,
134 		       MPT_FRAME_HDR *reply);
135 static int  mpt_lan_open(struct net_device *dev);
136 static int  mpt_lan_reset(struct net_device *dev);
137 static int  mpt_lan_close(struct net_device *dev);
138 static void mpt_lan_post_receive_buckets(struct mpt_lan_priv *priv);
139 static void mpt_lan_wake_post_buckets_task(struct net_device *dev,
140 					   int priority);
141 static int  mpt_lan_receive_post_turbo(struct net_device *dev, u32 tmsg);
142 static int  mpt_lan_receive_post_reply(struct net_device *dev,
143 				       LANReceivePostReply_t *pRecvRep);
144 static int  mpt_lan_send_turbo(struct net_device *dev, u32 tmsg);
145 static int  mpt_lan_send_reply(struct net_device *dev,
146 			       LANSendReply_t *pSendRep);
147 static int  mpt_lan_ioc_reset(MPT_ADAPTER *ioc, int reset_phase);
148 static int  mpt_lan_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply);
149 static unsigned short mpt_lan_type_trans(struct sk_buff *skb,
150 					 struct net_device *dev);
151 
152 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
153 /*
154  *  Fusion MPT LAN private data
155  */
156 static int LanCtx = -1;
157 
158 static u32 max_buckets_out = 127;
159 static u32 tx_max_out_p = 127 - 16;
160 
161 #ifdef QLOGIC_NAA_WORKAROUND
162 static struct NAA_Hosed *mpt_bad_naa = NULL;
163 DEFINE_RWLOCK(bad_naa_lock);
164 #endif
165 
166 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
167 /*
168  * Fusion MPT LAN external data
169  */
170 extern int mpt_lan_index;
171 
172 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
173 /**
174  *	lan_reply - Handle all data sent from the hardware.
175  *	@ioc: Pointer to MPT_ADAPTER structure
176  *	@mf: Pointer to original MPT request frame (NULL if TurboReply)
177  *	@reply: Pointer to MPT reply frame
178  *
179  *	Returns 1 indicating original alloc'd request frame ptr
180  *	should be freed, or 0 if it shouldn't.
181  */
static int
lan_reply (MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *reply)
{
	struct net_device *dev = ioc->netdev;
	int FreeReqFrame = 0;	/* Returned to caller: free request frame? */

	dioprintk((KERN_INFO MYNAM ": %s/%s: Got reply.\n",
		  IOC_AND_NETDEV_NAMES_s_s(dev)));

//	dioprintk((KERN_INFO MYNAM "@lan_reply: mf = %p, reply = %p\n",
//			mf, reply));

	/*
	 * mf == NULL means this is a "turbo" reply: @reply does not point
	 * at a reply frame, it IS the raw 32-bit turbo message value.
	 */
	if (mf == NULL) {
		u32 tmsg = CAST_PTR_TO_U32(reply);

		dioprintk((KERN_INFO MYNAM ": %s/%s: @lan_reply, tmsg %08x\n",
				IOC_AND_NETDEV_NAMES_s_s(dev),
				tmsg));

		switch (GET_LAN_FORM(tmsg)) {

		// NOTE!  (Optimization) First case here is now caught in
		//  mptbase.c::mpt_interrupt() routine and callback here
		//  is now skipped for this case!
#if 0
		case LAN_REPLY_FORM_MESSAGE_CONTEXT:
//			dioprintk((KERN_INFO MYNAM "/lan_reply: "
//				  "MessageContext turbo reply received\n"));
			FreeReqFrame = 1;
			break;
#endif

		case LAN_REPLY_FORM_SEND_SINGLE:
//			dioprintk((MYNAM "/lan_reply: "
//				  "calling mpt_lan_send_reply (turbo)\n"));

			// Potential BUG here?
			//	FreeReqFrame = mpt_lan_send_turbo(dev, tmsg);
			//  If/when mpt_lan_send_turbo would return 1 here,
			//  calling routine (mptbase.c|mpt_interrupt)
			//  would Oops because mf has already been set
			//  to NULL.  So after return from this func,
			//  mpt_interrupt() will attempt to put (NULL) mf ptr
			//  item back onto its adapter FreeQ - Oops!:-(
			//  It's Ok, since mpt_lan_send_turbo() *currently*
			//  always returns 0, but..., just in case:

			(void) mpt_lan_send_turbo(dev, tmsg);
			FreeReqFrame = 0;

			break;

		case LAN_REPLY_FORM_RECEIVE_SINGLE:
//			dioprintk((KERN_INFO MYNAM "@lan_reply: "
//				  "rcv-Turbo = %08x\n", tmsg));
			mpt_lan_receive_post_turbo(dev, tmsg);
			break;

		default:
			printk (KERN_ERR MYNAM "/lan_reply: Got a turbo reply "
				"that I don't know what to do with\n");

			/* CHECKME!  Hmmm...  FreeReqFrame is 0 here; is that right? */

			break;
		}

		/* Turbo path: there is no request frame to free anyway. */
		return FreeReqFrame;
	}

//	msg = (u32 *) reply;
//	dioprintk((KERN_INFO MYNAM "@lan_reply: msg = %08x %08x %08x %08x\n",
//		  le32_to_cpu(msg[0]), le32_to_cpu(msg[1]),
//		  le32_to_cpu(msg[2]), le32_to_cpu(msg[3])));
//	dioprintk((KERN_INFO MYNAM "@lan_reply: Function = %02xh\n",
//		  reply->u.hdr.Function));

	/* Full (non-turbo) reply frame: dispatch on the MPI function. */
	switch (reply->u.hdr.Function) {

	case MPI_FUNCTION_LAN_SEND:
	{
		LANSendReply_t *pSendRep;

		pSendRep = (LANSendReply_t *) reply;
		FreeReqFrame = mpt_lan_send_reply(dev, pSendRep);
		break;
	}

	case MPI_FUNCTION_LAN_RECEIVE:
	{
		LANReceivePostReply_t *pRecvRep;

		pRecvRep = (LANReceivePostReply_t *) reply;
		if (pRecvRep->NumberOfContexts) {
			mpt_lan_receive_post_reply(dev, pRecvRep);
			/* Keep the frame while continuation replies are pending. */
			if (!(pRecvRep->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY))
				FreeReqFrame = 1;
		} else
			dioprintk((KERN_INFO MYNAM "@lan_reply: zero context "
				  "ReceivePostReply received.\n"));
		break;
	}

	case MPI_FUNCTION_LAN_RESET:
		/* Just a default reply. Might want to check it to
		 * make sure that everything went ok.
		 */
		FreeReqFrame = 1;
		break;

	case MPI_FUNCTION_EVENT_NOTIFICATION:
	case MPI_FUNCTION_EVENT_ACK:
		/*  _EVENT_NOTIFICATION should NOT come down this path any more.
		 *  Should be routed to mpt_lan_event_process(), but just in case...
		 */
		FreeReqFrame = 1;
		break;

	default:
		printk (KERN_ERR MYNAM "/lan_reply: Got a non-turbo "
			"reply that I don't know what to do with\n");

		/* CHECKME!  Hmmm...  FreeReqFrame is 0 here; is that right? */
		FreeReqFrame = 1;

		break;
	}

	return FreeReqFrame;
}
312 
313 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
314 static int
315 mpt_lan_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
316 {
317 	struct net_device *dev = ioc->netdev;
318 	struct mpt_lan_priv *priv;
319 
320 	if (dev == NULL)
321 		return(1);
322 	else
323 		priv = netdev_priv(dev);
324 
325 	dlprintk((KERN_INFO MYNAM ": IOC %s_reset routed to LAN driver!\n",
326 			reset_phase==MPT_IOC_SETUP_RESET ? "setup" : (
327 			reset_phase==MPT_IOC_PRE_RESET ? "pre" : "post")));
328 
329 	if (priv->mpt_rxfidx == NULL)
330 		return (1);
331 
332 	if (reset_phase == MPT_IOC_SETUP_RESET) {
333 		;
334 	} else if (reset_phase == MPT_IOC_PRE_RESET) {
335 		int i;
336 		unsigned long flags;
337 
338 		netif_stop_queue(dev);
339 
340 		dlprintk ((KERN_INFO "mptlan/ioc_reset: called netif_stop_queue for %s.\n", dev->name));
341 
342 		atomic_set(&priv->buckets_out, 0);
343 
344 		/* Reset Rx Free Tail index and re-populate the queue. */
345 		spin_lock_irqsave(&priv->rxfidx_lock, flags);
346 		priv->mpt_rxfidx_tail = -1;
347 		for (i = 0; i < priv->max_buckets_out; i++)
348 			priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = i;
349 		spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
350 	} else {
351 		mpt_lan_post_receive_buckets(priv);
352 		netif_wake_queue(dev);
353 	}
354 
355 	return 1;
356 }
357 
358 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
359 static int
360 mpt_lan_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
361 {
362 	dlprintk((KERN_INFO MYNAM ": MPT event routed to LAN driver!\n"));
363 
364 	switch (le32_to_cpu(pEvReply->Event)) {
365 	case MPI_EVENT_NONE:				/* 00 */
366 	case MPI_EVENT_LOG_DATA:			/* 01 */
367 	case MPI_EVENT_STATE_CHANGE:			/* 02 */
368 	case MPI_EVENT_UNIT_ATTENTION:			/* 03 */
369 	case MPI_EVENT_IOC_BUS_RESET:			/* 04 */
370 	case MPI_EVENT_EXT_BUS_RESET:			/* 05 */
371 	case MPI_EVENT_RESCAN:				/* 06 */
372 		/* Ok, do we need to do anything here? As far as
373 		   I can tell, this is when a new device gets added
374 		   to the loop. */
375 	case MPI_EVENT_LINK_STATUS_CHANGE:		/* 07 */
376 	case MPI_EVENT_LOOP_STATE_CHANGE:		/* 08 */
377 	case MPI_EVENT_LOGOUT:				/* 09 */
378 	case MPI_EVENT_EVENT_CHANGE:			/* 0A */
379 	default:
380 		break;
381 	}
382 
383 	/*
384 	 *  NOTE: pEvent->AckRequired handling now done in mptbase.c;
385 	 *  Do NOT do it here now!
386 	 */
387 
388 	return 1;
389 }
390 
391 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
392 static int
393 mpt_lan_open(struct net_device *dev)
394 {
395 	struct mpt_lan_priv *priv = netdev_priv(dev);
396 	int i;
397 
398 	if (mpt_lan_reset(dev) != 0) {
399 		MPT_ADAPTER *mpt_dev = priv->mpt_dev;
400 
401 		printk (KERN_WARNING MYNAM "/lan_open: lan_reset failed.");
402 
403 		if (mpt_dev->active)
404 			printk ("The ioc is active. Perhaps it needs to be"
405 				" reset?\n");
406 		else
407 			printk ("The ioc in inactive, most likely in the "
408 				"process of being reset. Please try again in "
409 				"a moment.\n");
410 	}
411 
412 	priv->mpt_txfidx = kmalloc(priv->tx_max_out * sizeof(int), GFP_KERNEL);
413 	if (priv->mpt_txfidx == NULL)
414 		goto out;
415 	priv->mpt_txfidx_tail = -1;
416 
417 	priv->SendCtl = kcalloc(priv->tx_max_out, sizeof(struct BufferControl),
418 				GFP_KERNEL);
419 	if (priv->SendCtl == NULL)
420 		goto out_mpt_txfidx;
421 	for (i = 0; i < priv->tx_max_out; i++)
422 		priv->mpt_txfidx[++priv->mpt_txfidx_tail] = i;
423 
424 	dlprintk((KERN_INFO MYNAM "@lo: Finished initializing SendCtl\n"));
425 
426 	priv->mpt_rxfidx = kmalloc(priv->max_buckets_out * sizeof(int),
427 				   GFP_KERNEL);
428 	if (priv->mpt_rxfidx == NULL)
429 		goto out_SendCtl;
430 	priv->mpt_rxfidx_tail = -1;
431 
432 	priv->RcvCtl = kcalloc(priv->max_buckets_out,
433 			       sizeof(struct BufferControl),
434 			       GFP_KERNEL);
435 	if (priv->RcvCtl == NULL)
436 		goto out_mpt_rxfidx;
437 	for (i = 0; i < priv->max_buckets_out; i++)
438 		priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = i;
439 
440 /**/	dlprintk((KERN_INFO MYNAM "/lo: txfidx contains - "));
441 /**/	for (i = 0; i < priv->tx_max_out; i++)
442 /**/		dlprintk((" %xh", priv->mpt_txfidx[i]));
443 /**/	dlprintk(("\n"));
444 
445 	dlprintk((KERN_INFO MYNAM "/lo: Finished initializing RcvCtl\n"));
446 
447 	mpt_lan_post_receive_buckets(priv);
448 	printk(KERN_INFO MYNAM ": %s/%s: interface up & active\n",
449 			IOC_AND_NETDEV_NAMES_s_s(dev));
450 
451 	if (mpt_event_register(LanCtx, mpt_lan_event_process) != 0) {
452 		printk (KERN_WARNING MYNAM "/lo: Unable to register for Event"
453 			" Notifications. This is a bad thing! We're not going "
454 			"to go ahead, but I'd be leery of system stability at "
455 			"this point.\n");
456 	}
457 
458 	netif_start_queue(dev);
459 	dlprintk((KERN_INFO MYNAM "/lo: Done.\n"));
460 
461 	return 0;
462 out_mpt_rxfidx:
463 	kfree(priv->mpt_rxfidx);
464 	priv->mpt_rxfidx = NULL;
465 out_SendCtl:
466 	kfree(priv->SendCtl);
467 	priv->SendCtl = NULL;
468 out_mpt_txfidx:
469 	kfree(priv->mpt_txfidx);
470 	priv->mpt_txfidx = NULL;
471 out:	return -ENOMEM;
472 }
473 
474 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
475 /* Send a LanReset message to the FW. This should result in the FW returning
476    any buckets it still has. */
477 static int
478 mpt_lan_reset(struct net_device *dev)
479 {
480 	MPT_FRAME_HDR *mf;
481 	LANResetRequest_t *pResetReq;
482 	struct mpt_lan_priv *priv = netdev_priv(dev);
483 
484 	mf = mpt_get_msg_frame(LanCtx, priv->mpt_dev);
485 
486 	if (mf == NULL) {
487 /*		dlprintk((KERN_ERR MYNAM "/reset: Evil funkiness abounds! "
488 		"Unable to allocate a request frame.\n"));
489 */
490 		return -1;
491 	}
492 
493 	pResetReq = (LANResetRequest_t *) mf;
494 
495 	pResetReq->Function	= MPI_FUNCTION_LAN_RESET;
496 	pResetReq->ChainOffset	= 0;
497 	pResetReq->Reserved	= 0;
498 	pResetReq->PortNumber	= priv->pnum;
499 	pResetReq->MsgFlags	= 0;
500 	pResetReq->Reserved2	= 0;
501 
502 	mpt_put_msg_frame(LanCtx, priv->mpt_dev, mf);
503 
504 	return 0;
505 }
506 
507 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 * mpt_lan_close - net_device stop handler.
 * @dev: interface being brought down
 *
 * Deregisters from MPT events, stops the queue, asks the FW to return
 * its buckets, waits up to 2 seconds for them to drain, then unmaps
 * and frees every outstanding Rx and Tx buffer plus the bookkeeping
 * arrays allocated in mpt_lan_open().  Always returns 0.
 */
static int
mpt_lan_close(struct net_device *dev)
{
	struct mpt_lan_priv *priv = netdev_priv(dev);
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
	unsigned long timeout;
	int i;

	dlprintk((KERN_INFO MYNAM ": mpt_lan_close called\n"));

	mpt_event_deregister(LanCtx);

	dlprintk((KERN_INFO MYNAM ":lan_close: Posted %d buckets "
		  "since driver was loaded, %d still out\n",
		  priv->total_posted,atomic_read(&priv->buckets_out)));

	netif_stop_queue(dev);

	mpt_lan_reset(dev);

	/* Give the FW up to 2 seconds to hand back outstanding buckets. */
	timeout = jiffies + 2 * HZ;
	while (atomic_read(&priv->buckets_out) && time_before(jiffies, timeout))
		schedule_timeout_interruptible(1);

	/* Reclaim any Rx buckets the FW never returned. */
	for (i = 0; i < priv->max_buckets_out; i++) {
		if (priv->RcvCtl[i].skb != NULL) {
/**/			dlprintk((KERN_INFO MYNAM "/lan_close: bucket %05x "
/**/				  "is still out\n", i));
			pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[i].dma,
					 priv->RcvCtl[i].len,
					 PCI_DMA_FROMDEVICE);
			dev_kfree_skb(priv->RcvCtl[i].skb);
		}
	}

	kfree(priv->RcvCtl);
	kfree(priv->mpt_rxfidx);

	/* Reclaim any Tx buffers still mapped for the device. */
	for (i = 0; i < priv->tx_max_out; i++) {
		if (priv->SendCtl[i].skb != NULL) {
			pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[i].dma,
					 priv->SendCtl[i].len,
					 PCI_DMA_TODEVICE);
			dev_kfree_skb(priv->SendCtl[i].skb);
		}
	}

	kfree(priv->SendCtl);
	kfree(priv->mpt_txfidx);

	atomic_set(&priv->buckets_out, 0);

	printk(KERN_INFO MYNAM ": %s/%s: interface down & inactive\n",
			IOC_AND_NETDEV_NAMES_s_s(dev));

	return 0;
}
565 
566 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
567 static struct net_device_stats *
568 mpt_lan_get_stats(struct net_device *dev)
569 {
570 	struct mpt_lan_priv *priv = netdev_priv(dev);
571 
572 	return (struct net_device_stats *) &priv->stats;
573 }
574 
575 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
576 static int
577 mpt_lan_change_mtu(struct net_device *dev, int new_mtu)
578 {
579 	if ((new_mtu < MPT_LAN_MIN_MTU) || (new_mtu > MPT_LAN_MAX_MTU))
580 		return -EINVAL;
581 	dev->mtu = new_mtu;
582 	return 0;
583 }
584 
585 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
586 /* Tx timeout handler. */
587 static void
588 mpt_lan_tx_timeout(struct net_device *dev)
589 {
590 	struct mpt_lan_priv *priv = netdev_priv(dev);
591 	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
592 
593 	if (mpt_dev->active) {
594 		dlprintk (("mptlan/tx_timeout: calling netif_wake_queue for %s.\n", dev->name));
595 		netif_wake_queue(dev);
596 	}
597 }
598 
599 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
//static inline int
/*
 * mpt_lan_send_turbo - complete one transmit from a turbo reply.
 * @dev: interface the packet was sent on
 * @tmsg: raw 32-bit turbo message carrying the buffer context
 *
 * Updates Tx stats, unmaps and frees the sent skb, returns its
 * context to the free Tx stack and wakes the queue.  Always returns
 * 0 (lan_reply relies on this; see the note there).
 */
static int
mpt_lan_send_turbo(struct net_device *dev, u32 tmsg)
{
	struct mpt_lan_priv *priv = netdev_priv(dev);
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
	struct sk_buff *sent;
	unsigned long flags;
	u32 ctx;

	ctx = GET_LAN_BUFFER_CONTEXT(tmsg);
	sent = priv->SendCtl[ctx].skb;

	priv->stats.tx_packets++;
	priv->stats.tx_bytes += sent->len;

	dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, skb %p sent.\n",
			IOC_AND_NETDEV_NAMES_s_s(dev),
			__FUNCTION__, sent));

	/* Release the DMA mapping before freeing the skb. */
	priv->SendCtl[ctx].skb = NULL;
	pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[ctx].dma,
			 priv->SendCtl[ctx].len, PCI_DMA_TODEVICE);
	dev_kfree_skb_irq(sent);

	/* Push the context back on the free Tx stack. */
	spin_lock_irqsave(&priv->txfidx_lock, flags);
	priv->mpt_txfidx[++priv->mpt_txfidx_tail] = ctx;
	spin_unlock_irqrestore(&priv->txfidx_lock, flags);

	netif_wake_queue(dev);
	return 0;
}
632 
633 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 * mpt_lan_send_reply - complete one or more transmits from a full
 * LANSend reply frame.
 * @dev: interface the packets were sent on
 * @pSendRep: reply frame listing the completed buffer contexts
 *
 * Updates Tx stats per IOCStatus, then unmaps/frees each completed
 * skb and returns its context to the free Tx stack.  Returns 1 when
 * the caller should free the request frame (i.e. this is not a
 * continuation reply), 0 otherwise.
 */
static int
mpt_lan_send_reply(struct net_device *dev, LANSendReply_t *pSendRep)
{
	struct mpt_lan_priv *priv = netdev_priv(dev);
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
	struct sk_buff *sent;
	unsigned long flags;
	int FreeReqFrame = 0;
	u32 *pContext;
	u32 ctx;
	u8 count;

	count = pSendRep->NumberOfContexts;

	dioprintk((KERN_INFO MYNAM ": send_reply: IOCStatus: %04x\n",
		 le16_to_cpu(pSendRep->IOCStatus)));

	/* Add check for Loginfo Flag in IOCStatus */

	switch (le16_to_cpu(pSendRep->IOCStatus) & MPI_IOCSTATUS_MASK) {
	case MPI_IOCSTATUS_SUCCESS:
		priv->stats.tx_packets += count;
		break;

	case MPI_IOCSTATUS_LAN_CANCELED:
	case MPI_IOCSTATUS_LAN_TRANSMIT_ABORTED:
		break;

	case MPI_IOCSTATUS_INVALID_SGL:
		priv->stats.tx_errors += count;
		printk (KERN_ERR MYNAM ": %s/%s: ERROR - Invalid SGL sent to IOC!\n",
				IOC_AND_NETDEV_NAMES_s_s(dev));
		/* NOTE(review): this path skips the unmap/free loop below,
		 * leaving the skbs mapped in SendCtl - presumably reclaimed
		 * by mpt_lan_close()/ioc_reset; confirm. */
		goto out;

	default:
		priv->stats.tx_errors += count;
		break;
	}

	pContext = &pSendRep->BufferContext;

	/* Complete each context: unmap, free the skb, recycle the slot. */
	spin_lock_irqsave(&priv->txfidx_lock, flags);
	while (count > 0) {
		ctx = GET_LAN_BUFFER_CONTEXT(le32_to_cpu(*pContext));

		sent = priv->SendCtl[ctx].skb;
		priv->stats.tx_bytes += sent->len;

		dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, skb %p sent.\n",
				IOC_AND_NETDEV_NAMES_s_s(dev),
				__FUNCTION__, sent));

		priv->SendCtl[ctx].skb = NULL;
		pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[ctx].dma,
				 priv->SendCtl[ctx].len, PCI_DMA_TODEVICE);
		dev_kfree_skb_irq(sent);

		priv->mpt_txfidx[++priv->mpt_txfidx_tail] = ctx;

		pContext++;
		count--;
	}
	spin_unlock_irqrestore(&priv->txfidx_lock, flags);

out:
	/* Only free the request frame once the final (non-continuation)
	 * reply has arrived. */
	if (!(pSendRep->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY))
		FreeReqFrame = 1;

	netif_wake_queue(dev);
	return FreeReqFrame;
}
705 
706 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
707 static int
708 mpt_lan_sdu_send (struct sk_buff *skb, struct net_device *dev)
709 {
710 	struct mpt_lan_priv *priv = netdev_priv(dev);
711 	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
712 	MPT_FRAME_HDR *mf;
713 	LANSendRequest_t *pSendReq;
714 	SGETransaction32_t *pTrans;
715 	SGESimple64_t *pSimple;
716 	dma_addr_t dma;
717 	unsigned long flags;
718 	int ctx;
719 	u16 cur_naa = 0x1000;
720 
721 	dioprintk((KERN_INFO MYNAM ": %s called, skb_addr = %p\n",
722 			__FUNCTION__, skb));
723 
724 	spin_lock_irqsave(&priv->txfidx_lock, flags);
725 	if (priv->mpt_txfidx_tail < 0) {
726 		netif_stop_queue(dev);
727 		spin_unlock_irqrestore(&priv->txfidx_lock, flags);
728 
729 		printk (KERN_ERR "%s: no tx context available: %u\n",
730 			__FUNCTION__, priv->mpt_txfidx_tail);
731 		return 1;
732 	}
733 
734 	mf = mpt_get_msg_frame(LanCtx, mpt_dev);
735 	if (mf == NULL) {
736 		netif_stop_queue(dev);
737 		spin_unlock_irqrestore(&priv->txfidx_lock, flags);
738 
739 		printk (KERN_ERR "%s: Unable to alloc request frame\n",
740 			__FUNCTION__);
741 		return 1;
742 	}
743 
744 	ctx = priv->mpt_txfidx[priv->mpt_txfidx_tail--];
745 	spin_unlock_irqrestore(&priv->txfidx_lock, flags);
746 
747 //	dioprintk((KERN_INFO MYNAM ": %s/%s: Creating new msg frame (send).\n",
748 //			IOC_AND_NETDEV_NAMES_s_s(dev)));
749 
750 	pSendReq = (LANSendRequest_t *) mf;
751 
752 	/* Set the mac.raw pointer, since this apparently isn't getting
753 	 * done before we get the skb. Pull the data pointer past the mac data.
754 	 */
755 	skb->mac.raw = skb->data;
756 	skb_pull(skb, 12);
757 
758         dma = pci_map_single(mpt_dev->pcidev, skb->data, skb->len,
759 			     PCI_DMA_TODEVICE);
760 
761 	priv->SendCtl[ctx].skb = skb;
762 	priv->SendCtl[ctx].dma = dma;
763 	priv->SendCtl[ctx].len = skb->len;
764 
765 	/* Message Header */
766 	pSendReq->Reserved    = 0;
767 	pSendReq->Function    = MPI_FUNCTION_LAN_SEND;
768 	pSendReq->ChainOffset = 0;
769 	pSendReq->Reserved2   = 0;
770 	pSendReq->MsgFlags    = 0;
771 	pSendReq->PortNumber  = priv->pnum;
772 
773 	/* Transaction Context Element */
774 	pTrans = (SGETransaction32_t *) pSendReq->SG_List;
775 
776 	/* No Flags, 8 bytes of Details, 32bit Context (bloody turbo replies) */
777 	pTrans->ContextSize   = sizeof(u32);
778 	pTrans->DetailsLength = 2 * sizeof(u32);
779 	pTrans->Flags         = 0;
780 	pTrans->TransactionContext[0] = cpu_to_le32(ctx);
781 
782 //	dioprintk((KERN_INFO MYNAM ": %s/%s: BC = %08x, skb = %p, buff = %p\n",
783 //			IOC_AND_NETDEV_NAMES_s_s(dev),
784 //			ctx, skb, skb->data));
785 
786 #ifdef QLOGIC_NAA_WORKAROUND
787 {
788 	struct NAA_Hosed *nh;
789 
790 	/* Munge the NAA for Tx packets to QLogic boards, which don't follow
791 	   RFC 2625. The longer I look at this, the more my opinion of Qlogic
792 	   drops. */
793 	read_lock_irq(&bad_naa_lock);
794 	for (nh = mpt_bad_naa; nh != NULL; nh=nh->next) {
795 		if ((nh->ieee[0] == skb->mac.raw[0]) &&
796 		    (nh->ieee[1] == skb->mac.raw[1]) &&
797 		    (nh->ieee[2] == skb->mac.raw[2]) &&
798 		    (nh->ieee[3] == skb->mac.raw[3]) &&
799 		    (nh->ieee[4] == skb->mac.raw[4]) &&
800 		    (nh->ieee[5] == skb->mac.raw[5])) {
801 			cur_naa = nh->NAA;
802 			dlprintk ((KERN_INFO "mptlan/sdu_send: using NAA value "
803 				  "= %04x.\n", cur_naa));
804 			break;
805 		}
806 	}
807 	read_unlock_irq(&bad_naa_lock);
808 }
809 #endif
810 
811 	pTrans->TransactionDetails[0] = cpu_to_le32((cur_naa         << 16) |
812 						    (skb->mac.raw[0] <<  8) |
813 						    (skb->mac.raw[1] <<  0));
814 	pTrans->TransactionDetails[1] = cpu_to_le32((skb->mac.raw[2] << 24) |
815 						    (skb->mac.raw[3] << 16) |
816 						    (skb->mac.raw[4] <<  8) |
817 						    (skb->mac.raw[5] <<  0));
818 
819 	pSimple = (SGESimple64_t *) &pTrans->TransactionDetails[2];
820 
821 	/* If we ever decide to send more than one Simple SGE per LANSend, then
822 	   we will need to make sure that LAST_ELEMENT only gets set on the
823 	   last one. Otherwise, bad voodoo and evil funkiness will commence. */
824 	pSimple->FlagsLength = cpu_to_le32(
825 			((MPI_SGE_FLAGS_LAST_ELEMENT |
826 			  MPI_SGE_FLAGS_END_OF_BUFFER |
827 			  MPI_SGE_FLAGS_SIMPLE_ELEMENT |
828 			  MPI_SGE_FLAGS_SYSTEM_ADDRESS |
829 			  MPI_SGE_FLAGS_HOST_TO_IOC |
830 			  MPI_SGE_FLAGS_64_BIT_ADDRESSING |
831 			  MPI_SGE_FLAGS_END_OF_LIST) << MPI_SGE_FLAGS_SHIFT) |
832 			skb->len);
833 	pSimple->Address.Low = cpu_to_le32((u32) dma);
834 	if (sizeof(dma_addr_t) > sizeof(u32))
835 		pSimple->Address.High = cpu_to_le32((u32) ((u64) dma >> 32));
836 	else
837 		pSimple->Address.High = 0;
838 
839 	mpt_put_msg_frame (LanCtx, mpt_dev, mf);
840 	dev->trans_start = jiffies;
841 
842 	dioprintk((KERN_INFO MYNAM ": %s/%s: Sending packet. FlagsLength = %08x.\n",
843 			IOC_AND_NETDEV_NAMES_s_s(dev),
844 			le32_to_cpu(pSimple->FlagsLength)));
845 
846 	return 0;
847 }
848 
849 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
850 static void
851 mpt_lan_wake_post_buckets_task(struct net_device *dev, int priority)
852 /*
853  * @priority: 0 = put it on the timer queue, 1 = put it on the immediate queue
854  */
855 {
856 	struct mpt_lan_priv *priv = dev->priv;
857 
858 	if (test_and_set_bit(0, &priv->post_buckets_active) == 0) {
859 		if (priority) {
860 			schedule_delayed_work(&priv->post_buckets_task, 0);
861 		} else {
862 			schedule_delayed_work(&priv->post_buckets_task, 1);
863 			dioprintk((KERN_INFO MYNAM ": post_buckets queued on "
864 				   "timer.\n"));
865 		}
866 	        dioprintk((KERN_INFO MYNAM ": %s/%s: Queued post_buckets task.\n",
867 			   IOC_AND_NETDEV_NAMES_s_s(dev) ));
868 	}
869 }
870 
871 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
872 static int
873 mpt_lan_receive_skb(struct net_device *dev, struct sk_buff *skb)
874 {
875 	struct mpt_lan_priv *priv = dev->priv;
876 
877 	skb->protocol = mpt_lan_type_trans(skb, dev);
878 
879 	dioprintk((KERN_INFO MYNAM ": %s/%s: Incoming packet (%d bytes) "
880 		 "delivered to upper level.\n",
881 			IOC_AND_NETDEV_NAMES_s_s(dev), skb->len));
882 
883 	priv->stats.rx_bytes += skb->len;
884 	priv->stats.rx_packets++;
885 
886 	skb->dev = dev;
887 	netif_rx(skb);
888 
889 	dioprintk((MYNAM "/receive_skb: %d buckets remaining\n",
890 		 atomic_read(&priv->buckets_out)));
891 
892 	if (atomic_read(&priv->buckets_out) < priv->bucketthresh)
893 		mpt_lan_wake_post_buckets_task(dev, 1);
894 
895 	dioprintk((KERN_INFO MYNAM "/receive_post_reply: %d buckets "
896 		  "remaining, %d received back since sod\n",
897 		  atomic_read(&priv->buckets_out), priv->total_received));
898 
899 	return 0;
900 }
901 
902 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
903 //static inline int
904 static int
905 mpt_lan_receive_post_turbo(struct net_device *dev, u32 tmsg)
906 {
907 	struct mpt_lan_priv *priv = dev->priv;
908 	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
909 	struct sk_buff *skb, *old_skb;
910 	unsigned long flags;
911 	u32 ctx, len;
912 
913 	ctx = GET_LAN_BUCKET_CONTEXT(tmsg);
914 	skb = priv->RcvCtl[ctx].skb;
915 
916 	len = GET_LAN_PACKET_LENGTH(tmsg);
917 
918 	if (len < MPT_LAN_RX_COPYBREAK) {
919 		old_skb = skb;
920 
921 		skb = (struct sk_buff *)dev_alloc_skb(len);
922 		if (!skb) {
923 			printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! (%s@%d)\n",
924 					IOC_AND_NETDEV_NAMES_s_s(dev),
925 					__FILE__, __LINE__);
926 			return -ENOMEM;
927 		}
928 
929 		pci_dma_sync_single_for_cpu(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
930 					    priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
931 
932 		memcpy(skb_put(skb, len), old_skb->data, len);
933 
934 		pci_dma_sync_single_for_device(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
935 					       priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
936 		goto out;
937 	}
938 
939 	skb_put(skb, len);
940 
941 	priv->RcvCtl[ctx].skb = NULL;
942 
943 	pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
944 			 priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
945 
946 out:
947 	spin_lock_irqsave(&priv->rxfidx_lock, flags);
948 	priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
949 	spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
950 
951 	atomic_dec(&priv->buckets_out);
952 	priv->total_received++;
953 
954 	return mpt_lan_receive_skb(dev, skb);
955 }
956 
957 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static int
mpt_lan_receive_post_free(struct net_device *dev,
			  LANReceivePostReply_t *pRecvRep)
{
	/* Free receive buckets the IOC handed back with no packet data
	 * attached (reached via the MPI_IOCSTATUS_LAN_CANCELED path):
	 * unmap each bucket's DMA buffer, free its skb, and push the
	 * context index back on the free stack for reposting.
	 * Always returns 0.
	 */
	struct mpt_lan_priv *priv = dev->priv;
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
	unsigned long flags;
	struct sk_buff *skb;
	u32 ctx;
	int count;
	int i;

	/* NumberOfContexts is a single byte, so no endian conversion. */
	count = pRecvRep->NumberOfContexts;

/**/	dlprintk((KERN_INFO MYNAM "/receive_post_reply: "
		  "IOC returned %d buckets, freeing them...\n", count));

	/* rxfidx_lock protects both RcvCtl[] and the mpt_rxfidx stack. */
	spin_lock_irqsave(&priv->rxfidx_lock, flags);
	for (i = 0; i < count; i++) {
		ctx = le32_to_cpu(pRecvRep->BucketContext[i]);

		skb = priv->RcvCtl[ctx].skb;

//		dlprintk((KERN_INFO MYNAM ": %s: dev_name = %s\n",
//				IOC_AND_NETDEV_NAMES_s_s(dev)));
//		dlprintk((KERN_INFO MYNAM "@rpr[2], priv = %p, buckets_out addr = %p",
//				priv, &(priv->buckets_out)));
//		dlprintk((KERN_INFO MYNAM "@rpr[2] TC + 3\n"));

		/* Unmap before freeing; dev_kfree_skb_any() is safe in
		 * any context, including this irq-disabled section. */
		priv->RcvCtl[ctx].skb = NULL;
		pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
				 priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
		dev_kfree_skb_any(skb);

		/* Recycle the context index for the next post cycle. */
		priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
	}
	spin_unlock_irqrestore(&priv->rxfidx_lock, flags);

	atomic_sub(count, &priv->buckets_out);

//	for (i = 0; i < priv->max_buckets_out; i++)
//		if (priv->RcvCtl[i].skb != NULL)
//			dlprintk((KERN_INFO MYNAM "@rpr: bucket %03x "
//				  "is still out\n", i));

/*	dlprintk((KERN_INFO MYNAM "/receive_post_reply: freed %d buckets\n",
		  count));
*/
/**/	dlprintk((KERN_INFO MYNAM "@receive_post_reply: %d buckets "
/**/		  "remaining, %d received back since sod.\n",
/**/		  atomic_read(&priv->buckets_out), priv->total_received));
	return 0;
}
1011 
1012 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static int
mpt_lan_receive_post_reply(struct net_device *dev,
			   LANReceivePostReply_t *pRecvRep)
{
	/* Handle a non-turbo LANReceivePostReply carrying packet data.
	 *
	 * A packet may occupy one or more posted buckets:
	 *   - canceled replies carry no data and are delegated to
	 *     mpt_lan_receive_post_free();
	 *   - multi-bucket packets are copy-concatenated into a fresh skb
	 *     (buckets stay mapped and are simply recycled);
	 *   - small single-bucket packets (< MPT_LAN_RX_COPYBREAK) are
	 *     copied out so the mapped bucket can be reused as-is;
	 *   - large single-bucket packets steal the bucket's skb and
	 *     unmap it.
	 * In all data cases the bucket context indices are pushed back on
	 * the mpt_rxfidx free stack and the assembled skb is handed to
	 * mpt_lan_receive_skb().  Returns its result, 0, or a negative
	 * errno / -1 on error.
	 */
	struct mpt_lan_priv *priv = dev->priv;
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
	struct sk_buff *skb, *old_skb;
	unsigned long flags;
	u32 len, ctx, offset;
	u32 remaining = le32_to_cpu(pRecvRep->BucketsRemaining);
	int count;
	int i, l;

	dioprintk((KERN_INFO MYNAM ": mpt_lan_receive_post_reply called\n"));
	dioprintk((KERN_INFO MYNAM ": receive_post_reply: IOCStatus: %04x\n",
		 le16_to_cpu(pRecvRep->IOCStatus)));

	if ((le16_to_cpu(pRecvRep->IOCStatus) & MPI_IOCSTATUS_MASK) ==
						MPI_IOCSTATUS_LAN_CANCELED)
		return mpt_lan_receive_post_free(dev, pRecvRep);

	len = le32_to_cpu(pRecvRep->PacketLength);
	if (len == 0) {
		printk (KERN_ERR MYNAM ": %s/%s: ERROR - Got a non-TURBO "
			"ReceivePostReply w/ PacketLength zero!\n",
				IOC_AND_NETDEV_NAMES_s_s(dev));
		printk (KERN_ERR MYNAM ": MsgFlags = %02x, IOCStatus = %04x\n",
				pRecvRep->MsgFlags, le16_to_cpu(pRecvRep->IOCStatus));
		return -1;
	}

	ctx    = le32_to_cpu(pRecvRep->BucketContext[0]);
	count  = pRecvRep->NumberOfContexts;
	skb    = priv->RcvCtl[ctx].skb;

	offset = le32_to_cpu(pRecvRep->PacketOffset);
//	if (offset != 0) {
//		printk (KERN_INFO MYNAM ": %s/%s: Got a ReceivePostReply "
//			"w/ PacketOffset %u\n",
//				IOC_AND_NETDEV_NAMES_s_s(dev),
//				offset);
//	}

	dioprintk((KERN_INFO MYNAM ": %s/%s: @rpr, offset = %d, len = %d\n",
			IOC_AND_NETDEV_NAMES_s_s(dev),
			offset, len));

	if (count > 1) {
		/* Packet spans several buckets: concatenate their payloads
		 * into one freshly allocated skb. */
		int szrem = len;

//		dioprintk((KERN_INFO MYNAM ": %s/%s: Multiple buckets returned "
//			"for single packet, concatenating...\n",
//				IOC_AND_NETDEV_NAMES_s_s(dev)));

		skb = (struct sk_buff *)dev_alloc_skb(len);
		if (!skb) {
			printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! (%s@%d)\n",
					IOC_AND_NETDEV_NAMES_s_s(dev),
					__FILE__, __LINE__);
			return -ENOMEM;
		}

		spin_lock_irqsave(&priv->rxfidx_lock, flags);
		for (i = 0; i < count; i++) {

			ctx = le32_to_cpu(pRecvRep->BucketContext[i]);
			old_skb = priv->RcvCtl[ctx].skb;

			/* The last bucket holds only the remainder. */
			l = priv->RcvCtl[ctx].len;
			if (szrem < l)
				l = szrem;

//			dioprintk((KERN_INFO MYNAM ": %s/%s: Buckets = %d, len = %u\n",
//					IOC_AND_NETDEV_NAMES_s_s(dev),
//					i, l));

			/* Sync for CPU before reading DMA'd data, then give
			 * the buffer back to the device for reuse. */
			pci_dma_sync_single_for_cpu(mpt_dev->pcidev,
						    priv->RcvCtl[ctx].dma,
						    priv->RcvCtl[ctx].len,
						    PCI_DMA_FROMDEVICE);
			memcpy(skb_put(skb, l), old_skb->data, l);

			pci_dma_sync_single_for_device(mpt_dev->pcidev,
						       priv->RcvCtl[ctx].dma,
						       priv->RcvCtl[ctx].len,
						       PCI_DMA_FROMDEVICE);

			priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
			szrem -= l;
		}
		spin_unlock_irqrestore(&priv->rxfidx_lock, flags);

	} else if (len < MPT_LAN_RX_COPYBREAK) {
		/* Small packet: copy it out so the (still mapped) bucket
		 * can be reposted without a fresh map. */

		old_skb = skb;

		skb = (struct sk_buff *)dev_alloc_skb(len);
		if (!skb) {
			printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! (%s@%d)\n",
					IOC_AND_NETDEV_NAMES_s_s(dev),
					__FILE__, __LINE__);
			return -ENOMEM;
		}

		pci_dma_sync_single_for_cpu(mpt_dev->pcidev,
					    priv->RcvCtl[ctx].dma,
					    priv->RcvCtl[ctx].len,
					    PCI_DMA_FROMDEVICE);

		memcpy(skb_put(skb, len), old_skb->data, len);

		pci_dma_sync_single_for_device(mpt_dev->pcidev,
					       priv->RcvCtl[ctx].dma,
					       priv->RcvCtl[ctx].len,
					       PCI_DMA_FROMDEVICE);

		spin_lock_irqsave(&priv->rxfidx_lock, flags);
		priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
		spin_unlock_irqrestore(&priv->rxfidx_lock, flags);

	} else {
		/* Large packet: hand the bucket's own skb up the stack and
		 * unmap it; a new skb is mapped when the bucket is
		 * reposted. */
		spin_lock_irqsave(&priv->rxfidx_lock, flags);

		priv->RcvCtl[ctx].skb = NULL;

		pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
				 priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
		priv->RcvCtl[ctx].dma = 0;

		priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
		spin_unlock_irqrestore(&priv->rxfidx_lock, flags);

		skb_put(skb,len);
	}

	atomic_sub(count, &priv->buckets_out);
	priv->total_received += count;

	/* Sanity check on the free-context stack. */
	if (priv->mpt_rxfidx_tail >= MPT_LAN_MAX_BUCKETS_OUT) {
		printk (KERN_ERR MYNAM ": %s/%s: Yoohoo! mpt_rxfidx_tail = %d, "
			"MPT_LAN_MAX_BUCKETS_OUT = %d\n",
				IOC_AND_NETDEV_NAMES_s_s(dev),
				priv->mpt_rxfidx_tail,
				MPT_LAN_MAX_BUCKETS_OUT);

		return -1;
	}

	if (remaining == 0)
		printk (KERN_WARNING MYNAM ": %s/%s: WARNING - IOC out of buckets! "
			"(priv->buckets_out = %d)\n",
			IOC_AND_NETDEV_NAMES_s_s(dev),
			atomic_read(&priv->buckets_out));
	else if (remaining < 10)
		printk (KERN_INFO MYNAM ": %s/%s: IOC says %d buckets left. "
			"(priv->buckets_out = %d)\n",
			IOC_AND_NETDEV_NAMES_s_s(dev),
			remaining, atomic_read(&priv->buckets_out));

	/* A large discrepancy between the driver's bucket count and the
	 * firmware's suggests the fw has lost buckets; reset the LAN side
	 * to resynchronize and schedule a repost. */
	if ((remaining < priv->bucketthresh) &&
	    ((atomic_read(&priv->buckets_out) - remaining) >
	     MPT_LAN_BUCKETS_REMAIN_MISMATCH_THRESH)) {

		printk (KERN_WARNING MYNAM " Mismatch between driver's "
			"buckets_out count and fw's BucketsRemaining "
			"count has crossed the threshold, issuing a "
			"LanReset to clear the fw's hashtable. You may "
			"want to check your /var/log/messages for \"CRC "
			"error\" event notifications.\n");

		mpt_lan_reset(dev);
		mpt_lan_wake_post_buckets_task(dev, 0);
	}

	return mpt_lan_receive_skb(dev, skb);
}
1189 
1190 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1191 /* Simple SGE's only at the moment */
1192 
static void
mpt_lan_post_receive_buckets(struct mpt_lan_priv *priv)
{
	/* Repost receive buckets to the IOC until max_buckets_out are in
	 * flight.
	 *
	 * Builds one LANReceivePostRequest message frame per loop pass,
	 * each carrying up to `max` buckets (limited by the request frame
	 * size).  A bucket is a TransactionContext32 holding the RcvCtl
	 * index, followed by one 64-bit simple SGE pointing at a
	 * DMA-mapped skb.  Bucket skbs left mapped from a previous cycle
	 * are reused when their length still matches the current bucket
	 * length; otherwise they are unmapped, freed and remapped.
	 */
	struct net_device *dev = priv->dev;
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
	MPT_FRAME_HDR *mf;
	LANReceivePostRequest_t *pRecvReq;
	SGETransaction32_t *pTrans;
	SGESimple64_t *pSimple;
	struct sk_buff *skb;
	dma_addr_t dma;
	u32 curr, buckets, count, max;
	/* Bucket payload size: MTU + FC header + 4 bytes slack. */
	u32 len = (dev->mtu + dev->hard_header_len + 4);
	unsigned long flags;
	int i;

	curr = atomic_read(&priv->buckets_out);
	buckets = (priv->max_buckets_out - curr);

	dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, Start_buckets = %u, buckets_out = %u\n",
			IOC_AND_NETDEV_NAMES_s_s(dev),
			__FUNCTION__, buckets, curr));

	/* How many context+SGE pairs fit in one request frame. */
	max = (mpt_dev->req_sz - MPT_LAN_RECEIVE_POST_REQUEST_SIZE) /
			(MPT_LAN_TRANSACTION32_SIZE + sizeof(SGESimple64_t));

	while (buckets) {
		mf = mpt_get_msg_frame(LanCtx, mpt_dev);
		if (mf == NULL) {
			printk (KERN_ERR "%s: Unable to alloc request frame\n",
				__FUNCTION__);
			dioprintk((KERN_ERR "%s: %u buckets remaining\n",
				 __FUNCTION__, buckets));
			goto out;
		}
		pRecvReq = (LANReceivePostRequest_t *) mf;

		count = buckets;
		if (count > max)
			count = max;

		pRecvReq->Function    = MPI_FUNCTION_LAN_RECEIVE;
		pRecvReq->ChainOffset = 0;
		pRecvReq->MsgFlags    = 0;
		pRecvReq->PortNumber  = priv->pnum;

		pTrans = (SGETransaction32_t *) pRecvReq->SG_List;
		pSimple = NULL;

		for (i = 0; i < count; i++) {
			int ctx;

			spin_lock_irqsave(&priv->rxfidx_lock, flags);
			if (priv->mpt_rxfidx_tail < 0) {
				/* Free-context stack is exhausted. */
				printk (KERN_ERR "%s: Can't alloc context\n",
					__FUNCTION__);
				spin_unlock_irqrestore(&priv->rxfidx_lock,
						       flags);
				break;
			}

			ctx = priv->mpt_rxfidx[priv->mpt_rxfidx_tail--];

			/* A leftover skb of the wrong size (bucket length
			 * changed, e.g. after an MTU change) must be
			 * unmapped and freed before remapping. */
			skb = priv->RcvCtl[ctx].skb;
			if (skb && (priv->RcvCtl[ctx].len != len)) {
				pci_unmap_single(mpt_dev->pcidev,
						 priv->RcvCtl[ctx].dma,
						 priv->RcvCtl[ctx].len,
						 PCI_DMA_FROMDEVICE);
				dev_kfree_skb(priv->RcvCtl[ctx].skb);
				skb = priv->RcvCtl[ctx].skb = NULL;
			}

			if (skb == NULL) {
				skb = dev_alloc_skb(len);
				if (skb == NULL) {
					printk (KERN_WARNING
						MYNAM "/%s: Can't alloc skb\n",
						__FUNCTION__);
					/* Put the context back for later. */
					priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
					spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
					break;
				}

				dma = pci_map_single(mpt_dev->pcidev, skb->data,
						     len, PCI_DMA_FROMDEVICE);

				priv->RcvCtl[ctx].skb = skb;
				priv->RcvCtl[ctx].dma = dma;
				priv->RcvCtl[ctx].len = len;
			}

			spin_unlock_irqrestore(&priv->rxfidx_lock, flags);

			/* The transaction context carries the bucket index
			 * so the reply can locate this skb again. */
			pTrans->ContextSize   = sizeof(u32);
			pTrans->DetailsLength = 0;
			pTrans->Flags         = 0;
			pTrans->TransactionContext[0] = cpu_to_le32(ctx);

			pSimple = (SGESimple64_t *) pTrans->TransactionDetails;

			pSimple->FlagsLength = cpu_to_le32(
				((MPI_SGE_FLAGS_END_OF_BUFFER |
				  MPI_SGE_FLAGS_SIMPLE_ELEMENT |
				  MPI_SGE_FLAGS_64_BIT_ADDRESSING) << MPI_SGE_FLAGS_SHIFT) | len);
			pSimple->Address.Low = cpu_to_le32((u32) priv->RcvCtl[ctx].dma);
			if (sizeof(dma_addr_t) > sizeof(u32))
				pSimple->Address.High = cpu_to_le32((u32) ((u64) priv->RcvCtl[ctx].dma >> 32));
			else
				pSimple->Address.High = 0;

			pTrans = (SGETransaction32_t *) (pSimple + 1);
		}

		if (pSimple == NULL) {
/**/			printk (KERN_WARNING MYNAM "/%s: No buckets posted\n",
/**/				__FUNCTION__);
			mpt_free_msg_frame(mpt_dev, mf);
			goto out;
		}

		/* Mark the final SGE of the request. */
		pSimple->FlagsLength |= cpu_to_le32(MPI_SGE_FLAGS_END_OF_LIST << MPI_SGE_FLAGS_SHIFT);

		pRecvReq->BucketCount = cpu_to_le32(i);

/*	printk(KERN_INFO MYNAM ": posting buckets\n   ");
 *	for (i = 0; i < j + 2; i ++)
 *	    printk (" %08x", le32_to_cpu(msg[i]));
 *	printk ("\n");
 */

		mpt_put_msg_frame(LanCtx, mpt_dev, mf);

		priv->total_posted += i;
		buckets -= i;
		atomic_add(i, &priv->buckets_out);
	}

out:
	dioprintk((KERN_INFO MYNAM "/%s: End_buckets = %u, priv->buckets_out = %u\n",
		  __FUNCTION__, buckets, atomic_read(&priv->buckets_out)));
	dioprintk((KERN_INFO MYNAM "/%s: Posted %u buckets and received %u back\n",
	__FUNCTION__, priv->total_posted, priv->total_received));

	clear_bit(0, &priv->post_buckets_active);
}
1339 
1340 static void
1341 mpt_lan_post_receive_buckets_work(struct work_struct *work)
1342 {
1343 	mpt_lan_post_receive_buckets(container_of(work, struct mpt_lan_priv,
1344 						  post_buckets_task.work));
1345 }
1346 
1347 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static struct net_device *
mpt_register_lan_device (MPT_ADAPTER *mpt_dev, int pnum)
{
	/* Allocate, initialize and register one IP-over-FC net_device for
	 * port `pnum` of adapter `mpt_dev`.  Returns the registered
	 * device, or NULL on allocation/registration failure.
	 */
	struct net_device *dev = alloc_fcdev(sizeof(struct mpt_lan_priv));
	struct mpt_lan_priv *priv = NULL;
	u8 HWaddr[FC_ALEN], *a;

	if (!dev)
		return NULL;

	dev->mtu = MPT_LAN_MTU;

	priv = netdev_priv(dev);

	priv->dev = dev;
	priv->mpt_dev = mpt_dev;
	priv->pnum = pnum;

	memset(&priv->post_buckets_task, 0, sizeof(priv->post_buckets_task));
	INIT_DELAYED_WORK(&priv->post_buckets_task,
			  mpt_lan_post_receive_buckets_work);
	priv->post_buckets_active = 0;

	dlprintk((KERN_INFO MYNAM "@%d: bucketlen = %d\n",
			__LINE__, dev->mtu + dev->hard_header_len + 4));

	atomic_set(&priv->buckets_out, 0);
	priv->total_posted = 0;
	priv->total_received = 0;
	/* Cap the module parameter by what the firmware supports. */
	priv->max_buckets_out = max_buckets_out;
	if (mpt_dev->pfacts[0].MaxLanBuckets < max_buckets_out)
		priv->max_buckets_out = mpt_dev->pfacts[0].MaxLanBuckets;

	dlprintk((KERN_INFO MYNAM "@%d: MaxLanBuckets=%d, max_buckets_out/priv=%d/%d\n",
			__LINE__,
			mpt_dev->pfacts[0].MaxLanBuckets,
			max_buckets_out,
			priv->max_buckets_out));

	priv->bucketthresh = priv->max_buckets_out * 2 / 3;
	spin_lock_init(&priv->txfidx_lock);
	spin_lock_init(&priv->rxfidx_lock);

	memset(&priv->stats, 0, sizeof(priv->stats));

	/*  Grab pre-fetched LANPage1 stuff. :-) */
	a = (u8 *) &mpt_dev->lan_cnfg_page1.HardwareAddressLow;

	/* Byte-reverse the address as stored in the config page to build
	 * the MAC address. */
	HWaddr[0] = a[5];
	HWaddr[1] = a[4];
	HWaddr[2] = a[3];
	HWaddr[3] = a[2];
	HWaddr[4] = a[1];
	HWaddr[5] = a[0];

	dev->addr_len = FC_ALEN;
	memcpy(dev->dev_addr, HWaddr, FC_ALEN);
	memset(dev->broadcast, 0xff, FC_ALEN);

	/* The Tx queue is 127 deep on the 909.
	 * Give ourselves some breathing room.
	 */
	priv->tx_max_out = (tx_max_out_p <= MPT_TX_MAX_OUT_LIM) ?
			    tx_max_out_p : MPT_TX_MAX_OUT_LIM;

	dev->open = mpt_lan_open;
	dev->stop = mpt_lan_close;
	dev->get_stats = mpt_lan_get_stats;
	dev->set_multicast_list = NULL;
	dev->change_mtu = mpt_lan_change_mtu;
	dev->hard_start_xmit = mpt_lan_sdu_send;

/* Not in 2.3.42. Need 2.3.45+ */
	dev->tx_timeout = mpt_lan_tx_timeout;
	dev->watchdog_timeo = MPT_LAN_TX_TIMEOUT;

	dlprintk((KERN_INFO MYNAM ": Finished registering dev "
		"and setting initial values\n"));

	SET_MODULE_OWNER(dev);

	if (register_netdev(dev) != 0) {
		free_netdev(dev);
		dev = NULL;
	}
	return dev;
}
1435 
1436 static int
1437 mptlan_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1438 {
1439 	MPT_ADAPTER 		*ioc = pci_get_drvdata(pdev);
1440 	struct net_device	*dev;
1441 	int			i;
1442 
1443 	for (i = 0; i < ioc->facts.NumberOfPorts; i++) {
1444 		printk(KERN_INFO MYNAM ": %s: PortNum=%x, "
1445 		       "ProtocolFlags=%02Xh (%c%c%c%c)\n",
1446 		       ioc->name, ioc->pfacts[i].PortNumber,
1447 		       ioc->pfacts[i].ProtocolFlags,
1448 		       MPT_PROTOCOL_FLAGS_c_c_c_c(
1449 			       ioc->pfacts[i].ProtocolFlags));
1450 
1451 		if (!(ioc->pfacts[i].ProtocolFlags &
1452 					MPI_PORTFACTS_PROTOCOL_LAN)) {
1453 			printk(KERN_INFO MYNAM ": %s: Hmmm... LAN protocol "
1454 			       "seems to be disabled on this adapter port!\n",
1455 			       ioc->name);
1456 			continue;
1457 		}
1458 
1459 		dev = mpt_register_lan_device(ioc, i);
1460 		if (!dev) {
1461 			printk(KERN_ERR MYNAM ": %s: Unable to register "
1462 			       "port%d as a LAN device\n", ioc->name,
1463 			       ioc->pfacts[i].PortNumber);
1464 			continue;
1465 		}
1466 
1467 		printk(KERN_INFO MYNAM ": %s: Fusion MPT LAN device "
1468 		       "registered as '%s'\n", ioc->name, dev->name);
1469 		printk(KERN_INFO MYNAM ": %s/%s: "
1470 		       "LanAddr = %02X:%02X:%02X:%02X:%02X:%02X\n",
1471 		       IOC_AND_NETDEV_NAMES_s_s(dev),
1472 		       dev->dev_addr[0], dev->dev_addr[1],
1473 		       dev->dev_addr[2], dev->dev_addr[3],
1474 		       dev->dev_addr[4], dev->dev_addr[5]);
1475 
1476 		ioc->netdev = dev;
1477 
1478 		return 0;
1479 	}
1480 
1481 	return -ENODEV;
1482 }
1483 
1484 static void
1485 mptlan_remove(struct pci_dev *pdev)
1486 {
1487 	MPT_ADAPTER 		*ioc = pci_get_drvdata(pdev);
1488 	struct net_device	*dev = ioc->netdev;
1489 
1490 	if(dev != NULL) {
1491 		unregister_netdev(dev);
1492 		free_netdev(dev);
1493 	}
1494 }
1495 
/* Per-adapter hooks invoked by the MPT base driver. */
static struct mpt_pci_driver mptlan_driver = {
	.probe		= mptlan_probe,
	.remove		= mptlan_remove,
};
1500 
1501 static int __init mpt_lan_init (void)
1502 {
1503 	show_mptmod_ver(LANAME, LANVER);
1504 
1505 	if ((LanCtx = mpt_register(lan_reply, MPTLAN_DRIVER)) <= 0) {
1506 		printk (KERN_ERR MYNAM ": Failed to register with MPT base driver\n");
1507 		return -EBUSY;
1508 	}
1509 
1510 	/* Set the callback index to be used by driver core for turbo replies */
1511 	mpt_lan_index = LanCtx;
1512 
1513 	dlprintk((KERN_INFO MYNAM ": assigned context of %d\n", LanCtx));
1514 
1515 	if (mpt_reset_register(LanCtx, mpt_lan_ioc_reset)) {
1516 		printk(KERN_ERR MYNAM ": Eieee! unable to register a reset "
1517 		       "handler with mptbase! The world is at an end! "
1518 		       "Everything is fading to black! Goodbye.\n");
1519 		return -EBUSY;
1520 	}
1521 
1522 	dlprintk((KERN_INFO MYNAM ": Registered for IOC reset notifications\n"));
1523 
1524 	if (mpt_device_driver_register(&mptlan_driver, MPTLAN_DRIVER))
1525 		dprintk((KERN_INFO MYNAM ": failed to register dd callbacks\n"));
1526 	return 0;
1527 }
1528 
1529 static void __exit mpt_lan_exit(void)
1530 {
1531 	mpt_device_driver_deregister(MPTLAN_DRIVER);
1532 	mpt_reset_deregister(LanCtx);
1533 
1534 	if (LanCtx >= 0) {
1535 		mpt_deregister(LanCtx);
1536 		LanCtx = -1;
1537 		mpt_lan_index = 0;
1538 	}
1539 }
1540 
/* Module entry and exit points. */
module_init(mpt_lan_init);
module_exit(mpt_lan_exit);
1543 
1544 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1545 static unsigned short
1546 mpt_lan_type_trans(struct sk_buff *skb, struct net_device *dev)
1547 {
1548 	struct mpt_lan_ohdr *fch = (struct mpt_lan_ohdr *)skb->data;
1549 	struct fcllc *fcllc;
1550 
1551 	skb->mac.raw = skb->data;
1552 	skb_pull(skb, sizeof(struct mpt_lan_ohdr));
1553 
1554 	if (fch->dtype == htons(0xffff)) {
1555 		u32 *p = (u32 *) fch;
1556 
1557 		swab32s(p + 0);
1558 		swab32s(p + 1);
1559 		swab32s(p + 2);
1560 		swab32s(p + 3);
1561 
1562 		printk (KERN_WARNING MYNAM ": %s: WARNING - Broadcast swap F/W bug detected!\n",
1563 				NETDEV_PTR_TO_IOC_NAME_s(dev));
1564 		printk (KERN_WARNING MYNAM ": Please update sender @ MAC_addr = %02x:%02x:%02x:%02x:%02x:%02x\n",
1565 				fch->saddr[0], fch->saddr[1], fch->saddr[2],
1566 				fch->saddr[3], fch->saddr[4], fch->saddr[5]);
1567 	}
1568 
1569 	if (*fch->daddr & 1) {
1570 		if (!memcmp(fch->daddr, dev->broadcast, FC_ALEN)) {
1571 			skb->pkt_type = PACKET_BROADCAST;
1572 		} else {
1573 			skb->pkt_type = PACKET_MULTICAST;
1574 		}
1575 	} else {
1576 		if (memcmp(fch->daddr, dev->dev_addr, FC_ALEN)) {
1577 			skb->pkt_type = PACKET_OTHERHOST;
1578 		} else {
1579 			skb->pkt_type = PACKET_HOST;
1580 		}
1581 	}
1582 
1583 	fcllc = (struct fcllc *)skb->data;
1584 
1585 #ifdef QLOGIC_NAA_WORKAROUND
1586 {
1587 	u16 source_naa = fch->stype, found = 0;
1588 
1589 	/* Workaround for QLogic not following RFC 2625 in regards to the NAA
1590 	   value. */
1591 
1592 	if ((source_naa & 0xF000) == 0)
1593 		source_naa = swab16(source_naa);
1594 
1595 	if (fcllc->ethertype == htons(ETH_P_ARP))
1596 	    dlprintk ((KERN_INFO "mptlan/type_trans: got arp req/rep w/ naa of "
1597 		      "%04x.\n", source_naa));
1598 
1599 	if ((fcllc->ethertype == htons(ETH_P_ARP)) &&
1600 	   ((source_naa >> 12) !=  MPT_LAN_NAA_RFC2625)){
1601 		struct NAA_Hosed *nh, *prevnh;
1602 		int i;
1603 
1604 		dlprintk ((KERN_INFO "mptlan/type_trans: ARP Req/Rep from "
1605 			  "system with non-RFC 2625 NAA value (%04x).\n",
1606 			  source_naa));
1607 
1608 		write_lock_irq(&bad_naa_lock);
1609 		for (prevnh = nh = mpt_bad_naa; nh != NULL;
1610 		     prevnh=nh, nh=nh->next) {
1611 			if ((nh->ieee[0] == fch->saddr[0]) &&
1612 			    (nh->ieee[1] == fch->saddr[1]) &&
1613 			    (nh->ieee[2] == fch->saddr[2]) &&
1614 			    (nh->ieee[3] == fch->saddr[3]) &&
1615 			    (nh->ieee[4] == fch->saddr[4]) &&
1616 			    (nh->ieee[5] == fch->saddr[5])) {
1617 				found = 1;
1618 				dlprintk ((KERN_INFO "mptlan/type_trans: ARP Re"
1619 					 "q/Rep w/ bad NAA from system already"
1620 					 " in DB.\n"));
1621 				break;
1622 			}
1623 		}
1624 
1625 		if ((!found) && (nh == NULL)) {
1626 
1627 			nh = kmalloc(sizeof(struct NAA_Hosed), GFP_KERNEL);
1628 			dlprintk ((KERN_INFO "mptlan/type_trans: ARP Req/Rep w/"
1629 				 " bad NAA from system not yet in DB.\n"));
1630 
1631 			if (nh != NULL) {
1632 				nh->next = NULL;
1633 				if (!mpt_bad_naa)
1634 					mpt_bad_naa = nh;
1635 				if (prevnh)
1636 					prevnh->next = nh;
1637 
1638 				nh->NAA = source_naa; /* Set the S_NAA value. */
1639 				for (i = 0; i < FC_ALEN; i++)
1640 					nh->ieee[i] = fch->saddr[i];
1641 				dlprintk ((KERN_INFO "Got ARP from %02x:%02x:%02x:%02x:"
1642 					  "%02x:%02x with non-compliant S_NAA value.\n",
1643 					  fch->saddr[0], fch->saddr[1], fch->saddr[2],
1644 					  fch->saddr[3], fch->saddr[4],fch->saddr[5]));
1645 			} else {
1646 				printk (KERN_ERR "mptlan/type_trans: Unable to"
1647 					" kmalloc a NAA_Hosed struct.\n");
1648 			}
1649 		} else if (!found) {
1650 			printk (KERN_ERR "mptlan/type_trans: found not"
1651 				" set, but nh isn't null. Evil "
1652 				"funkiness abounds.\n");
1653 		}
1654 		write_unlock_irq(&bad_naa_lock);
1655 	}
1656 }
1657 #endif
1658 
1659 	/* Strip the SNAP header from ARP packets since we don't
1660 	 * pass them through to the 802.2/SNAP layers.
1661 	 */
1662 	if (fcllc->dsap == EXTENDED_SAP &&
1663 		(fcllc->ethertype == htons(ETH_P_IP) ||
1664 		 fcllc->ethertype == htons(ETH_P_ARP))) {
1665 		skb_pull(skb, sizeof(struct fcllc));
1666 		return fcllc->ethertype;
1667 	}
1668 
1669 	return htons(ETH_P_802_2);
1670 }
1671 
1672 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1673