/*
 *  linux/drivers/message/fusion/mptlan.c
 *      IP Over Fibre Channel device driver.
 *      For use with LSI Fibre Channel PCI chip/adapters
 *      running LSI Fusion MPT (Message Passing Technology) firmware.
 *
 *  Copyright (c) 2000-2008 LSI Corporation
 *  (mailto:DL-MPTFusionLinux@lsi.com)
 *
 */
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; version 2 of the License.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    NO WARRANTY
    THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
    CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
    LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
    solely responsible for determining the appropriateness of using and
    distributing the Program and assumes all risks associated with its
    exercise of rights under this Agreement, including but not limited to
    the risks and costs of program errors, damage to or loss of data,
    programs or equipment, and unavailability or interruption of operations.

    DISCLAIMER OF LIABILITY
    NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
    DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
    DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
    ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
    TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
    USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
    HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
*/

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 * Define statements used for debugging
 */
//#define MPT_LAN_IO_DEBUG

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/

#include "mptlan.h"
#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/slab.h>

#define my_VERSION	MPT_LINUX_VERSION_COMMON
#define MYNAM		"mptlan"

MODULE_LICENSE("GPL");
MODULE_VERSION(my_VERSION);

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 * MPT LAN message sizes without variable part.
 */
#define MPT_LAN_RECEIVE_POST_REQUEST_SIZE \
	(sizeof(LANReceivePostRequest_t) - sizeof(SGE_MPI_UNION))
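/*
 * The variable part is the SGE list appended per posted bucket;
 * mpt_lan_post_receive_buckets() uses this fixed size to work out how
 * many buckets fit into one request frame.
 */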

/*
 *  Fusion MPT LAN private structures
 */

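/*
 * Per-buffer bookkeeping, one entry per Tx context (SendCtl) or posted Rx
 * bucket (RcvCtl): the skb handed to the IOC, its streaming DMA mapping,
 * and the mapped length.
 */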
struct BufferControl {
	struct sk_buff	*skb;
	dma_addr_t	dma;
	unsigned int	len;
};

struct mpt_lan_priv {
	MPT_ADAPTER *mpt_dev;
	u8 pnum; /* Port number in the IOC. This is not a Unix network port! */

	atomic_t buckets_out;		/* number of unused buckets on IOC */
	int bucketthresh;		/* Send more when this many left */

	int *mpt_txfidx; /* Free Tx Context list */
	int mpt_txfidx_tail;
	spinlock_t txfidx_lock;

	int *mpt_rxfidx; /* Free Rx Context list */
	int mpt_rxfidx_tail;
	spinlock_t rxfidx_lock;

	struct BufferControl *RcvCtl;	/* Receive BufferControl structs */
	struct BufferControl *SendCtl;	/* Send BufferControl structs */

	int max_buckets_out;		/* Max buckets to send to IOC */
	int tx_max_out;			/* IOC's Tx queue len */

	u32 total_posted;
	u32 total_received;

	struct delayed_work post_buckets_task;
	struct net_device *dev;
	unsigned long post_buckets_active;
};

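/*
 * Header at the front of each received LAN SDU: destination and source
 * FC addresses plus two type words.  Parsed and stripped by
 * mpt_lan_type_trans() below.
 */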
struct mpt_lan_ohdr {
	u16	dtype;
	u8	daddr[FC_ALEN];
	u16	stype;
	u8	saddr[FC_ALEN];
};

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/

/*
 *  Forward protos...
 */
static int  lan_reply (MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf,
		       MPT_FRAME_HDR *reply);
static int  mpt_lan_open(struct net_device *dev);
static int  mpt_lan_reset(struct net_device *dev);
static int  mpt_lan_close(struct net_device *dev);
static void mpt_lan_post_receive_buckets(struct mpt_lan_priv *priv);
static void mpt_lan_wake_post_buckets_task(struct net_device *dev,
					   int priority);
static int  mpt_lan_receive_post_turbo(struct net_device *dev, u32 tmsg);
static int  mpt_lan_receive_post_reply(struct net_device *dev,
				       LANReceivePostReply_t *pRecvRep);
static int  mpt_lan_send_turbo(struct net_device *dev, u32 tmsg);
static int  mpt_lan_send_reply(struct net_device *dev,
			       LANSendReply_t *pSendRep);
static int  mpt_lan_ioc_reset(MPT_ADAPTER *ioc, int reset_phase);
static int  mpt_lan_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply);
static unsigned short mpt_lan_type_trans(struct sk_buff *skb,
					 struct net_device *dev);

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 *  Fusion MPT LAN private data
 */
static u8 LanCtx = MPT_MAX_PROTOCOL_DRIVERS;

static u32 max_buckets_out = 127;
static u32 tx_max_out_p = 127 - 16;
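/*
 * Module defaults: post up to 127 receive buckets to the IOC (clamped to
 * the port's MaxLanBuckets in mpt_register_lan_device()) and allow
 * 127 - 16 outstanding sends; the 909's Tx queue is 127 deep, so this
 * leaves some headroom.
 */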

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
 *	lan_reply - Handle all data sent from the hardware.
 *	@ioc: Pointer to MPT_ADAPTER structure
 *	@mf: Pointer to original MPT request frame (NULL if TurboReply)
 *	@reply: Pointer to MPT reply frame
 *
 *	Returns 1 indicating original alloc'd request frame ptr
 *	should be freed, or 0 if it shouldn't.
 */
static int
lan_reply (MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *reply)
{
	struct net_device *dev = ioc->netdev;
	int FreeReqFrame = 0;

	dioprintk((KERN_INFO MYNAM ": %s/%s: Got reply.\n",
		  IOC_AND_NETDEV_NAMES_s_s(dev)));

//	dioprintk((KERN_INFO MYNAM "@lan_reply: mf = %p, reply = %p\n",
//			mf, reply));

	if (mf == NULL) {
		u32 tmsg = CAST_PTR_TO_U32(reply);

		dioprintk((KERN_INFO MYNAM ": %s/%s: @lan_reply, tmsg %08x\n",
				IOC_AND_NETDEV_NAMES_s_s(dev),
				tmsg));

		switch (GET_LAN_FORM(tmsg)) {

		// NOTE!  (Optimization) First case here is now caught in
		//  mptbase.c::mpt_interrupt() routine and callback here
		//  is now skipped for this case!
#if 0
		case LAN_REPLY_FORM_MESSAGE_CONTEXT:
//			dioprintk((KERN_INFO MYNAM "/lan_reply: "
//				  "MessageContext turbo reply received\n"));
			FreeReqFrame = 1;
			break;
#endif

		case LAN_REPLY_FORM_SEND_SINGLE:
//			dioprintk((MYNAM "/lan_reply: "
//				  "calling mpt_lan_send_reply (turbo)\n"));

			// Potential BUG here?
			//	FreeReqFrame = mpt_lan_send_turbo(dev, tmsg);
			//  If/when mpt_lan_send_turbo would return 1 here,
			//  calling routine (mptbase.c|mpt_interrupt)
			//  would Oops because mf has already been set
			//  to NULL.  So after return from this func,
			//  mpt_interrupt() will attempt to put (NULL) mf ptr
			//  item back onto its adapter FreeQ - Oops!:-(
			//  It's Ok, since mpt_lan_send_turbo() *currently*
			//  always returns 0, but..., just in case:

			(void) mpt_lan_send_turbo(dev, tmsg);
			FreeReqFrame = 0;

			break;

		case LAN_REPLY_FORM_RECEIVE_SINGLE:
//			dioprintk((KERN_INFO MYNAM "@lan_reply: "
//				  "rcv-Turbo = %08x\n", tmsg));
			mpt_lan_receive_post_turbo(dev, tmsg);
			break;

		default:
			printk (KERN_ERR MYNAM "/lan_reply: Got a turbo reply "
				"that I don't know what to do with\n");

			/* CHECKME!  Hmmm...  FreeReqFrame is 0 here; is that right? */

			break;
		}

		return FreeReqFrame;
	}

//	msg = (u32 *) reply;
//	dioprintk((KERN_INFO MYNAM "@lan_reply: msg = %08x %08x %08x %08x\n",
//		  le32_to_cpu(msg[0]), le32_to_cpu(msg[1]),
//		  le32_to_cpu(msg[2]), le32_to_cpu(msg[3])));
//	dioprintk((KERN_INFO MYNAM "@lan_reply: Function = %02xh\n",
//		  reply->u.hdr.Function));

	switch (reply->u.hdr.Function) {

	case MPI_FUNCTION_LAN_SEND:
	{
		LANSendReply_t *pSendRep;

		pSendRep = (LANSendReply_t *) reply;
		FreeReqFrame = mpt_lan_send_reply(dev, pSendRep);
		break;
	}

	case MPI_FUNCTION_LAN_RECEIVE:
	{
		LANReceivePostReply_t *pRecvRep;

		pRecvRep = (LANReceivePostReply_t *) reply;
		if (pRecvRep->NumberOfContexts) {
			mpt_lan_receive_post_reply(dev, pRecvRep);
			if (!(pRecvRep->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY))
				FreeReqFrame = 1;
		} else
			dioprintk((KERN_INFO MYNAM "@lan_reply: zero context "
				  "ReceivePostReply received.\n"));
		break;
	}

	case MPI_FUNCTION_LAN_RESET:
		/* Just a default reply. Might want to check it to
		 * make sure that everything went ok.
		 */
		FreeReqFrame = 1;
		break;

	case MPI_FUNCTION_EVENT_NOTIFICATION:
	case MPI_FUNCTION_EVENT_ACK:
		/*  _EVENT_NOTIFICATION should NOT come down this path any more.
		 *  Should be routed to mpt_lan_event_process(), but just in case...
		 */
		FreeReqFrame = 1;
		break;

	default:
		printk (KERN_ERR MYNAM "/lan_reply: Got a non-turbo "
			"reply that I don't know what to do with\n");

		/* CHECKME!  Hmmm...  FreeReqFrame is 0 here; is that right? */
		FreeReqFrame = 1;

		break;
	}

	return FreeReqFrame;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static int
mpt_lan_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
{
	struct net_device *dev = ioc->netdev;
	struct mpt_lan_priv *priv;

	if (dev == NULL)
		return(1);
	else
		priv = netdev_priv(dev);

	dlprintk((KERN_INFO MYNAM ": IOC %s_reset routed to LAN driver!\n",
			reset_phase==MPT_IOC_SETUP_RESET ? "setup" : (
			reset_phase==MPT_IOC_PRE_RESET ? "pre" : "post")));

	if (priv->mpt_rxfidx == NULL)
		return (1);

	if (reset_phase == MPT_IOC_SETUP_RESET) {
		;
	} else if (reset_phase == MPT_IOC_PRE_RESET) {
		int i;
		unsigned long flags;

		netif_stop_queue(dev);

		dlprintk ((KERN_INFO "mptlan/ioc_reset: called netif_stop_queue for %s.\n", dev->name));

		atomic_set(&priv->buckets_out, 0);

		/* Reset Rx Free Tail index and re-populate the queue. */
		spin_lock_irqsave(&priv->rxfidx_lock, flags);
		priv->mpt_rxfidx_tail = -1;
		for (i = 0; i < priv->max_buckets_out; i++)
			priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = i;
		spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
	} else {
		mpt_lan_post_receive_buckets(priv);
		netif_wake_queue(dev);
	}

	return 1;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static int
mpt_lan_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
{
	dlprintk((KERN_INFO MYNAM ": MPT event routed to LAN driver!\n"));

	switch (le32_to_cpu(pEvReply->Event)) {
	case MPI_EVENT_NONE:				/* 00 */
	case MPI_EVENT_LOG_DATA:			/* 01 */
	case MPI_EVENT_STATE_CHANGE:			/* 02 */
	case MPI_EVENT_UNIT_ATTENTION:			/* 03 */
	case MPI_EVENT_IOC_BUS_RESET:			/* 04 */
	case MPI_EVENT_EXT_BUS_RESET:			/* 05 */
	case MPI_EVENT_RESCAN:				/* 06 */
		/* Ok, do we need to do anything here? As far as
		   I can tell, this is when a new device gets added
		   to the loop. */
	case MPI_EVENT_LINK_STATUS_CHANGE:		/* 07 */
	case MPI_EVENT_LOOP_STATE_CHANGE:		/* 08 */
	case MPI_EVENT_LOGOUT:				/* 09 */
	case MPI_EVENT_EVENT_CHANGE:			/* 0A */
	default:
		break;
	}

	/*
	 *  NOTE: pEvent->AckRequired handling now done in mptbase.c;
	 *  Do NOT do it here now!
	 */

	return 1;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static int
mpt_lan_open(struct net_device *dev)
{
	struct mpt_lan_priv *priv = netdev_priv(dev);
	int i;

	if (mpt_lan_reset(dev) != 0) {
		MPT_ADAPTER *mpt_dev = priv->mpt_dev;

		printk (KERN_WARNING MYNAM "/lan_open: lan_reset failed.");

		if (mpt_dev->active)
			printk ("The ioc is active. Perhaps it needs to be"
				" reset?\n");
		else
			printk ("The ioc is inactive, most likely in the "
				"process of being reset. Please try again in "
				"a moment.\n");
	}

	priv->mpt_txfidx = kmalloc_array(priv->tx_max_out, sizeof(int),
					 GFP_KERNEL);
	if (priv->mpt_txfidx == NULL)
		goto out;
	priv->mpt_txfidx_tail = -1;

	priv->SendCtl = kcalloc(priv->tx_max_out, sizeof(struct BufferControl),
				GFP_KERNEL);
	if (priv->SendCtl == NULL)
		goto out_mpt_txfidx;
	for (i = 0; i < priv->tx_max_out; i++)
		priv->mpt_txfidx[++priv->mpt_txfidx_tail] = i;

	dlprintk((KERN_INFO MYNAM "@lo: Finished initializing SendCtl\n"));

	priv->mpt_rxfidx = kmalloc_array(priv->max_buckets_out, sizeof(int),
					 GFP_KERNEL);
	if (priv->mpt_rxfidx == NULL)
		goto out_SendCtl;
	priv->mpt_rxfidx_tail = -1;

	priv->RcvCtl = kcalloc(priv->max_buckets_out,
			       sizeof(struct BufferControl),
			       GFP_KERNEL);
	if (priv->RcvCtl == NULL)
		goto out_mpt_rxfidx;
	for (i = 0; i < priv->max_buckets_out; i++)
		priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = i;

/**/	dlprintk((KERN_INFO MYNAM "/lo: txfidx contains - "));
/**/	for (i = 0; i < priv->tx_max_out; i++)
/**/		dlprintk((" %xh", priv->mpt_txfidx[i]));
/**/	dlprintk(("\n"));

	dlprintk((KERN_INFO MYNAM "/lo: Finished initializing RcvCtl\n"));

	mpt_lan_post_receive_buckets(priv);
	printk(KERN_INFO MYNAM ": %s/%s: interface up & active\n",
			IOC_AND_NETDEV_NAMES_s_s(dev));

	if (mpt_event_register(LanCtx, mpt_lan_event_process) != 0) {
		printk (KERN_WARNING MYNAM "/lo: Unable to register for Event"
			" Notifications. This is a bad thing! We're not going "
			"to go ahead, but I'd be leery of system stability at "
			"this point.\n");
	}

	netif_start_queue(dev);
	dlprintk((KERN_INFO MYNAM "/lo: Done.\n"));

	return 0;
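/* Error unwind: release the allocations in reverse order. */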
out_mpt_rxfidx:
	kfree(priv->mpt_rxfidx);
	priv->mpt_rxfidx = NULL;
out_SendCtl:
	kfree(priv->SendCtl);
	priv->SendCtl = NULL;
out_mpt_txfidx:
	kfree(priv->mpt_txfidx);
	priv->mpt_txfidx = NULL;
out:	return -ENOMEM;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* Send a LanReset message to the FW. This should result in the FW returning
   any buckets it still has. */
static int
mpt_lan_reset(struct net_device *dev)
{
	MPT_FRAME_HDR *mf;
	LANResetRequest_t *pResetReq;
	struct mpt_lan_priv *priv = netdev_priv(dev);

	mf = mpt_get_msg_frame(LanCtx, priv->mpt_dev);

	if (mf == NULL) {
/*		dlprintk((KERN_ERR MYNAM "/reset: Evil funkiness abounds! "
		"Unable to allocate a request frame.\n"));
*/
		return -1;
	}

	pResetReq = (LANResetRequest_t *) mf;

	pResetReq->Function	= MPI_FUNCTION_LAN_RESET;
	pResetReq->ChainOffset	= 0;
	pResetReq->Reserved	= 0;
	pResetReq->PortNumber	= priv->pnum;
	pResetReq->MsgFlags	= 0;
	pResetReq->Reserved2	= 0;

	mpt_put_msg_frame(LanCtx, priv->mpt_dev, mf);

	return 0;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static int
mpt_lan_close(struct net_device *dev)
{
	struct mpt_lan_priv *priv = netdev_priv(dev);
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
	unsigned long timeout;
	int i;

	dlprintk((KERN_INFO MYNAM ": mpt_lan_close called\n"));

	mpt_event_deregister(LanCtx);

	dlprintk((KERN_INFO MYNAM ":lan_close: Posted %d buckets "
		  "since driver was loaded, %d still out\n",
		  priv->total_posted, atomic_read(&priv->buckets_out)));

	netif_stop_queue(dev);

	mpt_lan_reset(dev);

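	/* Give the IOC up to two seconds to return any buckets it still
	 * holds in response to the LanReset above before force-freeing
	 * them below.
	 */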
	timeout = jiffies + 2 * HZ;
	while (atomic_read(&priv->buckets_out) && time_before(jiffies, timeout))
		schedule_timeout_interruptible(1);

	for (i = 0; i < priv->max_buckets_out; i++) {
		if (priv->RcvCtl[i].skb != NULL) {
/**/			dlprintk((KERN_INFO MYNAM "/lan_close: bucket %05x "
/**/				  "is still out\n", i));
			dma_unmap_single(&mpt_dev->pcidev->dev,
					 priv->RcvCtl[i].dma,
					 priv->RcvCtl[i].len, DMA_FROM_DEVICE);
			dev_kfree_skb(priv->RcvCtl[i].skb);
		}
	}

	kfree(priv->RcvCtl);
	kfree(priv->mpt_rxfidx);

	for (i = 0; i < priv->tx_max_out; i++) {
		if (priv->SendCtl[i].skb != NULL) {
			dma_unmap_single(&mpt_dev->pcidev->dev,
					 priv->SendCtl[i].dma,
					 priv->SendCtl[i].len, DMA_TO_DEVICE);
			dev_kfree_skb(priv->SendCtl[i].skb);
		}
	}

	kfree(priv->SendCtl);
	kfree(priv->mpt_txfidx);

	atomic_set(&priv->buckets_out, 0);

	printk(KERN_INFO MYNAM ": %s/%s: interface down & inactive\n",
			IOC_AND_NETDEV_NAMES_s_s(dev));

	return 0;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* Tx timeout handler. */
static void
mpt_lan_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct mpt_lan_priv *priv = netdev_priv(dev);
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;

	if (mpt_dev->active) {
		dlprintk (("mptlan/tx_timeout: calling netif_wake_queue for %s.\n", dev->name));
		netif_wake_queue(dev);
	}
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
//static inline int
static int
mpt_lan_send_turbo(struct net_device *dev, u32 tmsg)
{
	struct mpt_lan_priv *priv = netdev_priv(dev);
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
	struct sk_buff *sent;
	unsigned long flags;
	u32 ctx;

	ctx = GET_LAN_BUFFER_CONTEXT(tmsg);
	sent = priv->SendCtl[ctx].skb;

	dev->stats.tx_packets++;
	dev->stats.tx_bytes += sent->len;

	dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, skb %p sent.\n",
			IOC_AND_NETDEV_NAMES_s_s(dev),
			__func__, sent));

	priv->SendCtl[ctx].skb = NULL;
	dma_unmap_single(&mpt_dev->pcidev->dev, priv->SendCtl[ctx].dma,
			 priv->SendCtl[ctx].len, DMA_TO_DEVICE);
	dev_kfree_skb_irq(sent);

	spin_lock_irqsave(&priv->txfidx_lock, flags);
	priv->mpt_txfidx[++priv->mpt_txfidx_tail] = ctx;
	spin_unlock_irqrestore(&priv->txfidx_lock, flags);

	netif_wake_queue(dev);
	return 0;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static int
mpt_lan_send_reply(struct net_device *dev, LANSendReply_t *pSendRep)
{
	struct mpt_lan_priv *priv = netdev_priv(dev);
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
	struct sk_buff *sent;
	unsigned long flags;
	int FreeReqFrame = 0;
	u32 *pContext;
	u32 ctx;
	u8 count;

	count = pSendRep->NumberOfContexts;

	dioprintk((KERN_INFO MYNAM ": send_reply: IOCStatus: %04x\n",
		 le16_to_cpu(pSendRep->IOCStatus)));

	/* Add check for Loginfo Flag in IOCStatus */

	switch (le16_to_cpu(pSendRep->IOCStatus) & MPI_IOCSTATUS_MASK) {
	case MPI_IOCSTATUS_SUCCESS:
		dev->stats.tx_packets += count;
		break;

	case MPI_IOCSTATUS_LAN_CANCELED:
	case MPI_IOCSTATUS_LAN_TRANSMIT_ABORTED:
		break;

	case MPI_IOCSTATUS_INVALID_SGL:
		dev->stats.tx_errors += count;
		printk (KERN_ERR MYNAM ": %s/%s: ERROR - Invalid SGL sent to IOC!\n",
				IOC_AND_NETDEV_NAMES_s_s(dev));
		goto out;

	default:
		dev->stats.tx_errors += count;
		break;
	}

	pContext = &pSendRep->BufferContext;

	spin_lock_irqsave(&priv->txfidx_lock, flags);
	while (count > 0) {
		ctx = GET_LAN_BUFFER_CONTEXT(le32_to_cpu(*pContext));

		sent = priv->SendCtl[ctx].skb;
		dev->stats.tx_bytes += sent->len;

		dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, skb %p sent.\n",
				IOC_AND_NETDEV_NAMES_s_s(dev),
				__func__, sent));

		priv->SendCtl[ctx].skb = NULL;
		dma_unmap_single(&mpt_dev->pcidev->dev,
				 priv->SendCtl[ctx].dma,
				 priv->SendCtl[ctx].len, DMA_TO_DEVICE);
		dev_kfree_skb_irq(sent);

		priv->mpt_txfidx[++priv->mpt_txfidx_tail] = ctx;

		pContext++;
		count--;
	}
	spin_unlock_irqrestore(&priv->txfidx_lock, flags);

out:
	if (!(pSendRep->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY))
		FreeReqFrame = 1;

	netif_wake_queue(dev);
	return FreeReqFrame;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static netdev_tx_t
mpt_lan_sdu_send (struct sk_buff *skb, struct net_device *dev)
{
	struct mpt_lan_priv *priv = netdev_priv(dev);
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
	MPT_FRAME_HDR *mf;
	LANSendRequest_t *pSendReq;
	SGETransaction32_t *pTrans;
	SGESimple64_t *pSimple;
	const unsigned char *mac;
	dma_addr_t dma;
	unsigned long flags;
	int ctx;
	u16 cur_naa = 0x1000;

	dioprintk((KERN_INFO MYNAM ": %s called, skb_addr = %p\n",
			__func__, skb));

	spin_lock_irqsave(&priv->txfidx_lock, flags);
	if (priv->mpt_txfidx_tail < 0) {
		netif_stop_queue(dev);
		spin_unlock_irqrestore(&priv->txfidx_lock, flags);

		printk (KERN_ERR "%s: no tx context available: %u\n",
			__func__, priv->mpt_txfidx_tail);
		return NETDEV_TX_BUSY;
	}

	mf = mpt_get_msg_frame(LanCtx, mpt_dev);
	if (mf == NULL) {
		netif_stop_queue(dev);
		spin_unlock_irqrestore(&priv->txfidx_lock, flags);

		printk (KERN_ERR "%s: Unable to alloc request frame\n",
			__func__);
		return NETDEV_TX_BUSY;
	}

	ctx = priv->mpt_txfidx[priv->mpt_txfidx_tail--];
	spin_unlock_irqrestore(&priv->txfidx_lock, flags);

//	dioprintk((KERN_INFO MYNAM ": %s/%s: Creating new msg frame (send).\n",
//			IOC_AND_NETDEV_NAMES_s_s(dev)));

	pSendReq = (LANSendRequest_t *) mf;

	/* Set the mac.raw pointer, since this apparently isn't getting
	 * done before we get the skb. Pull the data pointer past the mac data.
	 */
	skb_reset_mac_header(skb);
	skb_pull(skb, 12);
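	/* The 12 bytes pulled are the destination and source FC addresses
	 * (2 * FC_ALEN); the destination address is re-encoded into the
	 * transaction details below instead of being sent inline with the
	 * payload.
	 */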

	dma = dma_map_single(&mpt_dev->pcidev->dev, skb->data, skb->len,
			     DMA_TO_DEVICE);

	priv->SendCtl[ctx].skb = skb;
	priv->SendCtl[ctx].dma = dma;
	priv->SendCtl[ctx].len = skb->len;

	/* Message Header */
	pSendReq->Reserved    = 0;
	pSendReq->Function    = MPI_FUNCTION_LAN_SEND;
	pSendReq->ChainOffset = 0;
	pSendReq->Reserved2   = 0;
	pSendReq->MsgFlags    = 0;
	pSendReq->PortNumber  = priv->pnum;

	/* Transaction Context Element */
	pTrans = (SGETransaction32_t *) pSendReq->SG_List;

	/* No Flags, 8 bytes of Details, 32bit Context (bloody turbo replies) */
	pTrans->ContextSize   = sizeof(u32);
	pTrans->DetailsLength = 2 * sizeof(u32);
	pTrans->Flags         = 0;
	pTrans->TransactionContext = cpu_to_le32(ctx);

//	dioprintk((KERN_INFO MYNAM ": %s/%s: BC = %08x, skb = %p, buff = %p\n",
//			IOC_AND_NETDEV_NAMES_s_s(dev),
//			ctx, skb, skb->data));

	mac = skb_mac_header(skb);

	pTrans->TransactionDetails[0] = cpu_to_le32((cur_naa         << 16) |
						    (mac[0] <<  8) |
						    (mac[1] <<  0));
	pTrans->TransactionDetails[1] = cpu_to_le32((mac[2] << 24) |
						    (mac[3] << 16) |
						    (mac[4] <<  8) |
						    (mac[5] <<  0));

	pSimple = (SGESimple64_t *) &pTrans->TransactionDetails[2];

	/* If we ever decide to send more than one Simple SGE per LANSend, then
	   we will need to make sure that LAST_ELEMENT only gets set on the
	   last one. Otherwise, bad voodoo and evil funkiness will commence. */
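	/* A single simple SGE describes the whole linear payload: the flag
	 * bits are shifted up by MPI_SGE_FLAGS_SHIFT and OR'd with the
	 * transfer length (skb->len) in the low bits.
	 */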
	pSimple->FlagsLength = cpu_to_le32(
			((MPI_SGE_FLAGS_LAST_ELEMENT |
			  MPI_SGE_FLAGS_END_OF_BUFFER |
			  MPI_SGE_FLAGS_SIMPLE_ELEMENT |
			  MPI_SGE_FLAGS_SYSTEM_ADDRESS |
			  MPI_SGE_FLAGS_HOST_TO_IOC |
			  MPI_SGE_FLAGS_64_BIT_ADDRESSING |
			  MPI_SGE_FLAGS_END_OF_LIST) << MPI_SGE_FLAGS_SHIFT) |
			skb->len);
	pSimple->Address.Low = cpu_to_le32((u32) dma);
	if (sizeof(dma_addr_t) > sizeof(u32))
		pSimple->Address.High = cpu_to_le32((u32) ((u64) dma >> 32));
	else
		pSimple->Address.High = 0;

	mpt_put_msg_frame (LanCtx, mpt_dev, mf);
	netif_trans_update(dev);

	dioprintk((KERN_INFO MYNAM ": %s/%s: Sending packet. FlagsLength = %08x.\n",
			IOC_AND_NETDEV_NAMES_s_s(dev),
			le32_to_cpu(pSimple->FlagsLength)));

	return NETDEV_TX_OK;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static void
mpt_lan_wake_post_buckets_task(struct net_device *dev, int priority)
/*
 * @priority: 0 = put it on the timer queue, 1 = put it on the immediate queue
 */
{
	struct mpt_lan_priv *priv = netdev_priv(dev);

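	/* post_buckets_active is a one-bit latch so that only one posting
	 * task is queued at a time; mpt_lan_post_receive_buckets() clears
	 * it when it finishes.
	 */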
	if (test_and_set_bit(0, &priv->post_buckets_active) == 0) {
		if (priority) {
			schedule_delayed_work(&priv->post_buckets_task, 0);
		} else {
			schedule_delayed_work(&priv->post_buckets_task, 1);
			dioprintk((KERN_INFO MYNAM ": post_buckets queued on "
				   "timer.\n"));
		}
		dioprintk((KERN_INFO MYNAM ": %s/%s: Queued post_buckets task.\n",
			   IOC_AND_NETDEV_NAMES_s_s(dev)));
	}
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static int
mpt_lan_receive_skb(struct net_device *dev, struct sk_buff *skb)
{
	struct mpt_lan_priv *priv = netdev_priv(dev);

	skb->protocol = mpt_lan_type_trans(skb, dev);

	dioprintk((KERN_INFO MYNAM ": %s/%s: Incoming packet (%d bytes) "
		 "delivered to upper level.\n",
			IOC_AND_NETDEV_NAMES_s_s(dev), skb->len));

	dev->stats.rx_bytes += skb->len;
	dev->stats.rx_packets++;

	skb->dev = dev;
	netif_rx(skb);

	dioprintk((MYNAM "/receive_skb: %d buckets remaining\n",
		 atomic_read(&priv->buckets_out)));

	if (atomic_read(&priv->buckets_out) < priv->bucketthresh)
		mpt_lan_wake_post_buckets_task(dev, 1);

	dioprintk((KERN_INFO MYNAM "/receive_post_reply: %d buckets "
		  "remaining, %d received back since sod\n",
		  atomic_read(&priv->buckets_out), priv->total_received));

	return 0;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
//static inline int
static int
mpt_lan_receive_post_turbo(struct net_device *dev, u32 tmsg)
{
	struct mpt_lan_priv *priv = netdev_priv(dev);
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
	struct sk_buff *skb, *old_skb;
	unsigned long flags;
	u32 ctx, len;

	ctx = GET_LAN_BUCKET_CONTEXT(tmsg);
	skb = priv->RcvCtl[ctx].skb;

	len = GET_LAN_PACKET_LENGTH(tmsg);

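	/* Copybreak: copy small packets into a fresh skb so the large
	 * mapped bucket stays available for reuse; for large packets,
	 * unmap the bucket and hand its skb straight up the stack.
	 */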
	if (len < MPT_LAN_RX_COPYBREAK) {
		old_skb = skb;

		skb = (struct sk_buff *)dev_alloc_skb(len);
		if (!skb) {
			printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! (%s@%d)\n",
					IOC_AND_NETDEV_NAMES_s_s(dev),
					__FILE__, __LINE__);
			return -ENOMEM;
		}

		dma_sync_single_for_cpu(&mpt_dev->pcidev->dev,
					priv->RcvCtl[ctx].dma,
					priv->RcvCtl[ctx].len,
					DMA_FROM_DEVICE);

		skb_copy_from_linear_data(old_skb, skb_put(skb, len), len);

		dma_sync_single_for_device(&mpt_dev->pcidev->dev,
					   priv->RcvCtl[ctx].dma,
					   priv->RcvCtl[ctx].len,
					   DMA_FROM_DEVICE);
		goto out;
	}

	skb_put(skb, len);

	priv->RcvCtl[ctx].skb = NULL;

	dma_unmap_single(&mpt_dev->pcidev->dev, priv->RcvCtl[ctx].dma,
			 priv->RcvCtl[ctx].len, DMA_FROM_DEVICE);

out:
	spin_lock_irqsave(&priv->rxfidx_lock, flags);
	priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
	spin_unlock_irqrestore(&priv->rxfidx_lock, flags);

	atomic_dec(&priv->buckets_out);
	priv->total_received++;

	return mpt_lan_receive_skb(dev, skb);
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static int
mpt_lan_receive_post_free(struct net_device *dev,
			  LANReceivePostReply_t *pRecvRep)
{
	struct mpt_lan_priv *priv = netdev_priv(dev);
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
	unsigned long flags;
	struct sk_buff *skb;
	u32 ctx;
	int count;
	int i;

	count = pRecvRep->NumberOfContexts;

/**/	dlprintk((KERN_INFO MYNAM "/receive_post_reply: "
		  "IOC returned %d buckets, freeing them...\n", count));

	spin_lock_irqsave(&priv->rxfidx_lock, flags);
	for (i = 0; i < count; i++) {
		ctx = le32_to_cpu(pRecvRep->BucketContext[i]);

		skb = priv->RcvCtl[ctx].skb;

//		dlprintk((KERN_INFO MYNAM ": %s: dev_name = %s\n",
//				IOC_AND_NETDEV_NAMES_s_s(dev)));
//		dlprintk((KERN_INFO MYNAM "@rpr[2], priv = %p, buckets_out addr = %p",
//				priv, &(priv->buckets_out)));
//		dlprintk((KERN_INFO MYNAM "@rpr[2] TC + 3\n"));

		priv->RcvCtl[ctx].skb = NULL;
		dma_unmap_single(&mpt_dev->pcidev->dev, priv->RcvCtl[ctx].dma,
				 priv->RcvCtl[ctx].len, DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);

		priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
	}
	spin_unlock_irqrestore(&priv->rxfidx_lock, flags);

	atomic_sub(count, &priv->buckets_out);

//	for (i = 0; i < priv->max_buckets_out; i++)
//		if (priv->RcvCtl[i].skb != NULL)
//			dlprintk((KERN_INFO MYNAM "@rpr: bucket %03x "
//				  "is still out\n", i));

/*	dlprintk((KERN_INFO MYNAM "/receive_post_reply: freed %d buckets\n",
		  count));
*/
/**/	dlprintk((KERN_INFO MYNAM "@receive_post_reply: %d buckets "
/**/		  "remaining, %d received back since sod.\n",
/**/		  atomic_read(&priv->buckets_out), priv->total_received));
	return 0;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static int
mpt_lan_receive_post_reply(struct net_device *dev,
			   LANReceivePostReply_t *pRecvRep)
{
	struct mpt_lan_priv *priv = netdev_priv(dev);
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
	struct sk_buff *skb, *old_skb;
	unsigned long flags;
	u32 len, ctx, offset;
	u32 remaining = le32_to_cpu(pRecvRep->BucketsRemaining);
	int count;
	int i, l;

	dioprintk((KERN_INFO MYNAM ": mpt_lan_receive_post_reply called\n"));
	dioprintk((KERN_INFO MYNAM ": receive_post_reply: IOCStatus: %04x\n",
		 le16_to_cpu(pRecvRep->IOCStatus)));

	if ((le16_to_cpu(pRecvRep->IOCStatus) & MPI_IOCSTATUS_MASK) ==
						MPI_IOCSTATUS_LAN_CANCELED)
		return mpt_lan_receive_post_free(dev, pRecvRep);

	len = le32_to_cpu(pRecvRep->PacketLength);
	if (len == 0) {
		printk (KERN_ERR MYNAM ": %s/%s: ERROR - Got a non-TURBO "
			"ReceivePostReply w/ PacketLength zero!\n",
				IOC_AND_NETDEV_NAMES_s_s(dev));
		printk (KERN_ERR MYNAM ": MsgFlags = %02x, IOCStatus = %04x\n",
				pRecvRep->MsgFlags, le16_to_cpu(pRecvRep->IOCStatus));
		return -1;
	}

	ctx    = le32_to_cpu(pRecvRep->BucketContext[0]);
	count  = pRecvRep->NumberOfContexts;
	skb    = priv->RcvCtl[ctx].skb;

	offset = le32_to_cpu(pRecvRep->PacketOffset);
//	if (offset != 0) {
//		printk (KERN_INFO MYNAM ": %s/%s: Got a ReceivePostReply "
//			"w/ PacketOffset %u\n",
//				IOC_AND_NETDEV_NAMES_s_s(dev),
//				offset);
//	}

	dioprintk((KERN_INFO MYNAM ": %s/%s: @rpr, offset = %d, len = %d\n",
			IOC_AND_NETDEV_NAMES_s_s(dev),
			offset, len));

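	/* A packet larger than one bucket arrives spread across several
	 * buckets; allocate a single skb and copy the pieces back together
	 * in order.
	 */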
	if (count > 1) {
		int szrem = len;

//		dioprintk((KERN_INFO MYNAM ": %s/%s: Multiple buckets returned "
//			"for single packet, concatenating...\n",
//				IOC_AND_NETDEV_NAMES_s_s(dev)));

		skb = (struct sk_buff *)dev_alloc_skb(len);
		if (!skb) {
			printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! (%s@%d)\n",
					IOC_AND_NETDEV_NAMES_s_s(dev),
					__FILE__, __LINE__);
			return -ENOMEM;
		}

		spin_lock_irqsave(&priv->rxfidx_lock, flags);
		for (i = 0; i < count; i++) {

			ctx = le32_to_cpu(pRecvRep->BucketContext[i]);
			old_skb = priv->RcvCtl[ctx].skb;

			l = priv->RcvCtl[ctx].len;
			if (szrem < l)
				l = szrem;

//			dioprintk((KERN_INFO MYNAM ": %s/%s: Buckets = %d, len = %u\n",
//					IOC_AND_NETDEV_NAMES_s_s(dev),
//					i, l));

			dma_sync_single_for_cpu(&mpt_dev->pcidev->dev,
						priv->RcvCtl[ctx].dma,
						priv->RcvCtl[ctx].len,
						DMA_FROM_DEVICE);
			skb_copy_from_linear_data(old_skb, skb_put(skb, l), l);

			dma_sync_single_for_device(&mpt_dev->pcidev->dev,
						   priv->RcvCtl[ctx].dma,
						   priv->RcvCtl[ctx].len,
						   DMA_FROM_DEVICE);

			priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
			szrem -= l;
		}
		spin_unlock_irqrestore(&priv->rxfidx_lock, flags);

	} else if (len < MPT_LAN_RX_COPYBREAK) {

		old_skb = skb;

		skb = (struct sk_buff *)dev_alloc_skb(len);
		if (!skb) {
			printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! (%s@%d)\n",
					IOC_AND_NETDEV_NAMES_s_s(dev),
					__FILE__, __LINE__);
			return -ENOMEM;
		}

		dma_sync_single_for_cpu(&mpt_dev->pcidev->dev,
					priv->RcvCtl[ctx].dma,
					priv->RcvCtl[ctx].len,
					DMA_FROM_DEVICE);

		skb_copy_from_linear_data(old_skb, skb_put(skb, len), len);

		dma_sync_single_for_device(&mpt_dev->pcidev->dev,
					   priv->RcvCtl[ctx].dma,
					   priv->RcvCtl[ctx].len,
					   DMA_FROM_DEVICE);

		spin_lock_irqsave(&priv->rxfidx_lock, flags);
		priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
		spin_unlock_irqrestore(&priv->rxfidx_lock, flags);

	} else {
		spin_lock_irqsave(&priv->rxfidx_lock, flags);

		priv->RcvCtl[ctx].skb = NULL;

		dma_unmap_single(&mpt_dev->pcidev->dev, priv->RcvCtl[ctx].dma,
				 priv->RcvCtl[ctx].len, DMA_FROM_DEVICE);
		priv->RcvCtl[ctx].dma = 0;

		priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
		spin_unlock_irqrestore(&priv->rxfidx_lock, flags);

		skb_put(skb, len);
	}

	atomic_sub(count, &priv->buckets_out);
	priv->total_received += count;

	if (priv->mpt_rxfidx_tail >= MPT_LAN_MAX_BUCKETS_OUT) {
		printk (KERN_ERR MYNAM ": %s/%s: Yoohoo! mpt_rxfidx_tail = %d, "
			"MPT_LAN_MAX_BUCKETS_OUT = %d\n",
				IOC_AND_NETDEV_NAMES_s_s(dev),
				priv->mpt_rxfidx_tail,
				MPT_LAN_MAX_BUCKETS_OUT);

		return -1;
	}

	if (remaining == 0)
		printk (KERN_WARNING MYNAM ": %s/%s: WARNING - IOC out of buckets! "
			"(priv->buckets_out = %d)\n",
			IOC_AND_NETDEV_NAMES_s_s(dev),
			atomic_read(&priv->buckets_out));
	else if (remaining < 10)
		printk (KERN_INFO MYNAM ": %s/%s: IOC says %d buckets left. "
			"(priv->buckets_out = %d)\n",
			IOC_AND_NETDEV_NAMES_s_s(dev),
			remaining, atomic_read(&priv->buckets_out));

	if ((remaining < priv->bucketthresh) &&
	    ((atomic_read(&priv->buckets_out) - remaining) >
	     MPT_LAN_BUCKETS_REMAIN_MISMATCH_THRESH)) {

		printk (KERN_WARNING MYNAM " Mismatch between driver's "
			"buckets_out count and fw's BucketsRemaining "
			"count has crossed the threshold, issuing a "
			"LanReset to clear the fw's hashtable. You may "
			"want to check your /var/log/messages for \"CRC "
			"error\" event notifications.\n");

		mpt_lan_reset(dev);
		mpt_lan_wake_post_buckets_task(dev, 0);
	}

	return mpt_lan_receive_skb(dev, skb);
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* Simple SGE's only at the moment */

static void
mpt_lan_post_receive_buckets(struct mpt_lan_priv *priv)
{
	struct net_device *dev = priv->dev;
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
	MPT_FRAME_HDR *mf;
	LANReceivePostRequest_t *pRecvReq;
	SGETransaction32_t *pTrans;
	SGESimple64_t *pSimple;
	struct sk_buff *skb;
	dma_addr_t dma;
	u32 curr, buckets, count, max;
	u32 len = (dev->mtu + dev->hard_header_len + 4);
	unsigned long flags;
	int i;

	curr = atomic_read(&priv->buckets_out);
	buckets = (priv->max_buckets_out - curr);

	dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, Start_buckets = %u, buckets_out = %u\n",
			IOC_AND_NETDEV_NAMES_s_s(dev),
			__func__, buckets, curr));

	max = (mpt_dev->req_sz - MPT_LAN_RECEIVE_POST_REQUEST_SIZE) /
			(sizeof(SGETransaction32_t) + sizeof(SGESimple64_t));
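	/* max = how many bucket descriptors (one transaction element plus
	 * one simple SGE each) fit in a request frame after the fixed
	 * request header.
	 */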

	while (buckets) {
		mf = mpt_get_msg_frame(LanCtx, mpt_dev);
		if (mf == NULL) {
			printk (KERN_ERR "%s: Unable to alloc request frame\n",
				__func__);
			dioprintk((KERN_ERR "%s: %u buckets remaining\n",
				 __func__, buckets));
			goto out;
		}
		pRecvReq = (LANReceivePostRequest_t *) mf;

		i = le16_to_cpu(mf->u.frame.hwhdr.msgctxu.fld.req_idx);
		mpt_dev->RequestNB[i] = 0;
		count = buckets;
		if (count > max)
			count = max;

		pRecvReq->Function    = MPI_FUNCTION_LAN_RECEIVE;
		pRecvReq->ChainOffset = 0;
		pRecvReq->MsgFlags    = 0;
		pRecvReq->PortNumber  = priv->pnum;

		pTrans = (SGETransaction32_t *) pRecvReq->SG_List;
		pSimple = NULL;

		for (i = 0; i < count; i++) {
			int ctx;

			spin_lock_irqsave(&priv->rxfidx_lock, flags);
			if (priv->mpt_rxfidx_tail < 0) {
				printk (KERN_ERR "%s: Can't alloc context\n",
					__func__);
				spin_unlock_irqrestore(&priv->rxfidx_lock,
						       flags);
				break;
			}

			ctx = priv->mpt_rxfidx[priv->mpt_rxfidx_tail--];

			skb = priv->RcvCtl[ctx].skb;
			if (skb && (priv->RcvCtl[ctx].len != len)) {
				dma_unmap_single(&mpt_dev->pcidev->dev,
						 priv->RcvCtl[ctx].dma,
						 priv->RcvCtl[ctx].len,
						 DMA_FROM_DEVICE);
				dev_kfree_skb(priv->RcvCtl[ctx].skb);
				skb = priv->RcvCtl[ctx].skb = NULL;
			}

			if (skb == NULL) {
				skb = dev_alloc_skb(len);
				if (skb == NULL) {
					printk (KERN_WARNING
						MYNAM "/%s: Can't alloc skb\n",
						__func__);
					priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
					spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
					break;
				}

				dma = dma_map_single(&mpt_dev->pcidev->dev,
						     skb->data, len,
						     DMA_FROM_DEVICE);

				priv->RcvCtl[ctx].skb = skb;
				priv->RcvCtl[ctx].dma = dma;
				priv->RcvCtl[ctx].len = len;
			}

			spin_unlock_irqrestore(&priv->rxfidx_lock, flags);

			pTrans->ContextSize   = sizeof(u32);
			pTrans->DetailsLength = 0;
			pTrans->Flags         = 0;
			pTrans->TransactionContext = cpu_to_le32(ctx);

			pSimple = (SGESimple64_t *) pTrans->TransactionDetails;

			pSimple->FlagsLength = cpu_to_le32(
				((MPI_SGE_FLAGS_END_OF_BUFFER |
				  MPI_SGE_FLAGS_SIMPLE_ELEMENT |
				  MPI_SGE_FLAGS_64_BIT_ADDRESSING) << MPI_SGE_FLAGS_SHIFT) | len);
			pSimple->Address.Low = cpu_to_le32((u32) priv->RcvCtl[ctx].dma);
			if (sizeof(dma_addr_t) > sizeof(u32))
				pSimple->Address.High = cpu_to_le32((u32) ((u64) priv->RcvCtl[ctx].dma >> 32));
			else
				pSimple->Address.High = 0;

			pTrans = (SGETransaction32_t *) (pSimple + 1);
		}

		if (pSimple == NULL) {
/**/			printk (KERN_WARNING MYNAM "/%s: No buckets posted\n",
/**/				__func__);
			mpt_free_msg_frame(mpt_dev, mf);
			goto out;
		}

		pSimple->FlagsLength |= cpu_to_le32(MPI_SGE_FLAGS_END_OF_LIST << MPI_SGE_FLAGS_SHIFT);

		pRecvReq->BucketCount = cpu_to_le32(i);

/*	printk(KERN_INFO MYNAM ": posting buckets\n   ");
 *	for (i = 0; i < j + 2; i ++)
 *	    printk (" %08x", le32_to_cpu(msg[i]));
 *	printk ("\n");
 */

		mpt_put_msg_frame(LanCtx, mpt_dev, mf);

		priv->total_posted += i;
		buckets -= i;
		atomic_add(i, &priv->buckets_out);
	}

out:
	dioprintk((KERN_INFO MYNAM "/%s: End_buckets = %u, priv->buckets_out = %u\n",
		  __func__, buckets, atomic_read(&priv->buckets_out)));
	dioprintk((KERN_INFO MYNAM "/%s: Posted %u buckets and received %u back\n",
		  __func__, priv->total_posted, priv->total_received));

	clear_bit(0, &priv->post_buckets_active);
}

static void
mpt_lan_post_receive_buckets_work(struct work_struct *work)
{
	mpt_lan_post_receive_buckets(container_of(work, struct mpt_lan_priv,
						  post_buckets_task.work));
}

static const struct net_device_ops mpt_netdev_ops = {
	.ndo_open       = mpt_lan_open,
	.ndo_stop       = mpt_lan_close,
	.ndo_start_xmit = mpt_lan_sdu_send,
	.ndo_tx_timeout = mpt_lan_tx_timeout,
};

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static struct net_device *
mpt_register_lan_device (MPT_ADAPTER *mpt_dev, int pnum)
{
	struct net_device *dev;
	struct mpt_lan_priv *priv;
	u8 HWaddr[FC_ALEN], *a;

	dev = alloc_fcdev(sizeof(struct mpt_lan_priv));
	if (!dev)
		return NULL;

	dev->mtu = MPT_LAN_MTU;

	priv = netdev_priv(dev);

	priv->dev = dev;
	priv->mpt_dev = mpt_dev;
	priv->pnum = pnum;

	INIT_DELAYED_WORK(&priv->post_buckets_task,
			  mpt_lan_post_receive_buckets_work);
	priv->post_buckets_active = 0;

	dlprintk((KERN_INFO MYNAM "@%d: bucketlen = %d\n",
			__LINE__, dev->mtu + dev->hard_header_len + 4));

	atomic_set(&priv->buckets_out, 0);
	priv->total_posted = 0;
	priv->total_received = 0;
	priv->max_buckets_out = max_buckets_out;
	if (mpt_dev->pfacts[0].MaxLanBuckets < max_buckets_out)
		priv->max_buckets_out = mpt_dev->pfacts[0].MaxLanBuckets;

	dlprintk((KERN_INFO MYNAM "@%d: MaxLanBuckets=%d, max_buckets_out/priv=%d/%d\n",
			__LINE__,
			mpt_dev->pfacts[0].MaxLanBuckets,
			max_buckets_out,
			priv->max_buckets_out));

	priv->bucketthresh = priv->max_buckets_out * 2 / 3;
	spin_lock_init(&priv->txfidx_lock);
	spin_lock_init(&priv->rxfidx_lock);

	/*  Grab pre-fetched LANPage1 stuff. :-) */
	a = (u8 *) &mpt_dev->lan_cnfg_page1.HardwareAddressLow;
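	/* Build the FC address by reversing the six bytes fetched from
	 * LANPage1.
	 */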

	HWaddr[0] = a[5];
	HWaddr[1] = a[4];
	HWaddr[2] = a[3];
	HWaddr[3] = a[2];
	HWaddr[4] = a[1];
	HWaddr[5] = a[0];

	dev->addr_len = FC_ALEN;
	dev_addr_set(dev, HWaddr);
	memset(dev->broadcast, 0xff, FC_ALEN);

	/* The Tx queue is 127 deep on the 909.
	 * Give ourselves some breathing room.
	 */
	priv->tx_max_out = (tx_max_out_p <= MPT_TX_MAX_OUT_LIM) ?
			    tx_max_out_p : MPT_TX_MAX_OUT_LIM;

	dev->netdev_ops = &mpt_netdev_ops;
	dev->watchdog_timeo = MPT_LAN_TX_TIMEOUT;

	/* MTU range: 96 - 65280 */
	dev->min_mtu = MPT_LAN_MIN_MTU;
	dev->max_mtu = MPT_LAN_MAX_MTU;

	dlprintk((KERN_INFO MYNAM ": Finished registering dev "
		"and setting initial values\n"));

	if (register_netdev(dev) != 0) {
		free_netdev(dev);
		dev = NULL;
	}
	return dev;
}

static int
mptlan_probe(struct pci_dev *pdev)
{
	MPT_ADAPTER 		*ioc = pci_get_drvdata(pdev);
	struct net_device	*dev;
	int			i;

	for (i = 0; i < ioc->facts.NumberOfPorts; i++) {
		printk(KERN_INFO MYNAM ": %s: PortNum=%x, "
		       "ProtocolFlags=%02Xh (%c%c%c%c)\n",
		       ioc->name, ioc->pfacts[i].PortNumber,
		       ioc->pfacts[i].ProtocolFlags,
		       MPT_PROTOCOL_FLAGS_c_c_c_c(
			       ioc->pfacts[i].ProtocolFlags));

		if (!(ioc->pfacts[i].ProtocolFlags &
					MPI_PORTFACTS_PROTOCOL_LAN)) {
			printk(KERN_INFO MYNAM ": %s: Hmmm... LAN protocol "
			       "seems to be disabled on this adapter port!\n",
			       ioc->name);
			continue;
		}

		dev = mpt_register_lan_device(ioc, i);
		if (!dev) {
			printk(KERN_ERR MYNAM ": %s: Unable to register "
			       "port%d as a LAN device\n", ioc->name,
			       ioc->pfacts[i].PortNumber);
			continue;
		}

		printk(KERN_INFO MYNAM ": %s: Fusion MPT LAN device "
		       "registered as '%s'\n", ioc->name, dev->name);
		printk(KERN_INFO MYNAM ": %s/%s: "
		       "LanAddr = %pM\n",
		       IOC_AND_NETDEV_NAMES_s_s(dev),
		       dev->dev_addr);

		ioc->netdev = dev;

		return 0;
	}

	return -ENODEV;
}

static void
mptlan_remove(struct pci_dev *pdev)
{
	MPT_ADAPTER 		*ioc = pci_get_drvdata(pdev);
	struct net_device	*dev = ioc->netdev;

	if (dev != NULL) {
		unregister_netdev(dev);
		free_netdev(dev);
	}
}

static struct mpt_pci_driver mptlan_driver = {
	.probe		= mptlan_probe,
	.remove		= mptlan_remove,
};

static int __init mpt_lan_init (void)
{
	show_mptmod_ver(LANAME, LANVER);

	LanCtx = mpt_register(lan_reply, MPTLAN_DRIVER,
				"lan_reply");
	if (LanCtx <= 0) {
		printk (KERN_ERR MYNAM ": Failed to register with MPT base driver\n");
		return -EBUSY;
	}

	dlprintk((KERN_INFO MYNAM ": assigned context of %d\n", LanCtx));

	if (mpt_reset_register(LanCtx, mpt_lan_ioc_reset)) {
		printk(KERN_ERR MYNAM ": Eieee! unable to register a reset "
		       "handler with mptbase! The world is at an end! "
		       "Everything is fading to black! Goodbye.\n");
		return -EBUSY;
	}

	dlprintk((KERN_INFO MYNAM ": Registered for IOC reset notifications\n"));

	mpt_device_driver_register(&mptlan_driver, MPTLAN_DRIVER);
	return 0;
}

static void __exit mpt_lan_exit(void)
{
	mpt_device_driver_deregister(MPTLAN_DRIVER);
	mpt_reset_deregister(LanCtx);

	if (LanCtx) {
		mpt_deregister(LanCtx);
		LanCtx = MPT_MAX_PROTOCOL_DRIVERS;
	}
}

module_init(mpt_lan_init);
module_exit(mpt_lan_exit);

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static unsigned short
mpt_lan_type_trans(struct sk_buff *skb, struct net_device *dev)
{
	struct mpt_lan_ohdr *fch = (struct mpt_lan_ohdr *)skb->data;
	struct fcllc *fcllc;

	skb_reset_mac_header(skb);
	skb_pull(skb, sizeof(struct mpt_lan_ohdr));

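	/* Apparently some senders' firmware 32-bit-swaps the FC header;
	 * on a broadcast frame the swap leaves 0xffff in dtype, which is
	 * the pattern detected here, so un-swap the four header words.
	 */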
	if (fch->dtype == htons(0xffff)) {
		u32 *p = (u32 *) fch;

		swab32s(p + 0);
		swab32s(p + 1);
		swab32s(p + 2);
		swab32s(p + 3);

		printk (KERN_WARNING MYNAM ": %s: WARNING - Broadcast swap F/W bug detected!\n",
				NETDEV_PTR_TO_IOC_NAME_s(dev));
		printk (KERN_WARNING MYNAM ": Please update sender @ MAC_addr = %pM\n",
				fch->saddr);
	}

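	/* A set low bit in the first destination byte marks a group
	 * address: all-ones is broadcast, anything else multicast.
	 */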
	if (*fch->daddr & 1) {
		if (!memcmp(fch->daddr, dev->broadcast, FC_ALEN)) {
			skb->pkt_type = PACKET_BROADCAST;
		} else {
			skb->pkt_type = PACKET_MULTICAST;
		}
	} else {
		if (memcmp(fch->daddr, dev->dev_addr, FC_ALEN)) {
			skb->pkt_type = PACKET_OTHERHOST;
		} else {
			skb->pkt_type = PACKET_HOST;
		}
	}

	fcllc = (struct fcllc *)skb->data;

	/* Strip the SNAP header from ARP packets since we don't
	 * pass them through to the 802.2/SNAP layers.
	 */
	if (fcllc->dsap == EXTENDED_SAP &&
		(fcllc->ethertype == htons(ETH_P_IP) ||
		 fcllc->ethertype == htons(ETH_P_ARP))) {
		skb_pull(skb, sizeof(struct fcllc));
		return fcllc->ethertype;
	}

	return htons(ETH_P_802_2);
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/