xref: /linux/drivers/message/fusion/mptlan.c (revision 69050f8d6d075dc01af7a5f2f550a8067510366f)
1 /*
2  *  linux/drivers/message/fusion/mptlan.c
3  *      IP Over Fibre Channel device driver.
4  *      For use with LSI Fibre Channel PCI chip/adapters
5  *      running LSI Fusion MPT (Message Passing Technology) firmware.
6  *
7  *  Copyright (c) 2000-2008 LSI Corporation
8  *  (mailto:DL-MPTFusionLinux@lsi.com)
9  *
10  */
11 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
12 /*
13     This program is free software; you can redistribute it and/or modify
14     it under the terms of the GNU General Public License as published by
15     the Free Software Foundation; version 2 of the License.
16 
17     This program is distributed in the hope that it will be useful,
18     but WITHOUT ANY WARRANTY; without even the implied warranty of
19     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
20     GNU General Public License for more details.
21 
22     NO WARRANTY
23     THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
24     CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
25     LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
26     MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
27     solely responsible for determining the appropriateness of using and
28     distributing the Program and assumes all risks associated with its
29     exercise of rights under this Agreement, including but not limited to
30     the risks and costs of program errors, damage to or loss of data,
31     programs or equipment, and unavailability or interruption of operations.
32 
33     DISCLAIMER OF LIABILITY
34     NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
35     DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
36     DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
37     ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
38     TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
39     USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
40     HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
41 
42     You should have received a copy of the GNU General Public License
43     along with this program; if not, write to the Free Software
44     Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
45 */
46 
47 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
48 /*
49  * Define statements used for debugging
50  */
51 //#define MPT_LAN_IO_DEBUG
52 
53 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
54 
55 #include "mptlan.h"
56 #include <linux/init.h>
57 #include <linux/module.h>
58 #include <linux/fs.h>
59 #include <linux/sched.h>
60 #include <linux/slab.h>
61 
62 #define my_VERSION	MPT_LINUX_VERSION_COMMON
63 #define MYNAM		"mptlan"
64 
65 MODULE_LICENSE("GPL");
66 MODULE_VERSION(my_VERSION);
67 
68 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
69 /*
70  * MPT LAN message sizes without variable part.
71  */
72 #define MPT_LAN_RECEIVE_POST_REQUEST_SIZE \
73 	(sizeof(LANReceivePostRequest_t) - sizeof(SGE_MPI_UNION))
74 
75 /*
76  *  Fusion MPT LAN private structures
77  */
78 
/* Per-buffer bookkeeping for one Tx or Rx buffer handed to the IOC. */
struct BufferControl {
	struct sk_buff	*skb;	/* buffer owned by the IOC while non-NULL */
	dma_addr_t	dma;	/* streaming DMA mapping of skb->data */
	unsigned int	len;	/* length passed to dma_map_single() */
};
84 
/* Per-netdev private state for one MPT LAN port. */
struct mpt_lan_priv {
	MPT_ADAPTER *mpt_dev;
	u8 pnum; /* Port number in the IOC. This is not a Unix network port! */

	atomic_t buckets_out;		/* number of unused buckets on IOC */
	int bucketthresh;		/* Send more when this many left */

	int *mpt_txfidx; /* Free Tx Context list */
	int mpt_txfidx_tail;
	spinlock_t txfidx_lock;

	int *mpt_rxfidx; /* Free Rx Context list */
	int mpt_rxfidx_tail;
	spinlock_t rxfidx_lock;

	struct BufferControl *RcvCtl;	/* Receive BufferControl structs */
	struct BufferControl *SendCtl;	/* Send BufferControl structs */

	int max_buckets_out;		/* Max buckets to send to IOC */
	int tx_max_out;			/* IOC's Tx queue len */

	u32 total_posted;		/* lifetime count of buckets posted */
	u32 total_received;		/* lifetime count of buckets returned */

	struct delayed_work post_buckets_task;	/* deferred bucket refill */
	struct net_device *dev;			/* back-pointer to our netdev */
	unsigned long post_buckets_active;	/* bit 0: refill task queued */
};
113 
/* Pseudo MAC header carried at the front of an outgoing LAN SDU.
 * NOTE(review): field semantics (dtype/stype vs. the FC_ALEN addresses)
 * are consumed by code outside this chunk — confirm against
 * mpt_lan_type_trans() and the header-build path before relying on them. */
struct mpt_lan_ohdr {
	u16	dtype;
	u8	daddr[FC_ALEN];
	u16	stype;
	u8	saddr[FC_ALEN];
};
120 
121 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
122 
123 /*
124  *  Forward protos...
125  */
126 static int  lan_reply (MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf,
127 		       MPT_FRAME_HDR *reply);
128 static int  mpt_lan_open(struct net_device *dev);
129 static int  mpt_lan_reset(struct net_device *dev);
130 static int  mpt_lan_close(struct net_device *dev);
131 static void mpt_lan_post_receive_buckets(struct mpt_lan_priv *priv);
132 static void mpt_lan_wake_post_buckets_task(struct net_device *dev,
133 					   int priority);
134 static int  mpt_lan_receive_post_turbo(struct net_device *dev, u32 tmsg);
135 static int  mpt_lan_receive_post_reply(struct net_device *dev,
136 				       LANReceivePostReply_t *pRecvRep);
137 static int  mpt_lan_send_turbo(struct net_device *dev, u32 tmsg);
138 static int  mpt_lan_send_reply(struct net_device *dev,
139 			       LANSendReply_t *pSendRep);
140 static int  mpt_lan_ioc_reset(MPT_ADAPTER *ioc, int reset_phase);
141 static int  mpt_lan_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply);
142 static unsigned short mpt_lan_type_trans(struct sk_buff *skb,
143 					 struct net_device *dev);
144 
145 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
146 /*
147  *  Fusion MPT LAN private data
148  */
/* Protocol-driver context handle; MPT_MAX_PROTOCOL_DRIVERS serves as the
 * "not registered" sentinel (registration happens outside this chunk). */
static u8 LanCtx = MPT_MAX_PROTOCOL_DRIVERS;

/* Default queue depths: Rx buckets posted to the IOC, and Tx queue length. */
static u32 max_buckets_out = 127;
static u32 tx_max_out_p = 127 - 16;
153 
154 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
155 /**
156  *	lan_reply - Handle all data sent from the hardware.
157  *	@ioc: Pointer to MPT_ADAPTER structure
158  *	@mf: Pointer to original MPT request frame (NULL if TurboReply)
159  *	@reply: Pointer to MPT reply frame
160  *
161  *	Returns 1 indicating original alloc'd request frame ptr
162  *	should be freed, or 0 if it shouldn't.
163  */
static int
lan_reply (MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *reply)
{
	struct net_device *dev = ioc->netdev;
	int FreeReqFrame = 0;

	dioprintk((KERN_INFO MYNAM ": %s/%s: Got reply.\n",
		  IOC_AND_NETDEV_NAMES_s_s(dev)));

//	dioprintk((KERN_INFO MYNAM "@lan_reply: mf = %p, reply = %p\n",
//			mf, reply));

	/* mf == NULL means a turbo reply: the whole reply is encoded in
	 * the 32-bit "reply" value itself, not a real reply frame. */
	if (mf == NULL) {
		u32 tmsg = CAST_PTR_TO_U32(reply);

		dioprintk((KERN_INFO MYNAM ": %s/%s: @lan_reply, tmsg %08x\n",
				IOC_AND_NETDEV_NAMES_s_s(dev),
				tmsg));

		switch (GET_LAN_FORM(tmsg)) {

		// NOTE!  (Optimization) First case here is now caught in
		//  mptbase.c::mpt_interrupt() routine and callcack here
		//  is now skipped for this case!
#if 0
		case LAN_REPLY_FORM_MESSAGE_CONTEXT:
//			dioprintk((KERN_INFO MYNAM "/lan_reply: "
//				  "MessageContext turbo reply received\n"));
			FreeReqFrame = 1;
			break;
#endif

		case LAN_REPLY_FORM_SEND_SINGLE:
//			dioprintk((MYNAM "/lan_reply: "
//				  "calling mpt_lan_send_reply (turbo)\n"));

			// Potential BUG here?
			//	FreeReqFrame = mpt_lan_send_turbo(dev, tmsg);
			//  If/when mpt_lan_send_turbo would return 1 here,
			//  calling routine (mptbase.c|mpt_interrupt)
			//  would Oops because mf has already been set
			//  to NULL.  So after return from this func,
			//  mpt_interrupt() will attempt to put (NULL) mf ptr
			//  item back onto its adapter FreeQ - Oops!:-(
			//  It's Ok, since mpt_lan_send_turbo() *currently*
			//  always returns 0, but..., just in case:

			(void) mpt_lan_send_turbo(dev, tmsg);
			FreeReqFrame = 0;

			break;

		case LAN_REPLY_FORM_RECEIVE_SINGLE:
//			dioprintk((KERN_INFO MYNAM "@lan_reply: "
//				  "rcv-Turbo = %08x\n", tmsg));
			mpt_lan_receive_post_turbo(dev, tmsg);
			break;

		default:
			printk (KERN_ERR MYNAM "/lan_reply: Got a turbo reply "
				"that I don't know what to do with\n");

			/* CHECKME!  Hmmm...  FreeReqFrame is 0 here; is that right? */

			break;
		}

		/* Turbo path: there is no request frame to free. */
		return FreeReqFrame;
	}

//	msg = (u32 *) reply;
//	dioprintk((KERN_INFO MYNAM "@lan_reply: msg = %08x %08x %08x %08x\n",
//		  le32_to_cpu(msg[0]), le32_to_cpu(msg[1]),
//		  le32_to_cpu(msg[2]), le32_to_cpu(msg[3])));
//	dioprintk((KERN_INFO MYNAM "@lan_reply: Function = %02xh\n",
//		  reply->u.hdr.Function));

	/* Full (non-turbo) reply frame: dispatch on the MPI function code. */
	switch (reply->u.hdr.Function) {

	case MPI_FUNCTION_LAN_SEND:
	{
		LANSendReply_t *pSendRep;

		pSendRep = (LANSendReply_t *) reply;
		FreeReqFrame = mpt_lan_send_reply(dev, pSendRep);
		break;
	}

	case MPI_FUNCTION_LAN_RECEIVE:
	{
		LANReceivePostReply_t *pRecvRep;

		pRecvRep = (LANReceivePostReply_t *) reply;
		if (pRecvRep->NumberOfContexts) {
			mpt_lan_receive_post_reply(dev, pRecvRep);
			/* Keep the request frame while continuation
			 * replies are still outstanding. */
			if (!(pRecvRep->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY))
				FreeReqFrame = 1;
		} else
			dioprintk((KERN_INFO MYNAM "@lan_reply: zero context "
				  "ReceivePostReply received.\n"));
		break;
	}

	case MPI_FUNCTION_LAN_RESET:
		/* Just a default reply. Might want to check it to
		 * make sure that everything went ok.
		 */
		FreeReqFrame = 1;
		break;

	case MPI_FUNCTION_EVENT_NOTIFICATION:
	case MPI_FUNCTION_EVENT_ACK:
		/*  _EVENT_NOTIFICATION should NOT come down this path any more.
		 *  Should be routed to mpt_lan_event_process(), but just in case...
		 */
		FreeReqFrame = 1;
		break;

	default:
		printk (KERN_ERR MYNAM "/lan_reply: Got a non-turbo "
			"reply that I don't know what to do with\n");

		/* CHECKME!  Hmmm...  FreeReqFrame is 0 here; is that right? */
		FreeReqFrame = 1;

		break;
	}

	return FreeReqFrame;
}
294 
295 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
296 static int
297 mpt_lan_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
298 {
299 	struct net_device *dev = ioc->netdev;
300 	struct mpt_lan_priv *priv;
301 
302 	if (dev == NULL)
303 		return(1);
304 	else
305 		priv = netdev_priv(dev);
306 
307 	dlprintk((KERN_INFO MYNAM ": IOC %s_reset routed to LAN driver!\n",
308 			reset_phase==MPT_IOC_SETUP_RESET ? "setup" : (
309 			reset_phase==MPT_IOC_PRE_RESET ? "pre" : "post")));
310 
311 	if (priv->mpt_rxfidx == NULL)
312 		return (1);
313 
314 	if (reset_phase == MPT_IOC_SETUP_RESET) {
315 		;
316 	} else if (reset_phase == MPT_IOC_PRE_RESET) {
317 		int i;
318 		unsigned long flags;
319 
320 		netif_stop_queue(dev);
321 
322 		dlprintk ((KERN_INFO "mptlan/ioc_reset: called netif_stop_queue for %s.\n", dev->name));
323 
324 		atomic_set(&priv->buckets_out, 0);
325 
326 		/* Reset Rx Free Tail index and re-populate the queue. */
327 		spin_lock_irqsave(&priv->rxfidx_lock, flags);
328 		priv->mpt_rxfidx_tail = -1;
329 		for (i = 0; i < priv->max_buckets_out; i++)
330 			priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = i;
331 		spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
332 	} else {
333 		mpt_lan_post_receive_buckets(priv);
334 		netif_wake_queue(dev);
335 	}
336 
337 	return 1;
338 }
339 
340 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
341 static int
342 mpt_lan_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
343 {
344 	dlprintk((KERN_INFO MYNAM ": MPT event routed to LAN driver!\n"));
345 
346 	switch (le32_to_cpu(pEvReply->Event)) {
347 	case MPI_EVENT_NONE:				/* 00 */
348 	case MPI_EVENT_LOG_DATA:			/* 01 */
349 	case MPI_EVENT_STATE_CHANGE:			/* 02 */
350 	case MPI_EVENT_UNIT_ATTENTION:			/* 03 */
351 	case MPI_EVENT_IOC_BUS_RESET:			/* 04 */
352 	case MPI_EVENT_EXT_BUS_RESET:			/* 05 */
353 	case MPI_EVENT_RESCAN:				/* 06 */
354 		/* Ok, do we need to do anything here? As far as
355 		   I can tell, this is when a new device gets added
356 		   to the loop. */
357 	case MPI_EVENT_LINK_STATUS_CHANGE:		/* 07 */
358 	case MPI_EVENT_LOOP_STATE_CHANGE:		/* 08 */
359 	case MPI_EVENT_LOGOUT:				/* 09 */
360 	case MPI_EVENT_EVENT_CHANGE:			/* 0A */
361 	default:
362 		break;
363 	}
364 
365 	/*
366 	 *  NOTE: pEvent->AckRequired handling now done in mptbase.c;
367 	 *  Do NOT do it here now!
368 	 */
369 
370 	return 1;
371 }
372 
373 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* ndo_open: allocate Tx/Rx context stacks and BufferControl arrays, prime
 * the IOC with receive buckets, and start the Tx queue.
 * Returns 0 on success, -ENOMEM if any allocation fails (goto cleanup
 * unwinds in reverse order). */
static int
mpt_lan_open(struct net_device *dev)
{
	struct mpt_lan_priv *priv = netdev_priv(dev);
	int i;

	/* Ask the FW to return any buckets it still holds.  Failure is
	 * only reported; the open continues regardless. */
	if (mpt_lan_reset(dev) != 0) {
		MPT_ADAPTER *mpt_dev = priv->mpt_dev;

		printk (KERN_WARNING MYNAM "/lan_open: lan_reset failed.");

		if (mpt_dev->active)
			printk ("The ioc is active. Perhaps it needs to be"
				" reset?\n");
		else
			printk ("The ioc in inactive, most likely in the "
				"process of being reset. Please try again in "
				"a moment.\n");
	}

	/* Free Tx context stack: one slot per in-flight send. */
	priv->mpt_txfidx = kmalloc_objs(int, priv->tx_max_out, GFP_KERNEL);
	if (priv->mpt_txfidx == NULL)
		goto out;
	priv->mpt_txfidx_tail = -1;

	priv->SendCtl = kzalloc_objs(struct BufferControl, priv->tx_max_out,
				     GFP_KERNEL);
	if (priv->SendCtl == NULL)
		goto out_mpt_txfidx;
	for (i = 0; i < priv->tx_max_out; i++)
		priv->mpt_txfidx[++priv->mpt_txfidx_tail] = i;

	dlprintk((KERN_INFO MYNAM "@lo: Finished initializing SendCtl\n"));

	/* Free Rx context stack: one slot per receive bucket posted. */
	priv->mpt_rxfidx = kmalloc_objs(int, priv->max_buckets_out, GFP_KERNEL);
	if (priv->mpt_rxfidx == NULL)
		goto out_SendCtl;
	priv->mpt_rxfidx_tail = -1;

	priv->RcvCtl = kzalloc_objs(struct BufferControl, priv->max_buckets_out,
				    GFP_KERNEL);
	if (priv->RcvCtl == NULL)
		goto out_mpt_rxfidx;
	for (i = 0; i < priv->max_buckets_out; i++)
		priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = i;

/**/	dlprintk((KERN_INFO MYNAM "/lo: txfidx contains - "));
/**/	for (i = 0; i < priv->tx_max_out; i++)
/**/		dlprintk((" %xh", priv->mpt_txfidx[i]));
/**/	dlprintk(("\n"));

	dlprintk((KERN_INFO MYNAM "/lo: Finished initializing RcvCtl\n"));

	/* Hand the initial batch of receive buckets to the IOC. */
	mpt_lan_post_receive_buckets(priv);
	printk(KERN_INFO MYNAM ": %s/%s: interface up & active\n",
			IOC_AND_NETDEV_NAMES_s_s(dev));

	if (mpt_event_register(LanCtx, mpt_lan_event_process) != 0) {
		printk (KERN_WARNING MYNAM "/lo: Unable to register for Event"
			" Notifications. This is a bad thing! We're not going "
			"to go ahead, but I'd be leery of system stability at "
			"this point.\n");
	}

	netif_start_queue(dev);
	dlprintk((KERN_INFO MYNAM "/lo: Done.\n"));

	return 0;
out_mpt_rxfidx:
	kfree(priv->mpt_rxfidx);
	priv->mpt_rxfidx = NULL;
out_SendCtl:
	kfree(priv->SendCtl);
	priv->SendCtl = NULL;
out_mpt_txfidx:
	kfree(priv->mpt_txfidx);
	priv->mpt_txfidx = NULL;
out:	return -ENOMEM;
}
453 
454 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
455 /* Send a LanReset message to the FW. This should result in the FW returning
456    any buckets it still has. */
457 static int
458 mpt_lan_reset(struct net_device *dev)
459 {
460 	MPT_FRAME_HDR *mf;
461 	LANResetRequest_t *pResetReq;
462 	struct mpt_lan_priv *priv = netdev_priv(dev);
463 
464 	mf = mpt_get_msg_frame(LanCtx, priv->mpt_dev);
465 
466 	if (mf == NULL) {
467 /*		dlprintk((KERN_ERR MYNAM "/reset: Evil funkiness abounds! "
468 		"Unable to allocate a request frame.\n"));
469 */
470 		return -1;
471 	}
472 
473 	pResetReq = (LANResetRequest_t *) mf;
474 
475 	pResetReq->Function	= MPI_FUNCTION_LAN_RESET;
476 	pResetReq->ChainOffset	= 0;
477 	pResetReq->Reserved	= 0;
478 	pResetReq->PortNumber	= priv->pnum;
479 	pResetReq->MsgFlags	= 0;
480 	pResetReq->Reserved2	= 0;
481 
482 	mpt_put_msg_frame(LanCtx, priv->mpt_dev, mf);
483 
484 	return 0;
485 }
486 
487 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* ndo_stop: deregister events, drain outstanding buckets (bounded wait),
 * then reclaim and free all Tx/Rx buffers and context arrays. */
static int
mpt_lan_close(struct net_device *dev)
{
	struct mpt_lan_priv *priv = netdev_priv(dev);
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
	unsigned long timeout;
	int i;

	dlprintk((KERN_INFO MYNAM ": mpt_lan_close called\n"));

	mpt_event_deregister(LanCtx);

	dlprintk((KERN_INFO MYNAM ":lan_close: Posted %d buckets "
		  "since driver was loaded, %d still out\n",
		  priv->total_posted,atomic_read(&priv->buckets_out)));

	netif_stop_queue(dev);

	/* Ask the FW to hand back all receive buckets it still holds... */
	mpt_lan_reset(dev);

	/* ...and give it up to two seconds to do so. */
	timeout = jiffies + 2 * HZ;
	while (atomic_read(&priv->buckets_out) && time_before(jiffies, timeout))
		schedule_timeout_interruptible(1);

	/* Reclaim any Rx buckets the IOC never returned. */
	for (i = 0; i < priv->max_buckets_out; i++) {
		if (priv->RcvCtl[i].skb != NULL) {
/**/			dlprintk((KERN_INFO MYNAM "/lan_close: bucket %05x "
/**/				  "is still out\n", i));
			dma_unmap_single(&mpt_dev->pcidev->dev,
					 priv->RcvCtl[i].dma,
					 priv->RcvCtl[i].len, DMA_FROM_DEVICE);
			dev_kfree_skb(priv->RcvCtl[i].skb);
		}
	}

	kfree(priv->RcvCtl);
	kfree(priv->mpt_rxfidx);

	/* Likewise for any sends still in flight. */
	for (i = 0; i < priv->tx_max_out; i++) {
		if (priv->SendCtl[i].skb != NULL) {
			dma_unmap_single(&mpt_dev->pcidev->dev,
					 priv->SendCtl[i].dma,
					 priv->SendCtl[i].len, DMA_TO_DEVICE);
			dev_kfree_skb(priv->SendCtl[i].skb);
		}
	}

	kfree(priv->SendCtl);
	kfree(priv->mpt_txfidx);

	atomic_set(&priv->buckets_out, 0);

	printk(KERN_INFO MYNAM ": %s/%s: interface down & inactive\n",
			IOC_AND_NETDEV_NAMES_s_s(dev));

	return 0;
}
545 
546 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
547 /* Tx timeout handler. */
548 static void
549 mpt_lan_tx_timeout(struct net_device *dev, unsigned int txqueue)
550 {
551 	struct mpt_lan_priv *priv = netdev_priv(dev);
552 	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
553 
554 	if (mpt_dev->active) {
555 		dlprintk (("mptlan/tx_timeout: calling netif_wake_queue for %s.\n", dev->name));
556 		netif_wake_queue(dev);
557 	}
558 }
559 
560 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
561 //static inline int
562 static int
563 mpt_lan_send_turbo(struct net_device *dev, u32 tmsg)
564 {
565 	struct mpt_lan_priv *priv = netdev_priv(dev);
566 	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
567 	struct sk_buff *sent;
568 	unsigned long flags;
569 	u32 ctx;
570 
571 	ctx = GET_LAN_BUFFER_CONTEXT(tmsg);
572 	sent = priv->SendCtl[ctx].skb;
573 
574 	dev->stats.tx_packets++;
575 	dev->stats.tx_bytes += sent->len;
576 
577 	dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, skb %p sent.\n",
578 			IOC_AND_NETDEV_NAMES_s_s(dev),
579 			__func__, sent));
580 
581 	priv->SendCtl[ctx].skb = NULL;
582 	dma_unmap_single(&mpt_dev->pcidev->dev, priv->SendCtl[ctx].dma,
583 			 priv->SendCtl[ctx].len, DMA_TO_DEVICE);
584 	dev_kfree_skb_irq(sent);
585 
586 	spin_lock_irqsave(&priv->txfidx_lock, flags);
587 	priv->mpt_txfidx[++priv->mpt_txfidx_tail] = ctx;
588 	spin_unlock_irqrestore(&priv->txfidx_lock, flags);
589 
590 	netif_wake_queue(dev);
591 	return 0;
592 }
593 
594 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* Handle a full (non-turbo) LAN_SEND reply covering one or more contexts.
 * Returns 1 if the original request frame should be freed (no continuation
 * reply pending), 0 otherwise. */
static int
mpt_lan_send_reply(struct net_device *dev, LANSendReply_t *pSendRep)
{
	struct mpt_lan_priv *priv = netdev_priv(dev);
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
	struct sk_buff *sent;
	unsigned long flags;
	int FreeReqFrame = 0;
	u32 *pContext;
	u32 ctx;
	u8 count;

	count = pSendRep->NumberOfContexts;

	dioprintk((KERN_INFO MYNAM ": send_reply: IOCStatus: %04x\n",
		 le16_to_cpu(pSendRep->IOCStatus)));

	/* Add check for Loginfo Flag in IOCStatus */

	switch (le16_to_cpu(pSendRep->IOCStatus) & MPI_IOCSTATUS_MASK) {
	case MPI_IOCSTATUS_SUCCESS:
		dev->stats.tx_packets += count;
		break;

	case MPI_IOCSTATUS_LAN_CANCELED:
	case MPI_IOCSTATUS_LAN_TRANSMIT_ABORTED:
		break;

	case MPI_IOCSTATUS_INVALID_SGL:
		dev->stats.tx_errors += count;
		printk (KERN_ERR MYNAM ": %s/%s: ERROR - Invalid SGL sent to IOC!\n",
				IOC_AND_NETDEV_NAMES_s_s(dev));
		/* NOTE(review): this path skips the recycling loop below,
		 * so these Tx contexts are never returned to the free
		 * stack — confirm that is intentional. */
		goto out;

	default:
		dev->stats.tx_errors += count;
		break;
	}

	pContext = &pSendRep->BufferContext;

	/* Unmap and free each completed skb; return its context to the
	 * Tx free stack.  The lock is held across the whole batch. */
	spin_lock_irqsave(&priv->txfidx_lock, flags);
	while (count > 0) {
		ctx = GET_LAN_BUFFER_CONTEXT(le32_to_cpu(*pContext));

		sent = priv->SendCtl[ctx].skb;
		dev->stats.tx_bytes += sent->len;

		dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, skb %p sent.\n",
				IOC_AND_NETDEV_NAMES_s_s(dev),
				__func__, sent));

		priv->SendCtl[ctx].skb = NULL;
		dma_unmap_single(&mpt_dev->pcidev->dev,
				 priv->SendCtl[ctx].dma,
				 priv->SendCtl[ctx].len, DMA_TO_DEVICE);
		dev_kfree_skb_irq(sent);

		priv->mpt_txfidx[++priv->mpt_txfidx_tail] = ctx;

		pContext++;
		count--;
	}
	spin_unlock_irqrestore(&priv->txfidx_lock, flags);

out:
	/* Keep the request frame while continuation replies are pending. */
	if (!(pSendRep->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY))
		FreeReqFrame = 1;

	netif_wake_queue(dev);
	return FreeReqFrame;
}
667 
668 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* ndo_start_xmit: map the skb for DMA and post a LAN_SEND request built
 * from a transaction context element plus a single 64-bit simple SGE.
 * Returns NETDEV_TX_BUSY (queue stopped) when no Tx context or request
 * frame is available, NETDEV_TX_OK once the frame is posted. */
static netdev_tx_t
mpt_lan_sdu_send (struct sk_buff *skb, struct net_device *dev)
{
	struct mpt_lan_priv *priv = netdev_priv(dev);
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
	MPT_FRAME_HDR *mf;
	LANSendRequest_t *pSendReq;
	SGETransaction32_t *pTrans;
	SGESimple64_t *pSimple;
	const unsigned char *mac;
	dma_addr_t dma;
	unsigned long flags;
	int ctx;
	u16 cur_naa = 0x1000;

	dioprintk((KERN_INFO MYNAM ": %s called, skb_addr = %p\n",
			__func__, skb));

	/* Reserve a Tx context under the lock; back off if none left. */
	spin_lock_irqsave(&priv->txfidx_lock, flags);
	if (priv->mpt_txfidx_tail < 0) {
		netif_stop_queue(dev);
		spin_unlock_irqrestore(&priv->txfidx_lock, flags);

		printk (KERN_ERR "%s: no tx context available: %u\n",
			__func__, priv->mpt_txfidx_tail);
		return NETDEV_TX_BUSY;
	}

	mf = mpt_get_msg_frame(LanCtx, mpt_dev);
	if (mf == NULL) {
		netif_stop_queue(dev);
		spin_unlock_irqrestore(&priv->txfidx_lock, flags);

		printk (KERN_ERR "%s: Unable to alloc request frame\n",
			__func__);
		return NETDEV_TX_BUSY;
	}

	ctx = priv->mpt_txfidx[priv->mpt_txfidx_tail--];
	spin_unlock_irqrestore(&priv->txfidx_lock, flags);

//	dioprintk((KERN_INFO MYNAM ": %s/%s: Creating new msg frame (send).\n",
//			IOC_AND_NETDEV_NAMES_s_s(dev)));

	pSendReq = (LANSendRequest_t *) mf;

	/* Set the mac.raw pointer, since this apparently isn't getting
	 * done before we get the skb. Pull the data pointer past the mac data.
	 */
	skb_reset_mac_header(skb);
	skb_pull(skb, 12);

	dma = dma_map_single(&mpt_dev->pcidev->dev, skb->data, skb->len,
			     DMA_TO_DEVICE);

	/* Record the mapping so the completion path can undo it. */
	priv->SendCtl[ctx].skb = skb;
	priv->SendCtl[ctx].dma = dma;
	priv->SendCtl[ctx].len = skb->len;

	/* Message Header */
	pSendReq->Reserved    = 0;
	pSendReq->Function    = MPI_FUNCTION_LAN_SEND;
	pSendReq->ChainOffset = 0;
	pSendReq->Reserved2   = 0;
	pSendReq->MsgFlags    = 0;
	pSendReq->PortNumber  = priv->pnum;

	/* Transaction Context Element */
	pTrans = (SGETransaction32_t *) pSendReq->SG_List;

	/* No Flags, 8 bytes of Details, 32bit Context (bloody turbo replies) */
	pTrans->ContextSize   = sizeof(u32);
	pTrans->DetailsLength = 2 * sizeof(u32);
	pTrans->Flags         = 0;
	pTrans->TransactionContext = cpu_to_le32(ctx);

//	dioprintk((KERN_INFO MYNAM ": %s/%s: BC = %08x, skb = %p, buff = %p\n",
//			IOC_AND_NETDEV_NAMES_s_s(dev),
//			ctx, skb, skb->data));

	mac = skb_mac_header(skb);

	/* Pack NAA + destination MAC into the two transaction detail words. */
	pTrans->TransactionDetails[0] = cpu_to_le32((cur_naa         << 16) |
						    (mac[0] <<  8) |
						    (mac[1] <<  0));
	pTrans->TransactionDetails[1] = cpu_to_le32((mac[2] << 24) |
						    (mac[3] << 16) |
						    (mac[4] <<  8) |
						    (mac[5] <<  0));

	pSimple = (SGESimple64_t *) &pTrans->TransactionDetails[2];

	/* If we ever decide to send more than one Simple SGE per LANSend, then
	   we will need to make sure that LAST_ELEMENT only gets set on the
	   last one. Otherwise, bad voodoo and evil funkiness will commence. */
	pSimple->FlagsLength = cpu_to_le32(
			((MPI_SGE_FLAGS_LAST_ELEMENT |
			  MPI_SGE_FLAGS_END_OF_BUFFER |
			  MPI_SGE_FLAGS_SIMPLE_ELEMENT |
			  MPI_SGE_FLAGS_SYSTEM_ADDRESS |
			  MPI_SGE_FLAGS_HOST_TO_IOC |
			  MPI_SGE_FLAGS_64_BIT_ADDRESSING |
			  MPI_SGE_FLAGS_END_OF_LIST) << MPI_SGE_FLAGS_SHIFT) |
			skb->len);
	pSimple->Address.Low = cpu_to_le32((u32) dma);
	if (sizeof(dma_addr_t) > sizeof(u32))
		pSimple->Address.High = cpu_to_le32((u32) ((u64) dma >> 32));
	else
		pSimple->Address.High = 0;

	mpt_put_msg_frame (LanCtx, mpt_dev, mf);
	netif_trans_update(dev);

	dioprintk((KERN_INFO MYNAM ": %s/%s: Sending packet. FlagsLength = %08x.\n",
			IOC_AND_NETDEV_NAMES_s_s(dev),
			le32_to_cpu(pSimple->FlagsLength)));

	return NETDEV_TX_OK;
}
788 
789 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
790 static void
791 mpt_lan_wake_post_buckets_task(struct net_device *dev, int priority)
792 /*
793  * @priority: 0 = put it on the timer queue, 1 = put it on the immediate queue
794  */
795 {
796 	struct mpt_lan_priv *priv = netdev_priv(dev);
797 
798 	if (test_and_set_bit(0, &priv->post_buckets_active) == 0) {
799 		if (priority) {
800 			schedule_delayed_work(&priv->post_buckets_task, 0);
801 		} else {
802 			schedule_delayed_work(&priv->post_buckets_task, 1);
803 			dioprintk((KERN_INFO MYNAM ": post_buckets queued on "
804 				   "timer.\n"));
805 		}
806 	        dioprintk((KERN_INFO MYNAM ": %s/%s: Queued post_buckets task.\n",
807 			   IOC_AND_NETDEV_NAMES_s_s(dev) ));
808 	}
809 }
810 
811 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
812 static int
813 mpt_lan_receive_skb(struct net_device *dev, struct sk_buff *skb)
814 {
815 	struct mpt_lan_priv *priv = netdev_priv(dev);
816 
817 	skb->protocol = mpt_lan_type_trans(skb, dev);
818 
819 	dioprintk((KERN_INFO MYNAM ": %s/%s: Incoming packet (%d bytes) "
820 		 "delivered to upper level.\n",
821 			IOC_AND_NETDEV_NAMES_s_s(dev), skb->len));
822 
823 	dev->stats.rx_bytes += skb->len;
824 	dev->stats.rx_packets++;
825 
826 	skb->dev = dev;
827 	netif_rx(skb);
828 
829 	dioprintk((MYNAM "/receive_skb: %d buckets remaining\n",
830 		 atomic_read(&priv->buckets_out)));
831 
832 	if (atomic_read(&priv->buckets_out) < priv->bucketthresh)
833 		mpt_lan_wake_post_buckets_task(dev, 1);
834 
835 	dioprintk((KERN_INFO MYNAM "/receive_post_reply: %d buckets "
836 		  "remaining, %d received back since sod\n",
837 		  atomic_read(&priv->buckets_out), priv->total_received));
838 
839 	return 0;
840 }
841 
842 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
843 //static inline int
/* Handle a single-bucket receive reported via a turbo reply.  Small
 * packets (< MPT_LAN_RX_COPYBREAK) are copied into a fresh skb so the
 * original bucket stays posted; larger ones consume the bucket.
 * Returns mpt_lan_receive_skb()'s result, or -ENOMEM on skb alloc failure. */
static int
mpt_lan_receive_post_turbo(struct net_device *dev, u32 tmsg)
{
	struct mpt_lan_priv *priv = netdev_priv(dev);
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
	struct sk_buff *skb, *old_skb;
	unsigned long flags;
	u32 ctx, len;

	ctx = GET_LAN_BUCKET_CONTEXT(tmsg);
	skb = priv->RcvCtl[ctx].skb;

	len = GET_LAN_PACKET_LENGTH(tmsg);

	/* Copybreak: copy small packets out of the bucket, keeping the
	 * DMA mapping (and the bucket skb) intact for reuse. */
	if (len < MPT_LAN_RX_COPYBREAK) {
		old_skb = skb;

		skb = (struct sk_buff *)dev_alloc_skb(len);
		if (!skb) {
			printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! (%s@%d)\n",
					IOC_AND_NETDEV_NAMES_s_s(dev),
					__FILE__, __LINE__);
			return -ENOMEM;
		}

		/* Sync the bucket for CPU access before copying, then give
		 * it back to the device. */
		dma_sync_single_for_cpu(&mpt_dev->pcidev->dev,
					priv->RcvCtl[ctx].dma,
					priv->RcvCtl[ctx].len,
					DMA_FROM_DEVICE);

		skb_copy_from_linear_data(old_skb, skb_put(skb, len), len);

		dma_sync_single_for_device(&mpt_dev->pcidev->dev,
					   priv->RcvCtl[ctx].dma,
					   priv->RcvCtl[ctx].len,
					   DMA_FROM_DEVICE);
		goto out;
	}

	/* Large packet: hand the bucket skb itself up the stack. */
	skb_put(skb, len);

	priv->RcvCtl[ctx].skb = NULL;

	dma_unmap_single(&mpt_dev->pcidev->dev, priv->RcvCtl[ctx].dma,
			 priv->RcvCtl[ctx].len, DMA_FROM_DEVICE);

out:
	/* Either way the bucket context is free again. */
	spin_lock_irqsave(&priv->rxfidx_lock, flags);
	priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
	spin_unlock_irqrestore(&priv->rxfidx_lock, flags);

	atomic_dec(&priv->buckets_out);
	priv->total_received++;

	return mpt_lan_receive_skb(dev, skb);
}
900 
901 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* Free the buckets named in a canceled ReceivePostReply: the IOC is
 * returning them unused, so unmap and discard each skb and recycle the
 * contexts.  Always returns 0. */
static int
mpt_lan_receive_post_free(struct net_device *dev,
			  LANReceivePostReply_t *pRecvRep)
{
	struct mpt_lan_priv *priv = netdev_priv(dev);
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
	unsigned long flags;
	struct sk_buff *skb;
	u32 ctx;
	int count;
	int i;

	count = pRecvRep->NumberOfContexts;

/**/	dlprintk((KERN_INFO MYNAM "/receive_post_reply: "
		  "IOC returned %d buckets, freeing them...\n", count));

	/* Lock held across the whole batch of context recycles. */
	spin_lock_irqsave(&priv->rxfidx_lock, flags);
	for (i = 0; i < count; i++) {
		ctx = le32_to_cpu(pRecvRep->BucketContext[i]);

		skb = priv->RcvCtl[ctx].skb;

//		dlprintk((KERN_INFO MYNAM ": %s: dev_name = %s\n",
//				IOC_AND_NETDEV_NAMES_s_s(dev)));
//		dlprintk((KERN_INFO MYNAM "@rpr[2], priv = %p, buckets_out addr = %p",
//				priv, &(priv->buckets_out)));
//		dlprintk((KERN_INFO MYNAM "@rpr[2] TC + 3\n"));

		priv->RcvCtl[ctx].skb = NULL;
		dma_unmap_single(&mpt_dev->pcidev->dev, priv->RcvCtl[ctx].dma,
				 priv->RcvCtl[ctx].len, DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);

		priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
	}
	spin_unlock_irqrestore(&priv->rxfidx_lock, flags);

	atomic_sub(count, &priv->buckets_out);

//	for (i = 0; i < priv->max_buckets_out; i++)
//		if (priv->RcvCtl[i].skb != NULL)
//			dlprintk((KERN_INFO MYNAM "@rpr: bucket %03x "
//				  "is still out\n", i));

/*	dlprintk((KERN_INFO MYNAM "/receive_post_reply: freed %d buckets\n",
		  count));
*/
/**/	dlprintk((KERN_INFO MYNAM "@receive_post_reply: %d buckets "
/**/		  "remaining, %d received back since sod.\n",
/**/		  atomic_read(&priv->buckets_out), priv->total_received));
	return 0;
}
955 
956 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 * mpt_lan_receive_post_reply - handle a (non-TURBO) LAN receive-post reply.
 * @dev:      net_device the reply belongs to
 * @pRecvRep: reply frame from the IOC (little-endian fields)
 *
 * A reply names one or more previously posted receive buckets that now
 * hold a single inbound packet.  Depending on how the packet landed we
 * either concatenate several buckets into a fresh skb, copy a small
 * packet out of its bucket (copybreak), or hand the bucket's skb itself
 * up the stack.  Recycled bucket contexts are pushed back onto the
 * mpt_rxfidx free stack under rxfidx_lock.
 *
 * Returns 0 /negative error, or the result of mpt_lan_receive_skb().
 */
static int
mpt_lan_receive_post_reply(struct net_device *dev,
			   LANReceivePostReply_t *pRecvRep)
{
	struct mpt_lan_priv *priv = netdev_priv(dev);
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
	struct sk_buff *skb, *old_skb;
	unsigned long flags;
	u32 len, ctx, offset;
	u32 remaining = le32_to_cpu(pRecvRep->BucketsRemaining);
	int count;
	int i, l;

	dioprintk((KERN_INFO MYNAM ": mpt_lan_receive_post_reply called\n"));
	dioprintk((KERN_INFO MYNAM ": receive_post_reply: IOCStatus: %04x\n",
		 le16_to_cpu(pRecvRep->IOCStatus)));

	/* A canceled post means the IOC is returning unused buckets, not
	 * delivering a packet; free them via the dedicated path. */
	if ((le16_to_cpu(pRecvRep->IOCStatus) & MPI_IOCSTATUS_MASK) ==
						MPI_IOCSTATUS_LAN_CANCELED)
		return mpt_lan_receive_post_free(dev, pRecvRep);

	len = le32_to_cpu(pRecvRep->PacketLength);
	if (len == 0) {
		printk (KERN_ERR MYNAM ": %s/%s: ERROR - Got a non-TURBO "
			"ReceivePostReply w/ PacketLength zero!\n",
				IOC_AND_NETDEV_NAMES_s_s(dev));
		printk (KERN_ERR MYNAM ": MsgFlags = %02x, IOCStatus = %04x\n",
				pRecvRep->MsgFlags, le16_to_cpu(pRecvRep->IOCStatus));
		return -1;
	}

	/* First bucket context; count tells us how many buckets the
	 * packet was scattered across. */
	ctx    = le32_to_cpu(pRecvRep->BucketContext[0]);
	count  = pRecvRep->NumberOfContexts;
	skb    = priv->RcvCtl[ctx].skb;

	offset = le32_to_cpu(pRecvRep->PacketOffset);
//	if (offset != 0) {
//		printk (KERN_INFO MYNAM ": %s/%s: Got a ReceivePostReply "
//			"w/ PacketOffset %u\n",
//				IOC_AND_NETDEV_NAMES_s_s(dev),
//				offset);
//	}

	dioprintk((KERN_INFO MYNAM ": %s/%s: @rpr, offset = %d, len = %d\n",
			IOC_AND_NETDEV_NAMES_s_s(dev),
			offset, len));

	if (count > 1) {
		/* Packet spans multiple buckets: allocate one skb big
		 * enough for the whole packet and copy each bucket's
		 * slice into it, recycling the bucket contexts as we go
		 * (the bucket skbs stay mapped for reuse). */
		int szrem = len;

//		dioprintk((KERN_INFO MYNAM ": %s/%s: Multiple buckets returned "
//			"for single packet, concatenating...\n",
//				IOC_AND_NETDEV_NAMES_s_s(dev)));

		skb = (struct sk_buff *)dev_alloc_skb(len);
		if (!skb) {
			printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! (%s@%d)\n",
					IOC_AND_NETDEV_NAMES_s_s(dev),
					__FILE__, __LINE__);
			return -ENOMEM;
		}

		/* NOTE(review): on the alloc failure above the bucket
		 * contexts in this reply are never returned to the free
		 * stack — looks like a slow bucket leak; confirm. */
		spin_lock_irqsave(&priv->rxfidx_lock, flags);
		for (i = 0; i < count; i++) {

			ctx = le32_to_cpu(pRecvRep->BucketContext[i]);
			old_skb = priv->RcvCtl[ctx].skb;

			/* Last bucket may be only partially filled. */
			l = priv->RcvCtl[ctx].len;
			if (szrem < l)
				l = szrem;

//			dioprintk((KERN_INFO MYNAM ": %s/%s: Buckets = %d, len = %u\n",
//					IOC_AND_NETDEV_NAMES_s_s(dev),
//					i, l));

			/* Sync the bucket for CPU access, copy its data
			 * out, then give it back to the device. */
			dma_sync_single_for_cpu(&mpt_dev->pcidev->dev,
						priv->RcvCtl[ctx].dma,
						priv->RcvCtl[ctx].len,
						DMA_FROM_DEVICE);
			skb_copy_from_linear_data(old_skb, skb_put(skb, l), l);

			dma_sync_single_for_device(&mpt_dev->pcidev->dev,
						   priv->RcvCtl[ctx].dma,
						   priv->RcvCtl[ctx].len,
						   DMA_FROM_DEVICE);

			priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
			szrem -= l;
		}
		spin_unlock_irqrestore(&priv->rxfidx_lock, flags);

	} else if (len < MPT_LAN_RX_COPYBREAK) {
		/* Small packet: copy it into a right-sized skb and keep
		 * the (still mapped) bucket for reuse. */

		old_skb = skb;

		skb = (struct sk_buff *)dev_alloc_skb(len);
		if (!skb) {
			printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! (%s@%d)\n",
					IOC_AND_NETDEV_NAMES_s_s(dev),
					__FILE__, __LINE__);
			return -ENOMEM;
		}

		dma_sync_single_for_cpu(&mpt_dev->pcidev->dev,
					priv->RcvCtl[ctx].dma,
					priv->RcvCtl[ctx].len,
					DMA_FROM_DEVICE);

		skb_copy_from_linear_data(old_skb, skb_put(skb, len), len);

		dma_sync_single_for_device(&mpt_dev->pcidev->dev,
					   priv->RcvCtl[ctx].dma,
					   priv->RcvCtl[ctx].len,
					   DMA_FROM_DEVICE);

		spin_lock_irqsave(&priv->rxfidx_lock, flags);
		priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
		spin_unlock_irqrestore(&priv->rxfidx_lock, flags);

	} else {
		/* Large single-bucket packet: detach the bucket's skb,
		 * unmap it, and pass it up directly (zero copy). */
		spin_lock_irqsave(&priv->rxfidx_lock, flags);

		priv->RcvCtl[ctx].skb = NULL;

		dma_unmap_single(&mpt_dev->pcidev->dev, priv->RcvCtl[ctx].dma,
				 priv->RcvCtl[ctx].len, DMA_FROM_DEVICE);
		priv->RcvCtl[ctx].dma = 0;

		priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
		spin_unlock_irqrestore(&priv->rxfidx_lock, flags);

		skb_put(skb,len);
	}

	/* Account for the buckets this reply consumed. */
	atomic_sub(count, &priv->buckets_out);
	priv->total_received += count;

	/* Sanity check: the free-context stack must never exceed the
	 * number of buckets that can be outstanding. */
	if (priv->mpt_rxfidx_tail >= MPT_LAN_MAX_BUCKETS_OUT) {
		printk (KERN_ERR MYNAM ": %s/%s: Yoohoo! mpt_rxfidx_tail = %d, "
			"MPT_LAN_MAX_BUCKETS_OUT = %d\n",
				IOC_AND_NETDEV_NAMES_s_s(dev),
				priv->mpt_rxfidx_tail,
				MPT_LAN_MAX_BUCKETS_OUT);

		return -1;
	}

	if (remaining == 0)
		printk (KERN_WARNING MYNAM ": %s/%s: WARNING - IOC out of buckets! "
			"(priv->buckets_out = %d)\n",
			IOC_AND_NETDEV_NAMES_s_s(dev),
			atomic_read(&priv->buckets_out));
	else if (remaining < 10)
		printk (KERN_INFO MYNAM ": %s/%s: IOC says %d buckets left. "
			"(priv->buckets_out = %d)\n",
			IOC_AND_NETDEV_NAMES_s_s(dev),
			remaining, atomic_read(&priv->buckets_out));

	/* If the driver's and firmware's bucket accounting have drifted
	 * too far apart, reset the LAN port to resynchronize and repost
	 * buckets. */
	if ((remaining < priv->bucketthresh) &&
	    ((atomic_read(&priv->buckets_out) - remaining) >
	     MPT_LAN_BUCKETS_REMAIN_MISMATCH_THRESH)) {

		printk (KERN_WARNING MYNAM " Mismatch between driver's "
			"buckets_out count and fw's BucketsRemaining "
			"count has crossed the threshold, issuing a "
			"LanReset to clear the fw's hashtable. You may "
			"want to check your /var/log/messages for \"CRC "
			"error\" event notifications.\n");

		mpt_lan_reset(dev);
		mpt_lan_wake_post_buckets_task(dev, 0);
	}

	return mpt_lan_receive_skb(dev, skb);
}
1133 
1134 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1135 /* Simple SGE's only at the moment */
1136 
/*
 * mpt_lan_post_receive_buckets - post receive buckets to the IOC.
 * @priv: per-device LAN state
 *
 * Tops the firmware back up to max_buckets_out receive buckets.  Each
 * request frame carries as many (transaction context + simple 64-bit
 * SGE) pairs as fit in a message frame; each pair describes one
 * DMA-mapped skb sized for the current MTU.  Free bucket contexts are
 * popped from the mpt_rxfidx stack under rxfidx_lock.  Clears
 * post_buckets_active on exit so the task can be rescheduled.
 */
static void
mpt_lan_post_receive_buckets(struct mpt_lan_priv *priv)
{
	struct net_device *dev = priv->dev;
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
	MPT_FRAME_HDR *mf;
	LANReceivePostRequest_t *pRecvReq;
	SGETransaction32_t *pTrans;
	SGESimple64_t *pSimple;
	struct sk_buff *skb;
	dma_addr_t dma;
	u32 curr, buckets, count, max;
	/* Bucket size: MTU + link header + 4 (presumably FC CRC/trailer
	 * room — confirm against the firmware spec). */
	u32 len = (dev->mtu + dev->hard_header_len + 4);
	unsigned long flags;
	int i;

	curr = atomic_read(&priv->buckets_out);
	buckets = (priv->max_buckets_out - curr);

	dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, Start_buckets = %u, buckets_out = %u\n",
			IOC_AND_NETDEV_NAMES_s_s(dev),
			__func__, buckets, curr));

	/* Max SGE pairs that fit in one request frame. */
	max = (mpt_dev->req_sz - MPT_LAN_RECEIVE_POST_REQUEST_SIZE) /
			(sizeof(SGETransaction32_t) + sizeof(SGESimple64_t));

	while (buckets) {
		mf = mpt_get_msg_frame(LanCtx, mpt_dev);
		if (mf == NULL) {
			printk (KERN_ERR "%s: Unable to alloc request frame\n",
				__func__);
			dioprintk((KERN_ERR "%s: %u buckets remaining\n",
				 __func__, buckets));
			goto out;
		}
		pRecvReq = (LANReceivePostRequest_t *) mf;

		i = le16_to_cpu(mf->u.frame.hwhdr.msgctxu.fld.req_idx);
		mpt_dev->RequestNB[i] = 0;
		count = buckets;
		if (count > max)
			count = max;

		pRecvReq->Function    = MPI_FUNCTION_LAN_RECEIVE;
		pRecvReq->ChainOffset = 0;
		pRecvReq->MsgFlags    = 0;
		pRecvReq->PortNumber  = priv->pnum;

		pTrans = (SGETransaction32_t *) pRecvReq->SG_List;
		pSimple = NULL;

		for (i = 0; i < count; i++) {
			int ctx;

			spin_lock_irqsave(&priv->rxfidx_lock, flags);
			if (priv->mpt_rxfidx_tail < 0) {
				/* No free bucket contexts left. */
				printk (KERN_ERR "%s: Can't alloc context\n",
					__func__);
				spin_unlock_irqrestore(&priv->rxfidx_lock,
						       flags);
				break;
			}

			ctx = priv->mpt_rxfidx[priv->mpt_rxfidx_tail--];

			/* A recycled skb whose size no longer matches the
			 * current bucket length (MTU changed) must be
			 * released and replaced. */
			skb = priv->RcvCtl[ctx].skb;
			if (skb && (priv->RcvCtl[ctx].len != len)) {
				dma_unmap_single(&mpt_dev->pcidev->dev,
						 priv->RcvCtl[ctx].dma,
						 priv->RcvCtl[ctx].len,
						 DMA_FROM_DEVICE);
				dev_kfree_skb(priv->RcvCtl[ctx].skb);
				skb = priv->RcvCtl[ctx].skb = NULL;
			}

			if (skb == NULL) {
				skb = dev_alloc_skb(len);
				if (skb == NULL) {
					printk (KERN_WARNING
						MYNAM "/%s: Can't alloc skb\n",
						__func__);
					/* Put the context back before bailing. */
					priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
					spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
					break;
				}

				dma = dma_map_single(&mpt_dev->pcidev->dev,
						     skb->data, len,
						     DMA_FROM_DEVICE);

				priv->RcvCtl[ctx].skb = skb;
				priv->RcvCtl[ctx].dma = dma;
				priv->RcvCtl[ctx].len = len;
			}

			spin_unlock_irqrestore(&priv->rxfidx_lock, flags);

			/* Emit the transaction context followed by a
			 * single 64-bit simple SGE for this bucket. */
			pTrans->ContextSize   = sizeof(u32);
			pTrans->DetailsLength = 0;
			pTrans->Flags         = 0;
			pTrans->TransactionContext = cpu_to_le32(ctx);

			pSimple = (SGESimple64_t *) pTrans->TransactionDetails;

			pSimple->FlagsLength = cpu_to_le32(
				((MPI_SGE_FLAGS_END_OF_BUFFER |
				  MPI_SGE_FLAGS_SIMPLE_ELEMENT |
				  MPI_SGE_FLAGS_64_BIT_ADDRESSING) << MPI_SGE_FLAGS_SHIFT) | len);
			pSimple->Address.Low = cpu_to_le32((u32) priv->RcvCtl[ctx].dma);
			if (sizeof(dma_addr_t) > sizeof(u32))
				pSimple->Address.High = cpu_to_le32((u32) ((u64) priv->RcvCtl[ctx].dma >> 32));
			else
				pSimple->Address.High = 0;

			pTrans = (SGETransaction32_t *) (pSimple + 1);
		}

		if (pSimple == NULL) {
/**/			printk (KERN_WARNING MYNAM "/%s: No buckets posted\n",
/**/				__func__);
			mpt_free_msg_frame(mpt_dev, mf);
			goto out;
		}

		/* Mark the last SGE so the IOC knows the list ends here. */
		pSimple->FlagsLength |= cpu_to_le32(MPI_SGE_FLAGS_END_OF_LIST << MPI_SGE_FLAGS_SHIFT);

		pRecvReq->BucketCount = cpu_to_le32(i);

/*	printk(KERN_INFO MYNAM ": posting buckets\n   ");
 *	for (i = 0; i < j + 2; i ++)
 *	    printk (" %08x", le32_to_cpu(msg[i]));
 *	printk ("\n");
 */

		mpt_put_msg_frame(LanCtx, mpt_dev, mf);

		priv->total_posted += i;
		buckets -= i;
		atomic_add(i, &priv->buckets_out);
	}

out:
	dioprintk((KERN_INFO MYNAM "/%s: End_buckets = %u, priv->buckets_out = %u\n",
		  __func__, buckets, atomic_read(&priv->buckets_out)));
	dioprintk((KERN_INFO MYNAM "/%s: Posted %u buckets and received %u back\n",
	__func__, priv->total_posted, priv->total_received));

	clear_bit(0, &priv->post_buckets_active);
}
1286 
1287 static void
1288 mpt_lan_post_receive_buckets_work(struct work_struct *work)
1289 {
1290 	mpt_lan_post_receive_buckets(container_of(work, struct mpt_lan_priv,
1291 						  post_buckets_task.work));
1292 }
1293 
/* net_device callbacks for the MPT LAN interface. */
static const struct net_device_ops mpt_netdev_ops = {
	.ndo_open       = mpt_lan_open,
	.ndo_stop       = mpt_lan_close,
	.ndo_start_xmit = mpt_lan_sdu_send,
	.ndo_tx_timeout = mpt_lan_tx_timeout,
};
1300 
1301 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 * mpt_register_lan_device - allocate and register a LAN netdev for a port.
 * @mpt_dev: adapter the port belongs to
 * @pnum:    port number on the adapter
 *
 * Allocates an FC net_device, initializes the per-device LAN state
 * (bucket accounting, locks, delayed bucket-posting work), derives the
 * MAC address from the adapter's cached LANPage1, and registers the
 * device.  Returns the registered net_device, or NULL on failure.
 */
static struct net_device *
mpt_register_lan_device (MPT_ADAPTER *mpt_dev, int pnum)
{
	struct net_device *dev;
	struct mpt_lan_priv *priv;
	u8 HWaddr[FC_ALEN], *a;

	dev = alloc_fcdev(sizeof(struct mpt_lan_priv));
	if (!dev)
		return NULL;

	dev->mtu = MPT_LAN_MTU;

	priv = netdev_priv(dev);

	priv->dev = dev;
	priv->mpt_dev = mpt_dev;
	priv->pnum = pnum;

	INIT_DELAYED_WORK(&priv->post_buckets_task,
			  mpt_lan_post_receive_buckets_work);
	priv->post_buckets_active = 0;

	dlprintk((KERN_INFO MYNAM "@%d: bucketlen = %d\n",
			__LINE__, dev->mtu + dev->hard_header_len + 4));

	atomic_set(&priv->buckets_out, 0);
	priv->total_posted = 0;
	priv->total_received = 0;
	/* Cap the module parameter by what the firmware says it supports. */
	priv->max_buckets_out = max_buckets_out;
	if (mpt_dev->pfacts[0].MaxLanBuckets < max_buckets_out)
		priv->max_buckets_out = mpt_dev->pfacts[0].MaxLanBuckets;

	dlprintk((KERN_INFO MYNAM "@%d: MaxLanBuckets=%d, max_buckets_out/priv=%d/%d\n",
			__LINE__,
			mpt_dev->pfacts[0].MaxLanBuckets,
			max_buckets_out,
			priv->max_buckets_out));

	/* Repost buckets once outstanding drops below 2/3 of the max. */
	priv->bucketthresh = priv->max_buckets_out * 2 / 3;
	spin_lock_init(&priv->txfidx_lock);
	spin_lock_init(&priv->rxfidx_lock);

	/*  Grab pre-fetched LANPage1 stuff. :-) */
	a = (u8 *) &mpt_dev->lan_cnfg_page1.HardwareAddressLow;

	/* LANPage1 stores the address low-byte-first; reverse into
	 * network byte order. */
	HWaddr[0] = a[5];
	HWaddr[1] = a[4];
	HWaddr[2] = a[3];
	HWaddr[3] = a[2];
	HWaddr[4] = a[1];
	HWaddr[5] = a[0];

	dev->addr_len = FC_ALEN;
	dev_addr_set(dev, HWaddr);
	memset(dev->broadcast, 0xff, FC_ALEN);

	/* The Tx queue is 127 deep on the 909.
	 * Give ourselves some breathing room.
	 */
	priv->tx_max_out = (tx_max_out_p <= MPT_TX_MAX_OUT_LIM) ?
			    tx_max_out_p : MPT_TX_MAX_OUT_LIM;

	dev->netdev_ops = &mpt_netdev_ops;
	dev->watchdog_timeo = MPT_LAN_TX_TIMEOUT;

	/* MTU range: 96 - 65280 */
	dev->min_mtu = MPT_LAN_MIN_MTU;
	dev->max_mtu = MPT_LAN_MAX_MTU;

	dlprintk((KERN_INFO MYNAM ": Finished registering dev "
		"and setting initial values\n"));

	if (register_netdev(dev) != 0) {
		free_netdev(dev);
		dev = NULL;
	}
	return dev;
}
1381 
/*
 * mptlan_probe - attach a LAN netdev to the first LAN-capable port.
 * @pdev: PCI device whose drvdata is the MPT_ADAPTER
 *
 * Scans the adapter's ports and registers a LAN device on the first
 * port advertising the LAN protocol.  Only one netdev per adapter is
 * created (returns 0 as soon as one registers).  Returns -ENODEV if no
 * port could be used.
 */
static int
mptlan_probe(struct pci_dev *pdev)
{
	MPT_ADAPTER 		*ioc = pci_get_drvdata(pdev);
	struct net_device	*dev;
	int			i;

	for (i = 0; i < ioc->facts.NumberOfPorts; i++) {
		printk(KERN_INFO MYNAM ": %s: PortNum=%x, "
		       "ProtocolFlags=%02Xh (%c%c%c%c)\n",
		       ioc->name, ioc->pfacts[i].PortNumber,
		       ioc->pfacts[i].ProtocolFlags,
		       MPT_PROTOCOL_FLAGS_c_c_c_c(
			       ioc->pfacts[i].ProtocolFlags));

		/* Skip ports without LAN protocol support. */
		if (!(ioc->pfacts[i].ProtocolFlags &
					MPI_PORTFACTS_PROTOCOL_LAN)) {
			printk(KERN_INFO MYNAM ": %s: Hmmm... LAN protocol "
			       "seems to be disabled on this adapter port!\n",
			       ioc->name);
			continue;
		}

		dev = mpt_register_lan_device(ioc, i);
		if (!dev) {
			printk(KERN_ERR MYNAM ": %s: Unable to register "
			       "port%d as a LAN device\n", ioc->name,
			       ioc->pfacts[i].PortNumber);
			continue;
		}

		printk(KERN_INFO MYNAM ": %s: Fusion MPT LAN device "
		       "registered as '%s'\n", ioc->name, dev->name);
		printk(KERN_INFO MYNAM ": %s/%s: "
		       "LanAddr = %pM\n",
		       IOC_AND_NETDEV_NAMES_s_s(dev),
		       dev->dev_addr);

		ioc->netdev = dev;

		return 0;
	}

	return -ENODEV;
}
1427 
1428 static void
1429 mptlan_remove(struct pci_dev *pdev)
1430 {
1431 	MPT_ADAPTER 		*ioc = pci_get_drvdata(pdev);
1432 	struct net_device	*dev = ioc->netdev;
1433 	struct mpt_lan_priv *priv = netdev_priv(dev);
1434 
1435 	cancel_delayed_work_sync(&priv->post_buckets_task);
1436 	if(dev != NULL) {
1437 		unregister_netdev(dev);
1438 		free_netdev(dev);
1439 	}
1440 }
1441 
/* Hooks registered with the MPT base driver for device add/remove. */
static struct mpt_pci_driver mptlan_driver = {
	.probe		= mptlan_probe,
	.remove		= mptlan_remove,
};
1446 
1447 static int __init mpt_lan_init (void)
1448 {
1449 	show_mptmod_ver(LANAME, LANVER);
1450 
1451 	LanCtx = mpt_register(lan_reply, MPTLAN_DRIVER,
1452 				"lan_reply");
1453 	if (LanCtx <= 0) {
1454 		printk (KERN_ERR MYNAM ": Failed to register with MPT base driver\n");
1455 		return -EBUSY;
1456 	}
1457 
1458 	dlprintk((KERN_INFO MYNAM ": assigned context of %d\n", LanCtx));
1459 
1460 	if (mpt_reset_register(LanCtx, mpt_lan_ioc_reset)) {
1461 		printk(KERN_ERR MYNAM ": Eieee! unable to register a reset "
1462 		       "handler with mptbase! The world is at an end! "
1463 		       "Everything is fading to black! Goodbye.\n");
1464 		return -EBUSY;
1465 	}
1466 
1467 	dlprintk((KERN_INFO MYNAM ": Registered for IOC reset notifications\n"));
1468 
1469 	mpt_device_driver_register(&mptlan_driver, MPTLAN_DRIVER);
1470 	return 0;
1471 }
1472 
1473 static void __exit mpt_lan_exit(void)
1474 {
1475 	mpt_device_driver_deregister(MPTLAN_DRIVER);
1476 	mpt_reset_deregister(LanCtx);
1477 
1478 	if (LanCtx) {
1479 		mpt_deregister(LanCtx);
1480 		LanCtx = MPT_MAX_PROTOCOL_DRIVERS;
1481 	}
1482 }
1483 
1484 module_init(mpt_lan_init);
1485 module_exit(mpt_lan_exit);
1486 
1487 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1488 static unsigned short
1489 mpt_lan_type_trans(struct sk_buff *skb, struct net_device *dev)
1490 {
1491 	struct mpt_lan_ohdr *fch = (struct mpt_lan_ohdr *)skb->data;
1492 	struct fcllc *fcllc;
1493 
1494 	skb_reset_mac_header(skb);
1495 	skb_pull(skb, sizeof(struct mpt_lan_ohdr));
1496 
1497 	if (fch->dtype == htons(0xffff)) {
1498 		u32 *p = (u32 *) fch;
1499 
1500 		swab32s(p + 0);
1501 		swab32s(p + 1);
1502 		swab32s(p + 2);
1503 		swab32s(p + 3);
1504 
1505 		printk (KERN_WARNING MYNAM ": %s: WARNING - Broadcast swap F/W bug detected!\n",
1506 				NETDEV_PTR_TO_IOC_NAME_s(dev));
1507 		printk (KERN_WARNING MYNAM ": Please update sender @ MAC_addr = %pM\n",
1508 				fch->saddr);
1509 	}
1510 
1511 	if (*fch->daddr & 1) {
1512 		if (!memcmp(fch->daddr, dev->broadcast, FC_ALEN)) {
1513 			skb->pkt_type = PACKET_BROADCAST;
1514 		} else {
1515 			skb->pkt_type = PACKET_MULTICAST;
1516 		}
1517 	} else {
1518 		if (memcmp(fch->daddr, dev->dev_addr, FC_ALEN)) {
1519 			skb->pkt_type = PACKET_OTHERHOST;
1520 		} else {
1521 			skb->pkt_type = PACKET_HOST;
1522 		}
1523 	}
1524 
1525 	fcllc = (struct fcllc *)skb->data;
1526 
1527 	/* Strip the SNAP header from ARP packets since we don't
1528 	 * pass them through to the 802.2/SNAP layers.
1529 	 */
1530 	if (fcllc->dsap == EXTENDED_SAP &&
1531 		(fcllc->ethertype == htons(ETH_P_IP) ||
1532 		 fcllc->ethertype == htons(ETH_P_ARP))) {
1533 		skb_pull(skb, sizeof(struct fcllc));
1534 		return fcllc->ethertype;
1535 	}
1536 
1537 	return htons(ETH_P_802_2);
1538 }
1539 
1540 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1541