xref: /linux/drivers/message/fusion/mptlan.c (revision 93d546399c2b7d66a54d5fbd5eee17de19246bf6)
1 /*
2  *  linux/drivers/message/fusion/mptlan.c
3  *      IP Over Fibre Channel device driver.
4  *      For use with LSI Fibre Channel PCI chip/adapters
5  *      running LSI Fusion MPT (Message Passing Technology) firmware.
6  *
7  *  Copyright (c) 2000-2008 LSI Corporation
8  *  (mailto:DL-MPTFusionLinux@lsi.com)
9  *
10  */
11 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
12 /*
13     This program is free software; you can redistribute it and/or modify
14     it under the terms of the GNU General Public License as published by
15     the Free Software Foundation; version 2 of the License.
16 
17     This program is distributed in the hope that it will be useful,
18     but WITHOUT ANY WARRANTY; without even the implied warranty of
19     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
20     GNU General Public License for more details.
21 
22     NO WARRANTY
23     THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
24     CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
25     LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
26     MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
27     solely responsible for determining the appropriateness of using and
28     distributing the Program and assumes all risks associated with its
29     exercise of rights under this Agreement, including but not limited to
30     the risks and costs of program errors, damage to or loss of data,
31     programs or equipment, and unavailability or interruption of operations.
32 
33     DISCLAIMER OF LIABILITY
34     NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
35     DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
36     DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
37     ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
38     TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
39     USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
40     HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
41 
42     You should have received a copy of the GNU General Public License
43     along with this program; if not, write to the Free Software
44     Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
45 */
46 
47 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
48 /*
49  * Define statements used for debugging
50  */
51 //#define MPT_LAN_IO_DEBUG
52 
53 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
54 
55 #include "mptlan.h"
56 #include <linux/init.h>
57 #include <linux/module.h>
58 #include <linux/fs.h>
59 
60 #define my_VERSION	MPT_LINUX_VERSION_COMMON
61 #define MYNAM		"mptlan"
62 
63 MODULE_LICENSE("GPL");
64 MODULE_VERSION(my_VERSION);
65 
66 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
67 /*
68  * MPT LAN message sizes without variable part.
69  */
70 #define MPT_LAN_RECEIVE_POST_REQUEST_SIZE \
71 	(sizeof(LANReceivePostRequest_t) - sizeof(SGE_MPI_UNION))
72 
73 #define MPT_LAN_TRANSACTION32_SIZE \
74 	(sizeof(SGETransaction32_t) - sizeof(u32))
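
/*
 * These two fixed sizes bound how many receive buckets fit into a single
 * request frame.  A sketch of the arithmetic, mirroring the computation in
 * mpt_lan_post_receive_buckets() (the actual value depends on the IOC's
 * req_sz, so any concrete number here would be hypothetical):
 *
 *	max = (mpt_dev->req_sz - MPT_LAN_RECEIVE_POST_REQUEST_SIZE) /
 *			(MPT_LAN_TRANSACTION32_SIZE + sizeof(SGESimple64_t));
 *
 * i.e. the room left after the fixed request header, divided by the
 * per-bucket cost of one transaction context element plus one 64-bit
 * simple SGE.
 */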
75 
76 /*
77  *  Fusion MPT LAN private structures
78  */
79 
80 struct BufferControl {
81 	struct sk_buff	*skb;
82 	dma_addr_t	dma;
83 	unsigned int	len;
84 };
85 
86 struct mpt_lan_priv {
87 	MPT_ADAPTER *mpt_dev;
88 	u8 pnum; /* Port number in the IOC. This is not a Unix network port! */
89 
90 	atomic_t buckets_out;		/* number of unused buckets on IOC */
91 	int bucketthresh;		/* Send more when this many left */
92 
93 	int *mpt_txfidx; /* Free Tx Context list */
94 	int mpt_txfidx_tail;
95 	spinlock_t txfidx_lock;
96 
97 	int *mpt_rxfidx; /* Free Rx Context list */
98 	int mpt_rxfidx_tail;
99 	spinlock_t rxfidx_lock;
100 
101 	struct BufferControl *RcvCtl;	/* Receive BufferControl structs */
102 	struct BufferControl *SendCtl;	/* Send BufferControl structs */
103 
104 	int max_buckets_out;		/* Max buckets to send to IOC */
105 	int tx_max_out;			/* IOC's Tx queue len */
106 
107 	u32 total_posted;
108 	u32 total_received;
109 	struct net_device_stats stats;	/* Per device statistics */
110 
111 	struct delayed_work post_buckets_task;
112 	struct net_device *dev;
113 	unsigned long post_buckets_active;
114 };
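
/*
 * The mpt_txfidx/mpt_rxfidx arrays above are used as LIFO stacks of free
 * context numbers, guarded by txfidx_lock/rxfidx_lock.  For reference, the
 * idiom used throughout this file (existing code, shown here as a sketch):
 *
 *	ctx = priv->mpt_txfidx[priv->mpt_txfidx_tail--];	// pop
 *	priv->mpt_txfidx[++priv->mpt_txfidx_tail] = ctx;	// push
 *
 * A tail of -1 means the stack is empty: no free context is available.
 */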
115 
116 struct mpt_lan_ohdr {
117 	u16	dtype;
118 	u8	daddr[FC_ALEN];
119 	u16	stype;
120 	u8	saddr[FC_ALEN];
121 };
122 
123 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
124 
125 /*
126  *  Forward protos...
127  */
128 static int  lan_reply (MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf,
129 		       MPT_FRAME_HDR *reply);
130 static int  mpt_lan_open(struct net_device *dev);
131 static int  mpt_lan_reset(struct net_device *dev);
132 static int  mpt_lan_close(struct net_device *dev);
133 static void mpt_lan_post_receive_buckets(struct mpt_lan_priv *priv);
134 static void mpt_lan_wake_post_buckets_task(struct net_device *dev,
135 					   int priority);
136 static int  mpt_lan_receive_post_turbo(struct net_device *dev, u32 tmsg);
137 static int  mpt_lan_receive_post_reply(struct net_device *dev,
138 				       LANReceivePostReply_t *pRecvRep);
139 static int  mpt_lan_send_turbo(struct net_device *dev, u32 tmsg);
140 static int  mpt_lan_send_reply(struct net_device *dev,
141 			       LANSendReply_t *pSendRep);
142 static int  mpt_lan_ioc_reset(MPT_ADAPTER *ioc, int reset_phase);
143 static int  mpt_lan_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply);
144 static unsigned short mpt_lan_type_trans(struct sk_buff *skb,
145 					 struct net_device *dev);
146 
147 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
148 /*
149  *  Fusion MPT LAN private data
150  */
151 static u8 LanCtx = MPT_MAX_PROTOCOL_DRIVERS;
152 
153 static u32 max_buckets_out = 127;
154 static u32 tx_max_out_p = 127 - 16;
155 
156 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
157 /**
158  *	lan_reply - Handle all data sent from the hardware.
159  *	@ioc: Pointer to MPT_ADAPTER structure
160  *	@mf: Pointer to original MPT request frame (NULL if TurboReply)
161  *	@reply: Pointer to MPT reply frame
162  *
163  *	Returns 1 indicating original alloc'd request frame ptr
164  *	should be freed, or 0 if it shouldn't.
165  */
166 static int
167 lan_reply (MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *reply)
168 {
169 	struct net_device *dev = ioc->netdev;
170 	int FreeReqFrame = 0;
171 
172 	dioprintk((KERN_INFO MYNAM ": %s/%s: Got reply.\n",
173 		  IOC_AND_NETDEV_NAMES_s_s(dev)));
174 
175 //	dioprintk((KERN_INFO MYNAM "@lan_reply: mf = %p, reply = %p\n",
176 //			mf, reply));
177 
178 	if (mf == NULL) {
179 		u32 tmsg = CAST_PTR_TO_U32(reply);
180 
181 		dioprintk((KERN_INFO MYNAM ": %s/%s: @lan_reply, tmsg %08x\n",
182 				IOC_AND_NETDEV_NAMES_s_s(dev),
183 				tmsg));
184 
185 		switch (GET_LAN_FORM(tmsg)) {
186 
187 		// NOTE!  (Optimization) First case here is now caught in
188 		//  mptbase.c::mpt_interrupt() routine and callback here
189 		//  is now skipped for this case!
190 #if 0
191 		case LAN_REPLY_FORM_MESSAGE_CONTEXT:
192 //			dioprintk((KERN_INFO MYNAM "/lan_reply: "
193 //				  "MessageContext turbo reply received\n"));
194 			FreeReqFrame = 1;
195 			break;
196 #endif
197 
198 		case LAN_REPLY_FORM_SEND_SINGLE:
199 //			dioprintk((MYNAM "/lan_reply: "
200 //				  "calling mpt_lan_send_reply (turbo)\n"));
201 
202 			// Potential BUG here?
203 			//	FreeReqFrame = mpt_lan_send_turbo(dev, tmsg);
204 			//  If/when mpt_lan_send_turbo would return 1 here,
205 			//  calling routine (mptbase.c|mpt_interrupt)
206 			//  would Oops because mf has already been set
207 			//  to NULL.  So after return from this func,
208 			//  mpt_interrupt() will attempt to put (NULL) mf ptr
209 			//  item back onto its adapter FreeQ - Oops!:-(
210 			//  It's Ok, since mpt_lan_send_turbo() *currently*
211 			//  always returns 0, but..., just in case:
212 
213 			(void) mpt_lan_send_turbo(dev, tmsg);
214 			FreeReqFrame = 0;
215 
216 			break;
217 
218 		case LAN_REPLY_FORM_RECEIVE_SINGLE:
219 //			dioprintk((KERN_INFO MYNAM "@lan_reply: "
220 //				  "rcv-Turbo = %08x\n", tmsg));
221 			mpt_lan_receive_post_turbo(dev, tmsg);
222 			break;
223 
224 		default:
225 			printk (KERN_ERR MYNAM "/lan_reply: Got a turbo reply "
226 				"that I don't know what to do with\n");
227 
228 			/* FreeReqFrame stays 0: mf is NULL for turbo replies, so no frame to free. */
229 
230 			break;
231 		}
232 
233 		return FreeReqFrame;
234 	}
235 
236 //	msg = (u32 *) reply;
237 //	dioprintk((KERN_INFO MYNAM "@lan_reply: msg = %08x %08x %08x %08x\n",
238 //		  le32_to_cpu(msg[0]), le32_to_cpu(msg[1]),
239 //		  le32_to_cpu(msg[2]), le32_to_cpu(msg[3])));
240 //	dioprintk((KERN_INFO MYNAM "@lan_reply: Function = %02xh\n",
241 //		  reply->u.hdr.Function));
242 
243 	switch (reply->u.hdr.Function) {
244 
245 	case MPI_FUNCTION_LAN_SEND:
246 	{
247 		LANSendReply_t *pSendRep;
248 
249 		pSendRep = (LANSendReply_t *) reply;
250 		FreeReqFrame = mpt_lan_send_reply(dev, pSendRep);
251 		break;
252 	}
253 
254 	case MPI_FUNCTION_LAN_RECEIVE:
255 	{
256 		LANReceivePostReply_t *pRecvRep;
257 
258 		pRecvRep = (LANReceivePostReply_t *) reply;
259 		if (pRecvRep->NumberOfContexts) {
260 			mpt_lan_receive_post_reply(dev, pRecvRep);
261 			if (!(pRecvRep->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY))
262 				FreeReqFrame = 1;
263 		} else
264 			dioprintk((KERN_INFO MYNAM "@lan_reply: zero context "
265 				  "ReceivePostReply received.\n"));
266 		break;
267 	}
268 
269 	case MPI_FUNCTION_LAN_RESET:
270 		/* Just a default reply. Might want to check it to
271 		 * make sure that everything went ok.
272 		 */
273 		FreeReqFrame = 1;
274 		break;
275 
276 	case MPI_FUNCTION_EVENT_NOTIFICATION:
277 	case MPI_FUNCTION_EVENT_ACK:
278 		/*  _EVENT_NOTIFICATION should NOT come down this path any more.
279 		 *  Should be routed to mpt_lan_event_process(), but just in case...
280 		 */
281 		FreeReqFrame = 1;
282 		break;
283 
284 	default:
285 		printk (KERN_ERR MYNAM "/lan_reply: Got a non-turbo "
286 			"reply that I don't know what to do with\n");
287 
288 		/* Unknown reply; free the request frame anyway so it is not leaked. */
289 		FreeReqFrame = 1;
290 
291 		break;
292 	}
293 
294 	return FreeReqFrame;
295 }
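
/*
 * To restate the dispatch contract above: turbo replies arrive with
 * mf == NULL and carry form + context packed into the 32-bit reply word,
 * while full replies are dispatched on reply->u.hdr.Function.  The return
 * value tells mptbase whether the original request frame may go back onto
 * its free queue (1) or is still in use (0).
 */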
296 
297 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
298 static int
299 mpt_lan_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
300 {
301 	struct net_device *dev = ioc->netdev;
302 	struct mpt_lan_priv *priv;
303 
304 	if (dev == NULL)
305 		return 1;
306 
307 	priv = netdev_priv(dev);
308 
309 	dlprintk((KERN_INFO MYNAM ": IOC %s_reset routed to LAN driver!\n",
310 			reset_phase==MPT_IOC_SETUP_RESET ? "setup" : (
311 			reset_phase==MPT_IOC_PRE_RESET ? "pre" : "post")));
312 
313 	if (priv->mpt_rxfidx == NULL)
314 		return 1;
315 
316 	if (reset_phase == MPT_IOC_SETUP_RESET) {
317 		;
318 	} else if (reset_phase == MPT_IOC_PRE_RESET) {
319 		int i;
320 		unsigned long flags;
321 
322 		netif_stop_queue(dev);
323 
324 		dlprintk ((KERN_INFO "mptlan/ioc_reset: called netif_stop_queue for %s.\n", dev->name));
325 
326 		atomic_set(&priv->buckets_out, 0);
327 
328 		/* Reset Rx Free Tail index and re-populate the queue. */
329 		spin_lock_irqsave(&priv->rxfidx_lock, flags);
330 		priv->mpt_rxfidx_tail = -1;
331 		for (i = 0; i < priv->max_buckets_out; i++)
332 			priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = i;
333 		spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
334 	} else {
335 		mpt_lan_post_receive_buckets(priv);
336 		netif_wake_queue(dev);
337 	}
338 
339 	return 1;
340 }
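
/*
 * Reset-phase sequence handled above, for reference: MPT_IOC_SETUP_RESET is
 * a no-op; MPT_IOC_PRE_RESET stops the Tx queue, zeroes buckets_out and
 * reclaims every Rx context; the post-reset phase re-posts the receive
 * buckets and wakes the queue again.
 */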
341 
342 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
343 static int
344 mpt_lan_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
345 {
346 	dlprintk((KERN_INFO MYNAM ": MPT event routed to LAN driver!\n"));
347 
348 	switch (le32_to_cpu(pEvReply->Event)) {
349 	case MPI_EVENT_NONE:				/* 00 */
350 	case MPI_EVENT_LOG_DATA:			/* 01 */
351 	case MPI_EVENT_STATE_CHANGE:			/* 02 */
352 	case MPI_EVENT_UNIT_ATTENTION:			/* 03 */
353 	case MPI_EVENT_IOC_BUS_RESET:			/* 04 */
354 	case MPI_EVENT_EXT_BUS_RESET:			/* 05 */
355 	case MPI_EVENT_RESCAN:				/* 06 */
356 		/* Ok, do we need to do anything here? As far as
357 		   I can tell, this is when a new device gets added
358 		   to the loop. */
359 	case MPI_EVENT_LINK_STATUS_CHANGE:		/* 07 */
360 	case MPI_EVENT_LOOP_STATE_CHANGE:		/* 08 */
361 	case MPI_EVENT_LOGOUT:				/* 09 */
362 	case MPI_EVENT_EVENT_CHANGE:			/* 0A */
363 	default:
364 		break;
365 	}
366 
367 	/*
368 	 *  NOTE: pEvent->AckRequired handling now done in mptbase.c;
369 	 *  Do NOT do it here now!
370 	 */
371 
372 	return 1;
373 }
374 
375 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
376 static int
377 mpt_lan_open(struct net_device *dev)
378 {
379 	struct mpt_lan_priv *priv = netdev_priv(dev);
380 	int i;
381 
382 	if (mpt_lan_reset(dev) != 0) {
383 		MPT_ADAPTER *mpt_dev = priv->mpt_dev;
384 
385 		printk (KERN_WARNING MYNAM "/lan_open: lan_reset failed.");
386 
387 		if (mpt_dev->active)
388 			printk ("The ioc is active. Perhaps it needs to be"
389 				" reset?\n");
390 		else
391 			printk ("The ioc is inactive, most likely in the "
392 				"process of being reset. Please try again in "
393 				"a moment.\n");
394 	}
395 
396 	priv->mpt_txfidx = kmalloc(priv->tx_max_out * sizeof(int), GFP_KERNEL);
397 	if (priv->mpt_txfidx == NULL)
398 		goto out;
399 	priv->mpt_txfidx_tail = -1;
400 
401 	priv->SendCtl = kcalloc(priv->tx_max_out, sizeof(struct BufferControl),
402 				GFP_KERNEL);
403 	if (priv->SendCtl == NULL)
404 		goto out_mpt_txfidx;
405 	for (i = 0; i < priv->tx_max_out; i++)
406 		priv->mpt_txfidx[++priv->mpt_txfidx_tail] = i;
407 
408 	dlprintk((KERN_INFO MYNAM "@lo: Finished initializing SendCtl\n"));
409 
410 	priv->mpt_rxfidx = kmalloc(priv->max_buckets_out * sizeof(int),
411 				   GFP_KERNEL);
412 	if (priv->mpt_rxfidx == NULL)
413 		goto out_SendCtl;
414 	priv->mpt_rxfidx_tail = -1;
415 
416 	priv->RcvCtl = kcalloc(priv->max_buckets_out,
417 			       sizeof(struct BufferControl),
418 			       GFP_KERNEL);
419 	if (priv->RcvCtl == NULL)
420 		goto out_mpt_rxfidx;
421 	for (i = 0; i < priv->max_buckets_out; i++)
422 		priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = i;
423 
424 /**/	dlprintk((KERN_INFO MYNAM "/lo: txfidx contains - "));
425 /**/	for (i = 0; i < priv->tx_max_out; i++)
426 /**/		dlprintk((" %xh", priv->mpt_txfidx[i]));
427 /**/	dlprintk(("\n"));
428 
429 	dlprintk((KERN_INFO MYNAM "/lo: Finished initializing RcvCtl\n"));
430 
431 	mpt_lan_post_receive_buckets(priv);
432 	printk(KERN_INFO MYNAM ": %s/%s: interface up & active\n",
433 			IOC_AND_NETDEV_NAMES_s_s(dev));
434 
435 	if (mpt_event_register(LanCtx, mpt_lan_event_process) != 0) {
436 		printk (KERN_WARNING MYNAM "/lo: Unable to register for Event"
437 			" Notifications. This is a bad thing! We're going "
438 			"to go ahead anyway, but I'd be leery of system "
439 			"stability at this point.\n");
440 	}
441 
442 	netif_start_queue(dev);
443 	dlprintk((KERN_INFO MYNAM "/lo: Done.\n"));
444 
445 	return 0;
446 out_mpt_rxfidx:
447 	kfree(priv->mpt_rxfidx);
448 	priv->mpt_rxfidx = NULL;
449 out_SendCtl:
450 	kfree(priv->SendCtl);
451 	priv->SendCtl = NULL;
452 out_mpt_txfidx:
453 	kfree(priv->mpt_txfidx);
454 	priv->mpt_txfidx = NULL;
455 out:	return -ENOMEM;
456 }
457 
458 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
459 /* Send a LanReset message to the FW. This should result in the FW returning
460    any buckets it still has. */
461 static int
462 mpt_lan_reset(struct net_device *dev)
463 {
464 	MPT_FRAME_HDR *mf;
465 	LANResetRequest_t *pResetReq;
466 	struct mpt_lan_priv *priv = netdev_priv(dev);
467 
468 	mf = mpt_get_msg_frame(LanCtx, priv->mpt_dev);
469 
470 	if (mf == NULL) {
471 /*		dlprintk((KERN_ERR MYNAM "/reset: Evil funkiness abounds! "
472 		"Unable to allocate a request frame.\n"));
473 */
474 		return -1;
475 	}
476 
477 	pResetReq = (LANResetRequest_t *) mf;
478 
479 	pResetReq->Function	= MPI_FUNCTION_LAN_RESET;
480 	pResetReq->ChainOffset	= 0;
481 	pResetReq->Reserved	= 0;
482 	pResetReq->PortNumber	= priv->pnum;
483 	pResetReq->MsgFlags	= 0;
484 	pResetReq->Reserved2	= 0;
485 
486 	mpt_put_msg_frame(LanCtx, priv->mpt_dev, mf);
487 
488 	return 0;
489 }
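
/*
 * Note that mpt_lan_reset() is fire-and-forget: the FW's completion arrives
 * later via lan_reply() as an MPI_FUNCTION_LAN_RESET reply, and any buckets
 * the FW was still holding come back as ReceivePostReply messages with
 * IOCStatus MPI_IOCSTATUS_LAN_CANCELED, which mpt_lan_receive_post_free()
 * releases.
 */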
490 
491 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
492 static int
493 mpt_lan_close(struct net_device *dev)
494 {
495 	struct mpt_lan_priv *priv = netdev_priv(dev);
496 	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
497 	unsigned long timeout;
498 	int i;
499 
500 	dlprintk((KERN_INFO MYNAM ": mpt_lan_close called\n"));
501 
502 	mpt_event_deregister(LanCtx);
503 
504 	dlprintk((KERN_INFO MYNAM ":lan_close: Posted %d buckets "
505 		  "since driver was loaded, %d still out\n",
506 		  priv->total_posted,atomic_read(&priv->buckets_out)));
507 
508 	netif_stop_queue(dev);
509 
510 	mpt_lan_reset(dev);
511 
512 	timeout = jiffies + 2 * HZ;
513 	while (atomic_read(&priv->buckets_out) && time_before(jiffies, timeout))
514 		schedule_timeout_interruptible(1);
515 
516 	for (i = 0; i < priv->max_buckets_out; i++) {
517 		if (priv->RcvCtl[i].skb != NULL) {
518 /**/			dlprintk((KERN_INFO MYNAM "/lan_close: bucket %05x "
519 /**/				  "is still out\n", i));
520 			pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[i].dma,
521 					 priv->RcvCtl[i].len,
522 					 PCI_DMA_FROMDEVICE);
523 			dev_kfree_skb(priv->RcvCtl[i].skb);
524 		}
525 	}
526 
527 	kfree(priv->RcvCtl);
528 	kfree(priv->mpt_rxfidx);
529 
530 	for (i = 0; i < priv->tx_max_out; i++) {
531 		if (priv->SendCtl[i].skb != NULL) {
532 			pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[i].dma,
533 					 priv->SendCtl[i].len,
534 					 PCI_DMA_TODEVICE);
535 			dev_kfree_skb(priv->SendCtl[i].skb);
536 		}
537 	}
538 
539 	kfree(priv->SendCtl);
540 	kfree(priv->mpt_txfidx);
541 
542 	atomic_set(&priv->buckets_out, 0);
543 
544 	printk(KERN_INFO MYNAM ": %s/%s: interface down & inactive\n",
545 			IOC_AND_NETDEV_NAMES_s_s(dev));
546 
547 	return 0;
548 }
549 
550 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
551 static struct net_device_stats *
552 mpt_lan_get_stats(struct net_device *dev)
553 {
554 	struct mpt_lan_priv *priv = netdev_priv(dev);
555 
556 	return &priv->stats;
557 }
558 
559 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
560 static int
561 mpt_lan_change_mtu(struct net_device *dev, int new_mtu)
562 {
563 	if ((new_mtu < MPT_LAN_MIN_MTU) || (new_mtu > MPT_LAN_MAX_MTU))
564 		return -EINVAL;
565 	dev->mtu = new_mtu;
566 	return 0;
567 }
568 
569 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
570 /* Tx timeout handler. */
571 static void
572 mpt_lan_tx_timeout(struct net_device *dev)
573 {
574 	struct mpt_lan_priv *priv = netdev_priv(dev);
575 	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
576 
577 	if (mpt_dev->active) {
578 		dlprintk (("mptlan/tx_timeout: calling netif_wake_queue for %s.\n", dev->name));
579 		netif_wake_queue(dev);
580 	}
581 }
582 
583 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
584 //static inline int
585 static int
586 mpt_lan_send_turbo(struct net_device *dev, u32 tmsg)
587 {
588 	struct mpt_lan_priv *priv = netdev_priv(dev);
589 	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
590 	struct sk_buff *sent;
591 	unsigned long flags;
592 	u32 ctx;
593 
594 	ctx = GET_LAN_BUFFER_CONTEXT(tmsg);
595 	sent = priv->SendCtl[ctx].skb;
596 
597 	priv->stats.tx_packets++;
598 	priv->stats.tx_bytes += sent->len;
599 
600 	dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, skb %p sent.\n",
601 			IOC_AND_NETDEV_NAMES_s_s(dev),
602 			__func__, sent));
603 
604 	priv->SendCtl[ctx].skb = NULL;
605 	pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[ctx].dma,
606 			 priv->SendCtl[ctx].len, PCI_DMA_TODEVICE);
607 	dev_kfree_skb_irq(sent);
608 
609 	spin_lock_irqsave(&priv->txfidx_lock, flags);
610 	priv->mpt_txfidx[++priv->mpt_txfidx_tail] = ctx;
611 	spin_unlock_irqrestore(&priv->txfidx_lock, flags);
612 
613 	netif_wake_queue(dev);
614 	return 0;
615 }
616 
617 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
618 static int
619 mpt_lan_send_reply(struct net_device *dev, LANSendReply_t *pSendRep)
620 {
621 	struct mpt_lan_priv *priv = netdev_priv(dev);
622 	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
623 	struct sk_buff *sent;
624 	unsigned long flags;
625 	int FreeReqFrame = 0;
626 	u32 *pContext;
627 	u32 ctx;
628 	u8 count;
629 
630 	count = pSendRep->NumberOfContexts;
631 
632 	dioprintk((KERN_INFO MYNAM ": send_reply: IOCStatus: %04x\n",
633 		 le16_to_cpu(pSendRep->IOCStatus)));
634 
635 	/* Add check for Loginfo Flag in IOCStatus */
636 
637 	switch (le16_to_cpu(pSendRep->IOCStatus) & MPI_IOCSTATUS_MASK) {
638 	case MPI_IOCSTATUS_SUCCESS:
639 		priv->stats.tx_packets += count;
640 		break;
641 
642 	case MPI_IOCSTATUS_LAN_CANCELED:
643 	case MPI_IOCSTATUS_LAN_TRANSMIT_ABORTED:
644 		break;
645 
646 	case MPI_IOCSTATUS_INVALID_SGL:
647 		priv->stats.tx_errors += count;
648 		printk (KERN_ERR MYNAM ": %s/%s: ERROR - Invalid SGL sent to IOC!\n",
649 				IOC_AND_NETDEV_NAMES_s_s(dev));
650 		goto out;
651 
652 	default:
653 		priv->stats.tx_errors += count;
654 		break;
655 	}
656 
657 	pContext = &pSendRep->BufferContext;
658 
659 	spin_lock_irqsave(&priv->txfidx_lock, flags);
660 	while (count > 0) {
661 		ctx = GET_LAN_BUFFER_CONTEXT(le32_to_cpu(*pContext));
662 
663 		sent = priv->SendCtl[ctx].skb;
664 		priv->stats.tx_bytes += sent->len;
665 
666 		dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, skb %p sent.\n",
667 				IOC_AND_NETDEV_NAMES_s_s(dev),
668 				__func__, sent));
669 
670 		priv->SendCtl[ctx].skb = NULL;
671 		pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[ctx].dma,
672 				 priv->SendCtl[ctx].len, PCI_DMA_TODEVICE);
673 		dev_kfree_skb_irq(sent);
674 
675 		priv->mpt_txfidx[++priv->mpt_txfidx_tail] = ctx;
676 
677 		pContext++;
678 		count--;
679 	}
680 	spin_unlock_irqrestore(&priv->txfidx_lock, flags);
681 
682 out:
683 	if (!(pSendRep->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY))
684 		FreeReqFrame = 1;
685 
686 	netif_wake_queue(dev);
687 	return FreeReqFrame;
688 }
689 
690 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
691 static int
692 mpt_lan_sdu_send (struct sk_buff *skb, struct net_device *dev)
693 {
694 	struct mpt_lan_priv *priv = netdev_priv(dev);
695 	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
696 	MPT_FRAME_HDR *mf;
697 	LANSendRequest_t *pSendReq;
698 	SGETransaction32_t *pTrans;
699 	SGESimple64_t *pSimple;
700 	const unsigned char *mac;
701 	dma_addr_t dma;
702 	unsigned long flags;
703 	int ctx;
704 	u16 cur_naa = 0x1000;
705 
706 	dioprintk((KERN_INFO MYNAM ": %s called, skb_addr = %p\n",
707 			__func__, skb));
708 
709 	spin_lock_irqsave(&priv->txfidx_lock, flags);
710 	if (priv->mpt_txfidx_tail < 0) {
711 		netif_stop_queue(dev);
712 		spin_unlock_irqrestore(&priv->txfidx_lock, flags);
713 
714 		printk (KERN_ERR "%s: no tx context available: %d\n",
715 			__func__, priv->mpt_txfidx_tail);
716 		return 1;
717 	}
718 
719 	mf = mpt_get_msg_frame(LanCtx, mpt_dev);
720 	if (mf == NULL) {
721 		netif_stop_queue(dev);
722 		spin_unlock_irqrestore(&priv->txfidx_lock, flags);
723 
724 		printk (KERN_ERR "%s: Unable to alloc request frame\n",
725 			__func__);
726 		return 1;
727 	}
728 
729 	ctx = priv->mpt_txfidx[priv->mpt_txfidx_tail--];
730 	spin_unlock_irqrestore(&priv->txfidx_lock, flags);
731 
732 //	dioprintk((KERN_INFO MYNAM ": %s/%s: Creating new msg frame (send).\n",
733 //			IOC_AND_NETDEV_NAMES_s_s(dev)));
734 
735 	pSendReq = (LANSendRequest_t *) mf;
736 
737 	/* Set the mac.raw pointer, since this apparently isn't getting
738 	 * done before we get the skb. Pull the data pointer past the mac data.
739 	 */
740 	skb_reset_mac_header(skb);
741 	skb_pull(skb, 12);
742 
743 	dma = pci_map_single(mpt_dev->pcidev, skb->data, skb->len,
744 			     PCI_DMA_TODEVICE);
745 
746 	priv->SendCtl[ctx].skb = skb;
747 	priv->SendCtl[ctx].dma = dma;
748 	priv->SendCtl[ctx].len = skb->len;
749 
750 	/* Message Header */
751 	pSendReq->Reserved    = 0;
752 	pSendReq->Function    = MPI_FUNCTION_LAN_SEND;
753 	pSendReq->ChainOffset = 0;
754 	pSendReq->Reserved2   = 0;
755 	pSendReq->MsgFlags    = 0;
756 	pSendReq->PortNumber  = priv->pnum;
757 
758 	/* Transaction Context Element */
759 	pTrans = (SGETransaction32_t *) pSendReq->SG_List;
760 
761 	/* No Flags, 8 bytes of Details, 32bit Context (bloody turbo replies) */
762 	pTrans->ContextSize   = sizeof(u32);
763 	pTrans->DetailsLength = 2 * sizeof(u32);
764 	pTrans->Flags         = 0;
765 	pTrans->TransactionContext[0] = cpu_to_le32(ctx);
766 
767 //	dioprintk((KERN_INFO MYNAM ": %s/%s: BC = %08x, skb = %p, buff = %p\n",
768 //			IOC_AND_NETDEV_NAMES_s_s(dev),
769 //			ctx, skb, skb->data));
770 
771 	mac = skb_mac_header(skb);
772 
773 	pTrans->TransactionDetails[0] = cpu_to_le32((cur_naa         << 16) |
774 						    (mac[0] <<  8) |
775 						    (mac[1] <<  0));
776 	pTrans->TransactionDetails[1] = cpu_to_le32((mac[2] << 24) |
777 						    (mac[3] << 16) |
778 						    (mac[4] <<  8) |
779 						    (mac[5] <<  0));
780 
781 	pSimple = (SGESimple64_t *) &pTrans->TransactionDetails[2];
782 
783 	/* If we ever decide to send more than one Simple SGE per LANSend, then
784 	   we will need to make sure that LAST_ELEMENT only gets set on the
785 	   last one. Otherwise, bad voodoo and evil funkiness will commence. */
786 	pSimple->FlagsLength = cpu_to_le32(
787 			((MPI_SGE_FLAGS_LAST_ELEMENT |
788 			  MPI_SGE_FLAGS_END_OF_BUFFER |
789 			  MPI_SGE_FLAGS_SIMPLE_ELEMENT |
790 			  MPI_SGE_FLAGS_SYSTEM_ADDRESS |
791 			  MPI_SGE_FLAGS_HOST_TO_IOC |
792 			  MPI_SGE_FLAGS_64_BIT_ADDRESSING |
793 			  MPI_SGE_FLAGS_END_OF_LIST) << MPI_SGE_FLAGS_SHIFT) |
794 			skb->len);
795 	pSimple->Address.Low = cpu_to_le32((u32) dma);
796 	if (sizeof(dma_addr_t) > sizeof(u32))
797 		pSimple->Address.High = cpu_to_le32((u32) ((u64) dma >> 32));
798 	else
799 		pSimple->Address.High = 0;
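
	/*
	 * The same high/low split is repeated in
	 * mpt_lan_post_receive_buckets().  A hypothetical helper (not part
	 * of this driver; assumes the kernel's lower_32_bits()/
	 * upper_32_bits() helpers are available) could centralize it:
	 *
	 *	static inline void mptlan_sge_set_addr(SGESimple64_t *sge,
	 *					       dma_addr_t dma)
	 *	{
	 *		sge->Address.Low  = cpu_to_le32(lower_32_bits(dma));
	 *		sge->Address.High = cpu_to_le32(upper_32_bits(dma));
	 *	}
	 */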
800 
801 	mpt_put_msg_frame (LanCtx, mpt_dev, mf);
802 	dev->trans_start = jiffies;
803 
804 	dioprintk((KERN_INFO MYNAM ": %s/%s: Sending packet. FlagsLength = %08x.\n",
805 			IOC_AND_NETDEV_NAMES_s_s(dev),
806 			le32_to_cpu(pSimple->FlagsLength)));
807 
808 	return 0;
809 }
810 
811 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
812 /*
813  * @priority: 0 = put it on the timer queue, 1 = put it on the immediate queue
814  */
815 static void
816 mpt_lan_wake_post_buckets_task(struct net_device *dev, int priority)
817 {
818 	struct mpt_lan_priv *priv = netdev_priv(dev);
819 
820 	if (test_and_set_bit(0, &priv->post_buckets_active) == 0) {
821 		if (priority) {
822 			schedule_delayed_work(&priv->post_buckets_task, 0);
823 		} else {
824 			schedule_delayed_work(&priv->post_buckets_task, 1);
825 			dioprintk((KERN_INFO MYNAM ": post_buckets queued on "
826 				   "timer.\n"));
827 		}
828 		dioprintk((KERN_INFO MYNAM ": %s/%s: Queued post_buckets task.\n",
829 			   IOC_AND_NETDEV_NAMES_s_s(dev) ));
830 	}
831 }
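
/*
 * Bit 0 of post_buckets_active is a "work already queued" latch: armed by
 * the test_and_set_bit() above, cleared by clear_bit() at the end of
 * mpt_lan_post_receive_buckets(), so at most one post_buckets_task is
 * pending at any time.
 */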
832 
833 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
834 static int
835 mpt_lan_receive_skb(struct net_device *dev, struct sk_buff *skb)
836 {
837 	struct mpt_lan_priv *priv = netdev_priv(dev);
838 
839 	skb->protocol = mpt_lan_type_trans(skb, dev);
840 
841 	dioprintk((KERN_INFO MYNAM ": %s/%s: Incoming packet (%d bytes) "
842 		 "delivered to upper level.\n",
843 			IOC_AND_NETDEV_NAMES_s_s(dev), skb->len));
844 
845 	priv->stats.rx_bytes += skb->len;
846 	priv->stats.rx_packets++;
847 
848 	skb->dev = dev;
849 	netif_rx(skb);
850 
851 	dioprintk((MYNAM "/receive_skb: %d buckets remaining\n",
852 		 atomic_read(&priv->buckets_out)));
853 
854 	if (atomic_read(&priv->buckets_out) < priv->bucketthresh)
855 		mpt_lan_wake_post_buckets_task(dev, 1);
856 
857 	dioprintk((KERN_INFO MYNAM "/receive_post_reply: %d buckets "
858 		  "remaining, %d received back since sod\n",
859 		  atomic_read(&priv->buckets_out), priv->total_received));
860 
861 	return 0;
862 }
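
/*
 * Replenishment policy, for reference: bucketthresh is set to two thirds of
 * max_buckets_out in mpt_register_lan_device(), so the post_buckets task is
 * kicked as soon as more than a third of the posted buckets have been
 * consumed.
 */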
863 
864 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
865 //static inline int
866 static int
867 mpt_lan_receive_post_turbo(struct net_device *dev, u32 tmsg)
868 {
869 	struct mpt_lan_priv *priv = netdev_priv(dev);
870 	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
871 	struct sk_buff *skb, *old_skb;
872 	unsigned long flags;
873 	u32 ctx, len;
874 
875 	ctx = GET_LAN_BUCKET_CONTEXT(tmsg);
876 	skb = priv->RcvCtl[ctx].skb;
877 
878 	len = GET_LAN_PACKET_LENGTH(tmsg);
879 
880 	if (len < MPT_LAN_RX_COPYBREAK) {
881 		old_skb = skb;
882 
883 		skb = dev_alloc_skb(len);
884 		if (!skb) {
885 			printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! (%s@%d)\n",
886 					IOC_AND_NETDEV_NAMES_s_s(dev),
887 					__FILE__, __LINE__);
888 			return -ENOMEM;
889 		}
890 
891 		pci_dma_sync_single_for_cpu(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
892 					    priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
893 
894 		skb_copy_from_linear_data(old_skb, skb_put(skb, len), len);
895 
896 		pci_dma_sync_single_for_device(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
897 					       priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
898 		goto out;
899 	}
900 
901 	skb_put(skb, len);
902 
903 	priv->RcvCtl[ctx].skb = NULL;
904 
905 	pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
906 			 priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
907 
908 out:
909 	spin_lock_irqsave(&priv->rxfidx_lock, flags);
910 	priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
911 	spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
912 
913 	atomic_dec(&priv->buckets_out);
914 	priv->total_received++;
915 
916 	return mpt_lan_receive_skb(dev, skb);
917 }
918 
919 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
920 static int
921 mpt_lan_receive_post_free(struct net_device *dev,
922 			  LANReceivePostReply_t *pRecvRep)
923 {
924 	struct mpt_lan_priv *priv = netdev_priv(dev);
925 	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
926 	unsigned long flags;
927 	struct sk_buff *skb;
928 	u32 ctx;
929 	int count;
930 	int i;
931 
932 	count = pRecvRep->NumberOfContexts;
933 
934 /**/	dlprintk((KERN_INFO MYNAM "/receive_post_reply: "
935 		  "IOC returned %d buckets, freeing them...\n", count));
936 
937 	spin_lock_irqsave(&priv->rxfidx_lock, flags);
938 	for (i = 0; i < count; i++) {
939 		ctx = le32_to_cpu(pRecvRep->BucketContext[i]);
940 
941 		skb = priv->RcvCtl[ctx].skb;
942 
943 //		dlprintk((KERN_INFO MYNAM ": %s: dev_name = %s\n",
944 //				IOC_AND_NETDEV_NAMES_s_s(dev)));
945 //		dlprintk((KERN_INFO MYNAM "@rpr[2], priv = %p, buckets_out addr = %p",
946 //				priv, &(priv->buckets_out)));
947 //		dlprintk((KERN_INFO MYNAM "@rpr[2] TC + 3\n"));
948 
949 		priv->RcvCtl[ctx].skb = NULL;
950 		pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
951 				 priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
952 		dev_kfree_skb_any(skb);
953 
954 		priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
955 	}
956 	spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
957 
958 	atomic_sub(count, &priv->buckets_out);
959 
960 //	for (i = 0; i < priv->max_buckets_out; i++)
961 //		if (priv->RcvCtl[i].skb != NULL)
962 //			dlprintk((KERN_INFO MYNAM "@rpr: bucket %03x "
963 //				  "is still out\n", i));
964 
965 /*	dlprintk((KERN_INFO MYNAM "/receive_post_reply: freed %d buckets\n",
966 		  count));
967 */
968 /**/	dlprintk((KERN_INFO MYNAM "@receive_post_reply: %d buckets "
969 /**/		  "remaining, %d received back since sod.\n",
970 /**/		  atomic_read(&priv->buckets_out), priv->total_received));
971 	return 0;
972 }
973 
974 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
975 static int
976 mpt_lan_receive_post_reply(struct net_device *dev,
977 			   LANReceivePostReply_t *pRecvRep)
978 {
979 	struct mpt_lan_priv *priv = netdev_priv(dev);
980 	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
981 	struct sk_buff *skb, *old_skb;
982 	unsigned long flags;
983 	u32 len, ctx, offset;
984 	u32 remaining = le32_to_cpu(pRecvRep->BucketsRemaining);
985 	int count;
986 	int i, l;
987 
988 	dioprintk((KERN_INFO MYNAM ": mpt_lan_receive_post_reply called\n"));
989 	dioprintk((KERN_INFO MYNAM ": receive_post_reply: IOCStatus: %04x\n",
990 		 le16_to_cpu(pRecvRep->IOCStatus)));
991 
992 	if ((le16_to_cpu(pRecvRep->IOCStatus) & MPI_IOCSTATUS_MASK) ==
993 						MPI_IOCSTATUS_LAN_CANCELED)
994 		return mpt_lan_receive_post_free(dev, pRecvRep);
995 
996 	len = le32_to_cpu(pRecvRep->PacketLength);
997 	if (len == 0) {
998 		printk (KERN_ERR MYNAM ": %s/%s: ERROR - Got a non-TURBO "
999 			"ReceivePostReply w/ PacketLength zero!\n",
1000 				IOC_AND_NETDEV_NAMES_s_s(dev));
1001 		printk (KERN_ERR MYNAM ": MsgFlags = %02x, IOCStatus = %04x\n",
1002 				pRecvRep->MsgFlags, le16_to_cpu(pRecvRep->IOCStatus));
1003 		return -1;
1004 	}
1005 
1006 	ctx    = le32_to_cpu(pRecvRep->BucketContext[0]);
1007 	count  = pRecvRep->NumberOfContexts;
1008 	skb    = priv->RcvCtl[ctx].skb;
1009 
1010 	offset = le32_to_cpu(pRecvRep->PacketOffset);
1011 //	if (offset != 0) {
1012 //		printk (KERN_INFO MYNAM ": %s/%s: Got a ReceivePostReply "
1013 //			"w/ PacketOffset %u\n",
1014 //				IOC_AND_NETDEV_NAMES_s_s(dev),
1015 //				offset);
1016 //	}
1017 
1018 	dioprintk((KERN_INFO MYNAM ": %s/%s: @rpr, offset = %d, len = %d\n",
1019 			IOC_AND_NETDEV_NAMES_s_s(dev),
1020 			offset, len));
1021 
1022 	if (count > 1) {
1023 		int szrem = len;
1024 
1025 //		dioprintk((KERN_INFO MYNAM ": %s/%s: Multiple buckets returned "
1026 //			"for single packet, concatenating...\n",
1027 //				IOC_AND_NETDEV_NAMES_s_s(dev)));
1028 
1029 		skb = dev_alloc_skb(len);
1030 		if (!skb) {
1031 			printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! (%s@%d)\n",
1032 					IOC_AND_NETDEV_NAMES_s_s(dev),
1033 					__FILE__, __LINE__);
1034 			return -ENOMEM;
1035 		}
1036 
1037 		spin_lock_irqsave(&priv->rxfidx_lock, flags);
1038 		for (i = 0; i < count; i++) {
1039 
1040 			ctx = le32_to_cpu(pRecvRep->BucketContext[i]);
1041 			old_skb = priv->RcvCtl[ctx].skb;
1042 
1043 			l = priv->RcvCtl[ctx].len;
1044 			if (szrem < l)
1045 				l = szrem;
1046 
1047 //			dioprintk((KERN_INFO MYNAM ": %s/%s: Buckets = %d, len = %u\n",
1048 //					IOC_AND_NETDEV_NAMES_s_s(dev),
1049 //					i, l));
1050 
1051 			pci_dma_sync_single_for_cpu(mpt_dev->pcidev,
1052 						    priv->RcvCtl[ctx].dma,
1053 						    priv->RcvCtl[ctx].len,
1054 						    PCI_DMA_FROMDEVICE);
1055 			skb_copy_from_linear_data(old_skb, skb_put(skb, l), l);
1056 
1057 			pci_dma_sync_single_for_device(mpt_dev->pcidev,
1058 						       priv->RcvCtl[ctx].dma,
1059 						       priv->RcvCtl[ctx].len,
1060 						       PCI_DMA_FROMDEVICE);
1061 
1062 			priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
1063 			szrem -= l;
1064 		}
1065 		spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
1066 
1067 	} else if (len < MPT_LAN_RX_COPYBREAK) {
1068 
1069 		old_skb = skb;
1070 
1071 		skb = dev_alloc_skb(len);
1072 		if (!skb) {
1073 			printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! (%s@%d)\n",
1074 					IOC_AND_NETDEV_NAMES_s_s(dev),
1075 					__FILE__, __LINE__);
1076 			return -ENOMEM;
1077 		}
1078 
1079 		pci_dma_sync_single_for_cpu(mpt_dev->pcidev,
1080 					    priv->RcvCtl[ctx].dma,
1081 					    priv->RcvCtl[ctx].len,
1082 					    PCI_DMA_FROMDEVICE);
1083 
1084 		skb_copy_from_linear_data(old_skb, skb_put(skb, len), len);
1085 
1086 		pci_dma_sync_single_for_device(mpt_dev->pcidev,
1087 					       priv->RcvCtl[ctx].dma,
1088 					       priv->RcvCtl[ctx].len,
1089 					       PCI_DMA_FROMDEVICE);
1090 
1091 		spin_lock_irqsave(&priv->rxfidx_lock, flags);
1092 		priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
1093 		spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
1094 
1095 	} else {
1096 		spin_lock_irqsave(&priv->rxfidx_lock, flags);
1097 
1098 		priv->RcvCtl[ctx].skb = NULL;
1099 
1100 		pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
1101 				 priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
1102 		priv->RcvCtl[ctx].dma = 0;
1103 
1104 		priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
1105 		spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
1106 
1107 		skb_put(skb, len);
1108 	}
1109 
1110 	atomic_sub(count, &priv->buckets_out);
1111 	priv->total_received += count;
1112 
1113 	if (priv->mpt_rxfidx_tail >= MPT_LAN_MAX_BUCKETS_OUT) {
1114 		printk (KERN_ERR MYNAM ": %s/%s: Yoohoo! mpt_rxfidx_tail = %d, "
1115 			"MPT_LAN_MAX_BUCKETS_OUT = %d\n",
1116 				IOC_AND_NETDEV_NAMES_s_s(dev),
1117 				priv->mpt_rxfidx_tail,
1118 				MPT_LAN_MAX_BUCKETS_OUT);
1119 
1120 		return -1;
1121 	}
1122 
1123 	if (remaining == 0)
1124 		printk (KERN_WARNING MYNAM ": %s/%s: WARNING - IOC out of buckets! "
1125 			"(priv->buckets_out = %d)\n",
1126 			IOC_AND_NETDEV_NAMES_s_s(dev),
1127 			atomic_read(&priv->buckets_out));
1128 	else if (remaining < 10)
1129 		printk (KERN_INFO MYNAM ": %s/%s: IOC says %d buckets left. "
1130 			"(priv->buckets_out = %d)\n",
1131 			IOC_AND_NETDEV_NAMES_s_s(dev),
1132 			remaining, atomic_read(&priv->buckets_out));
1133 
1134 	if ((remaining < priv->bucketthresh) &&
1135 	    ((atomic_read(&priv->buckets_out) - remaining) >
1136 	     MPT_LAN_BUCKETS_REMAIN_MISMATCH_THRESH)) {
1137 
1138 		printk (KERN_WARNING MYNAM ": Mismatch between driver's "
1139 			"buckets_out count and fw's BucketsRemaining "
1140 			"count has crossed the threshold, issuing a "
1141 			"LanReset to clear the fw's hashtable. You may "
1142 			"want to check your /var/log/messages for \"CRC "
1143 			"error\" event notifications.\n");
1144 
1145 		mpt_lan_reset(dev);
1146 		mpt_lan_wake_post_buckets_task(dev, 0);
1147 	}
1148 
1149 	return mpt_lan_receive_skb(dev, skb);
1150 }
1151 
1152 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1153 /* Simple SGE's only at the moment */
1154 
1155 static void
1156 mpt_lan_post_receive_buckets(struct mpt_lan_priv *priv)
1157 {
1158 	struct net_device *dev = priv->dev;
1159 	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
1160 	MPT_FRAME_HDR *mf;
1161 	LANReceivePostRequest_t *pRecvReq;
1162 	SGETransaction32_t *pTrans;
1163 	SGESimple64_t *pSimple;
1164 	struct sk_buff *skb;
1165 	dma_addr_t dma;
1166 	u32 curr, buckets, count, max;
1167 	u32 len = (dev->mtu + dev->hard_header_len + 4);
1168 	unsigned long flags;
1169 	int i;
1170 
1171 	curr = atomic_read(&priv->buckets_out);
1172 	buckets = (priv->max_buckets_out - curr);
1173 
1174 	dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, Start_buckets = %u, buckets_out = %u\n",
1175 			IOC_AND_NETDEV_NAMES_s_s(dev),
1176 			__func__, buckets, curr));
1177 
1178 	max = (mpt_dev->req_sz - MPT_LAN_RECEIVE_POST_REQUEST_SIZE) /
1179 			(MPT_LAN_TRANSACTION32_SIZE + sizeof(SGESimple64_t));
1180 
1181 	while (buckets) {
1182 		mf = mpt_get_msg_frame(LanCtx, mpt_dev);
1183 		if (mf == NULL) {
1184 			printk (KERN_ERR "%s: Unable to alloc request frame\n",
1185 				__func__);
1186 			dioprintk((KERN_ERR "%s: %u buckets remaining\n",
1187 				 __func__, buckets));
1188 			goto out;
1189 		}
1190 		pRecvReq = (LANReceivePostRequest_t *) mf;
1191 
1192 		i = le16_to_cpu(mf->u.frame.hwhdr.msgctxu.fld.req_idx);
1193 		mpt_dev->RequestNB[i] = 0;
1194 		count = buckets;
1195 		if (count > max)
1196 			count = max;
1197 
1198 		pRecvReq->Function    = MPI_FUNCTION_LAN_RECEIVE;
1199 		pRecvReq->ChainOffset = 0;
1200 		pRecvReq->MsgFlags    = 0;
1201 		pRecvReq->PortNumber  = priv->pnum;
1202 
1203 		pTrans = (SGETransaction32_t *) pRecvReq->SG_List;
1204 		pSimple = NULL;
1205 
1206 		for (i = 0; i < count; i++) {
1207 			int ctx;
1208 
1209 			spin_lock_irqsave(&priv->rxfidx_lock, flags);
1210 			if (priv->mpt_rxfidx_tail < 0) {
1211 				printk (KERN_ERR "%s: Can't alloc context\n",
1212 					__func__);
1213 				spin_unlock_irqrestore(&priv->rxfidx_lock,
1214 						       flags);
1215 				break;
1216 			}
1217 
1218 			ctx = priv->mpt_rxfidx[priv->mpt_rxfidx_tail--];
1219 
1220 			skb = priv->RcvCtl[ctx].skb;
1221 			if (skb && (priv->RcvCtl[ctx].len != len)) {
1222 				pci_unmap_single(mpt_dev->pcidev,
1223 						 priv->RcvCtl[ctx].dma,
1224 						 priv->RcvCtl[ctx].len,
1225 						 PCI_DMA_FROMDEVICE);
1226 				dev_kfree_skb(priv->RcvCtl[ctx].skb);
1227 				skb = priv->RcvCtl[ctx].skb = NULL;
1228 			}
1229 
1230 			if (skb == NULL) {
1231 				skb = dev_alloc_skb(len);
1232 				if (skb == NULL) {
1233 					printk (KERN_WARNING
1234 						MYNAM "/%s: Can't alloc skb\n",
1235 						__func__);
1236 					priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
1237 					spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
1238 					break;
1239 				}
1240 
1241 				dma = pci_map_single(mpt_dev->pcidev, skb->data,
1242 						     len, PCI_DMA_FROMDEVICE);
1243 
1244 				priv->RcvCtl[ctx].skb = skb;
1245 				priv->RcvCtl[ctx].dma = dma;
1246 				priv->RcvCtl[ctx].len = len;
1247 			}
1248 
1249 			spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
1250 
1251 			pTrans->ContextSize   = sizeof(u32);
1252 			pTrans->DetailsLength = 0;
1253 			pTrans->Flags         = 0;
1254 			pTrans->TransactionContext[0] = cpu_to_le32(ctx);
1255 
1256 			pSimple = (SGESimple64_t *) pTrans->TransactionDetails;
1257 
1258 			pSimple->FlagsLength = cpu_to_le32(
1259 				((MPI_SGE_FLAGS_END_OF_BUFFER |
1260 				  MPI_SGE_FLAGS_SIMPLE_ELEMENT |
1261 				  MPI_SGE_FLAGS_64_BIT_ADDRESSING) << MPI_SGE_FLAGS_SHIFT) | len);
1262 			pSimple->Address.Low = cpu_to_le32((u32) priv->RcvCtl[ctx].dma);
1263 			if (sizeof(dma_addr_t) > sizeof(u32))
1264 				pSimple->Address.High = cpu_to_le32((u32) ((u64) priv->RcvCtl[ctx].dma >> 32));
1265 			else
1266 				pSimple->Address.High = 0;
1267 
1268 			pTrans = (SGETransaction32_t *) (pSimple + 1);
1269 		}
1270 
1271 		if (pSimple == NULL) {
1272 /**/			printk (KERN_WARNING MYNAM "/%s: No buckets posted\n",
1273 /**/				__func__);
1274 			mpt_free_msg_frame(mpt_dev, mf);
1275 			goto out;
1276 		}
1277 
1278 		pSimple->FlagsLength |= cpu_to_le32(MPI_SGE_FLAGS_END_OF_LIST << MPI_SGE_FLAGS_SHIFT);
1279 
1280 		pRecvReq->BucketCount = cpu_to_le32(i);
1281 
1282 /*	printk(KERN_INFO MYNAM ": posting buckets\n   ");
1283  *	for (i = 0; i < j + 2; i ++)
1284  *	    printk (" %08x", le32_to_cpu(msg[i]));
1285  *	printk ("\n");
1286  */
1287 
1288 		mpt_put_msg_frame(LanCtx, mpt_dev, mf);
1289 
1290 		priv->total_posted += i;
1291 		buckets -= i;
1292 		atomic_add(i, &priv->buckets_out);
1293 	}
1294 
1295 out:
1296 	dioprintk((KERN_INFO MYNAM "/%s: End_buckets = %u, priv->buckets_out = %u\n",
1297 		  __func__, buckets, atomic_read(&priv->buckets_out)));
1298 	dioprintk((KERN_INFO MYNAM "/%s: Posted %u buckets and received %u back\n",
1299 		  __func__, priv->total_posted, priv->total_received));
1300 
1301 	clear_bit(0, &priv->post_buckets_active);
1302 }
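
/*
 * Shape of the request frame built above (sketch; one frame posts up to
 * 'max' buckets and only the final SGE carries END_OF_LIST):
 *
 *	LANReceivePostRequest_t header	(BucketCount = i)
 *	  SGETransaction32_t { TransactionContext[0] = ctx0 }
 *	  SGESimple64_t      { FlagsLength, Address = RcvCtl[ctx0].dma }
 *	  SGETransaction32_t { TransactionContext[0] = ctx1 }
 *	  SGESimple64_t      { ... }
 *	  ...
 */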
1303 
1304 static void
1305 mpt_lan_post_receive_buckets_work(struct work_struct *work)
1306 {
1307 	mpt_lan_post_receive_buckets(container_of(work, struct mpt_lan_priv,
1308 						  post_buckets_task.work));
1309 }
1310 
1311 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1312 static struct net_device *
1313 mpt_register_lan_device (MPT_ADAPTER *mpt_dev, int pnum)
1314 {
1315 	struct net_device *dev;
1316 	struct mpt_lan_priv *priv;
1317 	u8 HWaddr[FC_ALEN], *a;
1318 
1319 	dev = alloc_fcdev(sizeof(struct mpt_lan_priv));
1320 	if (!dev)
1321 		return NULL;
1322 
1323 	dev->mtu = MPT_LAN_MTU;
1324 
1325 	priv = netdev_priv(dev);
1326 
1327 	priv->dev = dev;
1328 	priv->mpt_dev = mpt_dev;
1329 	priv->pnum = pnum;
1330 
1331 	INIT_DELAYED_WORK(&priv->post_buckets_task,
1332 			  mpt_lan_post_receive_buckets_work);
1333 	priv->post_buckets_active = 0;
1334 
1335 	dlprintk((KERN_INFO MYNAM "@%d: bucketlen = %d\n",
1336 			__LINE__, dev->mtu + dev->hard_header_len + 4));
1337 
1338 	atomic_set(&priv->buckets_out, 0);
1339 	priv->total_posted = 0;
1340 	priv->total_received = 0;
1341 	priv->max_buckets_out = max_buckets_out;
1342 	if (mpt_dev->pfacts[0].MaxLanBuckets < max_buckets_out)
1343 		priv->max_buckets_out = mpt_dev->pfacts[0].MaxLanBuckets;
1344 
1345 	dlprintk((KERN_INFO MYNAM "@%d: MaxLanBuckets=%d, max_buckets_out/priv=%d/%d\n",
1346 			__LINE__,
1347 			mpt_dev->pfacts[0].MaxLanBuckets,
1348 			max_buckets_out,
1349 			priv->max_buckets_out));
1350 
1351 	priv->bucketthresh = priv->max_buckets_out * 2 / 3;
1352 	spin_lock_init(&priv->txfidx_lock);
1353 	spin_lock_init(&priv->rxfidx_lock);
1354 
1355 	/*  Grab pre-fetched LANPage1 stuff. :-) */
1356 	a = (u8 *) &mpt_dev->lan_cnfg_page1.HardwareAddressLow;
1357 
1358 	HWaddr[0] = a[5];
1359 	HWaddr[1] = a[4];
1360 	HWaddr[2] = a[3];
1361 	HWaddr[3] = a[2];
1362 	HWaddr[4] = a[1];
1363 	HWaddr[5] = a[0];
1364 
1365 	dev->addr_len = FC_ALEN;
1366 	memcpy(dev->dev_addr, HWaddr, FC_ALEN);
1367 	memset(dev->broadcast, 0xff, FC_ALEN);
1368 
1369 	/* The Tx queue is 127 deep on the 909.
1370 	 * Give ourselves some breathing room.
1371 	 */
1372 	priv->tx_max_out = (tx_max_out_p <= MPT_TX_MAX_OUT_LIM) ?
1373 			    tx_max_out_p : MPT_TX_MAX_OUT_LIM;
1374 
1375 	dev->open = mpt_lan_open;
1376 	dev->stop = mpt_lan_close;
1377 	dev->get_stats = mpt_lan_get_stats;
1378 	dev->set_multicast_list = NULL;
1379 	dev->change_mtu = mpt_lan_change_mtu;
1380 	dev->hard_start_xmit = mpt_lan_sdu_send;
1381 
1382 /* Not in 2.3.42. Need 2.3.45+ */
1383 	dev->tx_timeout = mpt_lan_tx_timeout;
1384 	dev->watchdog_timeo = MPT_LAN_TX_TIMEOUT;
1385 
1386 	dlprintk((KERN_INFO MYNAM ": Finished registering dev "
1387 		"and setting initial values\n"));
1388 
1389 	if (register_netdev(dev) != 0) {
1390 		free_netdev(dev);
1391 		dev = NULL;
1392 	}
1393 	return dev;
1394 }
1395 
1396 static int
1397 mptlan_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1398 {
1399 	MPT_ADAPTER 		*ioc = pci_get_drvdata(pdev);
1400 	struct net_device	*dev;
1401 	int			i;
1402 
1403 	for (i = 0; i < ioc->facts.NumberOfPorts; i++) {
1404 		printk(KERN_INFO MYNAM ": %s: PortNum=%x, "
1405 		       "ProtocolFlags=%02Xh (%c%c%c%c)\n",
1406 		       ioc->name, ioc->pfacts[i].PortNumber,
1407 		       ioc->pfacts[i].ProtocolFlags,
1408 		       MPT_PROTOCOL_FLAGS_c_c_c_c(
1409 			       ioc->pfacts[i].ProtocolFlags));
1410 
1411 		if (!(ioc->pfacts[i].ProtocolFlags &
1412 					MPI_PORTFACTS_PROTOCOL_LAN)) {
1413 			printk(KERN_INFO MYNAM ": %s: Hmmm... LAN protocol "
1414 			       "seems to be disabled on this adapter port!\n",
1415 			       ioc->name);
1416 			continue;
1417 		}
1418 
1419 		dev = mpt_register_lan_device(ioc, i);
1420 		if (!dev) {
1421 			printk(KERN_ERR MYNAM ": %s: Unable to register "
1422 			       "port%d as a LAN device\n", ioc->name,
1423 			       ioc->pfacts[i].PortNumber);
1424 			continue;
1425 		}
1426 
1427 		printk(KERN_INFO MYNAM ": %s: Fusion MPT LAN device "
1428 		       "registered as '%s'\n", ioc->name, dev->name);
1429 		printk(KERN_INFO MYNAM ": %s/%s: "
1430 		       "LanAddr = %02X:%02X:%02X:%02X:%02X:%02X\n",
1431 		       IOC_AND_NETDEV_NAMES_s_s(dev),
1432 		       dev->dev_addr[0], dev->dev_addr[1],
1433 		       dev->dev_addr[2], dev->dev_addr[3],
1434 		       dev->dev_addr[4], dev->dev_addr[5]);
1435 
1436 		ioc->netdev = dev;
1437 
1438 		return 0;
1439 	}
1440 
1441 	return -ENODEV;
1442 }
1443 
1444 static void
1445 mptlan_remove(struct pci_dev *pdev)
1446 {
1447 	MPT_ADAPTER 		*ioc = pci_get_drvdata(pdev);
1448 	struct net_device	*dev = ioc->netdev;
1449 
1450 	if (dev != NULL) {
1451 		unregister_netdev(dev);
1452 		free_netdev(dev);
1453 	}
1454 }
1455 
1456 static struct mpt_pci_driver mptlan_driver = {
1457 	.probe		= mptlan_probe,
1458 	.remove		= mptlan_remove,
1459 };
1460 
1461 static int __init mpt_lan_init (void)
1462 {
1463 	show_mptmod_ver(LANAME, LANVER);
1464 
1465 	if ((LanCtx = mpt_register(lan_reply, MPTLAN_DRIVER)) <= 0) {
1466 		printk (KERN_ERR MYNAM ": Failed to register with MPT base driver\n");
1467 		return -EBUSY;
1468 	}
1469 
1470 	dlprintk((KERN_INFO MYNAM ": assigned context of %d\n", LanCtx));
1471 
1472 	if (mpt_reset_register(LanCtx, mpt_lan_ioc_reset)) {
1473 		printk(KERN_ERR MYNAM ": Eieee! unable to register a reset "
1474 		       "handler with mptbase! The world is at an end! "
1475 		       "Everything is fading to black! Goodbye.\n");
1476 		return -EBUSY;
1477 	}
1478 
1479 	dlprintk((KERN_INFO MYNAM ": Registered for IOC reset notifications\n"));
1480 
1481 	mpt_device_driver_register(&mptlan_driver, MPTLAN_DRIVER);
1482 	return 0;
1483 }
1484 
1485 static void __exit mpt_lan_exit(void)
1486 {
1487 	mpt_device_driver_deregister(MPTLAN_DRIVER);
1488 	mpt_reset_deregister(LanCtx);
1489 
1490 	if (LanCtx) {
1491 		mpt_deregister(LanCtx);
1492 		LanCtx = MPT_MAX_PROTOCOL_DRIVERS;
1493 	}
1494 }
1495 
1496 module_init(mpt_lan_init);
1497 module_exit(mpt_lan_exit);
1498 
1499 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1500 static unsigned short
1501 mpt_lan_type_trans(struct sk_buff *skb, struct net_device *dev)
1502 {
1503 	struct mpt_lan_ohdr *fch = (struct mpt_lan_ohdr *)skb->data;
1504 	struct fcllc *fcllc;
1505 
1506 	skb_reset_mac_header(skb);
1507 	skb_pull(skb, sizeof(struct mpt_lan_ohdr));
1508 
1509 	if (fch->dtype == htons(0xffff)) {
1510 		u32 *p = (u32 *) fch;
1511 
1512 		swab32s(p + 0);
1513 		swab32s(p + 1);
1514 		swab32s(p + 2);
1515 		swab32s(p + 3);
1516 
1517 		printk (KERN_WARNING MYNAM ": %s: WARNING - Broadcast swap F/W bug detected!\n",
1518 				NETDEV_PTR_TO_IOC_NAME_s(dev));
1519 		printk (KERN_WARNING MYNAM ": Please update sender @ MAC_addr = %02x:%02x:%02x:%02x:%02x:%02x\n",
1520 				fch->saddr[0], fch->saddr[1], fch->saddr[2],
1521 				fch->saddr[3], fch->saddr[4], fch->saddr[5]);
1522 	}
1523 
1524 	if (*fch->daddr & 1) {
1525 		if (!memcmp(fch->daddr, dev->broadcast, FC_ALEN)) {
1526 			skb->pkt_type = PACKET_BROADCAST;
1527 		} else {
1528 			skb->pkt_type = PACKET_MULTICAST;
1529 		}
1530 	} else {
1531 		if (memcmp(fch->daddr, dev->dev_addr, FC_ALEN)) {
1532 			skb->pkt_type = PACKET_OTHERHOST;
1533 		} else {
1534 			skb->pkt_type = PACKET_HOST;
1535 		}
1536 	}
1537 
1538 	fcllc = (struct fcllc *)skb->data;
1539 
1541 	/* Strip the SNAP header from ARP packets since we don't
1542 	 * pass them through to the 802.2/SNAP layers.
1543 	 */
1544 	if (fcllc->dsap == EXTENDED_SAP &&
1545 		(fcllc->ethertype == htons(ETH_P_IP) ||
1546 		 fcllc->ethertype == htons(ETH_P_ARP))) {
1547 		skb_pull(skb, sizeof(struct fcllc));
1548 		return fcllc->ethertype;
1549 	}
1550 
1551 	return htons(ETH_P_802_2);
1552 }
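
/*
 * Worked example of the parsing above: an inbound frame begins with the
 * 16-byte mpt_lan_ohdr (pulled first), followed by an LLC/SNAP header
 * (struct fcllc).  For dsap == EXTENDED_SAP with ethertype ETH_P_IP or
 * ETH_P_ARP the SNAP header is pulled as well and its ethertype returned,
 * so IP/ARP skbs reach the stack with both headers stripped; anything else
 * is handed up as ETH_P_802_2.
 */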
1553 
1554 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1555