xref: /linux/drivers/message/fusion/mptlan.c (revision 32a92f8c89326985e05dce8b22d3f0aa07a3e1bd)
1 /*
2  *  linux/drivers/message/fusion/mptlan.c
3  *      IP Over Fibre Channel device driver.
4  *      For use with LSI Fibre Channel PCI chip/adapters
5  *      running LSI Fusion MPT (Message Passing Technology) firmware.
6  *
7  *  Copyright (c) 2000-2008 LSI Corporation
8  *  (mailto:DL-MPTFusionLinux@lsi.com)
9  *
10  */
11 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
12 /*
13     This program is free software; you can redistribute it and/or modify
14     it under the terms of the GNU General Public License as published by
15     the Free Software Foundation; version 2 of the License.
16 
17     This program is distributed in the hope that it will be useful,
18     but WITHOUT ANY WARRANTY; without even the implied warranty of
19     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
20     GNU General Public License for more details.
21 
22     NO WARRANTY
23     THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
24     CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
25     LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
26     MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
27     solely responsible for determining the appropriateness of using and
28     distributing the Program and assumes all risks associated with its
29     exercise of rights under this Agreement, including but not limited to
30     the risks and costs of program errors, damage to or loss of data,
31     programs or equipment, and unavailability or interruption of operations.
32 
33     DISCLAIMER OF LIABILITY
34     NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
35     DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
36     DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
37     ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
38     TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
39     USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
40     HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
41 
42     You should have received a copy of the GNU General Public License
43     along with this program; if not, write to the Free Software
44     Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
45 */
46 
47 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
48 /*
49  * Define statements used for debugging
50  */
51 //#define MPT_LAN_IO_DEBUG
52 
53 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
54 
55 #include "mptlan.h"
56 #include <linux/init.h>
57 #include <linux/module.h>
58 #include <linux/fs.h>
59 #include <linux/sched.h>
60 #include <linux/slab.h>
61 
62 #define my_VERSION	MPT_LINUX_VERSION_COMMON
63 #define MYNAM		"mptlan"
64 
65 MODULE_LICENSE("GPL");
66 MODULE_VERSION(my_VERSION);
67 
68 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
69 /*
70  * MPT LAN message sizes without variable part.
71  */
72 #define MPT_LAN_RECEIVE_POST_REQUEST_SIZE \
73 	(sizeof(LANReceivePostRequest_t) - sizeof(SGE_MPI_UNION))
74 
75 /*
76  *  Fusion MPT LAN private structures
77  */
78 
/* Per-slot bookkeeping for one Tx or Rx buffer handed to the IOC. */
struct BufferControl {
	struct sk_buff	*skb;	/* buffer owned by this slot (NULL when idle) */
	dma_addr_t	dma;	/* DMA-mapped address of the skb data */
	unsigned int	len;	/* length of the DMA mapping in bytes */
};
84 
/* Per-netdevice private state for one MPT LAN port. */
struct mpt_lan_priv {
	MPT_ADAPTER *mpt_dev;		/* owning Fusion MPT adapter */
	u8 pnum; /* Port number in the IOC. This is not a Unix network port! */

	atomic_t buckets_out;		/* number of unused buckets on IOC */
	int bucketthresh;		/* Send more when this many left */

	int *mpt_txfidx; /* Free Tx Context list */
	int mpt_txfidx_tail;		/* top-of-stack index into mpt_txfidx (-1 = empty) */
	spinlock_t txfidx_lock;		/* protects mpt_txfidx / mpt_txfidx_tail */

	int *mpt_rxfidx; /* Free Rx Context list */
	int mpt_rxfidx_tail;		/* top-of-stack index into mpt_rxfidx (-1 = empty) */
	spinlock_t rxfidx_lock;		/* protects mpt_rxfidx / mpt_rxfidx_tail */

	struct BufferControl *RcvCtl;	/* Receive BufferControl structs */
	struct BufferControl *SendCtl;	/* Send BufferControl structs */

	int max_buckets_out;		/* Max buckets to send to IOC */
	int tx_max_out;			/* IOC's Tx queue len */

	u32 total_posted;		/* buckets posted since load (statistics) */
	u32 total_received;		/* buckets returned since load (statistics) */

	struct delayed_work post_buckets_task;	/* deferred bucket reposting work */
	struct net_device *dev;		/* back-pointer to our net device */
	unsigned long post_buckets_active;	/* bit 0 set while the task is queued */
};
113 
/* Outgoing LAN header: type fields plus FC_ALEN-byte destination and
 * source addresses (FC address length — see mptlan.h for FC_ALEN). */
struct mpt_lan_ohdr {
	u16	dtype;			/* destination type field */
	u8	daddr[FC_ALEN];		/* destination address */
	u16	stype;			/* source type field */
	u8	saddr[FC_ALEN];		/* source address */
};
120 
121 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
122 
123 /*
124  *  Forward protos...
125  */
126 static int  lan_reply (MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf,
127 		       MPT_FRAME_HDR *reply);
128 static int  mpt_lan_open(struct net_device *dev);
129 static int  mpt_lan_reset(struct net_device *dev);
130 static int  mpt_lan_close(struct net_device *dev);
131 static void mpt_lan_post_receive_buckets(struct mpt_lan_priv *priv);
132 static void mpt_lan_wake_post_buckets_task(struct net_device *dev,
133 					   int priority);
134 static int  mpt_lan_receive_post_turbo(struct net_device *dev, u32 tmsg);
135 static int  mpt_lan_receive_post_reply(struct net_device *dev,
136 				       LANReceivePostReply_t *pRecvRep);
137 static int  mpt_lan_send_turbo(struct net_device *dev, u32 tmsg);
138 static int  mpt_lan_send_reply(struct net_device *dev,
139 			       LANSendReply_t *pSendRep);
140 static int  mpt_lan_ioc_reset(MPT_ADAPTER *ioc, int reset_phase);
141 static int  mpt_lan_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply);
142 static unsigned short mpt_lan_type_trans(struct sk_buff *skb,
143 					 struct net_device *dev);
144 
145 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
146 /*
147  *  Fusion MPT LAN private data
148  */
/* Protocol-driver context id used with mptbase calls (mpt_get_msg_frame,
 * mpt_event_register, ...).  MPT_MAX_PROTOCOL_DRIVERS means "not yet
 * registered"; presumably assigned at driver registration — the
 * registration code is not visible in this chunk. */
static u8 LanCtx = MPT_MAX_PROTOCOL_DRIVERS;

static u32 max_buckets_out = 127;	/* default cap on receive buckets posted to the IOC */
static u32 tx_max_out_p = 127 - 16;	/* default IOC Tx queue depth */
153 
154 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
155 /**
156  *	lan_reply - Handle all data sent from the hardware.
157  *	@ioc: Pointer to MPT_ADAPTER structure
158  *	@mf: Pointer to original MPT request frame (NULL if TurboReply)
159  *	@reply: Pointer to MPT reply frame
160  *
161  *	Returns 1 indicating original alloc'd request frame ptr
162  *	should be freed, or 0 if it shouldn't.
163  */
static int
lan_reply (MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *reply)
{
	struct net_device *dev = ioc->netdev;
	int FreeReqFrame = 0;

	dioprintk((KERN_INFO MYNAM ": %s/%s: Got reply.\n",
		  IOC_AND_NETDEV_NAMES_s_s(dev)));

//	dioprintk((KERN_INFO MYNAM "@lan_reply: mf = %p, reply = %p\n",
//			mf, reply));

	if (mf == NULL) {
		/* Turbo reply: there is no request frame and the "reply"
		 * pointer actually carries a 32-bit context token. */
		u32 tmsg = CAST_PTR_TO_U32(reply);

		dioprintk((KERN_INFO MYNAM ": %s/%s: @lan_reply, tmsg %08x\n",
				IOC_AND_NETDEV_NAMES_s_s(dev),
				tmsg));

		switch (GET_LAN_FORM(tmsg)) {

		// NOTE!  (Optimization) First case here is now caught in
		//  mptbase.c::mpt_interrupt() routine and callback here
		//  is now skipped for this case!
#if 0
		case LAN_REPLY_FORM_MESSAGE_CONTEXT:
//			dioprintk((KERN_INFO MYNAM "/lan_reply: "
//				  "MessageContext turbo reply received\n"));
			FreeReqFrame = 1;
			break;
#endif

		case LAN_REPLY_FORM_SEND_SINGLE:
//			dioprintk((MYNAM "/lan_reply: "
//				  "calling mpt_lan_send_reply (turbo)\n"));

			// Potential BUG here?
			//	FreeReqFrame = mpt_lan_send_turbo(dev, tmsg);
			//  If/when mpt_lan_send_turbo would return 1 here,
			//  calling routine (mptbase.c|mpt_interrupt)
			//  would Oops because mf has already been set
			//  to NULL.  So after return from this func,
			//  mpt_interrupt() will attempt to put (NULL) mf ptr
			//  item back onto its adapter FreeQ - Oops!:-(
			//  It's Ok, since mpt_lan_send_turbo() *currently*
			//  always returns 0, but..., just in case:

			(void) mpt_lan_send_turbo(dev, tmsg);
			FreeReqFrame = 0;

			break;

		case LAN_REPLY_FORM_RECEIVE_SINGLE:
//			dioprintk((KERN_INFO MYNAM "@lan_reply: "
//				  "rcv-Turbo = %08x\n", tmsg));
			mpt_lan_receive_post_turbo(dev, tmsg);
			break;

		default:
			printk (KERN_ERR MYNAM "/lan_reply: Got a turbo reply "
				"that I don't know what to do with\n");

			/* CHECKME!  Hmmm...  FreeReqFrame is 0 here; is that right? */

			break;
		}

		return FreeReqFrame;
	}

//	msg = (u32 *) reply;
//	dioprintk((KERN_INFO MYNAM "@lan_reply: msg = %08x %08x %08x %08x\n",
//		  le32_to_cpu(msg[0]), le32_to_cpu(msg[1]),
//		  le32_to_cpu(msg[2]), le32_to_cpu(msg[3])));
//	dioprintk((KERN_INFO MYNAM "@lan_reply: Function = %02xh\n",
//		  reply->u.hdr.Function));

	/* Full (non-turbo) reply frame: dispatch on the MPI function code. */
	switch (reply->u.hdr.Function) {

	case MPI_FUNCTION_LAN_SEND:
	{
		LANSendReply_t *pSendRep;

		pSendRep = (LANSendReply_t *) reply;
		FreeReqFrame = mpt_lan_send_reply(dev, pSendRep);
		break;
	}

	case MPI_FUNCTION_LAN_RECEIVE:
	{
		LANReceivePostReply_t *pRecvRep;

		pRecvRep = (LANReceivePostReply_t *) reply;
		if (pRecvRep->NumberOfContexts) {
			mpt_lan_receive_post_reply(dev, pRecvRep);
			/* Only free the request once the final reply of a
			 * continuation series has arrived. */
			if (!(pRecvRep->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY))
				FreeReqFrame = 1;
		} else
			dioprintk((KERN_INFO MYNAM "@lan_reply: zero context "
				  "ReceivePostReply received.\n"));
		break;
	}

	case MPI_FUNCTION_LAN_RESET:
		/* Just a default reply. Might want to check it to
		 * make sure that everything went ok.
		 */
		FreeReqFrame = 1;
		break;

	case MPI_FUNCTION_EVENT_NOTIFICATION:
	case MPI_FUNCTION_EVENT_ACK:
		/*  _EVENT_NOTIFICATION should NOT come down this path any more.
		 *  Should be routed to mpt_lan_event_process(), but just in case...
		 */
		FreeReqFrame = 1;
		break;

	default:
		printk (KERN_ERR MYNAM "/lan_reply: Got a non-turbo "
			"reply that I don't know what to do with\n");

		/* Unknown function code: free the request frame anyway so
		 * it is not leaked. */
		FreeReqFrame = 1;

		break;
	}

	return FreeReqFrame;
}
294 
295 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
296 static int
mpt_lan_ioc_reset(MPT_ADAPTER * ioc,int reset_phase)297 mpt_lan_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
298 {
299 	struct net_device *dev = ioc->netdev;
300 	struct mpt_lan_priv *priv;
301 
302 	if (dev == NULL)
303 		return(1);
304 	else
305 		priv = netdev_priv(dev);
306 
307 	dlprintk((KERN_INFO MYNAM ": IOC %s_reset routed to LAN driver!\n",
308 			reset_phase==MPT_IOC_SETUP_RESET ? "setup" : (
309 			reset_phase==MPT_IOC_PRE_RESET ? "pre" : "post")));
310 
311 	if (priv->mpt_rxfidx == NULL)
312 		return (1);
313 
314 	if (reset_phase == MPT_IOC_SETUP_RESET) {
315 		;
316 	} else if (reset_phase == MPT_IOC_PRE_RESET) {
317 		int i;
318 		unsigned long flags;
319 
320 		netif_stop_queue(dev);
321 
322 		dlprintk ((KERN_INFO "mptlan/ioc_reset: called netif_stop_queue for %s.\n", dev->name));
323 
324 		atomic_set(&priv->buckets_out, 0);
325 
326 		/* Reset Rx Free Tail index and re-populate the queue. */
327 		spin_lock_irqsave(&priv->rxfidx_lock, flags);
328 		priv->mpt_rxfidx_tail = -1;
329 		for (i = 0; i < priv->max_buckets_out; i++)
330 			priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = i;
331 		spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
332 	} else {
333 		mpt_lan_post_receive_buckets(priv);
334 		netif_wake_queue(dev);
335 	}
336 
337 	return 1;
338 }
339 
340 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
341 static int
mpt_lan_event_process(MPT_ADAPTER * ioc,EventNotificationReply_t * pEvReply)342 mpt_lan_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
343 {
344 	dlprintk((KERN_INFO MYNAM ": MPT event routed to LAN driver!\n"));
345 
346 	switch (le32_to_cpu(pEvReply->Event)) {
347 	case MPI_EVENT_NONE:				/* 00 */
348 	case MPI_EVENT_LOG_DATA:			/* 01 */
349 	case MPI_EVENT_STATE_CHANGE:			/* 02 */
350 	case MPI_EVENT_UNIT_ATTENTION:			/* 03 */
351 	case MPI_EVENT_IOC_BUS_RESET:			/* 04 */
352 	case MPI_EVENT_EXT_BUS_RESET:			/* 05 */
353 	case MPI_EVENT_RESCAN:				/* 06 */
354 		/* Ok, do we need to do anything here? As far as
355 		   I can tell, this is when a new device gets added
356 		   to the loop. */
357 	case MPI_EVENT_LINK_STATUS_CHANGE:		/* 07 */
358 	case MPI_EVENT_LOOP_STATE_CHANGE:		/* 08 */
359 	case MPI_EVENT_LOGOUT:				/* 09 */
360 	case MPI_EVENT_EVENT_CHANGE:			/* 0A */
361 	default:
362 		break;
363 	}
364 
365 	/*
366 	 *  NOTE: pEvent->AckRequired handling now done in mptbase.c;
367 	 *  Do NOT do it here now!
368 	 */
369 
370 	return 1;
371 }
372 
373 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
374 static int
mpt_lan_open(struct net_device * dev)375 mpt_lan_open(struct net_device *dev)
376 {
377 	struct mpt_lan_priv *priv = netdev_priv(dev);
378 	int i;
379 
380 	if (mpt_lan_reset(dev) != 0) {
381 		MPT_ADAPTER *mpt_dev = priv->mpt_dev;
382 
383 		printk (KERN_WARNING MYNAM "/lan_open: lan_reset failed.");
384 
385 		if (mpt_dev->active)
386 			printk ("The ioc is active. Perhaps it needs to be"
387 				" reset?\n");
388 		else
389 			printk ("The ioc in inactive, most likely in the "
390 				"process of being reset. Please try again in "
391 				"a moment.\n");
392 	}
393 
394 	priv->mpt_txfidx = kmalloc_objs(int, priv->tx_max_out);
395 	if (priv->mpt_txfidx == NULL)
396 		goto out;
397 	priv->mpt_txfidx_tail = -1;
398 
399 	priv->SendCtl = kzalloc_objs(struct BufferControl, priv->tx_max_out);
400 	if (priv->SendCtl == NULL)
401 		goto out_mpt_txfidx;
402 	for (i = 0; i < priv->tx_max_out; i++)
403 		priv->mpt_txfidx[++priv->mpt_txfidx_tail] = i;
404 
405 	dlprintk((KERN_INFO MYNAM "@lo: Finished initializing SendCtl\n"));
406 
407 	priv->mpt_rxfidx = kmalloc_objs(int, priv->max_buckets_out);
408 	if (priv->mpt_rxfidx == NULL)
409 		goto out_SendCtl;
410 	priv->mpt_rxfidx_tail = -1;
411 
412 	priv->RcvCtl = kzalloc_objs(struct BufferControl, priv->max_buckets_out);
413 	if (priv->RcvCtl == NULL)
414 		goto out_mpt_rxfidx;
415 	for (i = 0; i < priv->max_buckets_out; i++)
416 		priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = i;
417 
418 /**/	dlprintk((KERN_INFO MYNAM "/lo: txfidx contains - "));
419 /**/	for (i = 0; i < priv->tx_max_out; i++)
420 /**/		dlprintk((" %xh", priv->mpt_txfidx[i]));
421 /**/	dlprintk(("\n"));
422 
423 	dlprintk((KERN_INFO MYNAM "/lo: Finished initializing RcvCtl\n"));
424 
425 	mpt_lan_post_receive_buckets(priv);
426 	printk(KERN_INFO MYNAM ": %s/%s: interface up & active\n",
427 			IOC_AND_NETDEV_NAMES_s_s(dev));
428 
429 	if (mpt_event_register(LanCtx, mpt_lan_event_process) != 0) {
430 		printk (KERN_WARNING MYNAM "/lo: Unable to register for Event"
431 			" Notifications. This is a bad thing! We're not going "
432 			"to go ahead, but I'd be leery of system stability at "
433 			"this point.\n");
434 	}
435 
436 	netif_start_queue(dev);
437 	dlprintk((KERN_INFO MYNAM "/lo: Done.\n"));
438 
439 	return 0;
440 out_mpt_rxfidx:
441 	kfree(priv->mpt_rxfidx);
442 	priv->mpt_rxfidx = NULL;
443 out_SendCtl:
444 	kfree(priv->SendCtl);
445 	priv->SendCtl = NULL;
446 out_mpt_txfidx:
447 	kfree(priv->mpt_txfidx);
448 	priv->mpt_txfidx = NULL;
449 out:	return -ENOMEM;
450 }
451 
452 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
453 /* Send a LanReset message to the FW. This should result in the FW returning
454    any buckets it still has. */
455 static int
mpt_lan_reset(struct net_device * dev)456 mpt_lan_reset(struct net_device *dev)
457 {
458 	MPT_FRAME_HDR *mf;
459 	LANResetRequest_t *pResetReq;
460 	struct mpt_lan_priv *priv = netdev_priv(dev);
461 
462 	mf = mpt_get_msg_frame(LanCtx, priv->mpt_dev);
463 
464 	if (mf == NULL) {
465 /*		dlprintk((KERN_ERR MYNAM "/reset: Evil funkiness abounds! "
466 		"Unable to allocate a request frame.\n"));
467 */
468 		return -1;
469 	}
470 
471 	pResetReq = (LANResetRequest_t *) mf;
472 
473 	pResetReq->Function	= MPI_FUNCTION_LAN_RESET;
474 	pResetReq->ChainOffset	= 0;
475 	pResetReq->Reserved	= 0;
476 	pResetReq->PortNumber	= priv->pnum;
477 	pResetReq->MsgFlags	= 0;
478 	pResetReq->Reserved2	= 0;
479 
480 	mpt_put_msg_frame(LanCtx, priv->mpt_dev, mf);
481 
482 	return 0;
483 }
484 
485 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* net_device close() handler: deregister events, stop the queue, ask the
 * FW to return all outstanding buckets, wait briefly for them, then unmap
 * and free every remaining Rx/Tx buffer plus the bookkeeping arrays.
 * Always returns 0. */
static int
mpt_lan_close(struct net_device *dev)
{
	struct mpt_lan_priv *priv = netdev_priv(dev);
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
	unsigned long timeout;
	int i;

	dlprintk((KERN_INFO MYNAM ": mpt_lan_close called\n"));

	mpt_event_deregister(LanCtx);

	dlprintk((KERN_INFO MYNAM ":lan_close: Posted %d buckets "
		  "since driver was loaded, %d still out\n",
		  priv->total_posted,atomic_read(&priv->buckets_out)));

	netif_stop_queue(dev);

	mpt_lan_reset(dev);

	/* Give the firmware up to ~2 seconds to hand back posted buckets. */
	timeout = jiffies + 2 * HZ;
	while (atomic_read(&priv->buckets_out) && time_before(jiffies, timeout))
		schedule_timeout_interruptible(1);

	/* Reclaim any Rx buckets the IOC never returned. */
	for (i = 0; i < priv->max_buckets_out; i++) {
		if (priv->RcvCtl[i].skb != NULL) {
/**/			dlprintk((KERN_INFO MYNAM "/lan_close: bucket %05x "
/**/				  "is still out\n", i));
			dma_unmap_single(&mpt_dev->pcidev->dev,
					 priv->RcvCtl[i].dma,
					 priv->RcvCtl[i].len, DMA_FROM_DEVICE);
			dev_kfree_skb(priv->RcvCtl[i].skb);
		}
	}

	kfree(priv->RcvCtl);
	kfree(priv->mpt_rxfidx);

	/* Reclaim any Tx buffers whose completion never arrived. */
	for (i = 0; i < priv->tx_max_out; i++) {
		if (priv->SendCtl[i].skb != NULL) {
			dma_unmap_single(&mpt_dev->pcidev->dev,
					 priv->SendCtl[i].dma,
					 priv->SendCtl[i].len, DMA_TO_DEVICE);
			dev_kfree_skb(priv->SendCtl[i].skb);
		}
	}

	kfree(priv->SendCtl);
	kfree(priv->mpt_txfidx);

	atomic_set(&priv->buckets_out, 0);

	printk(KERN_INFO MYNAM ": %s/%s: interface down & inactive\n",
			IOC_AND_NETDEV_NAMES_s_s(dev));

	return 0;
}
543 
544 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
545 /* Tx timeout handler. */
546 static void
mpt_lan_tx_timeout(struct net_device * dev,unsigned int txqueue)547 mpt_lan_tx_timeout(struct net_device *dev, unsigned int txqueue)
548 {
549 	struct mpt_lan_priv *priv = netdev_priv(dev);
550 	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
551 
552 	if (mpt_dev->active) {
553 		dlprintk (("mptlan/tx_timeout: calling netif_wake_queue for %s.\n", dev->name));
554 		netif_wake_queue(dev);
555 	}
556 }
557 
558 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
559 //static inline int
/* Handle a SEND_SINGLE turbo reply: the IOC finished transmitting the
 * skb identified by the buffer context embedded in @tmsg.  Update Tx
 * stats, unmap and free the skb, recycle the Tx context and restart the
 * queue.  Always returns 0 (a turbo reply has no request frame to free;
 * see the BUG note in lan_reply()). */
static int
mpt_lan_send_turbo(struct net_device *dev, u32 tmsg)
{
	struct mpt_lan_priv *priv = netdev_priv(dev);
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
	struct sk_buff *sent;
	unsigned long flags;
	u32 ctx;

	ctx = GET_LAN_BUFFER_CONTEXT(tmsg);
	sent = priv->SendCtl[ctx].skb;

	dev->stats.tx_packets++;
	dev->stats.tx_bytes += sent->len;

	dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, skb %p sent.\n",
			IOC_AND_NETDEV_NAMES_s_s(dev),
			__func__, sent));

	priv->SendCtl[ctx].skb = NULL;
	dma_unmap_single(&mpt_dev->pcidev->dev, priv->SendCtl[ctx].dma,
			 priv->SendCtl[ctx].len, DMA_TO_DEVICE);
	dev_kfree_skb_irq(sent);

	/* Push the context index back on the free Tx stack. */
	spin_lock_irqsave(&priv->txfidx_lock, flags);
	priv->mpt_txfidx[++priv->mpt_txfidx_tail] = ctx;
	spin_unlock_irqrestore(&priv->txfidx_lock, flags);

	netif_wake_queue(dev);
	return 0;
}
591 
592 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* Handle a full (non-turbo) LANSend reply carrying one or more buffer
 * contexts.  Updates Tx statistics per IOCStatus, then unmaps/frees each
 * completed skb and recycles its Tx context.  Returns 1 when the caller
 * should free the original request frame (i.e. this was not a
 * continuation reply), else 0. */
static int
mpt_lan_send_reply(struct net_device *dev, LANSendReply_t *pSendRep)
{
	struct mpt_lan_priv *priv = netdev_priv(dev);
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
	struct sk_buff *sent;
	unsigned long flags;
	int FreeReqFrame = 0;
	u32 *pContext;
	u32 ctx;
	u8 count;

	count = pSendRep->NumberOfContexts;

	dioprintk((KERN_INFO MYNAM ": send_reply: IOCStatus: %04x\n",
		 le16_to_cpu(pSendRep->IOCStatus)));

	/* Add check for Loginfo Flag in IOCStatus */

	switch (le16_to_cpu(pSendRep->IOCStatus) & MPI_IOCSTATUS_MASK) {
	case MPI_IOCSTATUS_SUCCESS:
		dev->stats.tx_packets += count;
		break;

	case MPI_IOCSTATUS_LAN_CANCELED:
	case MPI_IOCSTATUS_LAN_TRANSMIT_ABORTED:
		break;

	case MPI_IOCSTATUS_INVALID_SGL:
		/* Skip the cleanup loop: the contexts are not reclaimed
		 * for an invalid SGL. */
		dev->stats.tx_errors += count;
		printk (KERN_ERR MYNAM ": %s/%s: ERROR - Invalid SGL sent to IOC!\n",
				IOC_AND_NETDEV_NAMES_s_s(dev));
		goto out;

	default:
		dev->stats.tx_errors += count;
		break;
	}

	pContext = &pSendRep->BufferContext;

	/* Reclaim every completed buffer under the Tx free-list lock. */
	spin_lock_irqsave(&priv->txfidx_lock, flags);
	while (count > 0) {
		ctx = GET_LAN_BUFFER_CONTEXT(le32_to_cpu(*pContext));

		sent = priv->SendCtl[ctx].skb;
		dev->stats.tx_bytes += sent->len;

		dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, skb %p sent.\n",
				IOC_AND_NETDEV_NAMES_s_s(dev),
				__func__, sent));

		priv->SendCtl[ctx].skb = NULL;
		dma_unmap_single(&mpt_dev->pcidev->dev,
				 priv->SendCtl[ctx].dma,
				 priv->SendCtl[ctx].len, DMA_TO_DEVICE);
		dev_kfree_skb_irq(sent);

		priv->mpt_txfidx[++priv->mpt_txfidx_tail] = ctx;

		pContext++;
		count--;
	}
	spin_unlock_irqrestore(&priv->txfidx_lock, flags);

out:
	if (!(pSendRep->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY))
		FreeReqFrame = 1;

	netif_wake_queue(dev);
	return FreeReqFrame;
}
665 
666 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* ndo_start_xmit handler: wrap one skb in a LANSendRequest (32-bit
 * transaction context + 8 bytes of NAA/MAC details + a single 64-bit
 * simple SGE) and hand it to the IOC.  Returns NETDEV_TX_OK on success,
 * or NETDEV_TX_BUSY after stopping the queue when no Tx context or
 * request frame is available. */
static netdev_tx_t
mpt_lan_sdu_send (struct sk_buff *skb, struct net_device *dev)
{
	struct mpt_lan_priv *priv = netdev_priv(dev);
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
	MPT_FRAME_HDR *mf;
	LANSendRequest_t *pSendReq;
	SGETransaction32_t *pTrans;
	SGESimple64_t *pSimple;
	const unsigned char *mac;
	dma_addr_t dma;
	unsigned long flags;
	int ctx;
	u16 cur_naa = 0x1000;

	dioprintk((KERN_INFO MYNAM ": %s called, skb_addr = %p\n",
			__func__, skb));

	/* Grab a free Tx context; if none remain the IOC queue is full. */
	spin_lock_irqsave(&priv->txfidx_lock, flags);
	if (priv->mpt_txfidx_tail < 0) {
		netif_stop_queue(dev);
		spin_unlock_irqrestore(&priv->txfidx_lock, flags);

		printk (KERN_ERR "%s: no tx context available: %u\n",
			__func__, priv->mpt_txfidx_tail);
		return NETDEV_TX_BUSY;
	}

	mf = mpt_get_msg_frame(LanCtx, mpt_dev);
	if (mf == NULL) {
		netif_stop_queue(dev);
		spin_unlock_irqrestore(&priv->txfidx_lock, flags);

		printk (KERN_ERR "%s: Unable to alloc request frame\n",
			__func__);
		return NETDEV_TX_BUSY;
	}

	ctx = priv->mpt_txfidx[priv->mpt_txfidx_tail--];
	spin_unlock_irqrestore(&priv->txfidx_lock, flags);

//	dioprintk((KERN_INFO MYNAM ": %s/%s: Creating new msg frame (send).\n",
//			IOC_AND_NETDEV_NAMES_s_s(dev)));

	pSendReq = (LANSendRequest_t *) mf;

	/* Set the mac.raw pointer, since this apparently isn't getting
	 * done before we get the skb. Pull the data pointer past the mac data.
	 */
	skb_reset_mac_header(skb);
	skb_pull(skb, 12);

	dma = dma_map_single(&mpt_dev->pcidev->dev, skb->data, skb->len,
			     DMA_TO_DEVICE);

	/* Remember the skb/mapping so the completion path can undo it. */
	priv->SendCtl[ctx].skb = skb;
	priv->SendCtl[ctx].dma = dma;
	priv->SendCtl[ctx].len = skb->len;

	/* Message Header */
	pSendReq->Reserved    = 0;
	pSendReq->Function    = MPI_FUNCTION_LAN_SEND;
	pSendReq->ChainOffset = 0;
	pSendReq->Reserved2   = 0;
	pSendReq->MsgFlags    = 0;
	pSendReq->PortNumber  = priv->pnum;

	/* Transaction Context Element */
	pTrans = (SGETransaction32_t *) pSendReq->SG_List;

	/* No Flags, 8 bytes of Details, 32bit Context (bloody turbo replies) */
	pTrans->ContextSize   = sizeof(u32);
	pTrans->DetailsLength = 2 * sizeof(u32);
	pTrans->Flags         = 0;
	pTrans->TransactionContext = cpu_to_le32(ctx);

//	dioprintk((KERN_INFO MYNAM ": %s/%s: BC = %08x, skb = %p, buff = %p\n",
//			IOC_AND_NETDEV_NAMES_s_s(dev),
//			ctx, skb, skb->data));

	mac = skb_mac_header(skb);

	/* NAA + first two destination MAC bytes in details[0], the
	 * remaining four MAC bytes in details[1]. */
	pTrans->TransactionDetails[0] = cpu_to_le32((cur_naa         << 16) |
						    (mac[0] <<  8) |
						    (mac[1] <<  0));
	pTrans->TransactionDetails[1] = cpu_to_le32((mac[2] << 24) |
						    (mac[3] << 16) |
						    (mac[4] <<  8) |
						    (mac[5] <<  0));

	pSimple = (SGESimple64_t *) &pTrans->TransactionDetails[2];

	/* If we ever decide to send more than one Simple SGE per LANSend, then
	   we will need to make sure that LAST_ELEMENT only gets set on the
	   last one. Otherwise, bad voodoo and evil funkiness will commence. */
	pSimple->FlagsLength = cpu_to_le32(
			((MPI_SGE_FLAGS_LAST_ELEMENT |
			  MPI_SGE_FLAGS_END_OF_BUFFER |
			  MPI_SGE_FLAGS_SIMPLE_ELEMENT |
			  MPI_SGE_FLAGS_SYSTEM_ADDRESS |
			  MPI_SGE_FLAGS_HOST_TO_IOC |
			  MPI_SGE_FLAGS_64_BIT_ADDRESSING |
			  MPI_SGE_FLAGS_END_OF_LIST) << MPI_SGE_FLAGS_SHIFT) |
			skb->len);
	pSimple->Address.Low = cpu_to_le32((u32) dma);
	if (sizeof(dma_addr_t) > sizeof(u32))
		pSimple->Address.High = cpu_to_le32((u32) ((u64) dma >> 32));
	else
		pSimple->Address.High = 0;

	mpt_put_msg_frame (LanCtx, mpt_dev, mf);
	netif_trans_update(dev);

	dioprintk((KERN_INFO MYNAM ": %s/%s: Sending packet. FlagsLength = %08x.\n",
			IOC_AND_NETDEV_NAMES_s_s(dev),
			le32_to_cpu(pSimple->FlagsLength)));

	return NETDEV_TX_OK;
}
786 
787 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
788 static void
mpt_lan_wake_post_buckets_task(struct net_device * dev,int priority)789 mpt_lan_wake_post_buckets_task(struct net_device *dev, int priority)
790 /*
791  * @priority: 0 = put it on the timer queue, 1 = put it on the immediate queue
792  */
793 {
794 	struct mpt_lan_priv *priv = netdev_priv(dev);
795 
796 	if (test_and_set_bit(0, &priv->post_buckets_active) == 0) {
797 		if (priority) {
798 			schedule_delayed_work(&priv->post_buckets_task, 0);
799 		} else {
800 			schedule_delayed_work(&priv->post_buckets_task, 1);
801 			dioprintk((KERN_INFO MYNAM ": post_buckets queued on "
802 				   "timer.\n"));
803 		}
804 	        dioprintk((KERN_INFO MYNAM ": %s/%s: Queued post_buckets task.\n",
805 			   IOC_AND_NETDEV_NAMES_s_s(dev) ));
806 	}
807 }
808 
809 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* Hand a completed receive skb up to the network stack, update Rx stats,
 * and schedule more bucket posting when the number of buckets still held
 * by the IOC drops below the threshold.  Always returns 0. */
static int
mpt_lan_receive_skb(struct net_device *dev, struct sk_buff *skb)
{
	struct mpt_lan_priv *priv = netdev_priv(dev);

	skb->protocol = mpt_lan_type_trans(skb, dev);

	dioprintk((KERN_INFO MYNAM ": %s/%s: Incoming packet (%d bytes) "
		 "delivered to upper level.\n",
			IOC_AND_NETDEV_NAMES_s_s(dev), skb->len));

	dev->stats.rx_bytes += skb->len;
	dev->stats.rx_packets++;

	skb->dev = dev;
	netif_rx(skb);

	dioprintk((MYNAM "/receive_skb: %d buckets remaining\n",
		 atomic_read(&priv->buckets_out)));

	/* Top up the IOC's bucket supply before it runs dry. */
	if (atomic_read(&priv->buckets_out) < priv->bucketthresh)
		mpt_lan_wake_post_buckets_task(dev, 1);

	dioprintk((KERN_INFO MYNAM "/receive_post_reply: %d buckets "
		  "remaining, %d received back since sod\n",
		  atomic_read(&priv->buckets_out), priv->total_received));

	return 0;
}
839 
840 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
841 //static inline int
/* Handle a RECEIVE_SINGLE turbo reply: the bucket identified by the
 * context in @tmsg holds a packet whose length is also encoded in @tmsg.
 * Packets below MPT_LAN_RX_COPYBREAK are copied into a fresh skb so the
 * original bucket stays DMA-mapped; larger ones are unmapped and handed
 * up directly.  Returns mpt_lan_receive_skb()'s result, or -ENOMEM if a
 * copy skb could not be allocated. */
static int
mpt_lan_receive_post_turbo(struct net_device *dev, u32 tmsg)
{
	struct mpt_lan_priv *priv = netdev_priv(dev);
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
	struct sk_buff *skb, *old_skb;
	unsigned long flags;
	u32 ctx, len;

	ctx = GET_LAN_BUCKET_CONTEXT(tmsg);
	skb = priv->RcvCtl[ctx].skb;

	len = GET_LAN_PACKET_LENGTH(tmsg);

	if (len < MPT_LAN_RX_COPYBREAK) {
		old_skb = skb;

		skb = (struct sk_buff *)dev_alloc_skb(len);
		if (!skb) {
			printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! (%s@%d)\n",
					IOC_AND_NETDEV_NAMES_s_s(dev),
					__FILE__, __LINE__);
			return -ENOMEM;
		}

		/* The bucket stays mapped for the device: sync it to the
		 * CPU, copy the payload out, then give it back. */
		dma_sync_single_for_cpu(&mpt_dev->pcidev->dev,
					priv->RcvCtl[ctx].dma,
					priv->RcvCtl[ctx].len,
					DMA_FROM_DEVICE);

		skb_copy_from_linear_data(old_skb, skb_put(skb, len), len);

		dma_sync_single_for_device(&mpt_dev->pcidev->dev,
					   priv->RcvCtl[ctx].dma,
					   priv->RcvCtl[ctx].len,
					   DMA_FROM_DEVICE);
		goto out;
	}

	/* Large packet: hand the bucket skb itself up the stack. */
	skb_put(skb, len);

	priv->RcvCtl[ctx].skb = NULL;

	dma_unmap_single(&mpt_dev->pcidev->dev, priv->RcvCtl[ctx].dma,
			 priv->RcvCtl[ctx].len, DMA_FROM_DEVICE);

out:
	/* Recycle the Rx context for a future bucket post. */
	spin_lock_irqsave(&priv->rxfidx_lock, flags);
	priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
	spin_unlock_irqrestore(&priv->rxfidx_lock, flags);

	atomic_dec(&priv->buckets_out);
	priv->total_received++;

	return mpt_lan_receive_skb(dev, skb);
}
898 
899 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* Free every bucket named in a canceled ReceivePostReply: the IOC is
 * returning them unused (LAN_CANCELED path), so unmap each skb, free it
 * and recycle its Rx context.  Always returns 0. */
static int
mpt_lan_receive_post_free(struct net_device *dev,
			  LANReceivePostReply_t *pRecvRep)
{
	struct mpt_lan_priv *priv = netdev_priv(dev);
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
	unsigned long flags;
	struct sk_buff *skb;
	u32 ctx;
	int count;
	int i;

	count = pRecvRep->NumberOfContexts;

/**/	dlprintk((KERN_INFO MYNAM "/receive_post_reply: "
		  "IOC returned %d buckets, freeing them...\n", count));

	spin_lock_irqsave(&priv->rxfidx_lock, flags);
	for (i = 0; i < count; i++) {
		ctx = le32_to_cpu(pRecvRep->BucketContext[i]);

		skb = priv->RcvCtl[ctx].skb;

//		dlprintk((KERN_INFO MYNAM ": %s: dev_name = %s\n",
//				IOC_AND_NETDEV_NAMES_s_s(dev)));
//		dlprintk((KERN_INFO MYNAM "@rpr[2], priv = %p, buckets_out addr = %p",
//				priv, &(priv->buckets_out)));
//		dlprintk((KERN_INFO MYNAM "@rpr[2] TC + 3\n"));

		priv->RcvCtl[ctx].skb = NULL;
		dma_unmap_single(&mpt_dev->pcidev->dev, priv->RcvCtl[ctx].dma,
				 priv->RcvCtl[ctx].len, DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);

		/* Return the context to the free Rx stack. */
		priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
	}
	spin_unlock_irqrestore(&priv->rxfidx_lock, flags);

	atomic_sub(count, &priv->buckets_out);

//	for (i = 0; i < priv->max_buckets_out; i++)
//		if (priv->RcvCtl[i].skb != NULL)
//			dlprintk((KERN_INFO MYNAM "@rpr: bucket %03x "
//				  "is still out\n", i));

/*	dlprintk((KERN_INFO MYNAM "/receive_post_reply: freed %d buckets\n",
		  count));
*/
/**/	dlprintk((KERN_INFO MYNAM "@receive_post_reply: %d buckets "
/**/		  "remaining, %d received back since sod.\n",
/**/		  atomic_read(&priv->buckets_out), priv->total_received));
	return 0;
}
953 
954 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 * mpt_lan_receive_post_reply - hand a received packet up the stack
 * @dev:      net device the packet arrived on
 * @pRecvRep: non-TURBO LAN receive-post reply from the IOC
 *
 * Handles three delivery cases:
 *   - the packet spans multiple buckets: allocate a fresh skb and copy
 *     each fragment into it, recycling all bucket contexts;
 *   - a small packet (< MPT_LAN_RX_COPYBREAK): copy into a fresh skb so
 *     the mapped bucket can be reused as-is;
 *   - a large packet: steal the bucket's skb, unmapping its DMA buffer.
 * In every case the bucket context indices go back on the rxfidx free
 * stack.  Also monitors the firmware's BucketsRemaining count and issues
 * a LAN reset if it drifts too far from the driver's own accounting.
 *
 * Returns the result of mpt_lan_receive_skb(), 0 for a canceled reply
 * (delegated to mpt_lan_receive_post_free()), or a negative value on
 * error.
 */
static int
mpt_lan_receive_post_reply(struct net_device *dev,
			   LANReceivePostReply_t *pRecvRep)
{
	struct mpt_lan_priv *priv = netdev_priv(dev);
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
	struct sk_buff *skb, *old_skb;
	unsigned long flags;
	u32 len, ctx, offset;
	u32 remaining = le32_to_cpu(pRecvRep->BucketsRemaining);
	int count;
	int i, l;

	dioprintk((KERN_INFO MYNAM ": mpt_lan_receive_post_reply called\n"));
	dioprintk((KERN_INFO MYNAM ": receive_post_reply: IOCStatus: %04x\n",
		 le16_to_cpu(pRecvRep->IOCStatus)));

	/* Canceled receives carry no data; just free the buckets. */
	if ((le16_to_cpu(pRecvRep->IOCStatus) & MPI_IOCSTATUS_MASK) ==
						MPI_IOCSTATUS_LAN_CANCELED)
		return mpt_lan_receive_post_free(dev, pRecvRep);

	len = le32_to_cpu(pRecvRep->PacketLength);
	if (len == 0) {
		printk (KERN_ERR MYNAM ": %s/%s: ERROR - Got a non-TURBO "
			"ReceivePostReply w/ PacketLength zero!\n",
				IOC_AND_NETDEV_NAMES_s_s(dev));
		printk (KERN_ERR MYNAM ": MsgFlags = %02x, IOCStatus = %04x\n",
				pRecvRep->MsgFlags, le16_to_cpu(pRecvRep->IOCStatus));
		return -1;
	}

	ctx    = le32_to_cpu(pRecvRep->BucketContext[0]);
	count  = pRecvRep->NumberOfContexts;
	skb    = priv->RcvCtl[ctx].skb;

	offset = le32_to_cpu(pRecvRep->PacketOffset);
//	if (offset != 0) {
//		printk (KERN_INFO MYNAM ": %s/%s: Got a ReceivePostReply "
//			"w/ PacketOffset %u\n",
//				IOC_AND_NETDEV_NAMES_s_s(dev),
//				offset);
//	}

	dioprintk((KERN_INFO MYNAM ": %s/%s: @rpr, offset = %d, len = %d\n",
			IOC_AND_NETDEV_NAMES_s_s(dev),
			offset, len));

	if (count > 1) {
		/* Packet spans several buckets: concatenate the fragments
		 * into a newly allocated skb, leaving the buckets mapped
		 * for reuse. */
		int szrem = len;

//		dioprintk((KERN_INFO MYNAM ": %s/%s: Multiple buckets returned "
//			"for single packet, concatenating...\n",
//				IOC_AND_NETDEV_NAMES_s_s(dev)));

		skb = (struct sk_buff *)dev_alloc_skb(len);
		if (!skb) {
			printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! (%s@%d)\n",
					IOC_AND_NETDEV_NAMES_s_s(dev),
					__FILE__, __LINE__);
			return -ENOMEM;
		}

		spin_lock_irqsave(&priv->rxfidx_lock, flags);
		for (i = 0; i < count; i++) {

			ctx = le32_to_cpu(pRecvRep->BucketContext[i]);
			old_skb = priv->RcvCtl[ctx].skb;

			/* The last fragment may be shorter than its bucket. */
			l = priv->RcvCtl[ctx].len;
			if (szrem < l)
				l = szrem;

//			dioprintk((KERN_INFO MYNAM ": %s/%s: Buckets = %d, len = %u\n",
//					IOC_AND_NETDEV_NAMES_s_s(dev),
//					i, l));

			/* Sync for CPU before reading, back to device after,
			 * since the bucket stays mapped for reuse. */
			dma_sync_single_for_cpu(&mpt_dev->pcidev->dev,
						priv->RcvCtl[ctx].dma,
						priv->RcvCtl[ctx].len,
						DMA_FROM_DEVICE);
			skb_copy_from_linear_data(old_skb, skb_put(skb, l), l);

			dma_sync_single_for_device(&mpt_dev->pcidev->dev,
						   priv->RcvCtl[ctx].dma,
						   priv->RcvCtl[ctx].len,
						   DMA_FROM_DEVICE);

			priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
			szrem -= l;
		}
		spin_unlock_irqrestore(&priv->rxfidx_lock, flags);

	} else if (len < MPT_LAN_RX_COPYBREAK) {

		/* Small packet: copy into a right-sized skb so the large
		 * mapped bucket can be reposted unchanged. */
		old_skb = skb;

		skb = (struct sk_buff *)dev_alloc_skb(len);
		if (!skb) {
			printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! (%s@%d)\n",
					IOC_AND_NETDEV_NAMES_s_s(dev),
					__FILE__, __LINE__);
			return -ENOMEM;
		}

		dma_sync_single_for_cpu(&mpt_dev->pcidev->dev,
					priv->RcvCtl[ctx].dma,
					priv->RcvCtl[ctx].len,
					DMA_FROM_DEVICE);

		skb_copy_from_linear_data(old_skb, skb_put(skb, len), len);

		dma_sync_single_for_device(&mpt_dev->pcidev->dev,
					   priv->RcvCtl[ctx].dma,
					   priv->RcvCtl[ctx].len,
					   DMA_FROM_DEVICE);

		spin_lock_irqsave(&priv->rxfidx_lock, flags);
		priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
		spin_unlock_irqrestore(&priv->rxfidx_lock, flags);

	} else {
		/* Large packet: steal the bucket's skb outright and unmap
		 * its DMA buffer; the slot will get a fresh skb on repost. */
		spin_lock_irqsave(&priv->rxfidx_lock, flags);

		priv->RcvCtl[ctx].skb = NULL;

		dma_unmap_single(&mpt_dev->pcidev->dev, priv->RcvCtl[ctx].dma,
				 priv->RcvCtl[ctx].len, DMA_FROM_DEVICE);
		priv->RcvCtl[ctx].dma = 0;

		priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
		spin_unlock_irqrestore(&priv->rxfidx_lock, flags);

		skb_put(skb,len);
	}

	atomic_sub(count, &priv->buckets_out);
	priv->total_received += count;

	/* Sanity check: the free stack must never exceed its bounds. */
	if (priv->mpt_rxfidx_tail >= MPT_LAN_MAX_BUCKETS_OUT) {
		printk (KERN_ERR MYNAM ": %s/%s: Yoohoo! mpt_rxfidx_tail = %d, "
			"MPT_LAN_MAX_BUCKETS_OUT = %d\n",
				IOC_AND_NETDEV_NAMES_s_s(dev),
				priv->mpt_rxfidx_tail,
				MPT_LAN_MAX_BUCKETS_OUT);

		return -1;
	}

	if (remaining == 0)
		printk (KERN_WARNING MYNAM ": %s/%s: WARNING - IOC out of buckets! "
			"(priv->buckets_out = %d)\n",
			IOC_AND_NETDEV_NAMES_s_s(dev),
			atomic_read(&priv->buckets_out));
	else if (remaining < 10)
		printk (KERN_INFO MYNAM ": %s/%s: IOC says %d buckets left. "
			"(priv->buckets_out = %d)\n",
			IOC_AND_NETDEV_NAMES_s_s(dev),
			remaining, atomic_read(&priv->buckets_out));

	/* If the firmware's bucket count has drifted too far from ours,
	 * reset the LAN port to clear its hashtable and repost. */
	if ((remaining < priv->bucketthresh) &&
	    ((atomic_read(&priv->buckets_out) - remaining) >
	     MPT_LAN_BUCKETS_REMAIN_MISMATCH_THRESH)) {

		printk (KERN_WARNING MYNAM " Mismatch between driver's "
			"buckets_out count and fw's BucketsRemaining "
			"count has crossed the threshold, issuing a "
			"LanReset to clear the fw's hashtable. You may "
			"want to check your /var/log/messages for \"CRC "
			"error\" event notifications.\n");

		mpt_lan_reset(dev);
		mpt_lan_wake_post_buckets_task(dev, 0);
	}

	return mpt_lan_receive_skb(dev, skb);
}
1131 
1132 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1133 /* Simple SGE's only at the moment */
1134 
/*
 * mpt_lan_post_receive_buckets - replenish the IOC's receive buckets
 * @priv: per-device LAN driver state
 *
 * Posts enough buckets to bring the outstanding count back up to
 * priv->max_buckets_out.  Buckets are posted in batches: each MPT request
 * frame holds up to 'max' transaction/SGE pairs (limited by the frame
 * size).  Bucket skbs are reused when their mapped length still matches
 * the current MTU-derived length; otherwise they are freed, reallocated,
 * and remapped.  Clears post_buckets_active on exit so the delayed-work
 * scheduler can requeue this routine.
 */
static void
mpt_lan_post_receive_buckets(struct mpt_lan_priv *priv)
{
	struct net_device *dev = priv->dev;
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
	MPT_FRAME_HDR *mf;
	LANReceivePostRequest_t *pRecvReq;
	SGETransaction32_t *pTrans;
	SGESimple64_t *pSimple;
	struct sk_buff *skb;
	dma_addr_t dma;
	u32 curr, buckets, count, max;
	/* Bucket length: MTU + link header + 4 bytes of slack. */
	u32 len = (dev->mtu + dev->hard_header_len + 4);
	unsigned long flags;
	int i;

	curr = atomic_read(&priv->buckets_out);
	buckets = (priv->max_buckets_out - curr);

	dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, Start_buckets = %u, buckets_out = %u\n",
			IOC_AND_NETDEV_NAMES_s_s(dev),
			__func__, buckets, curr));

	/* How many transaction+SGE pairs fit in one request frame. */
	max = (mpt_dev->req_sz - MPT_LAN_RECEIVE_POST_REQUEST_SIZE) /
			(sizeof(SGETransaction32_t) + sizeof(SGESimple64_t));

	while (buckets) {
		mf = mpt_get_msg_frame(LanCtx, mpt_dev);
		if (mf == NULL) {
			printk (KERN_ERR "%s: Unable to alloc request frame\n",
				__func__);
			dioprintk((KERN_ERR "%s: %u buckets remaining\n",
				 __func__, buckets));
			goto out;
		}
		pRecvReq = (LANReceivePostRequest_t *) mf;

		i = le16_to_cpu(mf->u.frame.hwhdr.msgctxu.fld.req_idx);
		mpt_dev->RequestNB[i] = 0;
		count = buckets;
		if (count > max)
			count = max;

		pRecvReq->Function    = MPI_FUNCTION_LAN_RECEIVE;
		pRecvReq->ChainOffset = 0;
		pRecvReq->MsgFlags    = 0;
		pRecvReq->PortNumber  = priv->pnum;

		pTrans = (SGETransaction32_t *) pRecvReq->SG_List;
		pSimple = NULL;

		for (i = 0; i < count; i++) {
			int ctx;

			/* Pop a free context index off the rxfidx stack. */
			spin_lock_irqsave(&priv->rxfidx_lock, flags);
			if (priv->mpt_rxfidx_tail < 0) {
				printk (KERN_ERR "%s: Can't alloc context\n",
					__func__);
				spin_unlock_irqrestore(&priv->rxfidx_lock,
						       flags);
				break;
			}

			ctx = priv->mpt_rxfidx[priv->mpt_rxfidx_tail--];

			/* A leftover skb of the wrong size (MTU changed)
			 * must be unmapped and freed before reallocating. */
			skb = priv->RcvCtl[ctx].skb;
			if (skb && (priv->RcvCtl[ctx].len != len)) {
				dma_unmap_single(&mpt_dev->pcidev->dev,
						 priv->RcvCtl[ctx].dma,
						 priv->RcvCtl[ctx].len,
						 DMA_FROM_DEVICE);
				dev_kfree_skb(priv->RcvCtl[ctx].skb);
				skb = priv->RcvCtl[ctx].skb = NULL;
			}

			if (skb == NULL) {
				skb = dev_alloc_skb(len);
				if (skb == NULL) {
					printk (KERN_WARNING
						MYNAM "/%s: Can't alloc skb\n",
						__func__);
					/* Push the context back; stop here. */
					priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
					spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
					break;
				}

				dma = dma_map_single(&mpt_dev->pcidev->dev,
						     skb->data, len,
						     DMA_FROM_DEVICE);

				priv->RcvCtl[ctx].skb = skb;
				priv->RcvCtl[ctx].dma = dma;
				priv->RcvCtl[ctx].len = len;
			}

			spin_unlock_irqrestore(&priv->rxfidx_lock, flags);

			/* Append a transaction element carrying the context,
			 * followed by one 64-bit simple SGE for the bucket. */
			pTrans->ContextSize   = sizeof(u32);
			pTrans->DetailsLength = 0;
			pTrans->Flags         = 0;
			pTrans->TransactionContext = cpu_to_le32(ctx);

			pSimple = (SGESimple64_t *) pTrans->TransactionDetails;

			pSimple->FlagsLength = cpu_to_le32(
				((MPI_SGE_FLAGS_END_OF_BUFFER |
				  MPI_SGE_FLAGS_SIMPLE_ELEMENT |
				  MPI_SGE_FLAGS_64_BIT_ADDRESSING) << MPI_SGE_FLAGS_SHIFT) | len);
			pSimple->Address.Low = cpu_to_le32((u32) priv->RcvCtl[ctx].dma);
			if (sizeof(dma_addr_t) > sizeof(u32))
				pSimple->Address.High = cpu_to_le32((u32) ((u64) priv->RcvCtl[ctx].dma >> 32));
			else
				pSimple->Address.High = 0;

			pTrans = (SGETransaction32_t *) (pSimple + 1);
		}

		/* pSimple == NULL means not a single bucket was added. */
		if (pSimple == NULL) {
/**/			printk (KERN_WARNING MYNAM "/%s: No buckets posted\n",
/**/				__func__);
			mpt_free_msg_frame(mpt_dev, mf);
			goto out;
		}

		/* Mark the last SGE as end-of-list before sending. */
		pSimple->FlagsLength |= cpu_to_le32(MPI_SGE_FLAGS_END_OF_LIST << MPI_SGE_FLAGS_SHIFT);

		pRecvReq->BucketCount = cpu_to_le32(i);

/*	printk(KERN_INFO MYNAM ": posting buckets\n   ");
 *	for (i = 0; i < j + 2; i ++)
 *	    printk (" %08x", le32_to_cpu(msg[i]));
 *	printk ("\n");
 */

		mpt_put_msg_frame(LanCtx, mpt_dev, mf);

		priv->total_posted += i;
		buckets -= i;
		atomic_add(i, &priv->buckets_out);
	}

out:
	dioprintk((KERN_INFO MYNAM "/%s: End_buckets = %u, priv->buckets_out = %u\n",
		  __func__, buckets, atomic_read(&priv->buckets_out)));
	dioprintk((KERN_INFO MYNAM "/%s: Posted %u buckets and received %u back\n",
	__func__, priv->total_posted, priv->total_received));

	/* Allow mpt_lan_wake_post_buckets_task() to schedule us again. */
	clear_bit(0, &priv->post_buckets_active);
}
1284 
1285 static void
mpt_lan_post_receive_buckets_work(struct work_struct * work)1286 mpt_lan_post_receive_buckets_work(struct work_struct *work)
1287 {
1288 	mpt_lan_post_receive_buckets(container_of(work, struct mpt_lan_priv,
1289 						  post_buckets_task.work));
1290 }
1291 
/* net_device callback table shared by every MPT LAN interface. */
static const struct net_device_ops mpt_netdev_ops = {
	.ndo_open       = mpt_lan_open,
	.ndo_stop       = mpt_lan_close,
	.ndo_start_xmit = mpt_lan_sdu_send,
	.ndo_tx_timeout = mpt_lan_tx_timeout,
};
1298 
1299 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1300 static struct net_device *
mpt_register_lan_device(MPT_ADAPTER * mpt_dev,int pnum)1301 mpt_register_lan_device (MPT_ADAPTER *mpt_dev, int pnum)
1302 {
1303 	struct net_device *dev;
1304 	struct mpt_lan_priv *priv;
1305 	u8 HWaddr[FC_ALEN], *a;
1306 
1307 	dev = alloc_fcdev(sizeof(struct mpt_lan_priv));
1308 	if (!dev)
1309 		return NULL;
1310 
1311 	dev->mtu = MPT_LAN_MTU;
1312 
1313 	priv = netdev_priv(dev);
1314 
1315 	priv->dev = dev;
1316 	priv->mpt_dev = mpt_dev;
1317 	priv->pnum = pnum;
1318 
1319 	INIT_DELAYED_WORK(&priv->post_buckets_task,
1320 			  mpt_lan_post_receive_buckets_work);
1321 	priv->post_buckets_active = 0;
1322 
1323 	dlprintk((KERN_INFO MYNAM "@%d: bucketlen = %d\n",
1324 			__LINE__, dev->mtu + dev->hard_header_len + 4));
1325 
1326 	atomic_set(&priv->buckets_out, 0);
1327 	priv->total_posted = 0;
1328 	priv->total_received = 0;
1329 	priv->max_buckets_out = max_buckets_out;
1330 	if (mpt_dev->pfacts[0].MaxLanBuckets < max_buckets_out)
1331 		priv->max_buckets_out = mpt_dev->pfacts[0].MaxLanBuckets;
1332 
1333 	dlprintk((KERN_INFO MYNAM "@%d: MaxLanBuckets=%d, max_buckets_out/priv=%d/%d\n",
1334 			__LINE__,
1335 			mpt_dev->pfacts[0].MaxLanBuckets,
1336 			max_buckets_out,
1337 			priv->max_buckets_out));
1338 
1339 	priv->bucketthresh = priv->max_buckets_out * 2 / 3;
1340 	spin_lock_init(&priv->txfidx_lock);
1341 	spin_lock_init(&priv->rxfidx_lock);
1342 
1343 	/*  Grab pre-fetched LANPage1 stuff. :-) */
1344 	a = (u8 *) &mpt_dev->lan_cnfg_page1.HardwareAddressLow;
1345 
1346 	HWaddr[0] = a[5];
1347 	HWaddr[1] = a[4];
1348 	HWaddr[2] = a[3];
1349 	HWaddr[3] = a[2];
1350 	HWaddr[4] = a[1];
1351 	HWaddr[5] = a[0];
1352 
1353 	dev->addr_len = FC_ALEN;
1354 	dev_addr_set(dev, HWaddr);
1355 	memset(dev->broadcast, 0xff, FC_ALEN);
1356 
1357 	/* The Tx queue is 127 deep on the 909.
1358 	 * Give ourselves some breathing room.
1359 	 */
1360 	priv->tx_max_out = (tx_max_out_p <= MPT_TX_MAX_OUT_LIM) ?
1361 			    tx_max_out_p : MPT_TX_MAX_OUT_LIM;
1362 
1363 	dev->netdev_ops = &mpt_netdev_ops;
1364 	dev->watchdog_timeo = MPT_LAN_TX_TIMEOUT;
1365 
1366 	/* MTU range: 96 - 65280 */
1367 	dev->min_mtu = MPT_LAN_MIN_MTU;
1368 	dev->max_mtu = MPT_LAN_MAX_MTU;
1369 
1370 	dlprintk((KERN_INFO MYNAM ": Finished registering dev "
1371 		"and setting initial values\n"));
1372 
1373 	if (register_netdev(dev) != 0) {
1374 		free_netdev(dev);
1375 		dev = NULL;
1376 	}
1377 	return dev;
1378 }
1379 
1380 static int
mptlan_probe(struct pci_dev * pdev)1381 mptlan_probe(struct pci_dev *pdev)
1382 {
1383 	MPT_ADAPTER 		*ioc = pci_get_drvdata(pdev);
1384 	struct net_device	*dev;
1385 	int			i;
1386 
1387 	for (i = 0; i < ioc->facts.NumberOfPorts; i++) {
1388 		printk(KERN_INFO MYNAM ": %s: PortNum=%x, "
1389 		       "ProtocolFlags=%02Xh (%c%c%c%c)\n",
1390 		       ioc->name, ioc->pfacts[i].PortNumber,
1391 		       ioc->pfacts[i].ProtocolFlags,
1392 		       MPT_PROTOCOL_FLAGS_c_c_c_c(
1393 			       ioc->pfacts[i].ProtocolFlags));
1394 
1395 		if (!(ioc->pfacts[i].ProtocolFlags &
1396 					MPI_PORTFACTS_PROTOCOL_LAN)) {
1397 			printk(KERN_INFO MYNAM ": %s: Hmmm... LAN protocol "
1398 			       "seems to be disabled on this adapter port!\n",
1399 			       ioc->name);
1400 			continue;
1401 		}
1402 
1403 		dev = mpt_register_lan_device(ioc, i);
1404 		if (!dev) {
1405 			printk(KERN_ERR MYNAM ": %s: Unable to register "
1406 			       "port%d as a LAN device\n", ioc->name,
1407 			       ioc->pfacts[i].PortNumber);
1408 			continue;
1409 		}
1410 
1411 		printk(KERN_INFO MYNAM ": %s: Fusion MPT LAN device "
1412 		       "registered as '%s'\n", ioc->name, dev->name);
1413 		printk(KERN_INFO MYNAM ": %s/%s: "
1414 		       "LanAddr = %pM\n",
1415 		       IOC_AND_NETDEV_NAMES_s_s(dev),
1416 		       dev->dev_addr);
1417 
1418 		ioc->netdev = dev;
1419 
1420 		return 0;
1421 	}
1422 
1423 	return -ENODEV;
1424 }
1425 
1426 static void
mptlan_remove(struct pci_dev * pdev)1427 mptlan_remove(struct pci_dev *pdev)
1428 {
1429 	MPT_ADAPTER 		*ioc = pci_get_drvdata(pdev);
1430 	struct net_device	*dev = ioc->netdev;
1431 	struct mpt_lan_priv *priv = netdev_priv(dev);
1432 
1433 	cancel_delayed_work_sync(&priv->post_buckets_task);
1434 	if(dev != NULL) {
1435 		unregister_netdev(dev);
1436 		free_netdev(dev);
1437 	}
1438 }
1439 
/* Callbacks invoked by mptbase when a Fusion adapter is added/removed. */
static struct mpt_pci_driver mptlan_driver = {
	.probe		= mptlan_probe,
	.remove		= mptlan_remove,
};
1444 
mpt_lan_init(void)1445 static int __init mpt_lan_init (void)
1446 {
1447 	show_mptmod_ver(LANAME, LANVER);
1448 
1449 	LanCtx = mpt_register(lan_reply, MPTLAN_DRIVER,
1450 				"lan_reply");
1451 	if (LanCtx <= 0) {
1452 		printk (KERN_ERR MYNAM ": Failed to register with MPT base driver\n");
1453 		return -EBUSY;
1454 	}
1455 
1456 	dlprintk((KERN_INFO MYNAM ": assigned context of %d\n", LanCtx));
1457 
1458 	if (mpt_reset_register(LanCtx, mpt_lan_ioc_reset)) {
1459 		printk(KERN_ERR MYNAM ": Eieee! unable to register a reset "
1460 		       "handler with mptbase! The world is at an end! "
1461 		       "Everything is fading to black! Goodbye.\n");
1462 		return -EBUSY;
1463 	}
1464 
1465 	dlprintk((KERN_INFO MYNAM ": Registered for IOC reset notifications\n"));
1466 
1467 	mpt_device_driver_register(&mptlan_driver, MPTLAN_DRIVER);
1468 	return 0;
1469 }
1470 
mpt_lan_exit(void)1471 static void __exit mpt_lan_exit(void)
1472 {
1473 	mpt_device_driver_deregister(MPTLAN_DRIVER);
1474 	mpt_reset_deregister(LanCtx);
1475 
1476 	if (LanCtx) {
1477 		mpt_deregister(LanCtx);
1478 		LanCtx = MPT_MAX_PROTOCOL_DRIVERS;
1479 	}
1480 }
1481 
/* Module entry/exit hooks. */
module_init(mpt_lan_init);
module_exit(mpt_lan_exit);
1484 
1485 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 * mpt_lan_type_trans - classify a received frame and determine its protocol
 * @skb: received packet, data pointing at the mpt_lan_ohdr link header
 * @dev: net device the packet arrived on
 *
 * Strips the link-level header (and, for IP/ARP, the LLC/SNAP header),
 * sets skb->pkt_type from the destination address, and returns the
 * network-byte-order protocol id for skb->protocol.
 */
static unsigned short
mpt_lan_type_trans(struct sk_buff *skb, struct net_device *dev)
{
	struct mpt_lan_ohdr *fch = (struct mpt_lan_ohdr *)skb->data;
	struct fcllc *fcllc;

	skb_reset_mac_header(skb);
	skb_pull(skb, sizeof(struct mpt_lan_ohdr));

	/* dtype 0xffff flags a sender whose firmware byte-swapped the
	 * header (broadcast-swap F/W bug); un-swap the four header words
	 * in place.  fch still points at valid skb memory after the pull. */
	if (fch->dtype == htons(0xffff)) {
		u32 *p = (u32 *) fch;

		swab32s(p + 0);
		swab32s(p + 1);
		swab32s(p + 2);
		swab32s(p + 3);

		printk (KERN_WARNING MYNAM ": %s: WARNING - Broadcast swap F/W bug detected!\n",
				NETDEV_PTR_TO_IOC_NAME_s(dev));
		printk (KERN_WARNING MYNAM ": Please update sender @ MAC_addr = %pM\n",
				fch->saddr);
	}

	/* Group bit in the destination => broadcast or multicast. */
	if (*fch->daddr & 1) {
		if (!memcmp(fch->daddr, dev->broadcast, FC_ALEN)) {
			skb->pkt_type = PACKET_BROADCAST;
		} else {
			skb->pkt_type = PACKET_MULTICAST;
		}
	} else {
		if (memcmp(fch->daddr, dev->dev_addr, FC_ALEN)) {
			skb->pkt_type = PACKET_OTHERHOST;
		} else {
			skb->pkt_type = PACKET_HOST;
		}
	}

	fcllc = (struct fcllc *)skb->data;

	/* Strip the LLC/SNAP header from IP and ARP packets since we
	 * don't pass them through to the 802.2/SNAP layers.
	 */
	if (fcllc->dsap == EXTENDED_SAP &&
		(fcllc->ethertype == htons(ETH_P_IP) ||
		 fcllc->ethertype == htons(ETH_P_ARP))) {
		skb_pull(skb, sizeof(struct fcllc));
		/* fcllc remains readable; only skb->data moved past it. */
		return fcllc->ethertype;
	}

	/* Everything else is handed up as raw 802.2. */
	return htons(ETH_P_802_2);
}
1537 
1538 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1539