xref: /titanic_41/usr/src/uts/common/io/e1000g/e1000g_rx.c (revision 112f9fc1e25dcac8b980e034e763f96fb9736261)
1 /*
2  * This file is provided under a CDDLv1 license.  When using or
3  * redistributing this file, you may do so under this license.
4  * In redistributing this file this license must be included
5  * and no other modification of this header file is permitted.
6  *
7  * CDDL LICENSE SUMMARY
8  *
9  * Copyright(c) 1999 - 2007 Intel Corporation. All rights reserved.
10  *
11  * The contents of this file are subject to the terms of Version
12  * 1.0 of the Common Development and Distribution License (the "License").
13  *
14  * You should have received a copy of the License with this software.
15  * You can obtain a copy of the License at
16  *	http://www.opensolaris.org/os/licensing.
17  * See the License for the specific language governing permissions
18  * and limitations under the License.
19  */
20 
21 /*
22  * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms of the CDDLv1.
24  */
25 
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 /*
29  * **********************************************************************
30  *									*
31  * Module Name:								*
32  *   e1000g_rx.c							*
33  *									*
34  * Abstract:								*
35  *   This file contains some routines that takes care of Receive	*
36  *   interrupt and also for the received packet				*
37  *   it sends up to upper layer.					*
38  *   It tries to do a zero copy if free buffers are available in	*
39  *   the pool. Also it implements shortcut to Ipq			*
40  *									*
41  *									*
42  *   This driver runs on the following hardware:			*
 43  *   - Wiseman-based PCI gigabit ethernet adapters			*
44  *									*
45  * Environment:								*
46  *   Kernel Mode -							*
47  *									*
48  * **********************************************************************
49  */
50 
51 #include "e1000g_sw.h"
52 #include "e1000g_debug.h"
53 
54 /*
55  * local prototypes
56  */
57 static RX_SW_PACKET *e1000g_get_buf(e1000g_rx_ring_t *rx_ring);
58 #pragma	inline(e1000g_get_buf)
59 
60 /*
61  * **********************************************************************
62  * Name:      e1000g_rxfree_func					*
63  *									*
64  * Description:								*
65  *									*
 66  *	This function is called when an mp is freed by the user thru	*
67  *	freeb call (Only for mp constructed through desballoc call)	*
68  *	It returns back the freed buffer to the freelist		*
69  *									*
70  *									*
71  * Parameter Passed:							*
72  *									*
73  * Return Value:							*
74  *									*
75  * Functions called:							*
76  *									*
77  * **********************************************************************
78  */
79 void
80 e1000g_rxfree_func(RX_SW_PACKET *packet)
81 {
82 	struct e1000g *Adapter;
83 	e1000g_rx_ring_t *rx_ring;
84 
85 	/*
86 	 * Here the rx recycling processes different rx packets in different
87 	 * threads, so we protect it with RW_READER to ensure it won't block
88 	 * other rx recycling threads.
89 	 */
90 	rw_enter(&e1000g_rx_detach_lock, RW_READER);
91 
92 	if (!(packet->flag & E1000G_RX_SW_SENDUP)) {
93 		rw_exit(&e1000g_rx_detach_lock);
94 		return;
95 	}
96 
97 	if (packet->flag & E1000G_RX_SW_DETACHED) {
98 		rw_exit(&e1000g_rx_detach_lock);
99 
100 		ASSERT(packet->mp == NULL);
101 		e1000g_free_rx_sw_packet(packet);
102 
103 		/*
104 		 * Here the e1000g_mblks_pending may be modified by different
105 		 * rx recycling threads simultaneously, so we need to protect
106 		 * it with RW_WRITER.
107 		 */
108 		rw_enter(&e1000g_rx_detach_lock, RW_WRITER);
109 		e1000g_mblks_pending--;
110 		rw_exit(&e1000g_rx_detach_lock);
111 		return;
112 	}
113 
114 	packet->flag &= ~E1000G_RX_SW_SENDUP;
115 
116 	rx_ring = (e1000g_rx_ring_t *)packet->rx_ring;
117 	Adapter = rx_ring->adapter;
118 
119 	if (packet->mp == NULL) {
120 		/*
121 		 * Allocate a mblk that binds to the data buffer
122 		 */
123 		packet->mp = desballoc((unsigned char *)
124 		    packet->rx_buf->address - E1000G_IPALIGNROOM,
125 		    packet->rx_buf->size + E1000G_IPALIGNROOM,
126 		    BPRI_MED, &packet->free_rtn);
127 
128 		if (packet->mp != NULL) {
129 			packet->mp->b_rptr += E1000G_IPALIGNROOM;
130 			packet->mp->b_wptr += E1000G_IPALIGNROOM;
131 		} else {
132 			Adapter->rx_esballoc_fail++;
133 		}
134 	}
135 
136 	mutex_enter(&rx_ring->freelist_lock);
137 	QUEUE_PUSH_TAIL(&rx_ring->free_list, &packet->Link);
138 	Adapter->rx_avail_freepkt++;
139 	mutex_exit(&rx_ring->freelist_lock);
140 
141 	rw_exit(&e1000g_rx_detach_lock);
142 }
143 
144 /*
145  * **********************************************************************
146  * Name:	SetupReceiveStructures					*
147  *									*
148  * Description: This routine initializes all of the receive related	*
149  *	      structures.  This includes the receive descriptors, the	*
150  *	      actual receive buffers, and the RX_SW_PACKET software	*
151  *	      structures.						*
152  *									*
153  *	      NOTE -- The device must have been reset before this	*
154  *		      routine is called.				*
155  *									*
156  * Author:      Hari Seshadri						*
157  * Functions Called :      get_32bit_value;				*
158  *									*
159  *									*
160  *									*
161  * Arguments:								*
162  *      Adapter - A pointer to our context sensitive "Adapter"		*
163  *	structure.							*
164  *									*
165  * Returns:								*
166  *      (none)								*
167  *									*
168  * Modification log:							*
169  * Date      Who  Description						*
170  * --------  ---  -----------------------------------------------------	*
171  *									*
172  * **********************************************************************
173  */
void
SetupReceiveStructures(struct e1000g *Adapter)
{
	PRX_SW_PACKET packet;
	struct e1000_rx_desc *descriptor;
	uint32_t BufferLow;
	uint32_t BufferHigh;
	uint32_t reg_val;
	int i;
	int size;
	e1000g_rx_ring_t *rx_ring;

	rx_ring = Adapter->rx_ring;

	/*
	 * zero out all of the receive buffer descriptor memory
	 * assures any previous data or status is erased
	 */
	bzero(rx_ring->rbd_area,
	    sizeof (struct e1000_rx_desc) * Adapter->NumRxDescriptors);

	/*
	 * First-time init builds the recv/free lists from the contiguous
	 * packet area; on re-init the lists already exist and only the
	 * descriptor buffer addresses are re-seeded.
	 */
	if (Adapter->init_count == 0) {
		/* Init the list of "Receive Buffer" */
		QUEUE_INIT_LIST(&rx_ring->recv_list);

		/* Init the list of "Free Receive Buffer" */
		QUEUE_INIT_LIST(&rx_ring->free_list);

		/*
		 * Setup Receive list and the Free list. Note that
		 * the both were allocated in one packet area.
		 */
		packet = rx_ring->packet_area;
		descriptor = rx_ring->rbd_first;

		for (i = 0; i < Adapter->NumRxDescriptors;
		    i++, packet = packet->next, descriptor++) {

			ASSERT(packet != NULL);
			ASSERT(descriptor != NULL);
#ifdef __sparc
			/* Hardware expects little-endian addresses */
			descriptor->buffer_addr =
			    DWORD_SWAP(packet->rx_buf->dma_address);
#else
			descriptor->buffer_addr =
			    packet->rx_buf->dma_address;
#endif
			/* Add this RX_SW_PACKET to the receive list */
			QUEUE_PUSH_TAIL(&rx_ring->recv_list,
			    &packet->Link);
		}

		for (i = 0; i < Adapter->NumRxFreeList;
		    i++, packet = packet->next) {
			ASSERT(packet != NULL);
			/* Add this RX_SW_PACKET to the free list */
			QUEUE_PUSH_TAIL(&rx_ring->free_list,
			    &packet->Link);
		}
		Adapter->rx_avail_freepkt = Adapter->NumRxFreeList;
	} else {
		/* Setup the initial pointer to the first rx descriptor */
		packet = (PRX_SW_PACKET)
		    QUEUE_GET_HEAD(&rx_ring->recv_list);
		descriptor = rx_ring->rbd_first;

		for (i = 0; i < Adapter->NumRxDescriptors; i++) {
			ASSERT(packet != NULL);
			ASSERT(descriptor != NULL);
#ifdef __sparc
			descriptor->buffer_addr =
			    DWORD_SWAP(packet->rx_buf->dma_address);
#else
			descriptor->buffer_addr =
			    packet->rx_buf->dma_address;
#endif
			/* Get next RX_SW_PACKET */
			packet = (PRX_SW_PACKET)
			    QUEUE_GET_NEXT(&rx_ring->recv_list, &packet->Link);
			descriptor++;
		}
	}

	/*
	 * Setup our descriptor pointers
	 */
	rx_ring->rbd_next = rx_ring->rbd_first;

	size = Adapter->NumRxDescriptors * sizeof (struct e1000_rx_desc);
	E1000_WRITE_REG(&Adapter->Shared, RDLEN, size);
	/*
	 * Read-back of RDLEN; the value itself is unused.
	 * NOTE(review): presumably done to flush the posted write before
	 * programming the base-address registers -- confirm.
	 */
	size = E1000_READ_REG(&Adapter->Shared, RDLEN);

	/* To get lower order bits */
	BufferLow = (uint32_t)rx_ring->rbd_dma_addr;
	/* To get the higher order bits */
	BufferHigh = (uint32_t)(rx_ring->rbd_dma_addr >> 32);

	E1000_WRITE_REG(&Adapter->Shared, RDBAH, BufferHigh);
	E1000_WRITE_REG(&Adapter->Shared, RDBAL, BufferLow);

	/*
	 * Setup our HW Rx Head & Tail descriptor pointers
	 */
	E1000_WRITE_REG(&Adapter->Shared, RDT,
	    (uint32_t)(rx_ring->rbd_last - rx_ring->rbd_first));
	E1000_WRITE_REG(&Adapter->Shared, RDH, 0);

	/*
	 * Setup the Receive Control Register (RCTL), and ENABLE the
	 * receiver. The initial configuration is to: Enable the receiver,
	 * accept broadcasts, discard bad packets (and long packets),
	 * disable VLAN filter checking, set the receive descriptor
	 * minimum threshold size to 1/2, and the receive buffer size to
	 * 2k.
	 */
	reg_val = E1000_RCTL_EN |	/* Enable Receive Unit */
	    E1000_RCTL_BAM |		/* Accept Broadcast Packets */
	    E1000_RCTL_LPE |		/* Large Packet Enable bit */
	    (Adapter->Shared.mc_filter_type << E1000_RCTL_MO_SHIFT) |
	    E1000_RCTL_RDMTS_HALF |
#ifdef __sparc
	    E1000_RCTL_SECRC |		/* Strip Ethernet CRC */
#endif
	    E1000_RCTL_LBM_NO;		/* Loopback Mode = none */

	/* Choose the HW receive buffer size from the configured MTU */
	switch (Adapter->Shared.max_frame_size) {
	case ETHERMAX:
		reg_val |= E1000_RCTL_SZ_2048;
		break;
	case FRAME_SIZE_UPTO_4K:
		reg_val |= E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX;
		break;
	case FRAME_SIZE_UPTO_8K:
		reg_val |= E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX;
		break;
	case FRAME_SIZE_UPTO_10K:
	case FRAME_SIZE_UPTO_16K:
		reg_val |= E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX;
		break;
	default:
		reg_val |= E1000_RCTL_SZ_2048;
		break;
	}

	/* TBI mode needs to see bad (carrier-extended) frames too */
	if (Adapter->Shared.tbi_compatibility_on == 1)
		reg_val |= E1000_RCTL_SBP;

	E1000_WRITE_REG(&Adapter->Shared, RCTL, reg_val);

	reg_val =
	    E1000_RXCSUM_TUOFL |	/* TCP/UDP checksum offload Enable */
	    E1000_RXCSUM_IPOFL;		/* IP checksum offload Enable */

	E1000_WRITE_REG(&Adapter->Shared, RXCSUM, reg_val);

	Adapter->Shared.autoneg_failed = 1;

	Adapter->rx_bcopy_thresh = DEFAULTRXBCOPYTHRESHOLD;
}
333 
334 /*
335  * **********************************************************************
336  * Name:	SetupMulticastTable					*
337  *									*
338  * Description: This routine initializes all of the multicast related	*
339  *	structures.							*
340  *	NOTE -- The device must have been reset before this routine	*
341  *		is called.						*
342  *									*
343  * Author:      Hari Seshadri						*
344  *									*
345  * Arguments:								*
346  *      Adapter - A pointer to our context sensitive "Adapter"		*
347  *	structure.							*
348  *									*
349  * Returns:								*
350  *      (none)								*
351  *									*
352  * Modification log:							*
353  * Date      Who  Description						*
354  * --------  ---  -----------------------------------------------------	*
355  *									*
356  * **********************************************************************
357  */
358 void
359 SetupMulticastTable(struct e1000g *Adapter)
360 {
361 	PUCHAR MulticastBuffer;
362 	UINT32 MulticastAddressCount;
363 	UINT32 TempRctlReg;
364 	USHORT PciCommandWord;
365 	int i;
366 
367 	/*
368 	 * The e1000g has the ability to do perfect filtering of 16
369 	 * addresses. The driver uses one of the e1000g's 16 receive
370 	 * address registers for its node/network/mac/individual address.
371 	 * So, we have room for up to 15 multicast addresses in the CAM,
372 	 * additional MC addresses are handled by the MTA (Multicast Table
373 	 * Array)
374 	 */
375 
376 	TempRctlReg = E1000_READ_REG(&Adapter->Shared, RCTL);
377 
378 	MulticastBuffer = (PUCHAR) (Adapter->mcast_table);
379 
380 	if (Adapter->mcast_count > MAX_NUM_MULTICAST_ADDRESSES) {
381 		e1000g_log(Adapter, CE_WARN,
382 		    "Adapter requested more than %d MC Addresses.\n",
383 		    MAX_NUM_MULTICAST_ADDRESSES);
384 		MulticastAddressCount = MAX_NUM_MULTICAST_ADDRESSES;
385 	} else {
386 		/*
387 		 * Set the number of MC addresses that we are being
388 		 * requested to use
389 		 */
390 		MulticastAddressCount = Adapter->mcast_count;
391 	}
392 	/*
393 	 * The Wiseman 2.0 silicon has an errata by which the receiver will
394 	 * hang  while writing to the receive address registers if the receiver
395 	 * is not in reset before writing to the registers. Updating the RAR
396 	 * is done during the setting up of the multicast table, hence the
397 	 * receiver has to be put in reset before updating the multicast table
398 	 * and then taken out of reset at the end
399 	 */
400 	/*
401 	 * if WMI was enabled then dis able it before issueing the global
402 	 * reset to the hardware.
403 	 */
404 	/*
405 	 * Only required for WISEMAN_2_0
406 	 */
407 	if (Adapter->Shared.mac_type == e1000_82542_rev2_0) {
408 		e1000_pci_clear_mwi(&Adapter->Shared);
409 		/*
410 		 * The e1000g must be in reset before changing any RA
411 		 * registers. Reset receive unit.  The chip will remain in
412 		 * the reset state until software explicitly restarts it.
413 		 */
414 		E1000_WRITE_REG(&Adapter->Shared, RCTL, E1000_RCTL_RST);
415 		/* Allow receiver time to go in to reset */
416 		DelayInMilliseconds(5);
417 	}
418 
419 	e1000_mc_addr_list_update(&Adapter->Shared, MulticastBuffer,
420 	    MulticastAddressCount, 0, Adapter->unicst_total);
421 
422 	/*
423 	 * Only for Wiseman_2_0
424 	 * If MWI was enabled then re-enable it after issueing (as we
425 	 * disabled it up there) the receive reset command.
426 	 * Wainwright does not have a receive reset command and only thing
427 	 * close to it is global reset which will require tx setup also
428 	 */
429 	if (Adapter->Shared.mac_type == e1000_82542_rev2_0) {
430 		/*
431 		 * if WMI was enabled then reenable it after issueing the
432 		 * global or receive reset to the hardware.
433 		 */
434 
435 		/*
436 		 * Take receiver out of reset
437 		 * clear E1000_RCTL_RST bit (and all others)
438 		 */
439 		E1000_WRITE_REG(&Adapter->Shared, RCTL, 0);
440 		DelayInMilliseconds(5);
441 		if (Adapter->Shared.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
442 			e1000_pci_set_mwi(&Adapter->Shared);
443 	}
444 
445 	/*
446 	 * Restore original value
447 	 */
448 	E1000_WRITE_REG(&Adapter->Shared, RCTL, TempRctlReg);
449 }
450 
451 /*
452  * **********************************************************************
453  * Name:	e1000g_get_buf						*
454  *									*
 455  * Description: This routine gets a new RX_SW_PACKET from the	*
 455A *		ring's free list, or NULL if the list is empty.		*
456  *									*
457  * Author:      Hari Seshadri						*
458  *									*
459  * Arguments:								*
460  *									*
461  * Returns:								*
462  *      RX_SW_PACKET*							*
463  *									*
464  * Modification log:							*
465  * Date      Who  Description						*
466  * --------  ---  -----------------------------------------------------	*
467  *									*
468  * **********************************************************************
469  */
470 static RX_SW_PACKET *
471 e1000g_get_buf(e1000g_rx_ring_t *rx_ring)
472 {
473 	struct e1000g *Adapter;
474 	RX_SW_PACKET *packet;
475 
476 	Adapter = rx_ring->adapter;
477 
478 	mutex_enter(&rx_ring->freelist_lock);
479 	packet = (PRX_SW_PACKET)
480 	    QUEUE_POP_HEAD(&rx_ring->free_list);
481 	if (packet != NULL)
482 		Adapter->rx_avail_freepkt--;
483 	mutex_exit(&rx_ring->freelist_lock);
484 
485 	return (packet);
486 }
487 
488 /*
489  * **********************************************************************
490  * Name:	e1000g_receive						*
491  *									*
492  * Description: This routine will process packets spanning multiple	*
493  * 		buffers							*
494  *	- Called from the e1000g_intr Handles interrupt for RX side	*
495  *	- Checks the interrupt cause and process it. At the time of	*
496  *	  calling the interrupt cause register has been already		*
497  *	  cleared.							*
498  *									*
499  * Author:      Vinay K Awasthi						*
500  *									*
501  * Date  :      Feb 9, 2000						*
502  *									*
503  * Arguments:								*
504  *      Adapter - A pointer to our context sensitive "Adapter"		*
505  *      structure.							*
506  *									*
507  * Returns:								*
508  *      Pointer to list of mblks to pass up to GLD			*
509  * Functions Called:							*
510  *      (none)								*
511  *									*
512  * Modification log:							*
513  * Date      Who  Description						*
514  * --------  ---  -----------------------------------------------------	*
515  *									*
516  * **********************************************************************
517  */
mblk_t *
e1000g_receive(struct e1000g *Adapter)
{
	/*
	 * Need :
	 * This function addresses the need to process jumbo frames using
	 * standard 2048 byte buffers. In solaris, getting large aligned
	 * buffers in low memory systems is hard and often it comprises
	 * of multiple cookies rather than just one cookie which our HW
	 * wants. In low memory systems, it is hard to get lots of large
	 * chunks of memory i.e. you can get 256 2k buffers but it is hard
	 * to get 64 8k buffers. Pagesize is playing an important role here.
	 * If system administrator is willing to tune stream and system dma
	 * resources then we may not need this function. At the same time
	 * we may not have this option.
	 * This function will also make our driver do Jumbo frames on Wiseman
	 * hardware.
	 */

	mblk_t *nmp;
	mblk_t *ret_mp;
	mblk_t *ret_nmp;
	struct e1000_rx_desc *current_desc;
	struct e1000_rx_desc *last_desc;
	PRX_SW_PACKET packet;
	PRX_SW_PACKET newpkt;
	USHORT length;
	uint32_t pkt_count;
	uint32_t desc_count;
	unsigned char LastByte;
	boolean_t AcceptFrame;
	boolean_t end_of_packet;
	boolean_t need_copy;
	e1000g_rx_ring_t *rx_ring;
	dma_buffer_t *rx_buf;
	uint16_t cksumflags;
	uint32_t sync_offset;
	uint32_t sync_len;

	ret_mp = NULL;
	ret_nmp = NULL;
	pkt_count = 0;
	desc_count = 0;
	cksumflags = 0;

	rx_ring = Adapter->rx_ring;

	/*
	 * Index of the first descriptor we will consume this pass; it is
	 * the starting offset for the FORDEV sync done before returning.
	 */
	sync_offset = rx_ring->rbd_next - rx_ring->rbd_first;

	/* Sync the Rx descriptor DMA buffers */
	(void) ddi_dma_sync(rx_ring->rbd_dma_handle,
	    0, 0, DDI_DMA_SYNC_FORCPU);

	current_desc = rx_ring->rbd_next;
	if (!(current_desc->status & E1000_RXD_STAT_DD)) {
		/*
		 * don't send anything up. just clear the RFD
		 */
		Adapter->rx_none++;
		return (ret_mp);
	}

	/*
	 * Loop through the receive descriptors starting at the last known
	 * descriptor owned by the hardware that begins a packet.
	 */
	while ((current_desc->status & E1000_RXD_STAT_DD) &&
	    (pkt_count < Adapter->MaxNumReceivePackets)) {

		desc_count++;
		/*
		 * Now this can happen in Jumbo frame situation.
		 */
		if (current_desc->status & E1000_RXD_STAT_EOP) {
			/* packet has EOP set */
			end_of_packet = B_TRUE;
		} else {
			/*
			 * If this received buffer does not have the
			 * End-Of-Packet bit set, the received packet
			 * will consume multiple buffers. We won't send this
			 * packet upstack till we get all the related buffers.
			 */
			end_of_packet = B_FALSE;
		}

		/*
		 * Get a pointer to the actual receive buffer
		 * The mp->b_rptr is mapped to The CurrentDescriptor
		 * Buffer Address.
		 */
		packet =
		    (PRX_SW_PACKET)QUEUE_GET_HEAD(&rx_ring->recv_list);
		ASSERT(packet != NULL);

		rx_buf = packet->rx_buf;

		length = current_desc->length;

		/* Sync the data buffer before the CPU reads it */
		switch (packet->dma_type) {
#ifdef __sparc
		case USE_DVMA:
			dvma_sync(rx_buf->dma_handle, 0,
			    DDI_DMA_SYNC_FORKERNEL);
			break;
#endif
		case USE_DMA:
			(void) ddi_dma_sync(rx_buf->dma_handle,
			    E1000G_IPALIGNROOM, length,
			    DDI_DMA_SYNC_FORCPU);
			break;
		default:
			ASSERT(B_FALSE);
			break;
		}

		LastByte =
		    *((unsigned char *)rx_buf->address + length - 1);

		if (TBI_ACCEPT(&Adapter->Shared,
			current_desc->status,
			current_desc->errors,
			current_desc->length, LastByte)) {

			AcceptFrame = B_TRUE;
			mutex_enter(&Adapter->TbiCntrMutex);
			AdjustTbiAcceptedStats(Adapter, length,
			    Adapter->Shared.mac_addr);
			mutex_exit(&Adapter->TbiCntrMutex);
			/*
			 * NOTE(review): drops the trailing byte on frames
			 * accepted via the TBI workaround -- presumably the
			 * carrier-extension byte; confirm against the TBI
			 * compatibility errata.
			 */
			length--;
		} else {
			AcceptFrame = B_FALSE;
		}
		/*
		 * Indicate the packet to the NOS if it was good.
		 * Normally, hardware will discard bad packets for us.
		 * Check for the packet to be a valid Ethernet packet
		 */

		/*
		 * There can be few packets which are less than 2k but
		 * more than 1514 bytes length. They are really jumbo
		 * packets, but for our driver's buffer they can still
		 * fit in one buffer as minimum buffer size if 2K. In our
		 * above condition, we are taking all EOP packets as
		 * JumboPacket=False... JumboPacket=FALSE just tells us
		 * that now we can process this packet...as we have
		 * received complete packet.
		 */

		if (!((current_desc->errors == 0) ||
		    (current_desc->errors &
		    (E1000_RXD_ERR_TCPE | E1000_RXD_ERR_IPE)) ||
		    ((Adapter->Shared.tbi_compatibility_on == 1) &&
		    (current_desc->errors == E1000_RXD_ERR_CE)) ||
		    AcceptFrame)) {
			/*
			 * error in incoming packet, either the packet is not a
			 * ethernet size packet, or the packet has an error. In
			 * either case, the packet will simply be discarded.
			 */
			e1000g_DEBUGLOG_0(Adapter, e1000g_INFO_LEVEL,
			    "Process Receive Interrupts: Error in Packet\n");

			Adapter->rx_error++;
			/*
			 * Returning here as we are done here. There is
			 * no point in waiting for while loop to elapse
			 * and the things which were done. More efficient
			 * and less error prone...
			 */
			goto rx_drop;
		}

		need_copy = B_TRUE;

		/* Small frames are always bcopy'ed into a fresh mblk */
		if (length <= Adapter->rx_bcopy_thresh)
			goto rx_copy;

		/*
		 * Get the pre-constructed mblk that was associated
		 * to the receive data buffer.
		 */
		if (packet->mp == NULL) {
			packet->mp = desballoc((unsigned char *)
			    rx_buf->address - E1000G_IPALIGNROOM,
			    length + E1000G_IPALIGNROOM,
			    BPRI_MED, &packet->free_rtn);

			if (packet->mp != NULL) {
				packet->mp->b_rptr += E1000G_IPALIGNROOM;
				packet->mp->b_wptr += E1000G_IPALIGNROOM;
			} else {
				Adapter->rx_esballoc_fail++;
			}
		}

		if (packet->mp != NULL) {
			/*
			 * We have two sets of buffer pool. One associated with
			 * the Rxdescriptors and other a freelist buffer pool.
			 * Each time we get a good packet, Try to get a buffer
			 * from the freelist pool using e1000g_get_buf. If we
			 * get free buffer, then replace the descriptor buffer
			 * address with the free buffer we just got, and pass
			 * the pre-constructed mblk upstack. (note no copying)
			 *
			 * If we failed to get a free buffer, then try to
			 * allocate a new buffer(mp) and copy the recv buffer
			 * content to our newly allocated buffer(mp). Don't
			 * disturb the desriptor buffer address. (note copying)
			 */
			newpkt = e1000g_get_buf(rx_ring);

			if (newpkt != NULL) {
				/*
				 * Get the mblk associated to the data,
				 * and strip it off the sw packet.
				 */
				nmp = packet->mp;
				packet->mp = NULL;
				packet->flag |= E1000G_RX_SW_SENDUP;

				/*
				 * Now replace old buffer with the new
				 * one we got from free list
				 * Both the RxSwPacket as well as the
				 * Receive Buffer Descriptor will now
				 * point to this new packet.
				 */
				packet = newpkt;
#ifdef __sparc
				current_desc->buffer_addr =
				    DWORD_SWAP(newpkt->rx_buf->dma_address);
#else
				current_desc->buffer_addr =
				    newpkt->rx_buf->dma_address;
#endif
				need_copy = B_FALSE;
			} else {
				Adapter->rx_no_freepkt++;
			}
		}

rx_copy:
		if (need_copy) {
			/*
			 * No buffers available on free list,
			 * bcopy the data from the buffer and
			 * keep the original buffer. Dont want to
			 * do this.. Yack but no other way
			 */
			if ((nmp =
				allocb(length + E1000G_IPALIGNROOM,
				    BPRI_MED)) == NULL) {
				/*
				 * The system has no buffers available
				 * to send up the incoming packet, hence
				 * the packet will have to be processed
				 * when there're more buffers available.
				 */
				Adapter->rx_allocb_fail++;
				goto rx_drop;
			}
			nmp->b_rptr += E1000G_IPALIGNROOM;
			nmp->b_wptr += E1000G_IPALIGNROOM;
			/*
			 * The free list did not have any buffers
			 * available, so, the received packet will
			 * have to be copied into a mp and the original
			 * buffer will have to be retained for future
			 * packet reception.
			 */
			bcopy(rx_buf->address,
			    nmp->b_wptr, length);
		}

		/*
		 * The RX_SW_PACKET MUST be popped off the
		 * RxSwPacketList before either a putnext or freemsg
		 * is done on the mp that has now been created by the
		 * desballoc. If not, it is possible that the free
		 * routine will get called from the interrupt context
		 * and try to put this packet on the free list
		 */
		(PRX_SW_PACKET)QUEUE_POP_HEAD(&rx_ring->recv_list);

		ASSERT(nmp != NULL);
		nmp->b_wptr += length;

		/*
		 * Hardware checksum results are only examined on the first
		 * fragment (rx_mblk == NULL) of an IP frame.
		 */
		if ((Adapter->rx_mblk == NULL) &&
		    (GET_ETHER_TYPE((struct ether_header *)nmp->b_rptr) ==
		    ETHERTYPE_IP)) {
			/*
			 *  TCP/UDP checksum offload and
			 *  IP checksum offload
			 */
			if (!(current_desc->status &
				E1000_RXD_STAT_IXSM)) {
				/*
				 * Check TCP/UDP checksum
				 */
				if ((current_desc->status &
					E1000_RXD_STAT_TCPCS) &&
				    !(current_desc->errors &
					E1000_RXD_ERR_TCPE))
					cksumflags |= HCK_FULLCKSUM |
						HCK_FULLCKSUM_OK;
				/*
				 * Check IP Checksum
				 */
				if ((current_desc->status &
					E1000_RXD_STAT_IPCS) &&
				    !(current_desc->errors &
					E1000_RXD_ERR_IPE))
					cksumflags |= HCK_IPV4_HDRCKSUM;
			}
		}

		/*
		 * We need to maintain our packet chain in the global
		 * Adapter structure, for the Rx processing can end
		 * with a fragment that has no EOP set.
		 */
		if (Adapter->rx_mblk == NULL) {
			/* Get the head of the message chain */
			Adapter->rx_mblk = nmp;
			Adapter->rx_mblk_tail = nmp;
			Adapter->rx_packet_len = length;
		} else {	/* Not the first packet */
			/* Continue adding buffers */
			Adapter->rx_mblk_tail->b_cont = nmp;
			Adapter->rx_mblk_tail = nmp;
			Adapter->rx_packet_len += length;
		}
		ASSERT(Adapter->rx_mblk != NULL);
		ASSERT(Adapter->rx_mblk_tail != NULL);
		ASSERT(Adapter->rx_mblk_tail->b_cont == NULL);

		/*
		 * Now this MP is ready to travel upwards but some more
		 * fragments are coming.
		 * We will send packet upwards as soon as we get EOP
		 * set on the packet.
		 */
		if (!end_of_packet) {
			/*
			 * continue to get the next descriptor,
			 * Tail would be advanced at the end
			 */
			goto rx_next_desc;
		}

		/*
		 * Found packet with EOP
		 * Process the last fragment.
		 */
		if (cksumflags != 0) {
			(void) hcksum_assoc(Adapter->rx_mblk,
			    NULL, NULL, 0, 0, 0, 0, cksumflags, 0);
			cksumflags = 0;
		}

		/*
		 * Jumbo Frame Counters
		 */
		if (Adapter->ProfileJumboTraffic) {
			if ((Adapter->rx_packet_len > ETHERMAX) &&
			    (Adapter->rx_packet_len <= FRAME_SIZE_UPTO_4K))
				Adapter->JumboRx_4K++;

			if ((Adapter->rx_packet_len > FRAME_SIZE_UPTO_4K) &&
			    (Adapter->rx_packet_len <= FRAME_SIZE_UPTO_8K))
				Adapter->JumboRx_8K++;

			if ((Adapter->rx_packet_len > FRAME_SIZE_UPTO_8K) &&
			    (Adapter->rx_packet_len <= FRAME_SIZE_UPTO_16K))
				Adapter->JumboRx_16K++;
		}
		/*
		 * Count packets that span multi-descriptors
		 */
		if (Adapter->rx_mblk->b_cont != NULL)
			Adapter->rx_multi_desc++;

		/*
		 * Append to list to send upstream
		 */
		if (ret_mp == NULL) {
			ret_mp = ret_nmp = Adapter->rx_mblk;
		} else {
			ret_nmp->b_next = Adapter->rx_mblk;
			ret_nmp = Adapter->rx_mblk;
		}
		ret_nmp->b_next = NULL;

		Adapter->rx_mblk = NULL;
		Adapter->rx_mblk_tail = NULL;
		Adapter->rx_packet_len = 0;

		pkt_count++;

rx_next_desc:
		/*
		 * Zero out the receive descriptors status
		 */
		current_desc->status = 0;

		if (current_desc == rx_ring->rbd_last)
			rx_ring->rbd_next = rx_ring->rbd_first;
		else
			rx_ring->rbd_next++;

		last_desc = current_desc;
		current_desc = rx_ring->rbd_next;

		/*
		 * Put the buffer that we just indicated back
		 * at the end of our list
		 */
		QUEUE_PUSH_TAIL(&rx_ring->recv_list,
		    &packet->Link);
	}	/* while loop */

	if (pkt_count >= Adapter->MaxNumReceivePackets)
		Adapter->rx_exceed_pkt++;

	/* Sync the Rx descriptor DMA buffers */
	sync_len = desc_count;
	/* Check the wrap-around case */
	if ((sync_offset + sync_len) <= Adapter->NumRxDescriptors) {
		(void) ddi_dma_sync(rx_ring->rbd_dma_handle,
		    sync_offset * sizeof (struct e1000_rx_desc),
		    sync_len * sizeof (struct e1000_rx_desc),
		    DDI_DMA_SYNC_FORDEV);
	} else {
		/* Wrapped: sync from sync_offset to the ring end ... */
		(void) ddi_dma_sync(rx_ring->rbd_dma_handle,
		    sync_offset * sizeof (struct e1000_rx_desc),
		    0,
		    DDI_DMA_SYNC_FORDEV);
		/* ... and then the remainder from the ring start */
		sync_len = sync_offset + sync_len - Adapter->NumRxDescriptors;
		(void) ddi_dma_sync(rx_ring->rbd_dma_handle,
		    0,
		    sync_len * sizeof (struct e1000_rx_desc),
		    DDI_DMA_SYNC_FORDEV);
	}

	/*
	 * Advance the E1000's Receive Queue #0 "Tail Pointer".
	 */
	E1000_WRITE_REG(&Adapter->Shared, RDT,
	    (uint32_t)(last_desc - rx_ring->rbd_first));

	return (ret_mp);

rx_drop:
	/*
	 * Zero out the receive descriptors status
	 */
	current_desc->status = 0;

	/* Sync the Rx descriptor DMA buffers */
	sync_len = desc_count;
	/* Check the wrap-around case */
	if ((sync_offset + sync_len) <= Adapter->NumRxDescriptors) {
		(void) ddi_dma_sync(rx_ring->rbd_dma_handle,
		    sync_offset * sizeof (struct e1000_rx_desc),
		    sync_len * sizeof (struct e1000_rx_desc),
		    DDI_DMA_SYNC_FORDEV);
	} else {
		(void) ddi_dma_sync(rx_ring->rbd_dma_handle,
		    sync_offset * sizeof (struct e1000_rx_desc),
		    0,
		    DDI_DMA_SYNC_FORDEV);
		sync_len = sync_offset + sync_len - Adapter->NumRxDescriptors;
		(void) ddi_dma_sync(rx_ring->rbd_dma_handle,
		    0,
		    sync_len * sizeof (struct e1000_rx_desc),
		    DDI_DMA_SYNC_FORDEV);
	}

	if (current_desc == rx_ring->rbd_last)
		rx_ring->rbd_next = rx_ring->rbd_first;
	else
		rx_ring->rbd_next++;

	last_desc = current_desc;

	/* Recycle the dropped packet's buffer to the tail of recv_list */
	(PRX_SW_PACKET)QUEUE_POP_HEAD(&rx_ring->recv_list);

	QUEUE_PUSH_TAIL(&rx_ring->recv_list, &packet->Link);
	/*
	 * Reclaim all old buffers already allocated during
	 * Jumbo receives.....for incomplete reception
	 */
	if (Adapter->rx_mblk != NULL) {
		freemsg(Adapter->rx_mblk);
		Adapter->rx_mblk = NULL;
		Adapter->rx_mblk_tail = NULL;
		Adapter->rx_packet_len = 0;
	}
	/*
	 * Advance the E1000's Receive Queue #0 "Tail Pointer".
	 */
	E1000_WRITE_REG(&Adapter->Shared, RDT,
	    (uint32_t)(last_desc - rx_ring->rbd_first));

	return (ret_mp);
}
1027