xref: /titanic_52/usr/src/uts/common/io/ixgbe/ixgbe_rx.c (revision 3c112a2b34403220c06c3e2fcac403358cfba168)
/*
 * CDDL HEADER START
 *
 * Copyright(c) 2007-2009 Intel Corporation. All rights reserved.
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include "ixgbe_sw.h"

/* function prototypes */
static mblk_t *ixgbe_rx_bind(ixgbe_rx_data_t *, uint32_t, uint32_t);
static mblk_t *ixgbe_rx_copy(ixgbe_rx_data_t *, uint32_t, uint32_t);
static void ixgbe_rx_assoc_hcksum(mblk_t *, uint32_t);
static mblk_t *ixgbe_lro_bind(ixgbe_rx_data_t *, uint32_t, uint32_t, uint32_t);
static mblk_t *ixgbe_lro_copy(ixgbe_rx_data_t *, uint32_t, uint32_t, uint32_t);
static int ixgbe_lro_get_start(ixgbe_rx_data_t *, uint32_t);
static uint32_t ixgbe_lro_get_first(ixgbe_rx_data_t *, uint32_t);

#ifndef IXGBE_DEBUG
#pragma inline(ixgbe_rx_assoc_hcksum)
#pragma inline(ixgbe_lro_get_start)
#pragma inline(ixgbe_lro_get_first)
#endif

/*
 * ixgbe_rx_recycle - The callback function to reclaim an rx buffer.
 *
 * This function is called when an mblk is freed by the user through
 * a freeb() call (only for mblks constructed through desballoc()).
 * It returns the freed buffer to the free list.
 */
void
ixgbe_rx_recycle(caddr_t arg)
{
	ixgbe_t *ixgbe;
	ixgbe_rx_ring_t *rx_ring;
	ixgbe_rx_data_t	*rx_data;
	rx_control_block_t *recycle_rcb;
	uint32_t free_index;
	uint32_t ref_cnt;

	recycle_rcb = (rx_control_block_t *)(uintptr_t)arg;
	rx_data = recycle_rcb->rx_data;
	rx_ring = rx_data->rx_ring;
	ixgbe = rx_ring->ixgbe;

	if (recycle_rcb->ref_cnt == 0) {
		/*
		 * This case only happens when rx buffers are being freed
		 * in ixgbe_stop() and freemsg() is called.
		 */
		return;
	}

	ASSERT(recycle_rcb->mp == NULL);

	/*
	 * Use the recycled data buffer to generate a new mblk
	 */
	recycle_rcb->mp = desballoc((unsigned char *)
	    recycle_rcb->rx_buf.address,
	    recycle_rcb->rx_buf.size,
	    0, &recycle_rcb->free_rtn);

	/*
	 * Put the recycled rx control block into the free list
	 */
	mutex_enter(&rx_data->recycle_lock);

	free_index = rx_data->rcb_tail;
	ASSERT(rx_data->free_list[free_index] == NULL);

	rx_data->free_list[free_index] = recycle_rcb;
	rx_data->rcb_tail = NEXT_INDEX(free_index, 1, rx_data->free_list_size);

	mutex_exit(&rx_data->recycle_lock);

	/*
	 * The atomic operation on the number of available rx control
	 * blocks in the free list is used to make the recycling mutually
	 * exclusive with the receiving.
	 */
	atomic_inc_32(&rx_data->rcb_free);
	ASSERT(rx_data->rcb_free <= rx_data->free_list_size);

	/*
	 * Consider the case where the interface is unplumbed while
	 * some buffers are still held by the upper layer. When such
	 * a buffer is returned, we need to free it.
	 */
	ref_cnt = atomic_dec_32_nv(&recycle_rcb->ref_cnt);
	if (ref_cnt == 0) {
		if (recycle_rcb->mp != NULL) {
			freemsg(recycle_rcb->mp);
			recycle_rcb->mp = NULL;
		}

		ixgbe_free_dma_buffer(&recycle_rcb->rx_buf);

		mutex_enter(&ixgbe->rx_pending_lock);
		atomic_dec_32(&rx_data->rcb_pending);
		atomic_dec_32(&ixgbe->rcb_pending);

		/*
		 * When no buffers belonging to this rx_data are held by
		 * the upper layer, the rx_data can be freed.
		 */
		if ((rx_data->flag & IXGBE_RX_STOPPED) &&
		    (rx_data->rcb_pending == 0))
			ixgbe_free_rx_ring_data(rx_data);

		mutex_exit(&ixgbe->rx_pending_lock);
	}
}

/*
 * ixgbe_rx_copy - Use copy to process the received packet.
 *
 * This function will use bcopy to process the packet
 * and send the copied packet upstream.
 */
static mblk_t *
ixgbe_rx_copy(ixgbe_rx_data_t *rx_data, uint32_t index, uint32_t pkt_len)
{
	ixgbe_t *ixgbe;
	rx_control_block_t *current_rcb;
	mblk_t *mp;

	ixgbe = rx_data->rx_ring->ixgbe;
	current_rcb = rx_data->work_list[index];

	DMA_SYNC(&current_rcb->rx_buf, DDI_DMA_SYNC_FORKERNEL);

	if (ixgbe_check_dma_handle(current_rcb->rx_buf.dma_handle) !=
	    DDI_FM_OK) {
		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
		atomic_or_32(&ixgbe->ixgbe_state, IXGBE_ERROR);
		return (NULL);
	}

	/*
	 * Allocate buffer to receive this packet
	 */
	mp = allocb(pkt_len + IPHDR_ALIGN_ROOM, 0);
	if (mp == NULL) {
		ixgbe_log(ixgbe, "ixgbe_rx_copy: allocate buffer failed");
		return (NULL);
	}

	/*
	 * Copy the data received into the new cluster
	 */
	mp->b_rptr += IPHDR_ALIGN_ROOM;
	bcopy(current_rcb->rx_buf.address, mp->b_rptr, pkt_len);
	mp->b_wptr = mp->b_rptr + pkt_len;

	return (mp);
}

/*
 * ixgbe_rx_bind - Use existing DMA buffer to build mblk for receiving.
 *
 * This function will use the pre-bound DMA buffer to receive the packet
 * and build an mblk that will be sent upstream.
 */
static mblk_t *
ixgbe_rx_bind(ixgbe_rx_data_t *rx_data, uint32_t index, uint32_t pkt_len)
{
	rx_control_block_t *current_rcb;
	rx_control_block_t *free_rcb;
	uint32_t free_index;
	mblk_t *mp;
	ixgbe_t	*ixgbe = rx_data->rx_ring->ixgbe;

	/*
	 * If the free list is empty, we cannot proceed to send
	 * the current DMA buffer upstream. We'll have to return
	 * and use bcopy to process the packet.
	 */
	if (ixgbe_atomic_reserve(&rx_data->rcb_free, 1) < 0)
		return (NULL);

	current_rcb = rx_data->work_list[index];
	/*
	 * If the mp of the rx control block is NULL, try to do
	 * desballoc again.
	 */
	if (current_rcb->mp == NULL) {
		current_rcb->mp = desballoc((unsigned char *)
		    current_rcb->rx_buf.address,
		    current_rcb->rx_buf.size,
		    0, &current_rcb->free_rtn);
		/*
		 * If we fail to build an mblk using the current
		 * DMA buffer, we have to return and use bcopy to
		 * process the packet.
		 */
		if (current_rcb->mp == NULL) {
			atomic_inc_32(&rx_data->rcb_free);
			return (NULL);
		}
	}
	/*
	 * Sync up the data received
	 */
	DMA_SYNC(&current_rcb->rx_buf, DDI_DMA_SYNC_FORKERNEL);

	if (ixgbe_check_dma_handle(current_rcb->rx_buf.dma_handle) !=
	    DDI_FM_OK) {
		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
		atomic_inc_32(&rx_data->rcb_free);
		atomic_or_32(&ixgbe->ixgbe_state, IXGBE_ERROR);
		return (NULL);
	}

	mp = current_rcb->mp;
	current_rcb->mp = NULL;
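	/*
	 * Increase the reference count of the rx control block to indicate
	 * that its DMA buffer is being loaned upstream; the count is
	 * dropped again in ixgbe_rx_recycle() when the mblk is freed.
	 */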
	atomic_inc_32(&current_rcb->ref_cnt);

	mp->b_wptr = mp->b_rptr + pkt_len;
	mp->b_next = mp->b_cont = NULL;

	/*
	 * Strip off one free rx control block from the free list
	 */
	free_index = rx_data->rcb_head;
	free_rcb = rx_data->free_list[free_index];
	ASSERT(free_rcb != NULL);
	rx_data->free_list[free_index] = NULL;
	rx_data->rcb_head = NEXT_INDEX(free_index, 1, rx_data->free_list_size);

	/*
	 * Put the rx control block on the work list
	 */
	rx_data->work_list[index] = free_rcb;

	return (mp);
}

/*
 * ixgbe_lro_bind - Use existing DMA buffer to build LRO mblk for receiving.
 *
 * This function will use pre-bound DMA buffers to receive the packet
 * and build an LRO mblk chain that will be sent upstream.
 */
static mblk_t *
ixgbe_lro_bind(ixgbe_rx_data_t *rx_data, uint32_t lro_start,
    uint32_t lro_num, uint32_t pkt_len)
{
	rx_control_block_t *current_rcb;
	union ixgbe_adv_rx_desc *current_rbd;
	rx_control_block_t *free_rcb;
	uint32_t free_index;
	int lro_next;
	uint32_t last_pkt_len;
	uint32_t i;
	mblk_t *mp;
	mblk_t *mblk_head;
	mblk_t **mblk_tail;
	ixgbe_t	*ixgbe = rx_data->rx_ring->ixgbe;

	/*
	 * If the free list is empty, we cannot proceed to send
	 * the current DMA buffer upstream. We'll have to return
	 * and use bcopy to process the packet.
	 */
	if (ixgbe_atomic_reserve(&rx_data->rcb_free, lro_num) < 0)
		return (NULL);
	current_rcb = rx_data->work_list[lro_start];

	/*
	 * If any one of the rx data blocks cannot support the
	 * LRO bind operation, we'll have to return and use
	 * bcopy to process the LRO packet.
	 */
	for (i = lro_num; i > 0; i--) {
		/*
		 * Sync up the data received
		 */
		DMA_SYNC(&current_rcb->rx_buf, DDI_DMA_SYNC_FORKERNEL);

		if (ixgbe_check_dma_handle(current_rcb->rx_buf.dma_handle) !=
		    DDI_FM_OK) {
			ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
			atomic_add_32(&rx_data->rcb_free, lro_num);
			atomic_or_32(&ixgbe->ixgbe_state, IXGBE_ERROR);
			return (NULL);
		}

		/*
		 * If the mp of the rx control block is NULL, try to do
		 * desballoc again.
		 */
		if (current_rcb->mp == NULL) {
			current_rcb->mp = desballoc((unsigned char *)
			    current_rcb->rx_buf.address,
			    current_rcb->rx_buf.size,
			    0, &current_rcb->free_rtn);
			/*
			 * If we fail to build an mblk using the current
			 * DMA buffer, we have to return and use bcopy to
			 * process the packet.
			 */
			if (current_rcb->mp == NULL) {
				atomic_add_32(&rx_data->rcb_free, lro_num);
				return (NULL);
			}
		}
		if (current_rcb->lro_next != -1)
			lro_next = current_rcb->lro_next;
		current_rcb = rx_data->work_list[lro_next];
	}

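	/*
	 * Walk the LRO chain again, this time handing each pre-bound DMA
	 * buffer upstream: link the per-buffer mblks together via b_cont
	 * and replace each loaned rx control block on the work list with
	 * one taken from the free list.
	 */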
	mblk_head = NULL;
	mblk_tail = &mblk_head;
	lro_next = lro_start;
	last_pkt_len = pkt_len - ixgbe->rx_buf_size * (lro_num - 1);
	current_rcb = rx_data->work_list[lro_next];
	current_rbd = &rx_data->rbd_ring[lro_next];
	while (lro_num--) {
		mp = current_rcb->mp;
		current_rcb->mp = NULL;
		atomic_inc_32(&current_rcb->ref_cnt);
		if (lro_num != 0)
			mp->b_wptr = mp->b_rptr + ixgbe->rx_buf_size;
		else
			mp->b_wptr = mp->b_rptr + last_pkt_len;
		mp->b_next = mp->b_cont = NULL;
		*mblk_tail = mp;
		mblk_tail = &mp->b_cont;

		/*
		 * Strip off one free rx control block from the free list
		 */
		free_index = rx_data->rcb_head;
		free_rcb = rx_data->free_list[free_index];
		ASSERT(free_rcb != NULL);
		rx_data->free_list[free_index] = NULL;
		rx_data->rcb_head = NEXT_INDEX(free_index, 1,
		    rx_data->free_list_size);

		/*
		 * Put the rx control block on the work list
		 */
		rx_data->work_list[lro_next] = free_rcb;
		lro_next = current_rcb->lro_next;
		current_rcb->lro_next = -1;
		current_rcb->lro_prev = -1;
		current_rcb->lro_pkt = B_FALSE;
		current_rbd->read.pkt_addr = free_rcb->rx_buf.dma_address;
		current_rbd->read.hdr_addr = 0;
		if (lro_next == -1)
			break;
		current_rcb = rx_data->work_list[lro_next];
		current_rbd = &rx_data->rbd_ring[lro_next];
	}
	return (mblk_head);
}

/*
 * ixgbe_lro_copy - Use copy to process the received LRO packet.
 *
 * This function will use bcopy to process the LRO packet
 * and send the copied packet upstream.
 */
static mblk_t *
ixgbe_lro_copy(ixgbe_rx_data_t *rx_data, uint32_t lro_start,
    uint32_t lro_num, uint32_t pkt_len)
{
	ixgbe_t *ixgbe;
	rx_control_block_t *current_rcb;
	union ixgbe_adv_rx_desc *current_rbd;
	mblk_t *mp;
	uint32_t last_pkt_len;
	int lro_next;
	uint32_t i;

	ixgbe = rx_data->rx_ring->ixgbe;

	/*
	 * Allocate buffer to receive this LRO packet
	 */
	mp = allocb(pkt_len + IPHDR_ALIGN_ROOM, 0);
	if (mp == NULL) {
		ixgbe_log(ixgbe, "LRO copy MP alloc failed");
		return (NULL);
	}

	current_rcb = rx_data->work_list[lro_start];

	/*
	 * Sync up the LRO packet data received
	 */
	for (i = lro_num; i > 0; i--) {
		DMA_SYNC(&current_rcb->rx_buf, DDI_DMA_SYNC_FORKERNEL);

		if (ixgbe_check_dma_handle(current_rcb->rx_buf.dma_handle) !=
		    DDI_FM_OK) {
			ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
			atomic_or_32(&ixgbe->ixgbe_state, IXGBE_ERROR);
			return (NULL);
		}
		if (current_rcb->lro_next != -1)
			lro_next = current_rcb->lro_next;
		current_rcb = rx_data->work_list[lro_next];
	}
	lro_next = lro_start;
	current_rcb = rx_data->work_list[lro_next];
	current_rbd = &rx_data->rbd_ring[lro_next];
	last_pkt_len = pkt_len - ixgbe->rx_buf_size * (lro_num - 1);

	/*
	 * Copy the data received into the new cluster
	 */
	mp->b_rptr += IPHDR_ALIGN_ROOM;
	mp->b_wptr += IPHDR_ALIGN_ROOM;
	while (lro_num--) {
		if (lro_num != 0) {
			bcopy(current_rcb->rx_buf.address, mp->b_wptr,
			    ixgbe->rx_buf_size);
			mp->b_wptr += ixgbe->rx_buf_size;
		} else {
			bcopy(current_rcb->rx_buf.address, mp->b_wptr,
			    last_pkt_len);
			mp->b_wptr += last_pkt_len;
		}
		lro_next = current_rcb->lro_next;
		current_rcb->lro_next = -1;
		current_rcb->lro_prev = -1;
		current_rcb->lro_pkt = B_FALSE;
		current_rbd->read.pkt_addr = current_rcb->rx_buf.dma_address;
		current_rbd->read.hdr_addr = 0;
		if (lro_next == -1)
			break;
		current_rcb = rx_data->work_list[lro_next];
		current_rbd = &rx_data->rbd_ring[lro_next];
	}

	return (mp);
}

/*
 * ixgbe_lro_get_start - get the start rcb index in one LRO packet
 */
static int
ixgbe_lro_get_start(ixgbe_rx_data_t *rx_data, uint32_t rx_next)
{
	int lro_prev;
	int lro_start;
	uint32_t lro_num = 1;
	rx_control_block_t *prev_rcb;
	rx_control_block_t *current_rcb = rx_data->work_list[rx_next];
	lro_prev = current_rcb->lro_prev;

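	/*
	 * Walk backwards through the lro_prev links until the first rx
	 * control block of this LRO packet is found, counting how many
	 * buffers the packet spans along the way.
	 */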
	while (lro_prev != -1) {
		lro_num++;
		prev_rcb = rx_data->work_list[lro_prev];
		lro_start = lro_prev;
		lro_prev = prev_rcb->lro_prev;
	}
	rx_data->lro_num = lro_num;
	return (lro_start);
}

/*
 * ixgbe_lro_get_first - get the first LRO rcb index
 */
static uint32_t
ixgbe_lro_get_first(ixgbe_rx_data_t *rx_data, uint32_t rx_next)
{
	rx_control_block_t *current_rcb;
	uint32_t lro_first;
	lro_first = rx_data->lro_first;
	current_rcb = rx_data->work_list[lro_first];
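	/*
	 * Advance lro_first past control blocks that are not part of an
	 * in-progress LRO packet, stopping either at the first block that
	 * still belongs to one or at rx_next.
	 */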
	while ((!current_rcb->lro_pkt) && (lro_first != rx_next)) {
		lro_first = NEXT_INDEX(lro_first, 1, rx_data->ring_size);
		current_rcb = rx_data->work_list[lro_first];
	}
	rx_data->lro_first = lro_first;
	return (lro_first);
}

/*
 * ixgbe_rx_assoc_hcksum - Check the rx hardware checksum status and associate
 * the hcksum flags.
 */
static void
ixgbe_rx_assoc_hcksum(mblk_t *mp, uint32_t status_error)
{
	uint32_t hcksum_flags = 0;

	/*
	 * Check TCP/UDP checksum
	 */
	if ((status_error & IXGBE_RXD_STAT_L4CS) &&
	    !(status_error & IXGBE_RXDADV_ERR_TCPE))
		hcksum_flags |= HCK_FULLCKSUM_OK;

	/*
	 * Check IP Checksum
	 */
	if ((status_error & IXGBE_RXD_STAT_IPCS) &&
	    !(status_error & IXGBE_RXDADV_ERR_IPE))
		hcksum_flags |= HCK_IPV4_HDRCKSUM_OK;

	if (hcksum_flags != 0) {
		mac_hcksum_set(mp, 0, 0, 0, 0, hcksum_flags);
	}
}

/*
 * ixgbe_ring_rx - Receive the data of one ring.
 *
 * This function goes through the h/w descriptors of one specified rx ring
 * and receives the data if the descriptor status shows the data is ready.
 * It returns a chain of mblks containing the received data, to be
 * passed up to mac_rx().
 */
mblk_t *
ixgbe_ring_rx(ixgbe_rx_ring_t *rx_ring, int poll_bytes)
{
	union ixgbe_adv_rx_desc *current_rbd;
	rx_control_block_t *current_rcb;
	mblk_t *mp;
	mblk_t *mblk_head;
	mblk_t **mblk_tail;
	uint32_t rx_next;
	uint32_t rx_tail;
	uint32_t pkt_len;
	uint32_t status_error;
	uint32_t pkt_num;
	uint32_t rsc_cnt;
	uint32_t lro_first;
	uint32_t lro_start;
	uint32_t lro_next;
	boolean_t lro_eop;
	uint32_t received_bytes;
	ixgbe_t *ixgbe = rx_ring->ixgbe;
	ixgbe_rx_data_t *rx_data;

	if ((ixgbe->ixgbe_state & IXGBE_SUSPENDED) ||
	    (ixgbe->ixgbe_state & IXGBE_ERROR) ||
	    !(ixgbe->ixgbe_state & IXGBE_STARTED))
		return (NULL);

	rx_data = rx_ring->rx_data;
	lro_eop = B_FALSE;
	mblk_head = NULL;
	mblk_tail = &mblk_head;

	/*
	 * Sync the receive descriptors before accepting the packets
	 */
	DMA_SYNC(&rx_data->rbd_area, DDI_DMA_SYNC_FORKERNEL);

	if (ixgbe_check_dma_handle(rx_data->rbd_area.dma_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
		atomic_or_32(&ixgbe->ixgbe_state, IXGBE_ERROR);
		return (NULL);
	}

	/*
	 * Get the start point of rx bd ring which should be examined
	 * during this cycle.
	 */
	rx_next = rx_data->rbd_next;
	current_rbd = &rx_data->rbd_ring[rx_next];
	received_bytes = 0;
	pkt_num = 0;
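	/*
	 * Process descriptors as long as hardware has marked them done
	 * (IXGBE_RXD_STAT_DD).
	 */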
	status_error = current_rbd->wb.upper.status_error;
	while (status_error & IXGBE_RXD_STAT_DD) {
		/*
		 * If the adapter has found errors, but the error is a
		 * hardware checksum error, do not discard the packet:
		 * let the upper layer compute the checksum. Otherwise,
		 * discard the packet.
		 */
		if ((status_error & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) ||
		    ((!ixgbe->lro_enable) &&
		    (!(status_error & IXGBE_RXD_STAT_EOP)))) {
			IXGBE_DEBUG_STAT(rx_ring->stat_frame_error);
			goto rx_discard;
		}

		IXGBE_DEBUG_STAT_COND(rx_ring->stat_cksum_error,
		    (status_error & IXGBE_RXDADV_ERR_TCPE) ||
		    (status_error & IXGBE_RXDADV_ERR_IPE));

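		/*
		 * With LRO enabled, a non-zero RSCCNT field indicates that
		 * hardware has coalesced this descriptor into a larger
		 * packet. Non-EOP descriptors are linked to the next one
		 * through the NEXTP field and are delivered only when the
		 * EOP descriptor of the chain arrives.
		 */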
		if (ixgbe->lro_enable) {
			rsc_cnt = (current_rbd->wb.lower.lo_dword.data &
			    IXGBE_RXDADV_RSCCNT_MASK) >>
			    IXGBE_RXDADV_RSCCNT_SHIFT;
			if (rsc_cnt != 0) {
				if (status_error & IXGBE_RXD_STAT_EOP) {
					pkt_len = current_rbd->wb.upper.length;
					if (rx_data->work_list[rx_next]->
					    lro_prev != -1) {
						lro_start =
						    ixgbe_lro_get_start(rx_data,
						    rx_next);
						ixgbe->lro_pkt_count++;
						pkt_len +=
						    (rx_data->lro_num - 1) *
						    ixgbe->rx_buf_size;
						lro_eop = B_TRUE;
					}
				} else {
					lro_next = (status_error &
					    IXGBE_RXDADV_NEXTP_MASK) >>
					    IXGBE_RXDADV_NEXTP_SHIFT;
					rx_data->work_list[lro_next]->lro_prev
					    = rx_next;
					rx_data->work_list[rx_next]->lro_next =
					    lro_next;
					rx_data->work_list[rx_next]->lro_pkt =
					    B_TRUE;
					goto rx_discard;
				}

			} else {
				pkt_len = current_rbd->wb.upper.length;
			}
		} else {
			pkt_len = current_rbd->wb.upper.length;
		}

		if ((poll_bytes != IXGBE_POLL_NULL) &&
		    ((received_bytes + pkt_len) > poll_bytes))
			break;

		received_bytes += pkt_len;
		mp = NULL;

		/*
		 * For packets with length more than the copy threshold,
		 * we'll first try to use the existing DMA buffer to build
		 * an mblk and send the mblk upstream.
		 *
		 * If the first method fails, or the packet length is less
		 * than the copy threshold, we'll allocate a new mblk and
		 * copy the packet data to the new mblk.
		 */
		if (lro_eop) {
			mp = ixgbe_lro_bind(rx_data, lro_start,
			    rx_data->lro_num, pkt_len);
			if (mp == NULL)
				mp = ixgbe_lro_copy(rx_data, lro_start,
				    rx_data->lro_num, pkt_len);
			lro_eop = B_FALSE;
			rx_data->lro_num = 0;

		} else {
			if (pkt_len > ixgbe->rx_copy_thresh)
				mp = ixgbe_rx_bind(rx_data, rx_next, pkt_len);

			if (mp == NULL)
				mp = ixgbe_rx_copy(rx_data, rx_next, pkt_len);
		}
		if (mp != NULL) {
			/*
			 * Check h/w checksum offload status
			 */
			if (ixgbe->rx_hcksum_enable)
				ixgbe_rx_assoc_hcksum(mp, status_error);

			*mblk_tail = mp;
			mblk_tail = &mp->b_next;
		}

rx_discard:
		/*
		 * Reset the rx descriptor to its read format so that it can
		 * be reused by hardware. When LRO is enabled, descriptors
		 * that are still part of an in-progress LRO packet keep
		 * their current buffers.
		 */
		current_rcb = rx_data->work_list[rx_next];
		if (ixgbe->lro_enable) {
			if (!current_rcb->lro_pkt) {
				current_rbd->read.pkt_addr =
				    current_rcb->rx_buf.dma_address;
				current_rbd->read.hdr_addr = 0;
			}
		} else {
			current_rbd->read.pkt_addr =
			    current_rcb->rx_buf.dma_address;
			current_rbd->read.hdr_addr = 0;
		}

		rx_next = NEXT_INDEX(rx_next, 1, rx_data->ring_size);

		/*
		 * The receive function runs in interrupt context, so
		 * rx_limit_per_intr is used to avoid spending too much
		 * time receiving packets in a single interrupt.
		 */
		if (++pkt_num > ixgbe->rx_limit_per_intr) {
			IXGBE_DEBUG_STAT(rx_ring->stat_exceed_pkt);
			break;
		}

		current_rbd = &rx_data->rbd_ring[rx_next];
		status_error = current_rbd->wb.upper.status_error;
	}

	rx_ring->stat_rbytes += received_bytes;
	rx_ring->stat_ipackets += pkt_num;

	DMA_SYNC(&rx_data->rbd_area, DDI_DMA_SYNC_FORDEV);

	rx_data->rbd_next = rx_next;

	/*
	 * Update the h/w tail accordingly
	 */
	if (ixgbe->lro_enable) {
		lro_first = ixgbe_lro_get_first(rx_data, rx_next);
		rx_tail = PREV_INDEX(lro_first, 1, rx_data->ring_size);
	} else
		rx_tail = PREV_INDEX(rx_next, 1, rx_data->ring_size);

	IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_RDT(rx_ring->hw_index), rx_tail);

	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
		atomic_or_32(&ixgbe->ixgbe_state, IXGBE_ERROR);
	}

	return (mblk_head);
}

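/*
 * ixgbe_ring_rx_poll - Receive the data of one ring in polling mode.
 *
 * This function is the polling-mode entry point for the specified rx ring.
 * It receives at most n_bytes of data and returns the mblk chain to the
 * caller.
 */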
mblk_t *
ixgbe_ring_rx_poll(void *arg, int n_bytes)
{
	ixgbe_rx_ring_t *rx_ring = (ixgbe_rx_ring_t *)arg;
	mblk_t *mp = NULL;

	ASSERT(n_bytes >= 0);

	if (n_bytes == 0)
		return (NULL);

	mutex_enter(&rx_ring->rx_lock);
	mp = ixgbe_ring_rx(rx_ring, n_bytes);
	mutex_exit(&rx_ring->rx_lock);

	return (mp);
}
767