/*
 * CDDL HEADER START
 *
 * Copyright(c) 2007-2009 Intel Corporation. All rights reserved.
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#include "ixgbe_sw.h"

/* function prototypes */
static mblk_t *ixgbe_rx_bind(ixgbe_rx_data_t *, uint32_t, uint32_t);
static mblk_t *ixgbe_rx_copy(ixgbe_rx_data_t *, uint32_t, uint32_t);
static void ixgbe_rx_assoc_hcksum(mblk_t *, uint32_t);
static mblk_t *ixgbe_lro_bind(ixgbe_rx_data_t *, uint32_t, uint32_t, uint32_t);
static mblk_t *ixgbe_lro_copy(ixgbe_rx_data_t *, uint32_t, uint32_t, uint32_t);
static int ixgbe_lro_get_start(ixgbe_rx_data_t *, uint32_t);
static uint32_t ixgbe_lro_get_first(ixgbe_rx_data_t *, uint32_t);

#ifndef IXGBE_DEBUG
#pragma inline(ixgbe_rx_assoc_hcksum)
#pragma inline(ixgbe_lro_get_start)
#pragma inline(ixgbe_lro_get_first)
#endif

/*
 * ixgbe_rx_recycle - The call-back function to reclaim an rx buffer.
 *
 * This function is called when an mp is freed by the user through a
 * freeb() call (only for mblks constructed through desballoc()).
 * It returns the freed buffer to the free list.
 */
void
ixgbe_rx_recycle(caddr_t arg)
{
    ixgbe_t *ixgbe;
    ixgbe_rx_ring_t *rx_ring;
    ixgbe_rx_data_t *rx_data;
    rx_control_block_t *recycle_rcb;
    uint32_t free_index;
    uint32_t ref_cnt;

    recycle_rcb = (rx_control_block_t *)(uintptr_t)arg;
    rx_data = recycle_rcb->rx_data;
    rx_ring = rx_data->rx_ring;
    ixgbe = rx_ring->ixgbe;

    if (recycle_rcb->ref_cnt == 0) {
        /*
         * This case only happens when rx buffers are being freed
         * in ixgbe_stop() and freemsg() is called.
         */
        return;
    }

    ASSERT(recycle_rcb->mp == NULL);

    /*
     * Using the recycled data buffer to generate a new mblk
     */
    recycle_rcb->mp = desballoc((unsigned char *)
        recycle_rcb->rx_buf.address,
        recycle_rcb->rx_buf.size,
        0, &recycle_rcb->free_rtn);

    /*
     * Put the recycled rx control block into the free list
     */
    mutex_enter(&rx_data->recycle_lock);

    free_index = rx_data->rcb_tail;
    ASSERT(rx_data->free_list[free_index] == NULL);

    rx_data->free_list[free_index] = recycle_rcb;
    rx_data->rcb_tail = NEXT_INDEX(free_index, 1, rx_data->free_list_size);

    mutex_exit(&rx_data->recycle_lock);

    /*
     * The atomic operation on the number of the available rx control
     * blocks in the free list is used to make recycling mutually
     * exclusive with receiving.
     */
    atomic_inc_32(&rx_data->rcb_free);
    ASSERT(rx_data->rcb_free <= rx_data->free_list_size);

    /*
     * Consider the case where the interface is unplumbed while some
     * buffers are still held by the upper layer. When such a buffer
     * is returned, we need to free it.
     */
    ref_cnt = atomic_dec_32_nv(&recycle_rcb->ref_cnt);
    if (ref_cnt == 0) {
        if (recycle_rcb->mp != NULL) {
            freemsg(recycle_rcb->mp);
            recycle_rcb->mp = NULL;
        }

        ixgbe_free_dma_buffer(&recycle_rcb->rx_buf);

        mutex_enter(&ixgbe->rx_pending_lock);
        atomic_dec_32(&rx_data->rcb_pending);
        atomic_dec_32(&ixgbe->rcb_pending);

        /*
         * When there is not any buffer belonging to this rx_data
         * held by the upper layer, the rx_data can be freed.
         */
        if ((rx_data->flag & IXGBE_RX_STOPPED) &&
            (rx_data->rcb_pending == 0))
            ixgbe_free_rx_ring_data(rx_data);

        mutex_exit(&ixgbe->rx_pending_lock);
    }
}

/*
 * ixgbe_rx_copy - Use copy to process the received packet.
 *
 * This function will use bcopy to process the packet
 * and send the copied packet upstream.
 */
static mblk_t *
ixgbe_rx_copy(ixgbe_rx_data_t *rx_data, uint32_t index, uint32_t pkt_len)
{
    ixgbe_t *ixgbe;
    rx_control_block_t *current_rcb;
    mblk_t *mp;

    ixgbe = rx_data->rx_ring->ixgbe;
    current_rcb = rx_data->work_list[index];

    DMA_SYNC(&current_rcb->rx_buf, DDI_DMA_SYNC_FORKERNEL);

    if (ixgbe_check_dma_handle(current_rcb->rx_buf.dma_handle) !=
        DDI_FM_OK) {
        ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
        atomic_or_32(&ixgbe->ixgbe_state, IXGBE_ERROR);
        return (NULL);
    }

    /*
     * Allocate buffer to receive this packet
     */
    mp = allocb(pkt_len + IPHDR_ALIGN_ROOM, 0);
    if (mp == NULL) {
        ixgbe_log(ixgbe, "ixgbe_rx_copy: allocate buffer failed");
        return (NULL);
    }

    /*
     * Copy the data received into the new cluster
     */
    mp->b_rptr += IPHDR_ALIGN_ROOM;
    bcopy(current_rcb->rx_buf.address, mp->b_rptr, pkt_len);
    mp->b_wptr = mp->b_rptr + pkt_len;

    return (mp);
}

/*
 * ixgbe_rx_bind - Use existing DMA buffer to build mblk for receiving.
 *
 * This function will use pre-bound DMA buffer to receive the packet
 * and build mblk that will be sent upstream.
 */
static mblk_t *
ixgbe_rx_bind(ixgbe_rx_data_t *rx_data, uint32_t index, uint32_t pkt_len)
{
    rx_control_block_t *current_rcb;
    rx_control_block_t *free_rcb;
    uint32_t free_index;
    mblk_t *mp;
    ixgbe_t *ixgbe = rx_data->rx_ring->ixgbe;

    /*
     * If the free list is empty, we cannot proceed to send
     * the current DMA buffer upstream. We'll have to return
     * and use bcopy to process the packet.
     */
    if (ixgbe_atomic_reserve(&rx_data->rcb_free, 1) < 0)
        return (NULL);

    current_rcb = rx_data->work_list[index];
    /*
     * If the mp of the rx control block is NULL, try to do
     * desballoc again.
     */
    if (current_rcb->mp == NULL) {
        current_rcb->mp = desballoc((unsigned char *)
            current_rcb->rx_buf.address,
            current_rcb->rx_buf.size,
            0, &current_rcb->free_rtn);
        /*
         * If it fails to build an mblk using the current
         * DMA buffer, we have to return and use bcopy to
         * process the packet.
         */
        if (current_rcb->mp == NULL) {
            atomic_inc_32(&rx_data->rcb_free);
            return (NULL);
        }
    }
    /*
     * Sync up the data received
     */
    DMA_SYNC(&current_rcb->rx_buf, DDI_DMA_SYNC_FORKERNEL);

    if (ixgbe_check_dma_handle(current_rcb->rx_buf.dma_handle) !=
        DDI_FM_OK) {
        ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
        atomic_inc_32(&rx_data->rcb_free);
        atomic_or_32(&ixgbe->ixgbe_state, IXGBE_ERROR);
        return (NULL);
    }

    mp = current_rcb->mp;
    current_rcb->mp = NULL;
    atomic_inc_32(&current_rcb->ref_cnt);

    mp->b_wptr = mp->b_rptr + pkt_len;
    mp->b_next = mp->b_cont = NULL;

    /*
     * Strip off one free rx control block from the free list
     */
    free_index = rx_data->rcb_head;
    free_rcb = rx_data->free_list[free_index];
    ASSERT(free_rcb != NULL);
    rx_data->free_list[free_index] = NULL;
    rx_data->rcb_head = NEXT_INDEX(free_index, 1, rx_data->free_list_size);

    /*
     * Put the rx control block into the work list
     */
    rx_data->work_list[index] = free_rcb;

    return (mp);
}

/*
 * ixgbe_lro_bind - Use existing DMA buffers to build LRO mblk for receiving.
 *
 * This function will use pre-bound DMA buffers to receive the packet
 * and build LRO mblk that will be sent upstream.
 */
static mblk_t *
ixgbe_lro_bind(ixgbe_rx_data_t *rx_data, uint32_t lro_start,
    uint32_t lro_num, uint32_t pkt_len)
{
    rx_control_block_t *current_rcb;
    union ixgbe_adv_rx_desc *current_rbd;
    rx_control_block_t *free_rcb;
    uint32_t free_index;
    int lro_next;
    uint32_t last_pkt_len;
    uint32_t i;
    mblk_t *mp;
    mblk_t *mblk_head;
    mblk_t **mblk_tail;
    ixgbe_t *ixgbe = rx_data->rx_ring->ixgbe;

    /*
     * If the free list is empty, we cannot proceed to send
     * the current DMA buffer upstream. We'll have to return
     * and use bcopy to process the packet.
     */
    if (ixgbe_atomic_reserve(&rx_data->rcb_free, lro_num) < 0)
        return (NULL);
    current_rcb = rx_data->work_list[lro_start];

    /*
     * If any one of the rx control blocks cannot support the
     * LRO bind operation, we'll have to return and use bcopy
     * to process the LRO packet.
     */
    for (i = lro_num; i > 0; i--) {
        /*
         * Sync up the data received
         */
        DMA_SYNC(&current_rcb->rx_buf, DDI_DMA_SYNC_FORKERNEL);

        if (ixgbe_check_dma_handle(current_rcb->rx_buf.dma_handle) !=
            DDI_FM_OK) {
            ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
            atomic_add_32(&rx_data->rcb_free, lro_num);
            atomic_or_32(&ixgbe->ixgbe_state, IXGBE_ERROR);
            return (NULL);
        }

        /*
         * If the mp of the rx control block is NULL, try to do
         * desballoc again.
         */
        if (current_rcb->mp == NULL) {
            current_rcb->mp = desballoc((unsigned char *)
                current_rcb->rx_buf.address,
                current_rcb->rx_buf.size,
                0, &current_rcb->free_rtn);
            /*
             * If it fails to build an mblk using the current
             * DMA buffer, we have to return and use bcopy to
             * process the packet.
             */
            if (current_rcb->mp == NULL) {
                atomic_add_32(&rx_data->rcb_free, lro_num);
                return (NULL);
            }
        }
        if (current_rcb->lro_next != -1)
            lro_next = current_rcb->lro_next;
        current_rcb = rx_data->work_list[lro_next];
    }

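    /*
     * All buffers in the chain are usable for bind. Walk the chain
     * again: pull each pre-bound mblk off its rx control block, link
     * the mblks together with b_cont, replace each work list entry
     * with a control block from the free list, and re-arm the
     * corresponding rx descriptor with the new DMA address.
     */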
    mblk_head = NULL;
    mblk_tail = &mblk_head;
    lro_next = lro_start;
    last_pkt_len = pkt_len - ixgbe->rx_buf_size * (lro_num - 1);
    current_rcb = rx_data->work_list[lro_next];
    current_rbd = &rx_data->rbd_ring[lro_next];
    while (lro_num--) {
        mp = current_rcb->mp;
        current_rcb->mp = NULL;
        atomic_inc_32(&current_rcb->ref_cnt);
        if (lro_num != 0)
            mp->b_wptr = mp->b_rptr + ixgbe->rx_buf_size;
        else
            mp->b_wptr = mp->b_rptr + last_pkt_len;
        mp->b_next = mp->b_cont = NULL;
        *mblk_tail = mp;
        mblk_tail = &mp->b_cont;

        /*
         * Strip off one free rx control block from the free list
         */
        free_index = rx_data->rcb_head;
        free_rcb = rx_data->free_list[free_index];
        ASSERT(free_rcb != NULL);
        rx_data->free_list[free_index] = NULL;
        rx_data->rcb_head = NEXT_INDEX(free_index, 1,
            rx_data->free_list_size);

        /*
         * Put the rx control block into the work list
         */
        rx_data->work_list[lro_next] = free_rcb;
        lro_next = current_rcb->lro_next;
        current_rcb->lro_next = -1;
        current_rcb->lro_prev = -1;
        current_rcb->lro_pkt = B_FALSE;
        current_rbd->read.pkt_addr = free_rcb->rx_buf.dma_address;
        current_rbd->read.hdr_addr = 0;
        if (lro_next == -1)
            break;
        current_rcb = rx_data->work_list[lro_next];
        current_rbd = &rx_data->rbd_ring[lro_next];
    }
    return (mblk_head);
}

/*
 * ixgbe_lro_copy - Use copy to process the received LRO packet.
 *
 * This function will use bcopy to process the LRO packet
 * and send the copied packet upstream.
 */
static mblk_t *
ixgbe_lro_copy(ixgbe_rx_data_t *rx_data, uint32_t lro_start,
    uint32_t lro_num, uint32_t pkt_len)
{
    ixgbe_t *ixgbe;
    rx_control_block_t *current_rcb;
    union ixgbe_adv_rx_desc *current_rbd;
    mblk_t *mp;
    uint32_t last_pkt_len;
    int lro_next;
    uint32_t i;

    ixgbe = rx_data->rx_ring->ixgbe;

    /*
     * Allocate buffer to receive this LRO packet
     */
    mp = allocb(pkt_len + IPHDR_ALIGN_ROOM, 0);
    if (mp == NULL) {
        ixgbe_log(ixgbe, "LRO copy MP alloc failed");
        return (NULL);
    }

    current_rcb = rx_data->work_list[lro_start];

    /*
     * Sync up the LRO packet data received
     */
    for (i = lro_num; i > 0; i--) {
        DMA_SYNC(&current_rcb->rx_buf, DDI_DMA_SYNC_FORKERNEL);

        if (ixgbe_check_dma_handle(current_rcb->rx_buf.dma_handle) !=
            DDI_FM_OK) {
            ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
            atomic_or_32(&ixgbe->ixgbe_state, IXGBE_ERROR);
            return (NULL);
        }
        if (current_rcb->lro_next != -1)
            lro_next = current_rcb->lro_next;
        current_rcb = rx_data->work_list[lro_next];
    }
    lro_next = lro_start;
    current_rcb = rx_data->work_list[lro_next];
    current_rbd = &rx_data->rbd_ring[lro_next];
    last_pkt_len = pkt_len - ixgbe->rx_buf_size * (lro_num - 1);

    /*
     * Copy the data received into the new cluster
     */
    mp->b_rptr += IPHDR_ALIGN_ROOM;
    mp->b_wptr += IPHDR_ALIGN_ROOM;
    while (lro_num--) {
        if (lro_num != 0) {
            bcopy(current_rcb->rx_buf.address, mp->b_wptr,
                ixgbe->rx_buf_size);
            mp->b_wptr += ixgbe->rx_buf_size;
        } else {
            bcopy(current_rcb->rx_buf.address, mp->b_wptr,
                last_pkt_len);
            mp->b_wptr += last_pkt_len;
        }
        lro_next = current_rcb->lro_next;
        current_rcb->lro_next = -1;
        current_rcb->lro_prev = -1;
        current_rcb->lro_pkt = B_FALSE;
        current_rbd->read.pkt_addr = current_rcb->rx_buf.dma_address;
        current_rbd->read.hdr_addr = 0;
        if (lro_next == -1)
            break;
        current_rcb = rx_data->work_list[lro_next];
        current_rbd = &rx_data->rbd_ring[lro_next];
    }

    return (mp);
}

/*
 * ixgbe_lro_get_start - get the start rcb index in one LRO packet
 */
static int
ixgbe_lro_get_start(ixgbe_rx_data_t *rx_data, uint32_t rx_next)
{
    int lro_prev;
    int lro_start;
    uint32_t lro_num = 1;
    rx_control_block_t *prev_rcb;
    rx_control_block_t *current_rcb = rx_data->work_list[rx_next];
    lro_prev = current_rcb->lro_prev;

    while (lro_prev != -1) {
        lro_num++;
        prev_rcb = rx_data->work_list[lro_prev];
        lro_start = lro_prev;
        lro_prev = prev_rcb->lro_prev;
    }
    rx_data->lro_num = lro_num;
    return (lro_start);
}

/*
 * ixgbe_lro_get_first - get the first LRO rcb index
 */
static uint32_t
ixgbe_lro_get_first(ixgbe_rx_data_t *rx_data, uint32_t rx_next)
{
    rx_control_block_t *current_rcb;
    uint32_t lro_first;
    lro_first = rx_data->lro_first;
    current_rcb = rx_data->work_list[lro_first];
    while ((!current_rcb->lro_pkt) && (lro_first != rx_next)) {
        lro_first = NEXT_INDEX(lro_first, 1, rx_data->ring_size);
        current_rcb = rx_data->work_list[lro_first];
    }
    rx_data->lro_first = lro_first;
    return (lro_first);
}

/*
 * ixgbe_rx_assoc_hcksum - Check the rx hardware checksum status and associate
 * the hcksum flags.
 */
static void
ixgbe_rx_assoc_hcksum(mblk_t *mp, uint32_t status_error)
{
    uint32_t hcksum_flags = 0;

    /*
     * Check TCP/UDP checksum
     */
    if ((status_error & IXGBE_RXD_STAT_L4CS) &&
        !(status_error & IXGBE_RXDADV_ERR_TCPE))
        hcksum_flags |= HCK_FULLCKSUM | HCK_FULLCKSUM_OK;

    /*
     * Check IP Checksum
     */
    if ((status_error & IXGBE_RXD_STAT_IPCS) &&
        !(status_error & IXGBE_RXDADV_ERR_IPE))
        hcksum_flags |= HCK_IPV4_HDRCKSUM;

    if (hcksum_flags != 0) {
        (void) hcksum_assoc(mp,
            NULL, NULL, 0, 0, 0, 0, hcksum_flags, 0);
    }
}

/*
 * ixgbe_ring_rx - Receive the data of one ring.
 *
 * This function goes through the h/w descriptors of the specified rx ring,
 * and receives the data if the descriptor status shows that the data is
 * ready. It returns a chain of mblks containing the received data, to be
 * passed up to mac_rx().
 */
mblk_t *
ixgbe_ring_rx(ixgbe_rx_ring_t *rx_ring, int poll_bytes)
{
    union ixgbe_adv_rx_desc *current_rbd;
    rx_control_block_t *current_rcb;
    mblk_t *mp;
    mblk_t *mblk_head;
    mblk_t **mblk_tail;
    uint32_t rx_next;
    uint32_t rx_tail;
    uint32_t pkt_len;
    uint32_t status_error;
    uint32_t pkt_num;
    uint32_t rsc_cnt;
    uint32_t lro_first;
    uint32_t lro_start;
    uint32_t lro_next;
    boolean_t lro_eop;
    uint32_t received_bytes;
    ixgbe_t *ixgbe = rx_ring->ixgbe;
    ixgbe_rx_data_t *rx_data;

    if ((ixgbe->ixgbe_state & IXGBE_SUSPENDED) ||
        (ixgbe->ixgbe_state & IXGBE_ERROR) ||
        !(ixgbe->ixgbe_state & IXGBE_STARTED))
        return (NULL);

    rx_data = rx_ring->rx_data;
    lro_eop = B_FALSE;
    mblk_head = NULL;
    mblk_tail = &mblk_head;

    /*
     * Sync the receive descriptors before accepting the packets
     */
    DMA_SYNC(&rx_data->rbd_area, DDI_DMA_SYNC_FORKERNEL);

    if (ixgbe_check_dma_handle(rx_data->rbd_area.dma_handle) != DDI_FM_OK) {
        ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
        atomic_or_32(&ixgbe->ixgbe_state, IXGBE_ERROR);
        return (NULL);
    }

    /*
     * Get the start point of rx bd ring which should be examined
     * during this cycle.
     */
    rx_next = rx_data->rbd_next;
    current_rbd = &rx_data->rbd_ring[rx_next];
    received_bytes = 0;
    pkt_num = 0;
    status_error = current_rbd->wb.upper.status_error;
    while (status_error & IXGBE_RXD_STAT_DD) {
        /*
         * If the adapter has found errors, but the error
         * is a hardware checksum error, do not discard the
         * packet: let the upper layer compute the checksum.
         * Otherwise, discard the packet.
         */
        if ((status_error & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) ||
            ((!ixgbe->lro_enable) &&
            (!(status_error & IXGBE_RXD_STAT_EOP)))) {
            IXGBE_DEBUG_STAT(rx_ring->stat_frame_error);
            goto rx_discard;
        }

        IXGBE_DEBUG_STAT_COND(rx_ring->stat_cksum_error,
            (status_error & IXGBE_RXDADV_ERR_TCPE) ||
            (status_error & IXGBE_RXDADV_ERR_IPE));

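        /*
         * With LRO enabled, the hardware (RSC) may spread one
         * coalesced packet across several descriptors. A non-zero
         * RSC count marks such a descriptor: if EOP is not set,
         * record the forward (NEXTP) and backward links on the rx
         * control blocks and defer the buffer; once the EOP
         * descriptor arrives, walk back to the start of the chain
         * and compute the total packet length.
         */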
        if (ixgbe->lro_enable) {
            rsc_cnt = (current_rbd->wb.lower.lo_dword.data &
                IXGBE_RXDADV_RSCCNT_MASK) >>
                IXGBE_RXDADV_RSCCNT_SHIFT;
            if (rsc_cnt != 0) {
                if (status_error & IXGBE_RXD_STAT_EOP) {
                    pkt_len = current_rbd->wb.upper.length;
                    if (rx_data->work_list[rx_next]->
                        lro_prev != -1) {
                        lro_start =
                            ixgbe_lro_get_start(rx_data,
                            rx_next);
                        ixgbe->lro_pkt_count++;
                        pkt_len +=
                            (rx_data->lro_num - 1) *
                            ixgbe->rx_buf_size;
                        lro_eop = B_TRUE;
                    }
                } else {
                    lro_next = (status_error &
                        IXGBE_RXDADV_NEXTP_MASK) >>
                        IXGBE_RXDADV_NEXTP_SHIFT;
                    rx_data->work_list[lro_next]->lro_prev =
                        rx_next;
                    rx_data->work_list[rx_next]->lro_next =
                        lro_next;
                    rx_data->work_list[rx_next]->lro_pkt =
                        B_TRUE;
                    goto rx_discard;
                }
            } else {
                pkt_len = current_rbd->wb.upper.length;
            }
        } else {
            pkt_len = current_rbd->wb.upper.length;
        }

        if ((poll_bytes != IXGBE_POLL_NULL) &&
            ((received_bytes + pkt_len) > poll_bytes))
            break;

        received_bytes += pkt_len;
        mp = NULL;

        /*
         * For packets with length more than the copy threshold,
         * we'll first try to use the existing DMA buffer to build
         * an mblk and send the mblk upstream.
         *
         * If the first method fails, or the packet length is less
         * than the copy threshold, we'll allocate a new mblk and
         * copy the packet data to the new mblk.
         */
        if (lro_eop) {
            mp = ixgbe_lro_bind(rx_data, lro_start,
                rx_data->lro_num, pkt_len);
            if (mp == NULL)
                mp = ixgbe_lro_copy(rx_data, lro_start,
                    rx_data->lro_num, pkt_len);
            lro_eop = B_FALSE;
            rx_data->lro_num = 0;
        } else {
            if (pkt_len > ixgbe->rx_copy_thresh)
                mp = ixgbe_rx_bind(rx_data, rx_next, pkt_len);

            if (mp == NULL)
                mp = ixgbe_rx_copy(rx_data, rx_next, pkt_len);
        }
        if (mp != NULL) {
            /*
             * Check h/w checksum offload status
             */
            if (ixgbe->rx_hcksum_enable)
                ixgbe_rx_assoc_hcksum(mp, status_error);

            *mblk_tail = mp;
            mblk_tail = &mp->b_next;
        }

rx_discard:
        /*
         * Reset rx descriptor read bits
         */
        current_rcb = rx_data->work_list[rx_next];
        if (ixgbe->lro_enable) {
            if (!current_rcb->lro_pkt) {
                current_rbd->read.pkt_addr =
                    current_rcb->rx_buf.dma_address;
                current_rbd->read.hdr_addr = 0;
            }
        } else {
            current_rbd->read.pkt_addr =
                current_rcb->rx_buf.dma_address;
            current_rbd->read.hdr_addr = 0;
        }

        rx_next = NEXT_INDEX(rx_next, 1, rx_data->ring_size);

        /*
         * The receive function runs in interrupt context, so
         * rx_limit_per_intr is used to avoid spending too long
         * receiving packets in a single interrupt.
         */
        if (++pkt_num > ixgbe->rx_limit_per_intr) {
            IXGBE_DEBUG_STAT(rx_ring->stat_exceed_pkt);
            break;
        }

        current_rbd = &rx_data->rbd_ring[rx_next];
        status_error = current_rbd->wb.upper.status_error;
    }

    DMA_SYNC(&rx_data->rbd_area, DDI_DMA_SYNC_FORDEV);

    rx_data->rbd_next = rx_next;

    /*
     * Update the h/w tail accordingly
     */
    if (ixgbe->lro_enable) {
        lro_first = ixgbe_lro_get_first(rx_data, rx_next);
        rx_tail = PREV_INDEX(lro_first, 1, rx_data->ring_size);
    } else
        rx_tail = PREV_INDEX(rx_next, 1, rx_data->ring_size);

    IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_RDT(rx_ring->index), rx_tail);

    if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
        ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
        atomic_or_32(&ixgbe->ixgbe_state, IXGBE_ERROR);
    }

    return (mblk_head);
}

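/*
 * ixgbe_ring_rx_poll - The polling receive entry point of one rx ring.
 *
 * Receives at most n_bytes of data from the specified rx ring, holding
 * the ring's rx lock across the call to ixgbe_ring_rx().
 */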
mblk_t *
ixgbe_ring_rx_poll(void *arg, int n_bytes)
{
    ixgbe_rx_ring_t *rx_ring = (ixgbe_rx_ring_t *)arg;
    mblk_t *mp = NULL;

    ASSERT(n_bytes >= 0);

    if (n_bytes == 0)
        return (NULL);

    mutex_enter(&rx_ring->rx_lock);
    mp = ixgbe_ring_rx(rx_ring, n_bytes);
    mutex_exit(&rx_ring->rx_lock);

    return (mp);
}