/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright(c) 2007-2010 Intel Corporation. All rights reserved.
 */

/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 */

#include "ixgbe_sw.h"

/* function prototypes */
static mblk_t *ixgbe_rx_bind(ixgbe_rx_data_t *, uint32_t, uint32_t);
static mblk_t *ixgbe_rx_copy(ixgbe_rx_data_t *, uint32_t, uint32_t);
static void ixgbe_rx_assoc_hcksum(mblk_t *, uint32_t);
static mblk_t *ixgbe_lro_bind(ixgbe_rx_data_t *, uint32_t, uint32_t, uint32_t);
static mblk_t *ixgbe_lro_copy(ixgbe_rx_data_t *, uint32_t, uint32_t, uint32_t);
static int ixgbe_lro_get_start(ixgbe_rx_data_t *, uint32_t);
static uint32_t ixgbe_lro_get_first(ixgbe_rx_data_t *, uint32_t);

#ifndef IXGBE_DEBUG
#pragma inline(ixgbe_rx_assoc_hcksum)
#pragma inline(ixgbe_lro_get_start)
#pragma inline(ixgbe_lro_get_first)
#endif

/*
 * ixgbe_rx_recycle - The callback function to reclaim an rx buffer.
 *
 * This function is called when an mp is freed by the user through a
 * freeb call (only for mps constructed through desballoc). It returns
 * the freed buffer to the free list.
 */
void
ixgbe_rx_recycle(caddr_t arg)
{
	ixgbe_t *ixgbe;
	ixgbe_rx_ring_t *rx_ring;
	ixgbe_rx_data_t *rx_data;
	rx_control_block_t *recycle_rcb;
	uint32_t free_index;
	uint32_t ref_cnt;

	recycle_rcb = (rx_control_block_t *)(uintptr_t)arg;
	rx_data = recycle_rcb->rx_data;
	rx_ring = rx_data->rx_ring;
	ixgbe = rx_ring->ixgbe;

	if (recycle_rcb->ref_cnt == 0) {
		/*
		 * This case only happens when rx buffers are being freed
		 * in ixgbe_stop() and freemsg() is called.
		 */
		return;
	}

	ASSERT(recycle_rcb->mp == NULL);

	/*
	 * Use the recycled data buffer to generate a new mblk
	 */
	recycle_rcb->mp = desballoc((unsigned char *)
	    recycle_rcb->rx_buf.address,
	    recycle_rcb->rx_buf.size,
	    0, &recycle_rcb->free_rtn);

	/*
	 * Put the recycled rx control block into the free list
	 */
	mutex_enter(&rx_data->recycle_lock);

	free_index = rx_data->rcb_tail;
	ASSERT(rx_data->free_list[free_index] == NULL);

	rx_data->free_list[free_index] = recycle_rcb;
	rx_data->rcb_tail = NEXT_INDEX(free_index, 1, rx_data->free_list_size);

	mutex_exit(&rx_data->recycle_lock);

	/*
	 * The atomic operation on the number of available rx control
	 * blocks in the free list is used to make the recycling mutually
	 * exclusive with the receiving.
	 */
	atomic_inc_32(&rx_data->rcb_free);
	ASSERT(rx_data->rcb_free <= rx_data->free_list_size);

	/*
	 * Consider the case where the interface is unplumbed while some
	 * buffers are still held by the upper layer. When such a buffer
	 * is returned, we need to free it.
	 */
	ref_cnt = atomic_dec_32_nv(&recycle_rcb->ref_cnt);
	if (ref_cnt == 0) {
		if (recycle_rcb->mp != NULL) {
			freemsg(recycle_rcb->mp);
			recycle_rcb->mp = NULL;
		}

		ixgbe_free_dma_buffer(&recycle_rcb->rx_buf);

		mutex_enter(&ixgbe->rx_pending_lock);
		atomic_dec_32(&rx_data->rcb_pending);
		atomic_dec_32(&ixgbe->rcb_pending);

		/*
		 * When no buffer belonging to this rx_data is held by
		 * the upper layer any more, the rx_data can be freed.
		 */
		if ((rx_data->flag & IXGBE_RX_STOPPED) &&
		    (rx_data->rcb_pending == 0))
			ixgbe_free_rx_ring_data(rx_data);

		mutex_exit(&ixgbe->rx_pending_lock);
	}
}

/*
 * ixgbe_rx_copy - Use copy to process the received packet.
 *
 * This function will use bcopy to process the packet
 * and send the copied packet upstream.
 */
static mblk_t *
ixgbe_rx_copy(ixgbe_rx_data_t *rx_data, uint32_t index, uint32_t pkt_len)
{
	ixgbe_t *ixgbe;
	rx_control_block_t *current_rcb;
	mblk_t *mp;

	ixgbe = rx_data->rx_ring->ixgbe;
	current_rcb = rx_data->work_list[index];

	DMA_SYNC(&current_rcb->rx_buf, DDI_DMA_SYNC_FORKERNEL);

	if (ixgbe_check_dma_handle(current_rcb->rx_buf.dma_handle) !=
	    DDI_FM_OK) {
		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
		atomic_or_32(&ixgbe->ixgbe_state, IXGBE_ERROR);
		return (NULL);
	}

	/*
	 * Allocate a buffer to receive this packet
	 */
	mp = allocb(pkt_len + IPHDR_ALIGN_ROOM, 0);
	if (mp == NULL) {
		ixgbe_log(ixgbe, "ixgbe_rx_copy: allocate buffer failed");
		return (NULL);
	}

	/*
	 * Copy the data received into the new cluster
	 */
	mp->b_rptr += IPHDR_ALIGN_ROOM;
	bcopy(current_rcb->rx_buf.address, mp->b_rptr, pkt_len);
	mp->b_wptr = mp->b_rptr + pkt_len;

	return (mp);
}

/*
 * ixgbe_rx_bind - Use an existing DMA buffer to build an mblk for receiving.
 *
 * This function will use the pre-bound DMA buffer to receive the packet
 * and build an mblk that will be sent upstream.
 */
static mblk_t *
ixgbe_rx_bind(ixgbe_rx_data_t *rx_data, uint32_t index, uint32_t pkt_len)
{
	rx_control_block_t *current_rcb;
	rx_control_block_t *free_rcb;
	uint32_t free_index;
	mblk_t *mp;
	ixgbe_t *ixgbe = rx_data->rx_ring->ixgbe;

	/*
	 * If the free list is empty, we cannot proceed to send
	 * the current DMA buffer upstream. We'll have to return
	 * and use bcopy to process the packet.
	 */
	if (ixgbe_atomic_reserve(&rx_data->rcb_free, 1) < 0)
		return (NULL);

	current_rcb = rx_data->work_list[index];
	/*
	 * If the mp of the rx control block is NULL, try to do
	 * desballoc again.
	 */
	if (current_rcb->mp == NULL) {
		current_rcb->mp = desballoc((unsigned char *)
		    current_rcb->rx_buf.address,
		    current_rcb->rx_buf.size,
		    0, &current_rcb->free_rtn);
		/*
		 * If building an mblk from the current DMA buffer fails,
		 * we have to return and use bcopy to process the packet.
		 */
		if (current_rcb->mp == NULL) {
			atomic_inc_32(&rx_data->rcb_free);
			return (NULL);
		}
	}

	/*
	 * Sync up the data received
	 */
	DMA_SYNC(&current_rcb->rx_buf, DDI_DMA_SYNC_FORKERNEL);

	if (ixgbe_check_dma_handle(current_rcb->rx_buf.dma_handle) !=
	    DDI_FM_OK) {
		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
		atomic_inc_32(&rx_data->rcb_free);
		atomic_or_32(&ixgbe->ixgbe_state, IXGBE_ERROR);
		return (NULL);
	}

	mp = current_rcb->mp;
	current_rcb->mp = NULL;
	atomic_inc_32(&current_rcb->ref_cnt);

	mp->b_wptr = mp->b_rptr + pkt_len;
	mp->b_next = mp->b_cont = NULL;

	/*
	 * Strip off one free rx control block from the free list
	 */
	free_index = rx_data->rcb_head;
	free_rcb = rx_data->free_list[free_index];
	ASSERT(free_rcb != NULL);
	rx_data->free_list[free_index] = NULL;
	rx_data->rcb_head = NEXT_INDEX(free_index, 1, rx_data->free_list_size);

	/*
	 * Put the rx control block onto the work list
	 */
	rx_data->work_list[index] = free_rcb;

	return (mp);
}

/*
 * ixgbe_lro_bind - Use existing DMA buffers to build an LRO mblk
 * for receiving.
 *
 * This function will use the pre-bound DMA buffers to receive the packet
 * and build an LRO mblk chain that will be sent upstream.
 */
static mblk_t *
ixgbe_lro_bind(ixgbe_rx_data_t *rx_data, uint32_t lro_start,
    uint32_t lro_num, uint32_t pkt_len)
{
	rx_control_block_t *current_rcb;
	union ixgbe_adv_rx_desc *current_rbd;
	rx_control_block_t *free_rcb;
	uint32_t free_index;
	int lro_next;
	uint32_t last_pkt_len;
	uint32_t i;
	mblk_t *mp;
	mblk_t *mblk_head;
	mblk_t **mblk_tail;
	ixgbe_t *ixgbe = rx_data->rx_ring->ixgbe;

	/*
	 * If the free list does not have enough entries, we cannot
	 * proceed to send the current DMA buffers upstream. We'll have
	 * to return and use bcopy to process the packet.
	 */
	if (ixgbe_atomic_reserve(&rx_data->rcb_free, lro_num) < 0)
		return (NULL);
	current_rcb = rx_data->work_list[lro_start];

	/*
	 * If any one of the rx data blocks cannot support the
	 * lro bind operation, we'll have to return and use
	 * bcopy to process the lro packet.
	 */
	for (i = lro_num; i > 0; i--) {
		/*
		 * Sync up the data received
		 */
		DMA_SYNC(&current_rcb->rx_buf, DDI_DMA_SYNC_FORKERNEL);

		if (ixgbe_check_dma_handle(current_rcb->rx_buf.dma_handle) !=
		    DDI_FM_OK) {
			ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
			atomic_add_32(&rx_data->rcb_free, lro_num);
			atomic_or_32(&ixgbe->ixgbe_state, IXGBE_ERROR);
			return (NULL);
		}

		/*
		 * If the mp of the rx control block is NULL, try to do
		 * desballoc again.
		 */
		if (current_rcb->mp == NULL) {
			current_rcb->mp = desballoc((unsigned char *)
			    current_rcb->rx_buf.address,
			    current_rcb->rx_buf.size,
			    0, &current_rcb->free_rtn);
			/*
			 * If building an mblk from the current DMA buffer
			 * fails, we have to return and use bcopy to
			 * process the packet.
			 */
			if (current_rcb->mp == NULL) {
				atomic_add_32(&rx_data->rcb_free, lro_num);
				return (NULL);
			}
		}
		if (current_rcb->lro_next != -1)
			lro_next = current_rcb->lro_next;
		current_rcb = rx_data->work_list[lro_next];
	}

	mblk_head = NULL;
	mblk_tail = &mblk_head;
	lro_next = lro_start;
	last_pkt_len = pkt_len - ixgbe->rx_buf_size * (lro_num - 1);
	current_rcb = rx_data->work_list[lro_next];
	current_rbd = &rx_data->rbd_ring[lro_next];
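	/*
	 * Walk the LRO chain: hand each pre-bound buffer upstream as an
	 * mblk linked through b_cont, replace the rcb in the work list
	 * with one taken from the free list, and reset its descriptor
	 * for reuse.
	 */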
	while (lro_num--) {
		mp = current_rcb->mp;
		current_rcb->mp = NULL;
		atomic_inc_32(&current_rcb->ref_cnt);
		if (lro_num != 0)
			mp->b_wptr = mp->b_rptr + ixgbe->rx_buf_size;
		else
			mp->b_wptr = mp->b_rptr + last_pkt_len;
		mp->b_next = mp->b_cont = NULL;
		*mblk_tail = mp;
		mblk_tail = &mp->b_cont;

		/*
		 * Strip off one free rx control block from the free list
		 */
		free_index = rx_data->rcb_head;
		free_rcb = rx_data->free_list[free_index];
		ASSERT(free_rcb != NULL);
		rx_data->free_list[free_index] = NULL;
		rx_data->rcb_head = NEXT_INDEX(free_index, 1,
		    rx_data->free_list_size);

		/*
		 * Put the rx control block onto the work list
		 */
		rx_data->work_list[lro_next] = free_rcb;
		lro_next = current_rcb->lro_next;
		current_rcb->lro_next = -1;
		current_rcb->lro_prev = -1;
		current_rcb->lro_pkt = B_FALSE;
		current_rbd->read.pkt_addr = free_rcb->rx_buf.dma_address;
		current_rbd->read.hdr_addr = 0;
		if (lro_next == -1)
			break;
		current_rcb = rx_data->work_list[lro_next];
		current_rbd = &rx_data->rbd_ring[lro_next];
	}
	return (mblk_head);
}

/*
 * ixgbe_lro_copy - Use copy to process the received LRO packet.
 *
 * This function will use bcopy to process the LRO packet
 * and send the copied packet upstream.
 */
static mblk_t *
ixgbe_lro_copy(ixgbe_rx_data_t *rx_data, uint32_t lro_start,
    uint32_t lro_num, uint32_t pkt_len)
{
	ixgbe_t *ixgbe;
	rx_control_block_t *current_rcb;
	union ixgbe_adv_rx_desc *current_rbd;
	mblk_t *mp;
	uint32_t last_pkt_len;
	int lro_next;
	uint32_t i;

	ixgbe = rx_data->rx_ring->ixgbe;

	/*
	 * Allocate a buffer to receive this LRO packet
	 */
	mp = allocb(pkt_len + IPHDR_ALIGN_ROOM, 0);
	if (mp == NULL) {
		ixgbe_log(ixgbe, "LRO copy MP alloc failed");
		return (NULL);
	}

	current_rcb = rx_data->work_list[lro_start];

	/*
	 * Sync up the LRO packet data received
	 */
	for (i = lro_num; i > 0; i--) {
		DMA_SYNC(&current_rcb->rx_buf, DDI_DMA_SYNC_FORKERNEL);

		if (ixgbe_check_dma_handle(current_rcb->rx_buf.dma_handle) !=
		    DDI_FM_OK) {
			ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
			atomic_or_32(&ixgbe->ixgbe_state, IXGBE_ERROR);
			freemsg(mp);
			return (NULL);
		}
		if (current_rcb->lro_next != -1)
			lro_next = current_rcb->lro_next;
		current_rcb = rx_data->work_list[lro_next];
	}
	lro_next = lro_start;
	current_rcb = rx_data->work_list[lro_next];
	current_rbd = &rx_data->rbd_ring[lro_next];
	last_pkt_len = pkt_len - ixgbe->rx_buf_size * (lro_num - 1);

	/*
	 * Copy the data received into the new cluster
	 */
	mp->b_rptr += IPHDR_ALIGN_ROOM;
	mp->b_wptr += IPHDR_ALIGN_ROOM;
	while (lro_num--) {
		if (lro_num != 0) {
			bcopy(current_rcb->rx_buf.address, mp->b_wptr,
			    ixgbe->rx_buf_size);
			mp->b_wptr += ixgbe->rx_buf_size;
		} else {
			bcopy(current_rcb->rx_buf.address, mp->b_wptr,
			    last_pkt_len);
			mp->b_wptr += last_pkt_len;
		}
		lro_next = current_rcb->lro_next;
		current_rcb->lro_next = -1;
		current_rcb->lro_prev = -1;
		current_rcb->lro_pkt = B_FALSE;
		current_rbd->read.pkt_addr = current_rcb->rx_buf.dma_address;
		current_rbd->read.hdr_addr = 0;
		if (lro_next == -1)
			break;
		current_rcb = rx_data->work_list[lro_next];
		current_rbd = &rx_data->rbd_ring[lro_next];
	}

	return (mp);
}

/*
 * ixgbe_lro_get_start - get the start rcb index of one LRO packet
 */
static int
ixgbe_lro_get_start(ixgbe_rx_data_t *rx_data, uint32_t rx_next)
{
	int lro_prev;
	int lro_start;
	uint32_t lro_num = 1;
	rx_control_block_t *prev_rcb;
	rx_control_block_t *current_rcb = rx_data->work_list[rx_next];

	lro_prev = current_rcb->lro_prev;

	/*
	 * Walk backwards along the lro_prev links to the first rcb of
	 * the chain, counting the descriptors as we go.
	 */
	while (lro_prev != -1) {
		lro_num++;
		prev_rcb = rx_data->work_list[lro_prev];
		lro_start = lro_prev;
		lro_prev = prev_rcb->lro_prev;
	}
	rx_data->lro_num = lro_num;
	return (lro_start);
}

/*
 * ixgbe_lro_get_first - get the first LRO rcb index
 */
static uint32_t
ixgbe_lro_get_first(ixgbe_rx_data_t *rx_data, uint32_t rx_next)
{
	rx_control_block_t *current_rcb;
	uint32_t lro_first;

	lro_first = rx_data->lro_first;
	current_rcb = rx_data->work_list[lro_first];

	/*
	 * Advance lro_first past rcbs that are not part of a pending
	 * LRO chain, stopping at the first one that still is, or at
	 * the current descriptor.
	 */
	while ((!current_rcb->lro_pkt) && (lro_first != rx_next)) {
		lro_first = NEXT_INDEX(lro_first, 1, rx_data->ring_size);
		current_rcb = rx_data->work_list[lro_first];
	}
	rx_data->lro_first = lro_first;
	return (lro_first);
}

/*
 * ixgbe_rx_assoc_hcksum - Check the rx hardware checksum status and associate
 * the hcksum flags.
 */
static void
ixgbe_rx_assoc_hcksum(mblk_t *mp, uint32_t status_error)
{
	uint32_t hcksum_flags = 0;

	/*
	 * Check TCP/UDP checksum
	 */
	if ((status_error & IXGBE_RXD_STAT_L4CS) &&
	    !(status_error & IXGBE_RXDADV_ERR_TCPE))
		hcksum_flags |= HCK_FULLCKSUM_OK;

	/*
	 * Check IP checksum
	 */
	if ((status_error & IXGBE_RXD_STAT_IPCS) &&
	    !(status_error & IXGBE_RXDADV_ERR_IPE))
		hcksum_flags |= HCK_IPV4_HDRCKSUM_OK;

	if (hcksum_flags != 0) {
		mac_hcksum_set(mp, 0, 0, 0, 0, hcksum_flags);
	}
}

/*
 * ixgbe_ring_rx - Receive the data of one ring.
 *
 * This function goes through the h/w descriptors of the specified rx ring
 * and receives the data if a descriptor's status shows the data is ready.
 * It returns a chain of mblks containing the received data, to be
 * passed up to mac_rx().
 */
mblk_t *
ixgbe_ring_rx(ixgbe_rx_ring_t *rx_ring, int poll_bytes)
{
	union ixgbe_adv_rx_desc *current_rbd;
	rx_control_block_t *current_rcb;
	mblk_t *mp;
	mblk_t *mblk_head;
	mblk_t **mblk_tail;
	uint32_t rx_next;
	uint32_t rx_tail;
	uint32_t pkt_len;
	uint32_t status_error;
	uint32_t pkt_num;
	uint32_t rsc_cnt;
	uint32_t lro_first;
	uint32_t lro_start;
	uint32_t lro_next;
	boolean_t lro_eop;
	uint32_t received_bytes;
	ixgbe_t *ixgbe = rx_ring->ixgbe;
	ixgbe_rx_data_t *rx_data;

	if ((ixgbe->ixgbe_state & IXGBE_SUSPENDED) ||
	    (ixgbe->ixgbe_state & IXGBE_ERROR) ||
	    (ixgbe->ixgbe_state & IXGBE_OVERTEMP) ||
	    !(ixgbe->ixgbe_state & IXGBE_STARTED))
		return (NULL);

	rx_data = rx_ring->rx_data;
	lro_eop = B_FALSE;
	mblk_head = NULL;
	mblk_tail = &mblk_head;

	/*
	 * Sync the receive descriptors before accepting the packets
	 */
	DMA_SYNC(&rx_data->rbd_area, DDI_DMA_SYNC_FORKERNEL);

	if (ixgbe_check_dma_handle(rx_data->rbd_area.dma_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
		atomic_or_32(&ixgbe->ixgbe_state, IXGBE_ERROR);
		return (NULL);
	}

	/*
	 * Get the start point of the rx bd ring which should be examined
	 * during this cycle.
	 */
	rx_next = rx_data->rbd_next;
	current_rbd = &rx_data->rbd_ring[rx_next];
	received_bytes = 0;
	pkt_num = 0;
	status_error = current_rbd->wb.upper.status_error;
	while (status_error & IXGBE_RXD_STAT_DD) {
		/*
		 * If the adapter has found errors, but the error
		 * is a hardware checksum error, do not discard the
		 * packet: let the upper layer compute the checksum.
		 * Otherwise, discard the packet.
		 */
		if ((status_error & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) ||
		    ((!ixgbe->lro_enable) &&
		    (!(status_error & IXGBE_RXD_STAT_EOP)))) {
			IXGBE_DEBUG_STAT(rx_ring->stat_frame_error);
			goto rx_discard;
		}

		IXGBE_DEBUG_STAT_COND(rx_ring->stat_cksum_error,
		    (status_error & IXGBE_RXDADV_ERR_TCPE) ||
		    (status_error & IXGBE_RXDADV_ERR_IPE));
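
		/*
		 * When LRO is enabled, a non-zero RSC count marks this
		 * descriptor as part of a hardware-coalesced packet.
		 * A non-EOP descriptor only records the chain linkage
		 * (NEXTP points at the next descriptor in the chain) and
		 * is held until the EOP descriptor arrives, at which
		 * point the total length of the coalesced packet is
		 * computed from the whole chain.
		 */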
		if (ixgbe->lro_enable) {
			rsc_cnt = (current_rbd->wb.lower.lo_dword.data &
			    IXGBE_RXDADV_RSCCNT_MASK) >>
			    IXGBE_RXDADV_RSCCNT_SHIFT;
			if (rsc_cnt != 0) {
				if (status_error & IXGBE_RXD_STAT_EOP) {
					pkt_len = current_rbd->wb.upper.length;
					if (rx_data->work_list[rx_next]->
					    lro_prev != -1) {
						lro_start =
						    ixgbe_lro_get_start(rx_data,
						    rx_next);
						ixgbe->lro_pkt_count++;
						pkt_len +=
						    (rx_data->lro_num - 1) *
						    ixgbe->rx_buf_size;
						lro_eop = B_TRUE;
					}
				} else {
					lro_next = (status_error &
					    IXGBE_RXDADV_NEXTP_MASK) >>
					    IXGBE_RXDADV_NEXTP_SHIFT;
					rx_data->work_list[lro_next]->lro_prev
					    = rx_next;
					rx_data->work_list[rx_next]->lro_next =
					    lro_next;
					rx_data->work_list[rx_next]->lro_pkt =
					    B_TRUE;
					goto rx_discard;
				}
			} else {
				pkt_len = current_rbd->wb.upper.length;
			}
		} else {
			pkt_len = current_rbd->wb.upper.length;
		}

		if ((poll_bytes != IXGBE_POLL_NULL) &&
		    ((received_bytes + pkt_len) > poll_bytes))
			break;

		received_bytes += pkt_len;
		mp = NULL;

		/*
		 * For packets with length greater than the copy threshold,
		 * we'll first try to use the existing DMA buffer to build
		 * an mblk and send the mblk upstream.
		 *
		 * If that fails, or the packet length is less than the
		 * copy threshold, we'll allocate a new mblk and copy the
		 * packet data into it.
		 */
		if (lro_eop) {
			mp = ixgbe_lro_bind(rx_data, lro_start,
			    rx_data->lro_num, pkt_len);
			if (mp == NULL)
				mp = ixgbe_lro_copy(rx_data, lro_start,
				    rx_data->lro_num, pkt_len);
			lro_eop = B_FALSE;
			rx_data->lro_num = 0;
		} else {
			if (pkt_len > ixgbe->rx_copy_thresh)
				mp = ixgbe_rx_bind(rx_data, rx_next, pkt_len);

			if (mp == NULL)
				mp = ixgbe_rx_copy(rx_data, rx_next, pkt_len);
		}
		if (mp != NULL) {
			/*
			 * Check h/w checksum offload status
			 */
			if (ixgbe->rx_hcksum_enable)
				ixgbe_rx_assoc_hcksum(mp, status_error);

			*mblk_tail = mp;
			mblk_tail = &mp->b_next;
		}

rx_discard:
		/*
		 * Reset rx descriptor read bits
		 */
		current_rcb = rx_data->work_list[rx_next];
		if (ixgbe->lro_enable) {
			if (!current_rcb->lro_pkt) {
				current_rbd->read.pkt_addr =
				    current_rcb->rx_buf.dma_address;
				current_rbd->read.hdr_addr = 0;
			}
		} else {
			current_rbd->read.pkt_addr =
			    current_rcb->rx_buf.dma_address;
			current_rbd->read.hdr_addr = 0;
		}

		rx_next = NEXT_INDEX(rx_next, 1, rx_data->ring_size);

		/*
		 * The receive function runs in interrupt context, so
		 * rx_limit_per_intr is used here to avoid spending too
		 * long receiving in a single interrupt.
		 */
		if (++pkt_num > ixgbe->rx_limit_per_intr) {
			IXGBE_DEBUG_STAT(rx_ring->stat_exceed_pkt);
			break;
		}

		current_rbd = &rx_data->rbd_ring[rx_next];
		status_error = current_rbd->wb.upper.status_error;
	}

	rx_ring->stat_rbytes += received_bytes;
	rx_ring->stat_ipackets += pkt_num;

	DMA_SYNC(&rx_data->rbd_area, DDI_DMA_SYNC_FORDEV);

	rx_data->rbd_next = rx_next;

	/*
	 * Update the h/w tail accordingly
	 */
	if (ixgbe->lro_enable) {
		lro_first = ixgbe_lro_get_first(rx_data, rx_next);
		rx_tail = PREV_INDEX(lro_first, 1, rx_data->ring_size);
	} else
		rx_tail = PREV_INDEX(rx_next, 1, rx_data->ring_size);

	IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_RDT(rx_ring->hw_index), rx_tail);

	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
		atomic_or_32(&ixgbe->ixgbe_state, IXGBE_ERROR);
	}

	return (mblk_head);
}

mblk_t *
ixgbe_ring_rx_poll(void *arg, int n_bytes)
{
	ixgbe_rx_ring_t *rx_ring = (ixgbe_rx_ring_t *)arg;
	mblk_t *mp = NULL;

	ASSERT(n_bytes >= 0);

	if (n_bytes == 0)
		return (NULL);

	mutex_enter(&rx_ring->rx_lock);
	mp = ixgbe_ring_rx(rx_ring, n_bytes);
	mutex_exit(&rx_ring->rx_lock);

	return (mp);
}