/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 *
 * Copyright (c) 2002-2006 Neterion, Inc.
 */

#ifdef XGE_DEBUG_FP
#include "xgehal-device.h"
#endif

#include "xgehal-ring.h"
#include "xgehal-fifo.h"

/**
 * xge_hal_device_bar0 - Get BAR0 mapped address.
 * @hldev: HAL device handle.
 *
 * Returns: BAR0 address of the specified device.
 */
__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE char *
xge_hal_device_bar0(xge_hal_device_t *hldev)
{
	return hldev->bar0;
}

/**
 * xge_hal_device_isrbar0 - Get BAR0 mapped address used in interrupt context.
 * @hldev: HAL device handle.
 *
 * Returns: BAR0 address of the specified device, as mapped for use in
 * interrupt context.
 */
__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE char *
xge_hal_device_isrbar0(xge_hal_device_t *hldev)
{
	return hldev->isrbar0;
}

/**
 * xge_hal_device_bar1 - Get BAR1 mapped address.
 * @hldev: HAL device handle.
 *
 * Returns: BAR1 address of the specified device.
 */
__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE char *
xge_hal_device_bar1(xge_hal_device_t *hldev)
{
	return hldev->bar1;
}

/**
 * xge_hal_device_bar0_set - Set BAR0 mapped address.
 * @hldev: HAL device handle.
 * @bar0: BAR0 mapped address.
 *
 * Set BAR0 address in the HAL device object.
 */
__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void
xge_hal_device_bar0_set(xge_hal_device_t *hldev, char *bar0)
{
	xge_assert(bar0);
	hldev->bar0 = bar0;
}

/**
 * xge_hal_device_isrbar0_set - Set BAR0 mapped address for interrupt context.
 * @hldev: HAL device handle.
 * @isrbar0: BAR0 mapped address.
 *
 * Set the BAR0 address to be used in interrupt context.
 */
__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void
xge_hal_device_isrbar0_set(xge_hal_device_t *hldev, char *isrbar0)
{
	xge_assert(isrbar0);
	hldev->isrbar0 = isrbar0;
}

/**
 * xge_hal_device_bar1_set - Set BAR1 mapped address.
 * @hldev: HAL device handle.
 * @channelh: Channel handle.
 * @bar1: BAR1 mapped address.
 *
 * Set BAR1 address for the given channel.
 */
__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void
xge_hal_device_bar1_set(xge_hal_device_t *hldev, xge_hal_channel_h channelh,
		char *bar1)
{
	xge_hal_fifo_t *fifo = (xge_hal_fifo_t *)channelh;

	xge_assert(bar1);
	xge_assert(fifo);

	/*
	 * Initialize the BAR1 address to point at this channel's
	 * hardware doorbell pair: the start of the FIFO queue pointer
	 * and the location of the FIFO control word.
	 */
	fifo->hw_pair =
	    (xge_hal_fifo_hw_pair_t *) (bar1 +
		(fifo->channel.post_qid * XGE_HAL_FIFO_HW_PAIR_OFFSET));
	hldev->bar1 = bar1;
}
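
/*
 * Usage sketch (illustrative only; xgell_attach_bars() and its
 * arguments are hypothetical, not part of the HAL): once the ULD has
 * mapped the PCI resources, it would typically store the kernel-mapped
 * BAR addresses in the HAL device object and point each open FIFO
 * channel at its BAR1 doorbell area:
 *
 *	static void
 *	xgell_attach_bars(xge_hal_device_t *hldev, char *bar0_va,
 *	    char *bar1_va, xge_hal_channel_h channelh)
 *	{
 *		xge_hal_device_bar0_set(hldev, bar0_va);
 *		xge_hal_device_isrbar0_set(hldev, bar0_va);
 *		xge_hal_device_bar1_set(hldev, channelh, bar1_va);
 *	}
 *
 * On platforms that remap BAR0 differently for interrupt context the
 * isrbar0 address may differ from bar0; here they are assumed equal.
 */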

/**
 * xge_hal_device_rev - Get device revision number.
 * @hldev: HAL device handle.
 *
 * Returns: Device revision number.
 */
__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE int
xge_hal_device_rev(xge_hal_device_t *hldev)
{
	return hldev->revision;
}

/**
 * xge_hal_device_begin_irq - Begin IRQ processing.
 * @hldev: HAL device handle.
 * @reason: "Reason" for the interrupt, the value of Xframe's
 * general_int_status register.
 *
 * The function performs two actions. It first checks whether the
 * interrupt was raised by the device (relevant on shared IRQ lines).
 * It then handles any non-traffic (alarm) interrupt sources reported
 * in the general interrupt status.
 *
 * Note:
 * xge_hal_device_begin_irq() does not flush MMIO writes through the
 * bridge. Therefore, two back-to-back interrupts are potentially possible.
 * It is the responsibility of the ULD to make sure that only one
 * xge_hal_device_continue_irq() runs at a time.
 *
 * Returns: XGE_HAL_ERR_WRONG_IRQ, if the interrupt is not "ours" (in
 * this case *reason is set to 0 and the device interrupts remain
 * enabled); XGE_HAL_ERR_CRITICAL, if the adapter reports a slot-freeze
 * condition; an error code propagated from the alarm handlers, if any;
 * otherwise XGE_HAL_OK, with *reason set to the 64-bit general adapter
 * status.
 * See also: xge_hal_device_handle_irq()
 */
__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE xge_hal_status_e
xge_hal_device_begin_irq(xge_hal_device_t *hldev, u64 *reason)
{
	u64 val64;
	xge_hal_pci_bar0_t *isrbar0 = (xge_hal_pci_bar0_t *)hldev->isrbar0;

	hldev->stats.sw_dev_info_stats.total_intr_cnt++;

	val64 = xge_os_pio_mem_read64(hldev->pdev,
	    hldev->regh0, &isrbar0->general_int_status);
	if (xge_os_unlikely(!val64)) {
		/* not an Xframe interrupt */
		hldev->stats.sw_dev_info_stats.not_traffic_intr_cnt++;
		*reason = 0;
		return XGE_HAL_ERR_WRONG_IRQ;
	}

	if (xge_os_unlikely(val64 == XGE_HAL_ALL_FOXES)) {
		u64 adapter_status =
		    xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
			&isrbar0->adapter_status);
		if (adapter_status == XGE_HAL_ALL_FOXES) {
			(void) xge_queue_produce(hldev->queueh,
			    XGE_HAL_EVENT_SLOT_FREEZE,
			    hldev,
			    1, /* critical: slot freeze */
			    sizeof(u64),
			    (void *)&adapter_status);
			*reason = 0;
			return XGE_HAL_ERR_CRITICAL;
		}
	}

	*reason = val64;

	/* separate fast path, i.e. no errors */
	if (val64 & XGE_HAL_GEN_INTR_RXTRAFFIC) {
		hldev->stats.sw_dev_info_stats.rx_traffic_intr_cnt++;
		return XGE_HAL_OK;
	}
	if (val64 & XGE_HAL_GEN_INTR_TXTRAFFIC) {
		hldev->stats.sw_dev_info_stats.tx_traffic_intr_cnt++;
		return XGE_HAL_OK;
	}

	hldev->stats.sw_dev_info_stats.not_traffic_intr_cnt++;
	if (xge_os_unlikely(val64 & XGE_HAL_GEN_INTR_TXPIC)) {
		xge_hal_status_e status;
		hldev->stats.sw_dev_info_stats.txpic_intr_cnt++;
		status = __hal_device_handle_txpic(hldev, val64);
		if (status != XGE_HAL_OK) {
			return status;
		}
	}

	if (xge_os_unlikely(val64 & XGE_HAL_GEN_INTR_TXDMA)) {
		xge_hal_status_e status;
		hldev->stats.sw_dev_info_stats.txdma_intr_cnt++;
		status = __hal_device_handle_txdma(hldev, val64);
		if (status != XGE_HAL_OK) {
			return status;
		}
	}

	if (xge_os_unlikely(val64 & XGE_HAL_GEN_INTR_TXMAC)) {
		xge_hal_status_e status;
		hldev->stats.sw_dev_info_stats.txmac_intr_cnt++;
		status = __hal_device_handle_txmac(hldev, val64);
		if (status != XGE_HAL_OK) {
			return status;
		}
	}

	if (xge_os_unlikely(val64 & XGE_HAL_GEN_INTR_TXXGXS)) {
		xge_hal_status_e status;
		hldev->stats.sw_dev_info_stats.txxgxs_intr_cnt++;
		status = __hal_device_handle_txxgxs(hldev, val64);
		if (status != XGE_HAL_OK) {
			return status;
		}
	}

	if (xge_os_unlikely(val64 & XGE_HAL_GEN_INTR_RXPIC)) {
		xge_hal_status_e status;
		hldev->stats.sw_dev_info_stats.rxpic_intr_cnt++;
		status = __hal_device_handle_rxpic(hldev, val64);
		if (status != XGE_HAL_OK) {
			return status;
		}
	}

	if (xge_os_unlikely(val64 & XGE_HAL_GEN_INTR_RXDMA)) {
		xge_hal_status_e status;
		hldev->stats.sw_dev_info_stats.rxdma_intr_cnt++;
		status = __hal_device_handle_rxdma(hldev, val64);
		if (status != XGE_HAL_OK) {
			return status;
		}
	}

	if (xge_os_unlikely(val64 & XGE_HAL_GEN_INTR_RXMAC)) {
		xge_hal_status_e status;
		hldev->stats.sw_dev_info_stats.rxmac_intr_cnt++;
		status = __hal_device_handle_rxmac(hldev, val64);
		if (status != XGE_HAL_OK) {
			return status;
		}
	}

	if (xge_os_unlikely(val64 & XGE_HAL_GEN_INTR_RXXGXS)) {
		xge_hal_status_e status;
		hldev->stats.sw_dev_info_stats.rxxgxs_intr_cnt++;
		status = __hal_device_handle_rxxgxs(hldev, val64);
		if (status != XGE_HAL_OK) {
			return status;
		}
	}

	if (xge_os_unlikely(val64 & XGE_HAL_GEN_INTR_MC)) {
		xge_hal_status_e status;
		hldev->stats.sw_dev_info_stats.mc_intr_cnt++;
		status = __hal_device_handle_mc(hldev, val64);
		if (status != XGE_HAL_OK) {
			return status;
		}
	}

	return XGE_HAL_OK;
}

/**
 * xge_hal_device_clear_rx - Acknowledge (that is, clear) the
 * condition that has caused the Rx interrupt.
 * @hldev: HAL device handle.
 *
 * Acknowledge (that is, clear) the condition that has caused
 * the Rx interrupt.
 * See also: xge_hal_device_begin_irq(), xge_hal_device_continue_irq(),
 * xge_hal_device_clear_tx(), xge_hal_device_mask_rx().
 */
__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void
xge_hal_device_clear_rx(xge_hal_device_t *hldev)
{
	xge_hal_pci_bar0_t *isrbar0 = (xge_hal_pci_bar0_t *)hldev->isrbar0;

	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	    0xFFFFFFFFFFFFFFFFULL,
	    &isrbar0->rx_traffic_int);
}

/**
 * xge_hal_device_clear_tx - Acknowledge (that is, clear) the
 * condition that has caused the Tx interrupt.
 * @hldev: HAL device handle.
 *
 * Acknowledge (that is, clear) the condition that has caused
 * the Tx interrupt.
 * See also: xge_hal_device_begin_irq(), xge_hal_device_continue_irq(),
 * xge_hal_device_clear_rx(), xge_hal_device_mask_tx().
 */
__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void
xge_hal_device_clear_tx(xge_hal_device_t *hldev)
{
	xge_hal_pci_bar0_t *isrbar0 = (xge_hal_pci_bar0_t *)hldev->isrbar0;

	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	    0xFFFFFFFFFFFFFFFFULL,
	    &isrbar0->tx_traffic_int);
}

/**
 * xge_hal_device_poll_rx_channel - Poll Rx channel for completed
 * descriptors and process them.
 * @channel: HAL channel.
 * @got_rx: Returned count of processed Rx descriptors.
 *
 * The function polls the Rx channel for completed descriptors and calls
 * the upper-layer driver (ULD) via the supplied completion callback.
 *
 * Returns: XGE_HAL_OK, if polling completed successfully.
 * XGE_HAL_COMPLETIONS_REMAIN: There are still more completed
 * descriptors available which are yet to be processed.
 *
 * See also: xge_hal_device_poll_tx_channel()
 */
__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE xge_hal_status_e
xge_hal_device_poll_rx_channel(xge_hal_channel_t *channel, int *got_rx)
{
	xge_hal_status_e ret = XGE_HAL_OK;
	xge_hal_dtr_h first_dtrh;
	xge_hal_device_t *hldev = (xge_hal_device_t *)channel->devh;
	u8 t_code;
	int got_bytes;

	/* reset the per-poll counters for this channel */
	got_bytes = *got_rx = 0;
	((xge_hal_ring_t *)channel)->cmpl_cnt = 0;
	channel->poll_bytes = 0;
	if ((ret = xge_hal_ring_dtr_next_completed(channel, &first_dtrh,
	    &t_code)) == XGE_HAL_OK) {
		if (channel->callback(channel, first_dtrh,
		    t_code, channel->userdata) != XGE_HAL_OK) {
			(*got_rx) += ((xge_hal_ring_t *)channel)->cmpl_cnt + 1;
			got_bytes += channel->poll_bytes + 1;
			ret = XGE_HAL_COMPLETIONS_REMAIN;
		} else {
			(*got_rx) += ((xge_hal_ring_t *)channel)->cmpl_cnt + 1;
			got_bytes += channel->poll_bytes + 1;
		}
	}

	if (*got_rx) {
		hldev->irq_workload_rxd[channel->post_qid] += *got_rx;
		hldev->irq_workload_rxcnt[channel->post_qid]++;
	}
	hldev->irq_workload_rxlen[channel->post_qid] += got_bytes;

	return ret;
}
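
/*
 * Usage sketch (illustrative; xgell_rx_compl and xgell_rx_deliver are
 * hypothetical ULD helpers, and descriptor recycling is elided): the
 * completion callback invoked above is the one the ULD registered at
 * xge_hal_channel_open() time. A typical Rx callback drains all
 * completed descriptors before returning XGE_HAL_OK:
 *
 *	static xge_hal_status_e
 *	xgell_rx_compl(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
 *	    u8 t_code, void *userdata)
 *	{
 *		do {
 *			xgell_rx_deliver(userdata, dtrh, t_code);
 *		} while (xge_hal_ring_dtr_next_completed(channelh, &dtrh,
 *		    &t_code) == XGE_HAL_OK);
 *		return XGE_HAL_OK;
 *	}
 *
 * Returning anything other than XGE_HAL_OK makes the poll routine
 * report XGE_HAL_COMPLETIONS_REMAIN to its caller.
 */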

/**
 * xge_hal_device_poll_tx_channel - Poll Tx channel for completed
 * descriptors and process them.
 * @channel: HAL channel.
 * @got_tx: Returned count of processed Tx descriptors.
 *
 * The function polls the Tx channel for completed descriptors and calls
 * the upper-layer driver (ULD) via the supplied completion callback.
 *
 * Returns: XGE_HAL_OK, if polling completed successfully.
 * XGE_HAL_COMPLETIONS_REMAIN: There are still more completed
 * descriptors available which are yet to be processed.
 *
 * See also: xge_hal_device_poll_rx_channel().
 */
__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE xge_hal_status_e
xge_hal_device_poll_tx_channel(xge_hal_channel_t *channel, int *got_tx)
{
	xge_hal_dtr_h first_dtrh;
	xge_hal_device_t *hldev = (xge_hal_device_t *)channel->devh;
	u8 t_code;
	int got_bytes;

	/* reset the per-poll counters for this channel */
	got_bytes = *got_tx = 0;
	channel->poll_bytes = 0;
	if (xge_hal_fifo_dtr_next_completed(channel, &first_dtrh,
	    &t_code) == XGE_HAL_OK) {
		if (channel->callback(channel, first_dtrh,
		    t_code, channel->userdata) != XGE_HAL_OK) {
			(*got_tx)++;
			got_bytes += channel->poll_bytes + 1;
			return XGE_HAL_COMPLETIONS_REMAIN;
		}
		(*got_tx)++;
		got_bytes += channel->poll_bytes + 1;
	}

	if (*got_tx) {
		hldev->irq_workload_txd[channel->post_qid] += *got_tx;
		hldev->irq_workload_txcnt[channel->post_qid]++;
	}
	hldev->irq_workload_txlen[channel->post_qid] += got_bytes;

	return XGE_HAL_OK;
}

/**
 * xge_hal_device_poll_rx_channels - Poll Rx channels for completed
 * descriptors and process them.
 * @hldev: HAL device handle.
 * @got_rx: Returned count of processed Rx descriptors.
 *
 * The function polls the Rx channels for completed descriptors and calls
 * the upper-layer driver (ULD) via the supplied completion callback.
 *
 * Returns: XGE_HAL_OK, if polling completed successfully.
 * XGE_HAL_COMPLETIONS_REMAIN: There are still more completed
 * descriptors available which are yet to be processed.
 *
 * See also: xge_hal_device_poll_tx_channels(), xge_hal_device_continue_irq().
 */
__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE xge_hal_status_e
xge_hal_device_poll_rx_channels(xge_hal_device_t *hldev, int *got_rx)
{
	xge_list_t *item;
	xge_hal_channel_t *channel;

	/* for each opened rx channel */
	xge_list_for_each(item, &hldev->ring_channels) {
		if (hldev->terminating)
			return XGE_HAL_OK;
		channel = xge_container_of(item, xge_hal_channel_t, item);
		(void) xge_hal_device_poll_rx_channel(channel, got_rx);
	}

	return XGE_HAL_OK;
}

/**
 * xge_hal_device_poll_tx_channels - Poll Tx channels for completed
 * descriptors and process them.
 * @hldev: HAL device handle.
 * @got_tx: Returned count of processed Tx descriptors.
 *
 * The function polls the Tx channels for completed descriptors and calls
 * the upper-layer driver (ULD) via the supplied completion callback.
 *
 * Returns: XGE_HAL_OK, if polling completed successfully.
 * XGE_HAL_COMPLETIONS_REMAIN: There are still more completed
 * descriptors available which are yet to be processed.
 *
 * See also: xge_hal_device_poll_rx_channels(), xge_hal_device_continue_irq().
 */
__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE xge_hal_status_e
xge_hal_device_poll_tx_channels(xge_hal_device_t *hldev, int *got_tx)
{
	xge_list_t *item;
	xge_hal_channel_t *channel;

	/* for each opened tx channel */
	xge_list_for_each(item, &hldev->fifo_channels) {
		if (hldev->terminating)
			return XGE_HAL_OK;
		channel = xge_container_of(item, xge_hal_channel_t, item);
		(void) xge_hal_device_poll_tx_channel(channel, got_tx);
	}

	return XGE_HAL_OK;
}
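
/*
 * Usage sketch (illustrative; xgell_poll is hypothetical): outside of
 * interrupt context, for example from a periodic watchdog or a deferred
 * procedure call, a ULD can drain all open channels with the two
 * aggregate poll routines above:
 *
 *	static void
 *	xgell_poll(xge_hal_device_t *hldev)
 *	{
 *		int got_rx = 0, got_tx = 0;
 *
 *		(void) xge_hal_device_poll_rx_channels(hldev, &got_rx);
 *		(void) xge_hal_device_poll_tx_channels(hldev, &got_tx);
 *	}
 */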

/**
 * xge_hal_device_mask_tx - Mask Tx interrupts.
 * @hldev: HAL device handle.
 *
 * Mask Tx device interrupts.
 *
 * See also: xge_hal_device_unmask_tx(), xge_hal_device_mask_rx(),
 * xge_hal_device_clear_tx().
 */
__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void
xge_hal_device_mask_tx(xge_hal_device_t *hldev)
{
	xge_hal_pci_bar0_t *isrbar0 = (xge_hal_pci_bar0_t *)hldev->isrbar0;

	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	    0xFFFFFFFFFFFFFFFFULL,
	    &isrbar0->tx_traffic_mask);
}

/**
 * xge_hal_device_mask_rx - Mask Rx interrupts.
 * @hldev: HAL device handle.
 *
 * Mask Rx device interrupts.
 *
 * See also: xge_hal_device_unmask_rx(), xge_hal_device_mask_tx(),
 * xge_hal_device_clear_rx().
 */
__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void
xge_hal_device_mask_rx(xge_hal_device_t *hldev)
{
	xge_hal_pci_bar0_t *isrbar0 = (xge_hal_pci_bar0_t *)hldev->isrbar0;

	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	    0xFFFFFFFFFFFFFFFFULL,
	    &isrbar0->rx_traffic_mask);
}

/**
 * xge_hal_device_mask_all - Mask all device interrupts.
 * @hldev: HAL device handle.
 *
 * Mask all device interrupts.
 *
 * See also: xge_hal_device_unmask_all()
 */
__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void
xge_hal_device_mask_all(xge_hal_device_t *hldev)
{
	xge_hal_pci_bar0_t *isrbar0 = (xge_hal_pci_bar0_t *)hldev->isrbar0;

	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	    0xFFFFFFFFFFFFFFFFULL,
	    &isrbar0->general_int_mask);
}

/**
 * xge_hal_device_unmask_tx - Unmask Tx interrupts.
 * @hldev: HAL device handle.
 *
 * Unmask Tx device interrupts.
 *
 * See also: xge_hal_device_mask_tx(), xge_hal_device_clear_tx().
 */
__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void
xge_hal_device_unmask_tx(xge_hal_device_t *hldev)
{
	xge_hal_pci_bar0_t *isrbar0 = (xge_hal_pci_bar0_t *)hldev->isrbar0;

	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	    0x0ULL,
	    &isrbar0->tx_traffic_mask);
}

/**
 * xge_hal_device_unmask_rx - Unmask Rx interrupts.
 * @hldev: HAL device handle.
 *
 * Unmask Rx device interrupts.
 *
 * See also: xge_hal_device_mask_rx(), xge_hal_device_clear_rx().
 */
__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void
xge_hal_device_unmask_rx(xge_hal_device_t *hldev)
{
	xge_hal_pci_bar0_t *isrbar0 = (xge_hal_pci_bar0_t *)hldev->isrbar0;

	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	    0x0ULL,
	    &isrbar0->rx_traffic_mask);
}

/**
 * xge_hal_device_unmask_all - Unmask all device interrupts.
 * @hldev: HAL device handle.
 *
 * Unmask all device interrupts.
 *
 * See also: xge_hal_device_mask_all()
 */
__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void
xge_hal_device_unmask_all(xge_hal_device_t *hldev)
{
	xge_hal_pci_bar0_t *isrbar0 = (xge_hal_pci_bar0_t *)hldev->isrbar0;

	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	    0x0ULL,
	    &isrbar0->general_int_mask);
}
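
/*
 * Usage sketch (illustrative; xgell_rx_poll_mode is hypothetical, and
 * the exact clear/unmask ordering depends on the ULD's interrupt
 * model): the mask/unmask pairs above can also be used selectively,
 * for example to process Rx traffic in a polled fashion while leaving
 * Tx interrupts enabled:
 *
 *	static void
 *	xgell_rx_poll_mode(xge_hal_device_t *hldev)
 *	{
 *		int got_rx = 0;
 *
 *		xge_hal_device_mask_rx(hldev);
 *		(void) xge_hal_device_poll_rx_channels(hldev, &got_rx);
 *		xge_hal_device_clear_rx(hldev);
 *		xge_hal_device_unmask_rx(hldev);
 *	}
 */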

/**
 * xge_hal_device_continue_irq - Continue handling IRQ: process all
 * completed descriptors.
 * @hldev: HAL device handle.
 *
 * Process completed descriptors on all open channels.
 *
 * The xge_hal_device_continue_irq() walks all open channels
 * and calls the upper-layer driver (ULD) via the supplied completion
 * callback. Note that the completion callback is specified at channel open
 * time, see xge_hal_channel_open().
 *
 * Note that xge_hal_device_continue_irq() is part of the _fast_ path.
 * To optimize the processing, the function does _not_ check for
 * errors and alarms.
 *
 * The latter is done in a polling fashion, via xge_hal_device_poll().
 *
 * Returns: XGE_HAL_OK.
 *
 * See also: xge_hal_device_handle_irq(), xge_hal_device_poll(),
 * xge_hal_ring_dtr_next_completed(),
 * xge_hal_fifo_dtr_next_completed(), xge_hal_channel_callback_f{}.
 */
__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE xge_hal_status_e
xge_hal_device_continue_irq(xge_hal_device_t *hldev)
{
	int got_rx = 1, got_tx = 1;
	int isr_polling_cnt = hldev->config.isr_polling_cnt;
	int count = 0;

	do {
		if (got_rx)
			(void) xge_hal_device_poll_rx_channels(hldev, &got_rx);
		if (got_tx && hldev->tti_enabled)
			(void) xge_hal_device_poll_tx_channels(hldev, &got_tx);

		if (!got_rx && !got_tx)
			break;

		count += (got_rx + got_tx);
	} while (isr_polling_cnt--);

	if (!count)
		hldev->stats.sw_dev_info_stats.not_traffic_intr_cnt++;

	return XGE_HAL_OK;
}

/**
 * xge_hal_device_handle_irq - Handle device IRQ.
 * @hldev: HAL device handle.
 *
 * Perform the complete handling of the line interrupt. The function
 * first masks all device interrupts and uses xge_hal_device_begin_irq()
 * to determine the reason for the interrupt.
 * It then calls xge_hal_device_continue_irq() to process all
 * completed descriptors, and finally re-enables the interrupts.
 *
 * Returns: XGE_HAL_OK - success;
 * XGE_HAL_ERR_WRONG_IRQ - (shared) IRQ produced by another device.
 *
 * See also: xge_hal_device_begin_irq(), xge_hal_device_continue_irq().
 */
__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE xge_hal_status_e
xge_hal_device_handle_irq(xge_hal_device_t *hldev)
{
	u64 reason;
	xge_hal_status_e status;

	xge_hal_device_mask_all(hldev);

	status = xge_hal_device_begin_irq(hldev, &reason);
	if (status != XGE_HAL_OK) {
		xge_hal_device_unmask_all(hldev);
		return status;
	}

	if (reason & XGE_HAL_GEN_INTR_RXTRAFFIC) {
		xge_hal_device_clear_rx(hldev);
	}

	status = xge_hal_device_continue_irq(hldev);

	xge_hal_device_clear_tx(hldev);

	xge_hal_device_unmask_all(hldev);

	return status;
}
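
/*
 * Usage sketch (illustrative; xgell_isr is hypothetical): a ULD that
 * does all of its completion processing in interrupt context can simply
 * forward the line interrupt to xge_hal_device_handle_irq() and use the
 * return code to tell the OS whether the interrupt was claimed:
 *
 *	static int
 *	xgell_isr(void *arg)
 *	{
 *		xge_hal_device_t *hldev = (xge_hal_device_t *)arg;
 *
 *		if (xge_hal_device_handle_irq(hldev) ==
 *		    XGE_HAL_ERR_WRONG_IRQ)
 *			return 0;
 *		return 1;
 *	}
 *
 * Because the HAL does not serialize xge_hal_device_continue_irq()
 * internally, keeping all completion processing inside the ISR, as
 * here, is one simple way to satisfy the single-caller requirement
 * noted above xge_hal_device_begin_irq().
 */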

#if defined(XGE_HAL_CONFIG_LRO)

__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL int
__hal_lro_check_for_session_match(lro_t *lro, tcplro_t *tcp, iplro_t *ip)
{
	/* Match Source address field */
	if ((lro->ip_hdr->saddr != ip->saddr))
		return XGE_HAL_FAIL;

	/* Match Destination address field */
	if ((lro->ip_hdr->daddr != ip->daddr))
		return XGE_HAL_FAIL;

	/* Match Source Port field */
	if ((lro->tcp_hdr->source != tcp->source))
		return XGE_HAL_FAIL;

	/* Match Destination Port field */
	if ((lro->tcp_hdr->dest != tcp->dest))
		return XGE_HAL_FAIL;

	return XGE_HAL_OK;
}

/*
 * __hal_tcp_seg_len: Find the TCP segment (payload) length.
 * @ip: ip header.
 * @tcp: tcp header.
 * Returns: TCP segment length, i.e. the IP total length minus the IP
 * and TCP header lengths. For example, tot_len 1500 with a 20-byte IP
 * header and a 32-byte TCP header yields 1448 bytes of payload.
 */
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL u16
__hal_tcp_seg_len(iplro_t *ip, tcplro_t *tcp)
{
	u16 ret;

	ret = (xge_os_ntohs(ip->tot_len) -
	    ((ip->version_ihl & 0x0F) << 2) -
	    ((tcp->doff_res) >> 2));
	return (ret);
}

/*
 * __hal_ip_lro_capable: Finds whether the ip header is LRO capable.
 * @ip: ip header.
 * @ext_info: descriptor info.
 */
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL xge_hal_status_e
__hal_ip_lro_capable(iplro_t *ip,
	    xge_hal_dtr_info_t *ext_info)
{
#ifdef XGE_LL_DEBUG_DUMP_PKT
	{
		u16 i;
		u8 ch, *iph = (u8 *)ip;

		xge_debug_ring(XGE_TRACE, "Dump Ip:");
		for (i = 0; i < 40; i++) {
			ch = *(iph + i);
			xge_os_printf("i:%d %02x, ", i, ch);
		}
	}
#endif

	if (ip->version_ihl != IP_FAST_PATH_HDR_MASK) {
		xge_debug_ring(XGE_ERR, "iphdr !=45 :%d", ip->version_ihl);
		return XGE_HAL_FAIL;
	}

	if (ext_info->proto & XGE_HAL_FRAME_PROTO_IP_FRAGMENTED) {
		xge_debug_ring(XGE_ERR, "IP fragmented");
		return XGE_HAL_FAIL;
	}

	return XGE_HAL_OK;
}

/*
 * __hal_tcp_lro_capable: Finds whether the tcp header is LRO capable.
 * @ip: ip header.
 * @tcp: tcp header.
 */
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL xge_hal_status_e
__hal_tcp_lro_capable(iplro_t *ip, tcplro_t *tcp, lro_t *lro, int *ts_off)
{
#ifdef XGE_LL_DEBUG_DUMP_PKT
	{
		u8 ch;
		u16 i;

		xge_debug_ring(XGE_TRACE, "Dump Tcp:");
		for (i = 0; i < 20; i++) {
			ch = *((u8 *)tcp + i);
			xge_os_printf("i:%d %02x, ", i, ch);
		}
	}
#endif
	if ((TCP_FAST_PATH_HDR_MASK2 != tcp->ctrl) &&
	    (TCP_FAST_PATH_HDR_MASK3 != tcp->ctrl))
		goto _exit_fail;

	*ts_off = -1;
	if (TCP_FAST_PATH_HDR_MASK1 != tcp->doff_res) {
		u16 tcp_hdr_len = tcp->doff_res >> 2; /* TCP header len */
		u16 off = 20; /* Start of tcp options */
		int i, diff;

		/* Can the packet carry a timestamp option at all? */
		if (tcp_hdr_len < 32) {
			/*
			 * If the session is not opened, we can consider
			 * this packet for LRO
			 */
			if (lro == NULL)
				return XGE_HAL_OK;

			goto _exit_fail;
		}

		/* Ignore No-operation 0x1 */
		while (((u8 *)tcp)[off] == 0x1)
			off++;

		/* Next option == Timestamp */
		if (((u8 *)tcp)[off] != 0x8) {
			/*
			 * If the session is not opened, we can consider
			 * this packet for LRO
			 */
			if (lro == NULL)
				return XGE_HAL_OK;

			goto _exit_fail;
		}

		*ts_off = off;
		if (lro == NULL)
			return XGE_HAL_OK;

		/*
		 * Now the session is opened. If the LRO frame doesn't
		 * have a timestamp, we cannot consider the current
		 * packet for LRO.
		 */
		if (lro->ts_off == -1) {
			xge_debug_ring(XGE_ERR, "Pkt received with time stamp"
			    " after session opened with no time stamp :"
			    " %02x %02x\n", tcp->doff_res, tcp->ctrl);
			return XGE_HAL_FAIL;
		}

		/*
		 * If the difference is greater than three, then there
		 * are more options possible.
		 * Otherwise, there are two cases:
		 * case 1: the remaining bytes are padding.
		 * case 2: the remaining bytes can contain options or
		 * padding.
		 */
		off += ((u8 *)tcp)[off + 1];
		diff = tcp_hdr_len - off;
		if (diff > 3) {
			/*
			 * Probably contains more options.
			 */
			xge_debug_ring(XGE_ERR, "tcphdr not fastpth :"
			    " pkt received with tcp options in addition to"
			    " time stamp after the session is opened"
			    " %02x %02x\n", tcp->doff_res, tcp->ctrl);
			return XGE_HAL_FAIL;
		}

		for (i = 0; i < diff; i++) {
			u8 byte = ((u8 *)tcp)[off + i];

			/* Ignore End-of-option-list 0x0 and No-op 0x1 */
			if ((byte == 0x0) || (byte == 0x1))
				continue;
			xge_debug_ring(XGE_ERR, "tcphdr not fastpth :"
			    " pkt received with tcp options in addition to"
			    " time stamp after the session is opened"
			    " %02x %02x\n", tcp->doff_res, tcp->ctrl);
			return XGE_HAL_FAIL;
		}

		/*
		 * Update the time stamp of LRO frame.
		 */
		xge_os_memcpy(((char *)lro->tcp_hdr + lro->ts_off + 2),
		    (char *)((char *)tcp + (*ts_off) + 2), 8);
	}

	return XGE_HAL_OK;

_exit_fail:
	xge_debug_ring(XGE_ERR, "tcphdr not fastpth %02x %02x\n",
	    tcp->doff_res, tcp->ctrl);
	return XGE_HAL_FAIL;
}
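
/*
 * For reference (standard TCP layout, not HAL-specific): the timestamp
 * option parsed above consists of kind (0x8), length (0xa), a 4-byte
 * TSval and a 4-byte TSecr, conventionally preceded by two 0x1 NOP
 * padding bytes. With a 32-byte TCP header (doff_res 0x80) the options
 * area therefore looks like:
 *
 *	offset:	20    21    22    23    24..27   28..31
 *	value:	0x01  0x01  0x08  0x0a  TSval    TSecr
 *
 * which is why the code above skips NOPs from offset 20, expects kind
 * 0x8 next, and copies 8 bytes starting at ts_off + 2 to refresh the
 * stored session's TSval/TSecr.
 */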

/*
 * __hal_lro_capable: Finds whether the frame is LRO capable.
 * @buffer: Ethernet frame.
 * @ip: Returned pointer to the ip header.
 * @tcp: Returned pointer to the tcp header.
 * @ext_info: Descriptor info.
 * @hldev: HAL context.
 */
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL xge_hal_status_e
__hal_lro_capable(u8 *buffer,
	    iplro_t **ip,
	    tcplro_t **tcp,
	    xge_hal_dtr_info_t *ext_info,
	    xge_hal_device_t *hldev)
{
	u8 ip_off, ip_length;

	if (!(ext_info->proto & XGE_HAL_FRAME_PROTO_TCP)) {
		xge_debug_ring(XGE_ERR, "Can't do lro %d", ext_info->proto);
		return XGE_HAL_FAIL;
	}
#ifdef XGE_LL_DEBUG_DUMP_PKT
	{
		u8 ch;
		u16 i;

		xge_os_printf("Dump Eth:");
		for (i = 0; i < 60; i++) {
			ch = *(buffer + i);
			xge_os_printf("i:%d %02x, ", i, ch);
		}
	}
#endif

	switch (ext_info->frame) {
	case XGE_HAL_FRAME_TYPE_DIX:
		ip_off = XGE_HAL_HEADER_ETHERNET_II_802_3_SIZE;
		break;
	case XGE_HAL_FRAME_TYPE_LLC:
		ip_off = (XGE_HAL_HEADER_ETHERNET_II_802_3_SIZE +
		    XGE_HAL_HEADER_802_2_SIZE);
		break;
	case XGE_HAL_FRAME_TYPE_SNAP:
		ip_off = (XGE_HAL_HEADER_ETHERNET_II_802_3_SIZE +
		    XGE_HAL_HEADER_SNAP_SIZE);
		break;
	default: /* XGE_HAL_FRAME_TYPE_IPX, etc. */
		return XGE_HAL_FAIL;
	}

	if (ext_info->proto & XGE_HAL_FRAME_PROTO_VLAN_TAGGED) {
		ip_off += XGE_HAL_HEADER_VLAN_SIZE;
	}

	/* Grab ip, tcp headers */
	*ip = (iplro_t *)((char *)buffer + ip_off);

	ip_length = (u8)((*ip)->version_ihl & 0x0F);
	ip_length = ip_length << 2;
	*tcp = (tcplro_t *)((unsigned long)*ip + ip_length);

	xge_debug_ring(XGE_TRACE, "ip_length:%d ip:"XGE_OS_LLXFMT
	    " tcp:"XGE_OS_LLXFMT"", (int)ip_length,
	    (unsigned long long)(long)*ip, (unsigned long long)(long)*tcp);

	return XGE_HAL_OK;
}
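
/*
 * For reference (assuming the conventional values of the header-size
 * constants in this HAL: 14 bytes for the 802.3/DIX MAC header, 3 for
 * 802.2 LLC, 5 for SNAP and 4 for a VLAN tag; check xgehal-device.h,
 * since these values are an assumption here), the IP header offsets
 * computed above work out to:
 *
 *	DIX:	14	(18 with a VLAN tag)
 *	LLC:	17	(21 with a VLAN tag)
 *	SNAP:	19	(23 with a VLAN tag)
 */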

/*
 * __hal_open_lro_session: Open a new LRO session.
 * @buffer: Ethernet frame.
 * @ip: ip header.
 * @tcp: tcp header.
 * @lro: Returned LRO session pointer.
 * @hldev: HAL context.
 * @slot: Bucket number.
 * @tcp_seg_len: Length of the tcp segment.
 * @ts_off: Timestamp offset in the packet.
 */
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL void
__hal_open_lro_session(u8 *buffer, iplro_t *ip, tcplro_t *tcp, lro_t **lro,
	    xge_hal_device_t *hldev, int slot, u32 tcp_seg_len,
	    int ts_off)
{
	lro_t *lro_new = &hldev->lro_pool[slot];

	lro_new->in_use = 1;
	lro_new->ll_hdr = buffer;
	lro_new->ip_hdr = ip;
	lro_new->tcp_hdr = tcp;
	lro_new->tcp_next_seq_num = tcp_seg_len + xge_os_ntohl(tcp->seq);
	lro_new->tcp_seq_num = tcp->seq;
	lro_new->tcp_ack_num = tcp->ack_seq;
	lro_new->sg_num = 1;
	lro_new->total_length = xge_os_ntohs(ip->tot_len);
	lro_new->frags_len = 0;
	lro_new->ts_off = ts_off;

	hldev->stats.sw_dev_info_stats.tot_frms_lroised++;
	hldev->stats.sw_dev_info_stats.tot_lro_sessions++;

	*lro = hldev->lro_recent = lro_new;
}

/*
 * __hal_lro_get_free_slot: Get a free LRO bucket.
 * @hldev: HAL context.
 */
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL int
__hal_lro_get_free_slot(xge_hal_device_t *hldev)
{
	int i;

	for (i = 0; i < XGE_HAL_LRO_MAX_BUCKETS; i++) {
		lro_t *lro_temp = &hldev->lro_pool[i];

		if (!lro_temp->in_use)
			return i;
	}
	return -1;
}
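
/*
 * Summary of the LRO status codes produced by __hal_get_lro_session()
 * below and by xge_hal_accumulate_large_rx(), which adds END_1
 * (summarized from the code in this file):
 *
 *	XGE_HAL_INF_LRO_BEGIN      no match; a new session was opened
 *	XGE_HAL_INF_LRO_CONT       frame merged into an existing session
 *	XGE_HAL_INF_LRO_END_1      frame merged; the session is now
 *	                           full and should be flushed
 *	XGE_HAL_INF_LRO_END_2      frame not merged; flush the session
 *	                           and handle the frame separately
 *	XGE_HAL_INF_LRO_END_3      session flushed; a new session was
 *	                           opened for this frame (*lro_end3)
 *	XGE_HAL_INF_LRO_UNCAPABLE  frame cannot be LRO-ed at all
 */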

/*
 * __hal_get_lro_session: Gets a matching LRO session or creates one.
 * @buffer: Ethernet frame.
 * @ip: ip header.
 * @tcp: tcp header.
 * @lro: Returned LRO session pointer.
 * @ext_info: Descriptor info.
 * @hldev: HAL context.
 * @lro_end3: Returned new session; valid only when the return code is
 * XGE_HAL_INF_LRO_END_3.
 */
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL xge_hal_status_e
__hal_get_lro_session(u8 *buffer,
	    iplro_t *ip,
	    tcplro_t *tcp,
	    lro_t **lro,
	    xge_hal_dtr_info_t *ext_info,
	    xge_hal_device_t *hldev,
	    lro_t **lro_end3 /* Valid only when ret=END_3 */)
{
	lro_t *lro_match;
	int i, free_slot = -1;
	u32 tcp_seg_len;
	int ts_off = -1;

	*lro = lro_match = NULL;
	/*
	 * Compare the incoming frame with the LRO session left over
	 * from the previous call. There is a good chance that this
	 * incoming frame matches that session.
	 */
	if (hldev->lro_recent && hldev->lro_recent->in_use) {
		if (__hal_lro_check_for_session_match(hldev->lro_recent,
		    tcp, ip) == XGE_HAL_OK)
			lro_match = hldev->lro_recent;
	}

	if (!lro_match) {
		/*
		 * Search the pool of LRO sessions for one that matches
		 * the incoming frame.
		 */
		for (i = 0; i < XGE_HAL_LRO_MAX_BUCKETS; i++) {
			lro_t *lro_temp = &hldev->lro_pool[i];

			if (!lro_temp->in_use) {
				if (free_slot == -1)
					free_slot = i;
				continue;
			}

			if (__hal_lro_check_for_session_match(lro_temp, tcp,
			    ip) == XGE_HAL_OK) {
				lro_match = lro_temp;
				break;
			}
		}
	}

	if (lro_match) {
		/*
		 * Matching LRO session found
		 */
		*lro = lro_match;

		if (lro_match->tcp_next_seq_num != xge_os_ntohl(tcp->seq)) {
			xge_debug_ring(XGE_ERR, "**retransmit **"
			    "found***");
			hldev->stats.sw_dev_info_stats.lro_out_of_seq_pkt_cnt++;
			return XGE_HAL_INF_LRO_END_2;
		}

		if (XGE_HAL_OK != __hal_ip_lro_capable(ip, ext_info))
			return XGE_HAL_INF_LRO_END_2;

		if (XGE_HAL_OK != __hal_tcp_lro_capable(ip, tcp, lro_match,
		    &ts_off)) {
			/*
			 * Close the current session and open a new
			 * LRO session with this packet,
			 * provided it has tcp payload
			 */
			tcp_seg_len = __hal_tcp_seg_len(ip, tcp);
			if (tcp_seg_len == 0)
				return XGE_HAL_INF_LRO_END_2;

			/* Get a free bucket */
			free_slot = __hal_lro_get_free_slot(hldev);
			if (free_slot == -1)
				return XGE_HAL_INF_LRO_END_2;

			/*
			 * Open a new LRO session
			 */
			__hal_open_lro_session(buffer, ip, tcp, lro_end3,
			    hldev, free_slot, tcp_seg_len, ts_off);

			return XGE_HAL_INF_LRO_END_3;
		}

		/*
		 * The frame is good, in sequence, and can be LRO-ed;
		 * take its (latest) ACK, unless it is a duplicate ACK.
		 * Note: to be exact we would need to check the window
		 * size as well.
		 */
		if (lro_match->tcp_ack_num == tcp->ack_seq &&
		    lro_match->tcp_seq_num == tcp->seq) {
			hldev->stats.sw_dev_info_stats.lro_dup_pkt_cnt++;
			return XGE_HAL_INF_LRO_END_2;
		}

		lro_match->tcp_seq_num = tcp->seq;
		lro_match->tcp_ack_num = tcp->ack_seq;
		lro_match->frags_len += __hal_tcp_seg_len(ip, tcp);

		hldev->lro_recent = lro_match;

		return XGE_HAL_INF_LRO_CONT;
	}

	/* ********** New Session ************** */
	if (free_slot == -1)
		return XGE_HAL_INF_LRO_UNCAPABLE;

	if (XGE_HAL_FAIL == __hal_ip_lro_capable(ip, ext_info))
		return XGE_HAL_INF_LRO_UNCAPABLE;

	if (XGE_HAL_FAIL == __hal_tcp_lro_capable(ip, tcp, NULL, &ts_off))
		return XGE_HAL_INF_LRO_UNCAPABLE;

	xge_debug_ring(XGE_TRACE, "Creating lro session.");

	/*
	 * Open an LRO session, provided the packet contains payload.
	 */
	tcp_seg_len = __hal_tcp_seg_len(ip, tcp);
	if (tcp_seg_len == 0)
		return XGE_HAL_INF_LRO_UNCAPABLE;

	__hal_open_lro_session(buffer, ip, tcp, lro, hldev, free_slot,
	    tcp_seg_len, ts_off);

	return XGE_HAL_INF_LRO_BEGIN;
}

/*
 * __hal_lro_under_optimal_thresh: Checks whether the combined session
 * stays within the configured limits.
 * @ip: ip header.
 * @tcp: tcp header.
 * @lro: LRO session.
 * @hldev: HAL context.
 */
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL xge_hal_status_e
__hal_lro_under_optimal_thresh(iplro_t *ip,
	    tcplro_t *tcp,
	    lro_t *lro,
	    xge_hal_device_t *hldev)
{
	if (!lro)
		return XGE_HAL_FAIL;

	if ((lro->total_length + __hal_tcp_seg_len(ip, tcp)) >
	    hldev->config.lro_frm_len) {
		xge_debug_ring(XGE_TRACE, "Max LRO frame len exceeded:"
		    " max length %d\n", hldev->config.lro_frm_len);
		hldev->stats.sw_dev_info_stats.lro_frm_len_exceed_cnt++;
		return XGE_HAL_FAIL;
	}

	if (lro->sg_num == hldev->config.lro_sg_size) {
		xge_debug_ring(XGE_TRACE, "Max sg count exceeded:"
		    " max sg %d\n", hldev->config.lro_sg_size);
		hldev->stats.sw_dev_info_stats.lro_sg_exceed_cnt++;
		return XGE_HAL_FAIL;
	}

	return XGE_HAL_OK;
}

/*
 * __hal_collapse_ip_hdr: Collapses the ip header into the session's.
 * @ip: ip header.
 * @tcp: tcp header.
 * @lro: LRO session.
 * @hldev: HAL context.
 */
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL xge_hal_status_e
__hal_collapse_ip_hdr(iplro_t *ip,
	    tcplro_t *tcp,
	    lro_t *lro,
	    xge_hal_device_t *hldev)
{
	lro->total_length += __hal_tcp_seg_len(ip, tcp);

	/* We may have to handle timestamps or other options here */

	return XGE_HAL_OK;
}

/*
 * __hal_collapse_tcp_hdr: Collapses the tcp header into the session's.
 * @ip: ip header.
 * @tcp: tcp header.
 * @lro: LRO session.
 * @hldev: HAL context.
 */
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL xge_hal_status_e
__hal_collapse_tcp_hdr(iplro_t *ip,
	    tcplro_t *tcp,
	    lro_t *lro,
	    xge_hal_device_t *hldev)
{
	lro->tcp_next_seq_num += __hal_tcp_seg_len(ip, tcp);
	return XGE_HAL_OK;
}

/*
 * __hal_append_lro: Appends a new frame to the existing LRO session.
 * @ip: ip header.
 * @tcp: IN: tcp header; OUT: tcp payload.
 * @seg_len: Returned tcp payload length.
 * @lro: LRO session.
 * @hldev: HAL context.
 */
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL xge_hal_status_e
__hal_append_lro(iplro_t *ip,
	    tcplro_t **tcp,
	    u32 *seg_len,
	    lro_t *lro,
	    xge_hal_device_t *hldev)
{
	(void) __hal_collapse_ip_hdr(ip, *tcp, lro, hldev);
	(void) __hal_collapse_tcp_hdr(ip, *tcp, lro, hldev);
	/*
	 * Updating the mbuf chain is done in the link-layer driver.
	 * On successfully appending a new frame,
	 * xge_hal_accumulate_large_rx() returns the tcp payload pointer
	 * and the tcp payload length to the link-layer driver, along
	 * with the "frame appended" return code.
	 */

	lro->sg_num++;
	*seg_len = __hal_tcp_seg_len(ip, *tcp);
	*tcp = (tcplro_t *)((unsigned long)*tcp + (((*tcp)->doff_res) >> 2));

	return XGE_HAL_OK;
}

/**
 * xge_hal_accumulate_large_rx - LRO a given frame.
 * @buffer: Ethernet frame.
 * @tcp: IN: tcp header; OUT: tcp payload (when the frame was merged).
 * @seglen: Returned tcp payload length.
 * @p_lro: Returned LRO session pointer.
 * @ext_info: Descriptor info, see xge_hal_dtr_info_t{}.
 * @hldev: HAL device.
 * @lro_end3: Returned new session; valid only when the return code is
 * XGE_HAL_INF_LRO_END_3.
 *
 * LRO the newly received frame, i.e. attach it (if possible) to the
 * already accumulated (i.e., already LRO-ed) received frames (if any),
 * to form one super-sized frame for the subsequent processing
 * by the stack.
 */
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL xge_hal_status_e
xge_hal_accumulate_large_rx(u8 *buffer, tcplro_t **tcp, u32 *seglen,
	lro_t **p_lro, xge_hal_dtr_info_t *ext_info, xge_hal_device_t *hldev,
	lro_t **lro_end3)
{
	iplro_t *ip;
	xge_hal_status_e ret;
	lro_t *lro;

	xge_debug_ring(XGE_TRACE, "Entered accumulate_large_rx.");
	if (XGE_HAL_OK != __hal_lro_capable(buffer, &ip, (tcplro_t **)tcp,
	    ext_info, hldev))
		return XGE_HAL_INF_LRO_UNCAPABLE;

	/*
	 * This function will get a matching LRO session or else
	 * create one and return it.
	 */
	ret = __hal_get_lro_session(buffer, ip, (tcplro_t *)*tcp,
	    p_lro, ext_info, hldev, lro_end3);
	xge_debug_ring(XGE_TRACE, "ret from get_lro:%d ", ret);
	lro = *p_lro;
	if (XGE_HAL_INF_LRO_CONT == ret) {
		if (XGE_HAL_OK == __hal_lro_under_optimal_thresh(ip,
		    (tcplro_t *)*tcp, lro, hldev)) {
			(void) __hal_append_lro(ip, (tcplro_t **)tcp, seglen,
			    lro, hldev);
			hldev->stats.sw_dev_info_stats.tot_frms_lroised++;

			if (lro->sg_num >= hldev->config.lro_sg_size) {
				hldev->stats.sw_dev_info_stats.lro_sg_exceed_cnt++;
				ret = XGE_HAL_INF_LRO_END_1;
			}

		} else
			ret = XGE_HAL_INF_LRO_END_2;
	}

	/*
	 * Since it is time to flush,
	 * update the ip header so that it can be sent up.
	 */
	if ((ret == XGE_HAL_INF_LRO_END_1) ||
	    (ret == XGE_HAL_INF_LRO_END_2) ||
	    (ret == XGE_HAL_INF_LRO_END_3)) {
		lro->ip_hdr->tot_len = xge_os_htons((*p_lro)->total_length);
		lro->ip_hdr->check = xge_os_htons(0);
		lro->ip_hdr->check = XGE_LL_IP_FAST_CSUM(((u8 *)(lro->ip_hdr)),
		    (lro->ip_hdr->version_ihl & 0x0F));
		lro->tcp_hdr->ack_seq = lro->tcp_ack_num;
	}

	return (ret);
}

/**
 * xge_hal_lro_close_session - Close an LRO session.
 * @lro: LRO session.
 */
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL void
xge_hal_lro_close_session(lro_t *lro)
{
	lro->in_use = 0;
}
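
/*
 * Usage sketch (illustrative; xgell_deliver_frame is a hypothetical
 * ULD helper, and buffer chaining is elided): inside its Rx completion
 * callback the ULD feeds each frame to xge_hal_accumulate_large_rx()
 * and acts on the return code. On the END_2 code the current frame was
 * not merged and must be handled separately; on END_3 a new session
 * holding the current frame remains open in lro_end3:
 *
 *	tcplro_t *tcp;
 *	u32 seglen;
 *	lro_t *lro = NULL, *lro_end3 = NULL;
 *
 *	switch (xge_hal_accumulate_large_rx(buffer, &tcp, &seglen,
 *	    &lro, ext_info, hldev, &lro_end3)) {
 *	case XGE_HAL_INF_LRO_BEGIN:
 *	case XGE_HAL_INF_LRO_CONT:
 *		break;
 *	case XGE_HAL_INF_LRO_END_1:
 *	case XGE_HAL_INF_LRO_END_3:
 *		xgell_deliver_frame(lro->ll_hdr);
 *		xge_hal_lro_close_session(lro);
 *		break;
 *	case XGE_HAL_INF_LRO_END_2:
 *		xgell_deliver_frame(lro->ll_hdr);
 *		xge_hal_lro_close_session(lro);
 *		xgell_deliver_frame(buffer);
 *		break;
 *	default:
 *		xgell_deliver_frame(buffer);
 *		break;
 *	}
 */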

/**
 * xge_hal_lro_get_next_session - Returns the next LRO session in the
 * list, or NULL if none exists.
 * @hldev: HAL context.
 */
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL lro_t *
xge_hal_lro_get_next_session(xge_hal_device_t *hldev)
{
	int i;
	int start_idx = hldev->lro_next_idx;

	for (i = start_idx; i < XGE_HAL_LRO_MAX_BUCKETS; i++) {
		lro_t *lro = &hldev->lro_pool[i];

		if (!lro->in_use)
			continue;

		lro->ip_hdr->tot_len = xge_os_htons(lro->total_length);
		lro->ip_hdr->check = xge_os_htons(0);
		lro->ip_hdr->check = XGE_LL_IP_FAST_CSUM(((u8 *)(lro->ip_hdr)),
		    (lro->ip_hdr->version_ihl & 0x0F));
		hldev->lro_next_idx = i + 1;
		return lro;
	}

	hldev->lro_next_idx = 0;
	return NULL;
}
#endif
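
/*
 * Usage sketch (illustrative; xgell_flush_lro and xgell_deliver_frame
 * are hypothetical): when the ULD decides to flush all pending LRO
 * state, for example before resetting the device, it can iterate the
 * open sessions with xge_hal_lro_get_next_session(), hand each
 * accumulated super-frame to the stack, and close the session:
 *
 *	static void
 *	xgell_flush_lro(xge_hal_device_t *hldev)
 *	{
 *		lro_t *lro;
 *
 *		while ((lro = xge_hal_lro_get_next_session(hldev)) != NULL) {
 *			xgell_deliver_frame(lro->ll_hdr);
 *			xge_hal_lro_close_session(lro);
 *		}
 *	}
 */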