/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 *
 * Copyright (c) 2002-2006 Neterion, Inc.
 */

#ifdef XGE_DEBUG_FP
#include "xgehal-device.h"
#endif

#include "xgehal-ring.h"
#include "xgehal-fifo.h"

/**
 * xge_hal_device_bar0 - Get BAR0 mapped address.
 * @hldev: HAL device handle.
 *
 * Returns: BAR0 address of the specified device.
 */
__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE char *
xge_hal_device_bar0(xge_hal_device_t *hldev)
{
        return hldev->bar0;
}

/**
 * xge_hal_device_isrbar0 - Get the ISR-mapped BAR0 address.
 * @hldev: HAL device handle.
 *
 * Returns: ISR-mapped BAR0 address of the specified device.
 */
__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE char *
xge_hal_device_isrbar0(xge_hal_device_t *hldev)
{
        return hldev->isrbar0;
}

/**
 * xge_hal_device_bar1 - Get BAR1 mapped address.
 * @hldev: HAL device handle.
 *
 * Returns: BAR1 address of the specified device.
 */
__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE char *
xge_hal_device_bar1(xge_hal_device_t *hldev)
{
        return hldev->bar1;
}

/**
 * xge_hal_device_bar0_set - Set BAR0 mapped address.
 * @hldev: HAL device handle.
 * @bar0: BAR0 mapped address.
 *
 * Set BAR0 address in the HAL device object.
 */
__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void
xge_hal_device_bar0_set(xge_hal_device_t *hldev, char *bar0)
{
        xge_assert(bar0);
        hldev->bar0 = bar0;
}

/**
 * xge_hal_device_isrbar0_set - Set the ISR-mapped BAR0 address.
 * @hldev: HAL device handle.
 * @isrbar0: BAR0 mapped address to be used from interrupt context.
 *
 * Set the ISR-mapped BAR0 address in the HAL device object.
 */
__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void
xge_hal_device_isrbar0_set(xge_hal_device_t *hldev, char *isrbar0)
{
        xge_assert(isrbar0);
        hldev->isrbar0 = isrbar0;
}

/**
 * xge_hal_device_bar1_set - Set BAR1 mapped address.
 * @hldev: HAL device handle.
 * @channelh: Channel handle.
 * @bar1: BAR1 mapped address.
 *
 * Set BAR1 address for the given channel.
 */
__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void
xge_hal_device_bar1_set(xge_hal_device_t *hldev, xge_hal_channel_h channelh,
                char *bar1)
{
        xge_hal_fifo_t *fifo = (xge_hal_fifo_t *)channelh;

        xge_assert(bar1);
        xge_assert(fifo);

        /* Initialize the BAR1 address as the start of this FIFO's
         * hardware pair, i.e. the FIFO queue pointer and the location
         * of the FIFO control word. */
        fifo->hw_pair =
                (xge_hal_fifo_hw_pair_t *) (bar1 +
                (fifo->channel.post_qid * XGE_HAL_FIFO_HW_PAIR_OFFSET));
        hldev->bar1 = bar1;
}
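/*
 * Usage sketch (illustrative only, not part of the HAL): a ULD would
 * typically map the PCI BARs at attach time and hand the virtual
 * addresses to the HAL, then point each FIFO channel at BAR1 once the
 * channel has been opened. pci_map_bar() and "fifo_channelh" are
 * hypothetical placeholder names, not HAL symbols.
 *
 *	(hypothetical mapping calls)
 *	char *bar0 = pci_map_bar(pdev, 0);
 *	char *bar1 = pci_map_bar(pdev, 1);
 *
 *	xge_hal_device_bar0_set(hldev, bar0);
 *	xge_hal_device_isrbar0_set(hldev, bar0);
 *	...
 *	(after xge_hal_channel_open() of a fifo)
 *	xge_hal_device_bar1_set(hldev, fifo_channelh, bar1);
 */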
/**
 * xge_hal_device_rev - Get device revision number.
 * @hldev: HAL device handle.
 *
 * Returns: Device revision number.
 */
__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE int
xge_hal_device_rev(xge_hal_device_t *hldev)
{
        return hldev->revision;
}

/**
 * xge_hal_device_begin_irq - Begin IRQ processing.
 * @hldev: HAL device handle.
 * @reason: "Reason" for the interrupt, the value of Xframe's
 * general_int_status register.
 *
 * The function performs two actions. First, it checks whether the
 * interrupt was raised by the device (relevant when the IRQ is shared).
 * Next, it masks the device interrupts.
 *
 * Note:
 * xge_hal_device_begin_irq() does not flush MMIO writes through the
 * bridge. Therefore, two back-to-back interrupts are potentially possible.
 * It is the responsibility of the ULD to make sure that only one
 * xge_hal_device_continue_irq() runs at a time.
 *
 * Returns: 0, if the interrupt is not "ours" (note that in this case the
 * device remains enabled).
 * Otherwise, xge_hal_device_begin_irq() returns the 64-bit general adapter
 * status.
 * See also: xge_hal_device_handle_irq()
 */
__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE xge_hal_status_e
xge_hal_device_begin_irq(xge_hal_device_t *hldev, u64 *reason)
{
        u64 val64;
        xge_hal_pci_bar0_t *isrbar0 = (xge_hal_pci_bar0_t *)hldev->isrbar0;

        hldev->stats.sw_dev_info_stats.total_intr_cnt++;

        val64 = xge_os_pio_mem_read64(hldev->pdev,
                        hldev->regh0, &isrbar0->general_int_status);
        if (xge_os_unlikely(!val64)) {
                /* not an Xframe interrupt */
                hldev->stats.sw_dev_info_stats.not_xge_intr_cnt++;
                *reason = 0;
                return XGE_HAL_ERR_WRONG_IRQ;
        }

        if (xge_os_unlikely(val64 == XGE_HAL_ALL_FOXES)) {
                u64 adapter_status =
                        xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
                                &isrbar0->adapter_status);
                if (adapter_status == XGE_HAL_ALL_FOXES) {
                        (void) xge_queue_produce(hldev->queueh,
                                XGE_HAL_EVENT_SLOT_FREEZE,
                                hldev,
                                1, /* critical: slot freeze */
                                sizeof(u64),
                                (void *)&adapter_status);
                        *reason = 0;
                        return XGE_HAL_ERR_CRITICAL;
                }
        }

        *reason = val64;
        /* separate fast path, i.e. no errors */
        if (val64 & XGE_HAL_GEN_INTR_RXTRAFFIC) {
                hldev->stats.sw_dev_info_stats.rx_traffic_intr_cnt++;
                return XGE_HAL_OK;
        }
        if (val64 & XGE_HAL_GEN_INTR_TXTRAFFIC) {
                hldev->stats.sw_dev_info_stats.tx_traffic_intr_cnt++;
                return XGE_HAL_OK;
        }

        hldev->stats.sw_dev_info_stats.not_traffic_intr_cnt++;
        if (xge_os_unlikely(val64 & XGE_HAL_GEN_INTR_TXPIC)) {
                xge_hal_status_e status;
                hldev->stats.sw_dev_info_stats.txpic_intr_cnt++;
                status = __hal_device_handle_txpic(hldev, val64);
                if (status != XGE_HAL_OK) {
                        return status;
                }
        }

        if (xge_os_unlikely(val64 & XGE_HAL_GEN_INTR_TXDMA)) {
                xge_hal_status_e status;
                hldev->stats.sw_dev_info_stats.txdma_intr_cnt++;
                status = __hal_device_handle_txdma(hldev, val64);
                if (status != XGE_HAL_OK) {
                        return status;
                }
        }

        if (xge_os_unlikely(val64 & XGE_HAL_GEN_INTR_TXMAC)) {
                xge_hal_status_e status;
                hldev->stats.sw_dev_info_stats.txmac_intr_cnt++;
                status = __hal_device_handle_txmac(hldev, val64);
                if (status != XGE_HAL_OK) {
                        return status;
                }
        }

        if (xge_os_unlikely(val64 & XGE_HAL_GEN_INTR_TXXGXS)) {
                xge_hal_status_e status;
                hldev->stats.sw_dev_info_stats.txxgxs_intr_cnt++;
                status = __hal_device_handle_txxgxs(hldev, val64);
                if (status != XGE_HAL_OK) {
                        return status;
                }
        }

        if (xge_os_unlikely(val64 & XGE_HAL_GEN_INTR_RXPIC)) {
                xge_hal_status_e status;
                hldev->stats.sw_dev_info_stats.rxpic_intr_cnt++;
                status = __hal_device_handle_rxpic(hldev, val64);
                if (status != XGE_HAL_OK) {
                        return status;
                }
        }

        if (xge_os_unlikely(val64 & XGE_HAL_GEN_INTR_RXDMA)) {
                xge_hal_status_e status;
                hldev->stats.sw_dev_info_stats.rxdma_intr_cnt++;
                status = __hal_device_handle_rxdma(hldev, val64);
                if (status != XGE_HAL_OK) {
                        return status;
                }
        }

        if (xge_os_unlikely(val64 & XGE_HAL_GEN_INTR_RXMAC)) {
                xge_hal_status_e status;
                hldev->stats.sw_dev_info_stats.rxmac_intr_cnt++;
                status = __hal_device_handle_rxmac(hldev, val64);
                if (status != XGE_HAL_OK) {
                        return status;
                }
        }

        if (xge_os_unlikely(val64 & XGE_HAL_GEN_INTR_RXXGXS)) {
                xge_hal_status_e status;
                hldev->stats.sw_dev_info_stats.rxxgxs_intr_cnt++;
                status = __hal_device_handle_rxxgxs(hldev, val64);
                if (status != XGE_HAL_OK) {
                        return status;
                }
        }

        if (xge_os_unlikely(val64 & XGE_HAL_GEN_INTR_MC)) {
                xge_hal_status_e status;
                hldev->stats.sw_dev_info_stats.mc_intr_cnt++;
                status = __hal_device_handle_mc(hldev, val64);
                if (status != XGE_HAL_OK) {
                        return status;
                }
        }

        return XGE_HAL_OK;
}

/**
 * xge_hal_device_clear_rx - Acknowledge (that is, clear) the
 * condition that has caused the Rx interrupt.
 * @hldev: HAL device handle.
 *
 * Acknowledge (that is, clear) the condition that has caused
 * the Rx interrupt.
 * See also: xge_hal_device_begin_irq(), xge_hal_device_continue_irq(),
 * xge_hal_device_clear_tx(), xge_hal_device_mask_rx().
 */
__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void
xge_hal_device_clear_rx(xge_hal_device_t *hldev)
{
        xge_hal_pci_bar0_t *isrbar0 = (xge_hal_pci_bar0_t *)hldev->isrbar0;

        xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
                        0xFFFFFFFFFFFFFFFFULL,
                        &isrbar0->rx_traffic_int);
}
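/*
 * Interrupt-path sketch (illustrative): how the primitives above and
 * below combine in a ULD line-interrupt handler.
 * xge_hal_device_handle_irq() further down packages this same
 * sequence; xgell_isr() is a hypothetical name.
 *
 *	static void
 *	xgell_isr(xge_hal_device_t *hldev)
 *	{
 *		u64 reason;
 *
 *		xge_hal_device_mask_all(hldev);
 *		if (xge_hal_device_begin_irq(hldev, &reason) != XGE_HAL_OK) {
 *			xge_hal_device_unmask_all(hldev);
 *			return;		(shared IRQ: not ours)
 *		}
 *		if (reason & XGE_HAL_GEN_INTR_RXTRAFFIC)
 *			xge_hal_device_clear_rx(hldev);
 *		(void) xge_hal_device_continue_irq(hldev);
 *		xge_hal_device_clear_tx(hldev);
 *		xge_hal_device_unmask_all(hldev);
 *	}
 */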
/**
 * xge_hal_device_clear_tx - Acknowledge (that is, clear) the
 * condition that has caused the Tx interrupt.
 * @hldev: HAL device handle.
 *
 * Acknowledge (that is, clear) the condition that has caused
 * the Tx interrupt.
 * See also: xge_hal_device_begin_irq(), xge_hal_device_continue_irq(),
 * xge_hal_device_clear_rx(), xge_hal_device_mask_tx().
 */
__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void
xge_hal_device_clear_tx(xge_hal_device_t *hldev)
{
        xge_hal_pci_bar0_t *isrbar0 = (xge_hal_pci_bar0_t *)hldev->isrbar0;

        xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
                        0xFFFFFFFFFFFFFFFFULL,
                        &isrbar0->tx_traffic_int);
}

/**
 * xge_hal_device_poll_rx_channel - Poll Rx channel for completed
 * descriptors and process the same.
 * @channel: HAL channel.
 * @got_rx: Buffer to return the flag, set if a receive interrupt occurred.
 *
 * The function polls the Rx channel for the completed descriptors and calls
 * the upper-layer driver (ULD) via the supplied completion callback.
 *
 * Returns: XGE_HAL_OK, if the polling completed successfully.
 * XGE_HAL_COMPLETIONS_REMAIN: There are still more completed
 * descriptors available which are yet to be processed.
 *
 * See also: xge_hal_device_poll_tx_channel()
 */
__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE xge_hal_status_e
xge_hal_device_poll_rx_channel(xge_hal_channel_t *channel, int *got_rx)
{
        xge_hal_status_e ret = XGE_HAL_OK;
        xge_hal_dtr_h first_dtrh;
        xge_hal_device_t *hldev = (xge_hal_device_t *)channel->devh;
        u8 t_code;
        int got_bytes;

        /* for each opened rx channel */
        got_bytes = *got_rx = 0;
        ((xge_hal_ring_t *)channel)->cmpl_cnt = 0;
        channel->poll_bytes = 0;
        if ((ret = xge_hal_ring_dtr_next_completed(channel, &first_dtrh,
                        &t_code)) == XGE_HAL_OK) {
                if (channel->callback(channel, first_dtrh,
                                t_code, channel->userdata) != XGE_HAL_OK) {
                        (*got_rx) += ((xge_hal_ring_t *)channel)->cmpl_cnt + 1;
                        got_bytes += channel->poll_bytes + 1;
                        ret = XGE_HAL_COMPLETIONS_REMAIN;
                } else {
                        (*got_rx) += ((xge_hal_ring_t *)channel)->cmpl_cnt + 1;
                        got_bytes += channel->poll_bytes + 1;
                }
        }

        if (*got_rx) {
                hldev->irq_workload_rxd[channel->post_qid] += *got_rx;
                hldev->irq_workload_rxcnt[channel->post_qid]++;
        }
        hldev->irq_workload_rxlen[channel->post_qid] += got_bytes;

        return ret;
}
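/*
 * Sketch (illustrative): draining one Rx ring with a ULD-chosen budget.
 * Each call processes one batch of completions and reports, via
 * *got_rx, whether anything was processed; "budget" is a hypothetical
 * bound chosen by the ULD, not a HAL parameter.
 *
 *	int got_rx, budget = 64;
 *
 *	do {
 *		got_rx = 0;
 *		(void) xge_hal_device_poll_rx_channel(channel, &got_rx);
 *	} while (got_rx && --budget > 0);
 */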
/**
 * xge_hal_device_poll_tx_channel - Poll Tx channel for completed
 * descriptors and process the same.
 * @channel: HAL channel.
 * @got_tx: Buffer to return the flag, set if a transmit interrupt occurred.
 *
 * The function polls the Tx channel for the completed descriptors and calls
 * the upper-layer driver (ULD) via the supplied completion callback.
 *
 * Returns: XGE_HAL_OK, if the polling completed successfully.
 * XGE_HAL_COMPLETIONS_REMAIN: There are still more completed
 * descriptors available which are yet to be processed.
 *
 * See also: xge_hal_device_poll_rx_channel().
 */
__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE xge_hal_status_e
xge_hal_device_poll_tx_channel(xge_hal_channel_t *channel, int *got_tx)
{
        xge_hal_dtr_h first_dtrh;
        xge_hal_device_t *hldev = (xge_hal_device_t *)channel->devh;
        u8 t_code;
        int got_bytes;

        /* for each opened tx channel */
        got_bytes = *got_tx = 0;
        channel->poll_bytes = 0;
        if (xge_hal_fifo_dtr_next_completed(channel, &first_dtrh,
                        &t_code) == XGE_HAL_OK) {
                if (channel->callback(channel, first_dtrh,
                                t_code, channel->userdata) != XGE_HAL_OK) {
                        (*got_tx)++;
                        got_bytes += channel->poll_bytes + 1;
                        return XGE_HAL_COMPLETIONS_REMAIN;
                }
                (*got_tx)++;
                got_bytes += channel->poll_bytes + 1;
        }

        if (*got_tx) {
                hldev->irq_workload_txd[channel->post_qid] += *got_tx;
                hldev->irq_workload_txcnt[channel->post_qid]++;
        }
        hldev->irq_workload_txlen[channel->post_qid] += got_bytes;

        return XGE_HAL_OK;
}

/**
 * xge_hal_device_poll_rx_channels - Poll Rx channels for completed
 * descriptors and process the same.
 * @hldev: HAL device handle.
 * @got_rx: Buffer to return the flag, set if receive completions are ready.
 *
 * The function polls the Rx channels for the completed descriptors and calls
 * the upper-layer driver (ULD) via the supplied completion callback.
 *
 * Returns: XGE_HAL_OK, if the polling completed successfully.
 * XGE_HAL_COMPLETIONS_REMAIN: There are still more completed
 * descriptors available which are yet to be processed.
 *
 * See also: xge_hal_device_poll_tx_channels(), xge_hal_device_continue_irq().
 */
__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE xge_hal_status_e
xge_hal_device_poll_rx_channels(xge_hal_device_t *hldev, int *got_rx)
{
        xge_list_t *item;
        xge_hal_channel_t *channel;

        /* for each opened rx channel */
        xge_list_for_each(item, &hldev->ring_channels) {
                if (hldev->terminating)
                        return XGE_HAL_OK;
                channel = xge_container_of(item, xge_hal_channel_t, item);
                (void) xge_hal_device_poll_rx_channel(channel, got_rx);
        }

        return XGE_HAL_OK;
}

/**
 * xge_hal_device_poll_tx_channels - Poll Tx channels for completed
 * descriptors and process the same.
 * @hldev: HAL device handle.
 * @got_tx: Buffer to return the flag, set if transmit completions are ready.
 *
 * The function polls the Tx channels for the completed descriptors and calls
 * the upper-layer driver (ULD) via the supplied completion callback.
 *
 * Returns: XGE_HAL_OK, if the polling completed successfully.
 * XGE_HAL_COMPLETIONS_REMAIN: There are still more completed
 * descriptors available which are yet to be processed.
 *
 * See also: xge_hal_device_poll_rx_channels(), xge_hal_device_continue_irq().
 */
__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE xge_hal_status_e
xge_hal_device_poll_tx_channels(xge_hal_device_t *hldev, int *got_tx)
{
        xge_list_t *item;
        xge_hal_channel_t *channel;

        /* for each opened tx channel */
        xge_list_for_each(item, &hldev->fifo_channels) {
                if (hldev->terminating)
                        return XGE_HAL_OK;
                channel = xge_container_of(item, xge_hal_channel_t, item);
                (void) xge_hal_device_poll_tx_channel(channel, got_tx);
        }

        return XGE_HAL_OK;
}
/**
 * xge_hal_device_mask_tx - Mask Tx interrupts.
 * @hldev: HAL device handle.
 *
 * Mask Tx device interrupts.
 *
 * See also: xge_hal_device_unmask_tx(), xge_hal_device_mask_rx(),
 * xge_hal_device_clear_tx().
 */
__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void
xge_hal_device_mask_tx(xge_hal_device_t *hldev)
{
        xge_hal_pci_bar0_t *isrbar0 = (xge_hal_pci_bar0_t *)hldev->isrbar0;

        xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
                        0xFFFFFFFFFFFFFFFFULL,
                        &isrbar0->tx_traffic_mask);
}

/**
 * xge_hal_device_mask_rx - Mask Rx interrupts.
 * @hldev: HAL device handle.
 *
 * Mask Rx device interrupts.
 *
 * See also: xge_hal_device_unmask_rx(), xge_hal_device_mask_tx(),
 * xge_hal_device_clear_rx().
 */
__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void
xge_hal_device_mask_rx(xge_hal_device_t *hldev)
{
        xge_hal_pci_bar0_t *isrbar0 = (xge_hal_pci_bar0_t *)hldev->isrbar0;

        xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
                        0xFFFFFFFFFFFFFFFFULL,
                        &isrbar0->rx_traffic_mask);
}

/**
 * xge_hal_device_mask_all - Mask all device interrupts.
 * @hldev: HAL device handle.
 *
 * Mask all device interrupts.
 *
 * See also: xge_hal_device_unmask_all()
 */
__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void
xge_hal_device_mask_all(xge_hal_device_t *hldev)
{
        xge_hal_pci_bar0_t *isrbar0 = (xge_hal_pci_bar0_t *)hldev->isrbar0;

        xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
                        0xFFFFFFFFFFFFFFFFULL,
                        &isrbar0->general_int_mask);
}

/**
 * xge_hal_device_unmask_tx - Unmask Tx interrupts.
 * @hldev: HAL device handle.
 *
 * Unmask Tx device interrupts.
 *
 * See also: xge_hal_device_mask_tx(), xge_hal_device_clear_tx().
 */
__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void
xge_hal_device_unmask_tx(xge_hal_device_t *hldev)
{
        xge_hal_pci_bar0_t *isrbar0 = (xge_hal_pci_bar0_t *)hldev->isrbar0;

        xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
                        0x0ULL,
                        &isrbar0->tx_traffic_mask);
}

/**
 * xge_hal_device_unmask_rx - Unmask Rx interrupts.
 * @hldev: HAL device handle.
 *
 * Unmask Rx device interrupts.
 *
 * See also: xge_hal_device_mask_rx(), xge_hal_device_clear_rx().
 */
__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void
xge_hal_device_unmask_rx(xge_hal_device_t *hldev)
{
        xge_hal_pci_bar0_t *isrbar0 = (xge_hal_pci_bar0_t *)hldev->isrbar0;

        xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
                        0x0ULL,
                        &isrbar0->rx_traffic_mask);
}

/**
 * xge_hal_device_unmask_all - Unmask all device interrupts.
 * @hldev: HAL device handle.
 *
 * Unmask all device interrupts.
 *
 * See also: xge_hal_device_mask_all()
 */
__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void
xge_hal_device_unmask_all(xge_hal_device_t *hldev)
{
        xge_hal_pci_bar0_t *isrbar0 = (xge_hal_pci_bar0_t *)hldev->isrbar0;

        xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
                        0x0ULL,
                        &isrbar0->general_int_mask);
}
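/*
 * Sketch (illustrative): the mask/unmask primitives above are meant to
 * bracket deferred completion processing, e.g. when the ULD moves the
 * work out of hard-interrupt context. The scheduling step is
 * hypothetical and OS-specific.
 *
 *	xge_hal_device_mask_all(hldev);		(in the ISR)
 *	...					(schedule a bottom half)
 *
 *	int got_rx = 0, got_tx = 0;		(in the bottom half)
 *	(void) xge_hal_device_poll_rx_channels(hldev, &got_rx);
 *	(void) xge_hal_device_poll_tx_channels(hldev, &got_tx);
 *	xge_hal_device_unmask_all(hldev);	(re-enable interrupts)
 */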
/**
 * xge_hal_device_continue_irq - Continue handling IRQ: process all
 * completed descriptors.
 * @hldev: HAL device handle.
 *
 * Process completed descriptors on all open channels.
 *
 * The xge_hal_device_continue_irq() walks all open channels
 * and calls the upper-layer driver (ULD) via the supplied completion
 * callback. Note that the completion callback is specified at channel open
 * time, see xge_hal_channel_open().
 *
 * Note that xge_hal_device_continue_irq() is part of the _fast_ path.
 * To optimize the processing, the function does _not_ check for
 * errors and alarms. The latter is done in a polling fashion, via
 * xge_hal_device_poll().
 *
 * Returns: XGE_HAL_OK.
 *
 * See also: xge_hal_device_handle_irq(), xge_hal_device_poll(),
 * xge_hal_ring_dtr_next_completed(),
 * xge_hal_fifo_dtr_next_completed(), xge_hal_channel_callback_f{}.
 */
__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE xge_hal_status_e
xge_hal_device_continue_irq(xge_hal_device_t *hldev)
{
        int got_rx = 1, got_tx = 1;
        int isr_polling_cnt = hldev->config.isr_polling_cnt;
        int count = 0;

        do {
                if (got_rx)
                        (void) xge_hal_device_poll_rx_channels(hldev, &got_rx);
                if (got_tx && hldev->tti_enabled)
                        (void) xge_hal_device_poll_tx_channels(hldev, &got_tx);

                if (!got_rx && !got_tx)
                        break;

                count += (got_rx + got_tx);
        } while (isr_polling_cnt--);

        if (!count)
                hldev->stats.sw_dev_info_stats.not_traffic_intr_cnt++;

        return XGE_HAL_OK;
}

/**
 * xge_hal_device_handle_irq - Handle device IRQ.
 * @hldev: HAL device handle.
 *
 * Perform the complete handling of the line interrupt. The function
 * performs two calls.
 * First it uses xge_hal_device_begin_irq() to check the reason for
 * the interrupt and mask the device interrupts.
 * Second, it calls xge_hal_device_continue_irq() to process all
 * completed descriptors and re-enable the interrupts.
 *
 * Returns: XGE_HAL_OK - success;
 * XGE_HAL_ERR_WRONG_IRQ - (shared) IRQ produced by another device.
 *
 * See also: xge_hal_device_begin_irq(), xge_hal_device_continue_irq().
 */
__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE xge_hal_status_e
xge_hal_device_handle_irq(xge_hal_device_t *hldev)
{
        u64 reason;
        xge_hal_status_e status;

        xge_hal_device_mask_all(hldev);

        status = xge_hal_device_begin_irq(hldev, &reason);
        if (status != XGE_HAL_OK) {
                xge_hal_device_unmask_all(hldev);
                return status;
        }

        if (reason & XGE_HAL_GEN_INTR_RXTRAFFIC) {
                xge_hal_device_clear_rx(hldev);
        }

        status = xge_hal_device_continue_irq(hldev);

        xge_hal_device_clear_tx(hldev);

        xge_hal_device_unmask_all(hldev);

        return status;
}

#if defined(XGE_HAL_CONFIG_LRO)

__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL int
__hal_lro_check_for_session_match(lro_t *lro, tcplro_t *tcp, iplro_t *ip)
{
        /* Match source address field */
        if (lro->ip_hdr->saddr != ip->saddr)
                return XGE_HAL_FAIL;

        /* Match destination address field */
        if (lro->ip_hdr->daddr != ip->daddr)
                return XGE_HAL_FAIL;

        /* Match source port field */
        if (lro->tcp_hdr->source != tcp->source)
                return XGE_HAL_FAIL;

        /* Match destination port field */
        if (lro->tcp_hdr->dest != tcp->dest)
                return XGE_HAL_FAIL;

        return XGE_HAL_OK;
}

/*
 * __hal_tcp_seg_len: Find the TCP segment length.
 * @ip: ip header.
 * @tcp: tcp header.
 * Returns: TCP segment length.
 */
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL u16
__hal_tcp_seg_len(iplro_t *ip, tcplro_t *tcp)
{
        u16 ret;

        ret = (xge_os_ntohs(ip->tot_len) -
            ((ip->version_ihl & 0x0F) << 2) -
            ((tcp->doff_res) >> 2));
        return (ret);
}
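/*
 * Worked example of the computation above (illustrative values):
 * ip->tot_len = 1500; IHL = 5 words, so (version_ihl & 0x0F) << 2 = 20
 * bytes; TCP data offset = 8 words, i.e. doff_res = 0x80 and
 * doff_res >> 2 = 32 bytes. The segment (payload) length is then
 * 1500 - 20 - 32 = 1448 bytes.
 */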
/*
 * __hal_ip_lro_capable: Finds whether the IP frame is LRO capable.
 * @ip: ip header.
 * @ext_info: descriptor info.
 */
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL xge_hal_status_e
__hal_ip_lro_capable(iplro_t *ip,
                xge_hal_dtr_info_t *ext_info)
{
#ifdef XGE_LL_DEBUG_DUMP_PKT
        {
                u16 i;
                u8 ch, *iph = (u8 *)ip;

                xge_debug_ring(XGE_TRACE, "Dump Ip:");
                for (i = 0; i < 40; i++) {
                        ch = ntohs(*((u8 *)(iph + i)));
                        xge_os_printf("i:%d %02x, ", i, ch);
                }
        }
#endif

        if (ip->version_ihl != IP_FAST_PATH_HDR_MASK) {
                xge_debug_ring(XGE_ERR, "iphdr != 45: %d", ip->version_ihl);
                return XGE_HAL_FAIL;
        }

        if (ext_info->proto & XGE_HAL_FRAME_PROTO_IP_FRAGMENTED) {
                xge_debug_ring(XGE_ERR, "IP fragmented");
                return XGE_HAL_FAIL;
        }

        return XGE_HAL_OK;
}

/*
 * __hal_tcp_lro_capable: Finds whether the TCP frame is LRO capable.
 * @ip: ip header.
 * @tcp: tcp header.
 */
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL xge_hal_status_e
__hal_tcp_lro_capable(iplro_t *ip, tcplro_t *tcp, lro_t *lro, int *ts_off)
{
#ifdef XGE_LL_DEBUG_DUMP_PKT
        {
                u8 ch;
                u16 i;

                xge_debug_ring(XGE_TRACE, "Dump Tcp:");
                for (i = 0; i < 20; i++) {
                        ch = ntohs(*((u8 *)((u8 *)tcp + i)));
                        xge_os_printf("i:%d %02x, ", i, ch);
                }
        }
#endif
        if ((TCP_FAST_PATH_HDR_MASK2 != tcp->ctrl) &&
            (TCP_FAST_PATH_HDR_MASK3 != tcp->ctrl))
                goto _exit_fail;

        *ts_off = -1;
        if (TCP_FAST_PATH_HDR_MASK1 != tcp->doff_res) {
                u16 tcp_hdr_len = tcp->doff_res >> 2; /* TCP header len */
                u16 off = 20; /* Start of tcp options */
                int i, diff;

                /* Can the packet carry a timestamp option at all? */
                if (tcp_hdr_len < 32) {
                        /*
                         * If the session is not opened, we can consider
                         * this packet for LRO.
                         */
                        if (lro == NULL)
                                return XGE_HAL_OK;

                        goto _exit_fail;
                }

                /* Ignore No-operation (0x1) options */
                while (((u8 *)tcp)[off] == 0x1)
                        off++;

                /* Next option == Timestamp */
                if (((u8 *)tcp)[off] != 0x8) {
                        /*
                         * If the session is not opened, we can consider
                         * this packet for LRO.
                         */
                        if (lro == NULL)
                                return XGE_HAL_OK;

                        goto _exit_fail;
                }

                *ts_off = off;
                if (lro == NULL)
                        return XGE_HAL_OK;

                /*
                 * Now the session is opened. If the LRO frame doesn't
                 * have a timestamp, we cannot consider the current
                 * packet for LRO.
                 */
                if (lro->ts_off == -1) {
                        xge_debug_ring(XGE_ERR, "Pkt received with time stamp after session opened with no time stamp : %02x %02x", tcp->doff_res, tcp->ctrl);
                        return XGE_HAL_FAIL;
                }

                /*
                 * If the difference is greater than three, then there are
                 * more options possible.
                 * Otherwise, there are two cases:
                 * case 1: remaining are padding bytes.
                 * case 2: remaining can contain options or padding.
                 */
                off += ((u8 *)tcp)[off + 1];
                diff = tcp_hdr_len - off;
                if (diff > 3) {
                        /*
                         * Probably contains more options.
                         */
                        xge_debug_ring(XGE_ERR, "tcphdr not fastpath : pkt received with tcp options in addition to time stamp after the session is opened %02x %02x ", tcp->doff_res, tcp->ctrl);
                        return XGE_HAL_FAIL;
                }

                for (i = 0; i < diff; i++) {
                        u8 byte = ((u8 *)tcp)[off + i];

                        /* Ignore padding (0x0) and No-operation (0x1) */
                        if ((byte == 0x0) || (byte == 0x1))
                                continue;
                        xge_debug_ring(XGE_ERR, "tcphdr not fastpath : pkt received with tcp options in addition to time stamp after the session is opened %02x %02x ", tcp->doff_res, tcp->ctrl);
                        return XGE_HAL_FAIL;
                }

                /*
                 * Update the timestamp of the LRO frame.
                 */
                xge_os_memcpy(((char *)lro->tcp_hdr + lro->ts_off + 2),
                    (char *)((char *)tcp + (*ts_off) + 2), 8);
        }

        return XGE_HAL_OK;

_exit_fail:
        xge_debug_ring(XGE_TRACE, "tcphdr not fastpath %02x %02x", tcp->doff_res, tcp->ctrl);
        return XGE_HAL_FAIL;
}
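/*
 * Example of the option layout the check above accepts (illustrative):
 * a 32-byte TCP header (doff_res = 0x80) whose option area carries the
 * standard "NOP, NOP, Timestamp" encoding:
 *
 *	offset 20: 0x01				NOP, skipped
 *	offset 21: 0x01				NOP, skipped
 *	offset 22: 0x08 0x0a TSval TSecr	kind 8, length 10
 *
 * This yields *ts_off = 22; off then advances by 10 to 32, diff = 0,
 * so the header qualifies and the 8 timestamp bytes at ts_off + 2 are
 * copied into the LRO session's stored header.
 */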
/*
 * __hal_lro_capable: Finds whether the frame is LRO capable.
 * @buffer: Ethernet frame.
 * @ip: ip frame.
 * @tcp: tcp frame.
 * @ext_info: Descriptor info.
 */
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL xge_hal_status_e
__hal_lro_capable(u8 *buffer,
                iplro_t **ip,
                tcplro_t **tcp,
                xge_hal_dtr_info_t *ext_info)
{
        u8 ip_off, ip_length;

        if (!(ext_info->proto & XGE_HAL_FRAME_PROTO_TCP)) {
                xge_debug_ring(XGE_ERR, "Can't do LRO %d", ext_info->proto);
                return XGE_HAL_FAIL;
        }

        if (!*ip) {
#ifdef XGE_LL_DEBUG_DUMP_PKT
                {
                        u8 ch;
                        u16 i;

                        xge_os_printf("Dump Eth:");
                        for (i = 0; i < 60; i++) {
                                ch = ntohs(*((u8 *)(buffer + i)));
                                xge_os_printf("i:%d %02x, ", i, ch);
                        }
                }
#endif

                switch (ext_info->frame) {
                case XGE_HAL_FRAME_TYPE_DIX:
                        ip_off = XGE_HAL_HEADER_ETHERNET_II_802_3_SIZE;
                        break;
                case XGE_HAL_FRAME_TYPE_LLC:
                        ip_off = (XGE_HAL_HEADER_ETHERNET_II_802_3_SIZE +
                                XGE_HAL_HEADER_802_2_SIZE);
                        break;
                case XGE_HAL_FRAME_TYPE_SNAP:
                        ip_off = (XGE_HAL_HEADER_ETHERNET_II_802_3_SIZE +
                                XGE_HAL_HEADER_SNAP_SIZE);
                        break;
                default: /* XGE_HAL_FRAME_TYPE_IPX, etc. */
                        return XGE_HAL_FAIL;
                }

                if (ext_info->proto & XGE_HAL_FRAME_PROTO_VLAN_TAGGED) {
                        ip_off += XGE_HAL_HEADER_VLAN_SIZE;
                }

                /* Grab the ip and tcp headers */
                *ip = (iplro_t *)((char *)buffer + ip_off);
        } /* !*ip */

        ip_length = (u8)((*ip)->version_ihl & 0x0F);
        ip_length = ip_length << 2;
        *tcp = (tcplro_t *)((char *)*ip + ip_length);

        xge_debug_ring(XGE_TRACE, "ip_length:%d ip:"XGE_OS_LLXFMT
            " tcp:"XGE_OS_LLXFMT"", (int)ip_length,
            (unsigned long long)(ulong_t)*ip, (unsigned long long)(ulong_t)*tcp);

        return XGE_HAL_OK;
}

/*
 * __hal_open_lro_session: Open a new LRO session.
 * @buffer: Ethernet frame.
 * @ip: ip header.
 * @tcp: tcp header.
 * @lro: lro pointer
 * @hldev: Hal context.
 * @ring_lro: LRO descriptor per rx ring.
 * @slot: Bucket no.
 * @tcp_seg_len: Length of tcp segment.
 * @ts_off: time stamp offset in the packet.
 */
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL void
__hal_open_lro_session(u8 *buffer, iplro_t *ip, tcplro_t *tcp, lro_t **lro,
                xge_hal_device_t *hldev, xge_hal_lro_desc_t *ring_lro, int slot,
                u32 tcp_seg_len, int ts_off)
{
        lro_t *lro_new = &ring_lro->lro_pool[slot];

        lro_new->in_use = 1;
        lro_new->ll_hdr = buffer;
        lro_new->ip_hdr = ip;
        lro_new->tcp_hdr = tcp;
        lro_new->tcp_next_seq_num = tcp_seg_len + xge_os_ntohl(tcp->seq);
        lro_new->tcp_seq_num = tcp->seq;
        lro_new->tcp_ack_num = tcp->ack_seq;
        lro_new->sg_num = 1;
        lro_new->total_length = xge_os_ntohs(ip->tot_len);
        lro_new->frags_len = 0;
        lro_new->ts_off = ts_off;

        hldev->stats.sw_dev_info_stats.tot_frms_lroised++;
        hldev->stats.sw_dev_info_stats.tot_lro_sessions++;

        *lro = ring_lro->lro_recent = lro_new;
}
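/*
 * Example of the initial session state (illustrative, reusing the
 * segment-length numbers above): for a first frame with
 * xge_os_ntohl(tcp->seq) = 1000, tot_len = 1500 and a 1448-byte
 * payload, the new bucket holds tcp_next_seq_num = 1000 + 1448 = 2448,
 * total_length = 1500 and sg_num = 1. A follow-up frame can be
 * appended only if its sequence number equals tcp_next_seq_num; see
 * __hal_get_lro_session() below.
 */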
/*
 * __hal_lro_get_free_slot: Get a free LRO bucket.
 * @ring_lro: LRO descriptor per ring.
 */
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL int
__hal_lro_get_free_slot(xge_hal_lro_desc_t *ring_lro)
{
        int i;

        for (i = 0; i < XGE_HAL_LRO_MAX_BUCKETS; i++) {
                lro_t *lro_temp = &ring_lro->lro_pool[i];

                if (!lro_temp->in_use)
                        return i;
        }
        return -1;
}

/*
 * __hal_get_lro_session: Gets a matching LRO session or creates one.
 * @eth_hdr: Ethernet header.
 * @ip: ip header.
 * @tcp: tcp header.
 * @lro: lro pointer
 * @ext_info: Descriptor info.
 * @hldev: Hal context.
 * @ring_lro: LRO descriptor per rx ring
 * @lro_end3: Valid only when the return code is END_3.
 */
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL xge_hal_status_e
__hal_get_lro_session(u8 *eth_hdr,
                iplro_t *ip,
                tcplro_t *tcp,
                lro_t **lro,
                xge_hal_dtr_info_t *ext_info,
                xge_hal_device_t *hldev,
                xge_hal_lro_desc_t *ring_lro,
                lro_t **lro_end3 /* Valid only when ret=END_3 */)
{
        lro_t *lro_match;
        int i, free_slot = -1;
        u32 tcp_seg_len;
        int ts_off = -1;

        *lro = lro_match = NULL;
        /*
         * Compare the incoming frame with the lro session left from the
         * previous call. There is a good chance that this incoming frame
         * matches the lro session.
         */
        if (ring_lro->lro_recent && ring_lro->lro_recent->in_use) {
                if (__hal_lro_check_for_session_match(ring_lro->lro_recent,
                    tcp, ip) == XGE_HAL_OK)
                        lro_match = ring_lro->lro_recent;
        }

        if (!lro_match) {
                /*
                 * Search the pool of LROs for the session that matches
                 * the incoming frame.
                 */
                for (i = 0; i < XGE_HAL_LRO_MAX_BUCKETS; i++) {
                        lro_t *lro_temp = &ring_lro->lro_pool[i];

                        if (!lro_temp->in_use) {
                                if (free_slot == -1)
                                        free_slot = i;
                                continue;
                        }

                        if (__hal_lro_check_for_session_match(lro_temp, tcp,
                            ip) == XGE_HAL_OK) {
                                lro_match = lro_temp;
                                break;
                        }
                }
        }

        if (lro_match) {
                /*
                 * Matching LRO session found.
                 */
                *lro = lro_match;

                if (lro_match->tcp_next_seq_num != xge_os_ntohl(tcp->seq)) {
                        xge_debug_ring(XGE_ERR, "** retransmit found **");
                        hldev->stats.sw_dev_info_stats.lro_out_of_seq_pkt_cnt++;
                        return XGE_HAL_INF_LRO_END_2;
                }

                if (XGE_HAL_OK != __hal_ip_lro_capable(ip, ext_info)) {
                        return XGE_HAL_INF_LRO_END_2;
                }

                if (XGE_HAL_OK != __hal_tcp_lro_capable(ip, tcp, lro_match,
                    &ts_off)) {
                        /*
                         * Close the current session and open a new
                         * LRO session with this packet, provided that
                         * it has a tcp payload.
                         */
                        tcp_seg_len = __hal_tcp_seg_len(ip, tcp);
                        if (tcp_seg_len == 0) {
                                return XGE_HAL_INF_LRO_END_2;
                        }

                        /* Get a free bucket */
                        free_slot = __hal_lro_get_free_slot(ring_lro);
                        if (free_slot == -1) {
                                return XGE_HAL_INF_LRO_END_2;
                        }

                        /*
                         * Open a new LRO session.
                         */
                        __hal_open_lro_session(eth_hdr, ip, tcp, lro_end3,
                            hldev, ring_lro, free_slot, tcp_seg_len,
                            ts_off);

                        return XGE_HAL_INF_LRO_END_3;
                }
                /*
                 * The frame is good, in-sequence, and can be LRO-ed;
                 * take its (latest) ACK - unless it is a dupack.
                 * Note: to be exact we would need to check the window
                 * size as well.
                 */
                if (lro_match->tcp_ack_num == tcp->ack_seq &&
                    lro_match->tcp_seq_num == tcp->seq) {
                        hldev->stats.sw_dev_info_stats.lro_dup_pkt_cnt++;
                        return XGE_HAL_INF_LRO_END_2;
                }

                lro_match->tcp_seq_num = tcp->seq;
                lro_match->tcp_ack_num = tcp->ack_seq;
                lro_match->frags_len += __hal_tcp_seg_len(ip, tcp);

                ring_lro->lro_recent = lro_match;

                return XGE_HAL_INF_LRO_CONT;
        }

        /* ********** New Session *************** */
        if (free_slot == -1)
                return XGE_HAL_INF_LRO_UNCAPABLE;

        if (XGE_HAL_FAIL == __hal_ip_lro_capable(ip, ext_info))
                return XGE_HAL_INF_LRO_UNCAPABLE;

        if (XGE_HAL_FAIL == __hal_tcp_lro_capable(ip, tcp, NULL, &ts_off))
                return XGE_HAL_INF_LRO_UNCAPABLE;

        xge_debug_ring(XGE_TRACE, "Creating lro session.");

        /*
         * Open an LRO session, provided the packet contains payload.
         */
        tcp_seg_len = __hal_tcp_seg_len(ip, tcp);
        if (tcp_seg_len == 0)
                return XGE_HAL_INF_LRO_UNCAPABLE;

        __hal_open_lro_session(eth_hdr, ip, tcp, lro, hldev, ring_lro,
            free_slot, tcp_seg_len, ts_off);

        return XGE_HAL_INF_LRO_BEGIN;
}

/*
 * __hal_lro_under_optimal_thresh: Finds whether the combined session
 * stays under the configured thresholds.
 * @ip: ip header.
 * @tcp: tcp header.
 * @lro: lro pointer
 * @hldev: Hal context.
 */
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL xge_hal_status_e
__hal_lro_under_optimal_thresh(iplro_t *ip,
                tcplro_t *tcp,
                lro_t *lro,
                xge_hal_device_t *hldev)
{
        if (!lro)
                return XGE_HAL_FAIL;

        if ((lro->total_length + __hal_tcp_seg_len(ip, tcp)) >
            hldev->config.lro_frm_len) {
                xge_debug_ring(XGE_TRACE, "Max LRO frame len exceeded:"
                    "max length %d ", hldev->config.lro_frm_len);
                hldev->stats.sw_dev_info_stats.lro_frm_len_exceed_cnt++;
                return XGE_HAL_FAIL;
        }

        if (lro->sg_num == hldev->config.lro_sg_size) {
                xge_debug_ring(XGE_TRACE, "Max sg count exceeded:"
                    "max sg %d ", hldev->config.lro_sg_size);
                hldev->stats.sw_dev_info_stats.lro_sg_exceed_cnt++;
                return XGE_HAL_FAIL;
        }

        return XGE_HAL_OK;
}

/*
 * __hal_collapse_ip_hdr: Collapses the ip header.
 * @ip: ip header.
 * @tcp: tcp header.
 * @lro: lro pointer
 * @hldev: Hal context.
 */
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL xge_hal_status_e
__hal_collapse_ip_hdr(iplro_t *ip,
                tcplro_t *tcp,
                lro_t *lro,
                xge_hal_device_t *hldev)
{
        lro->total_length += __hal_tcp_seg_len(ip, tcp);

        /* We may have to handle timestamps or more options here. */

        return XGE_HAL_OK;
}

/*
 * __hal_collapse_tcp_hdr: Collapses the tcp header.
 * @ip: ip header.
 * @tcp: tcp header.
 * @lro: lro pointer
 * @hldev: Hal context.
 */
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL xge_hal_status_e
__hal_collapse_tcp_hdr(iplro_t *ip,
                tcplro_t *tcp,
                lro_t *lro,
                xge_hal_device_t *hldev)
{
        lro->tcp_next_seq_num += __hal_tcp_seg_len(ip, tcp);
        return XGE_HAL_OK;
}
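/*
 * Sketch of what __hal_append_lro() below hands back to the LL driver
 * (illustrative): after the headers are collapsed, *tcp is advanced
 * past the TCP header so the caller sees only the payload that needs
 * to be chained onto the session. "frame_tcp_hdr" is a hypothetical
 * input.
 *
 *	tcplro_t *tcp = frame_tcp_hdr;
 *	u32 seg_len;
 *
 *	(void) __hal_append_lro(ip, &tcp, &seg_len, lro, hldev);
 *	(tcp now points at the payload and seg_len is its length; the
 *	LL driver links these bytes into the accumulated buffer chain)
 */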
/*
 * __hal_append_lro: Appends a new frame to the existing LRO session.
 * @ip: ip header.
 * @tcp: IN tcp header, OUT tcp payload.
 * @seg_len: tcp payload length.
 * @lro: lro pointer
 * @hldev: Hal context.
 */
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL xge_hal_status_e
__hal_append_lro(iplro_t *ip,
                tcplro_t **tcp,
                u32 *seg_len,
                lro_t *lro,
                xge_hal_device_t *hldev)
{
        (void) __hal_collapse_ip_hdr(ip, *tcp, lro, hldev);
        (void) __hal_collapse_tcp_hdr(ip, *tcp, lro, hldev);
        /*
         * Updating the mbuf chain is done in the LL driver. On
         * successfully appending a new frame to the session, the HAL
         * returns to the LL driver the tcp payload pointer and the tcp
         * payload length, along with the "frame appended" return code.
         */

        lro->sg_num++;
        *seg_len = __hal_tcp_seg_len(ip, *tcp);
        *tcp = (tcplro_t *)((char *)*tcp + (((*tcp)->doff_res) >> 2));

        return XGE_HAL_OK;
}

/**
 * xge_hal_lro_process_rx - LRO a given frame.
 * @ring: rx ring number
 * @eth_hdr: ethernet header.
 * @ip_hdr: ip header (optional)
 * @tcp: tcp header.
 * @seglen: packet length.
 * @p_lro: lro pointer.
 * @ext_info: descriptor info, see xge_hal_dtr_info_t{}.
 * @hldev: HAL device.
 * @lro_end3: for lro_end3 output
 *
 * LRO the newly received frame, i.e. attach it (if possible) to the
 * already accumulated (i.e., already LRO-ed) received frames (if any),
 * to form one super-sized frame for the subsequent processing
 * by the stack.
 */
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL xge_hal_status_e
xge_hal_lro_process_rx(int ring, u8 *eth_hdr, u8 *ip_hdr, tcplro_t **tcp,
                u32 *seglen, lro_t **p_lro,
                xge_hal_dtr_info_t *ext_info, xge_hal_device_t *hldev,
                lro_t **lro_end3)
{
        iplro_t *ip = (iplro_t *)ip_hdr;
        xge_hal_status_e ret;
        lro_t *lro;

        xge_debug_ring(XGE_TRACE, "Entered lro processing.");
        if (XGE_HAL_OK != __hal_lro_capable(eth_hdr, &ip, (tcplro_t **)tcp,
            ext_info))
                return XGE_HAL_INF_LRO_UNCAPABLE;

        /*
         * This function gets a matching LRO session or else
         * creates one and returns it.
         */
        ret = __hal_get_lro_session(eth_hdr, ip, (tcplro_t *)*tcp,
            p_lro, ext_info, hldev, &hldev->lro_desc[ring],
            lro_end3);
        xge_debug_ring(XGE_TRACE, "ret from get_lro:%d ", ret);
        lro = *p_lro;
        if (XGE_HAL_INF_LRO_CONT == ret) {
                if (XGE_HAL_OK == __hal_lro_under_optimal_thresh(ip,
                    (tcplro_t *)*tcp, lro, hldev)) {
                        (void) __hal_append_lro(ip, (tcplro_t **)tcp, seglen,
                            lro, hldev);
                        hldev->stats.sw_dev_info_stats.tot_frms_lroised++;

                        if (lro->sg_num >= hldev->config.lro_sg_size) {
                                hldev->stats.sw_dev_info_stats.lro_sg_exceed_cnt++;
                                ret = XGE_HAL_INF_LRO_END_1;
                        }

                } else
                        ret = XGE_HAL_INF_LRO_END_2;
        }

        /*
         * Since it's time to flush, update the ip header so that
         * the frame can be sent up.
         */
        if ((ret == XGE_HAL_INF_LRO_END_1) ||
            (ret == XGE_HAL_INF_LRO_END_2) ||
            (ret == XGE_HAL_INF_LRO_END_3)) {
                lro->ip_hdr->tot_len = xge_os_htons((*p_lro)->total_length);
                lro->ip_hdr->check = xge_os_htons(0);
                lro->ip_hdr->check = XGE_LL_IP_FAST_CSUM(((u8 *)(lro->ip_hdr)),
                    (lro->ip_hdr->version_ihl & 0x0F));
                lro->tcp_hdr->ack_seq = lro->tcp_ack_num;
        }

        return (ret);
}
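/*
 * Receive-path sketch (illustrative): how an LL driver might act on
 * the return codes of xge_hal_lro_process_rx(). The delivery helpers
 * xgell_rx_deliver() and xgell_rx_deliver_frame() are hypothetical.
 *
 *	switch (xge_hal_lro_process_rx(ring, eth, ip, &tcp, &seglen,
 *	    &lro, ext_info, hldev, &lro_end3)) {
 *	case XGE_HAL_INF_LRO_BEGIN:	(new session opened)
 *	case XGE_HAL_INF_LRO_CONT:	(frame appended; keep accumulating)
 *		break;
 *	case XGE_HAL_INF_LRO_END_1:	(session full: deliver it)
 *	case XGE_HAL_INF_LRO_END_2:	(flush session; pass frame separately)
 *	case XGE_HAL_INF_LRO_END_3:	(flushed; new session in lro_end3)
 *		xgell_rx_deliver(lro);
 *		xge_hal_lro_close_session(lro);
 *		break;
 *	case XGE_HAL_INF_LRO_UNCAPABLE:	(not LRO-able: pass frame as-is)
 *	default:
 *		xgell_rx_deliver_frame(...);
 *		break;
 *	}
 */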
/**
 * xge_hal_accumulate_large_rx - LRO a given frame.
 * @buffer: Ethernet frame.
 * @tcp: tcp header.
 * @seglen: packet length.
 * @p_lro: lro pointer.
 * @ext_info: descriptor info, see xge_hal_dtr_info_t{}.
 * @hldev: HAL device.
 * @lro_end3: for lro_end3 output
 *
 * LRO the newly received frame, i.e. attach it (if possible) to the
 * already accumulated (i.e., already LRO-ed) received frames (if any),
 * to form one super-sized frame for the subsequent processing
 * by the stack.
 */
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL xge_hal_status_e
xge_hal_accumulate_large_rx(u8 *buffer, tcplro_t **tcp, u32 *seglen,
                lro_t **p_lro, xge_hal_dtr_info_t *ext_info,
                xge_hal_device_t *hldev, lro_t **lro_end3)
{
        int ring = 0;
        return xge_hal_lro_process_rx(ring, buffer, NULL, tcp, seglen, p_lro,
            ext_info, hldev, lro_end3);
}

/**
 * xge_hal_lro_close_session - Close an LRO session.
 * @lro: LRO session.
 */
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL void
xge_hal_lro_close_session(lro_t *lro)
{
        lro->in_use = 0;
}

/**
 * xge_hal_lro_next_session - Returns the next LRO session in the list,
 * or NULL if none exists.
 * @hldev: HAL context.
 * @ring: rx ring number.
 */
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL lro_t *
xge_hal_lro_next_session(xge_hal_device_t *hldev, int ring)
{
        xge_hal_lro_desc_t *ring_lro = &hldev->lro_desc[ring];
        int i;
        int start_idx = ring_lro->lro_next_idx;

        for (i = start_idx; i < XGE_HAL_LRO_MAX_BUCKETS; i++) {
                lro_t *lro = &ring_lro->lro_pool[i];

                if (!lro->in_use)
                        continue;

                lro->ip_hdr->tot_len = xge_os_htons(lro->total_length);
                lro->ip_hdr->check = xge_os_htons(0);
                lro->ip_hdr->check = XGE_LL_IP_FAST_CSUM(((u8 *)(lro->ip_hdr)),
                    (lro->ip_hdr->version_ihl & 0x0F));
                ring_lro->lro_next_idx = i + 1;
                return lro;
        }

        ring_lro->lro_next_idx = 0;
        return NULL;
}

__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL lro_t *
xge_hal_lro_get_next_session(xge_hal_device_t *hldev)
{
        int ring = 0; /* assume default ring=0 */
        return xge_hal_lro_next_session(hldev, ring);
}
#endif