/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2002-2005 Neterion, Inc.
 * All right Reserved.
 *
 * FileName : xgehal-device-fp.c
 *
 * Description: HAL device object functionality (fast path)
 *
 * Created: 10 June 2004
 */

#ifdef XGE_DEBUG_FP
#include "xgehal-device.h"
#endif

#include "xgehal-ring.h"
#include "xgehal-fifo.h"

/**
 * xge_hal_device_bar0 - Get BAR0 mapped address.
 * @hldev: HAL device handle.
 *
 * Plain accessor; the mapping is established via
 * xge_hal_device_bar0_set() and is not changed on the fast path.
 *
 * Returns: BAR0 address of the specified device.
 */
__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE char *
xge_hal_device_bar0(xge_hal_device_t *hldev)
{
        return hldev->bar0;
}

/**
 * xge_hal_device_isrbar0 - Get the BAR0 address used at interrupt time.
 * @hldev: HAL device handle.
 *
 * Returns the separately stored BAR0 mapping used while in interrupt
 * context (set via xge_hal_device_isrbar0_set()); it may differ from
 * the regular bar0 mapping depending on how the ULD configured it.
 *
 * Returns: BAR0 address of the specified device.
 */
__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE char *
xge_hal_device_isrbar0(xge_hal_device_t *hldev)
{
        return hldev->isrbar0;
}

/**
 * xge_hal_device_bar1 - Get BAR1 mapped address.
 * @hldev: HAL device handle.
 *
 * Returns: BAR1 address of the specified device.
 */
__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE char *
xge_hal_device_bar1(xge_hal_device_t *hldev)
{
        return hldev->bar1;
}

/**
 * xge_hal_device_bar0_set - Set BAR0 mapped address.
 * @hldev: HAL device handle.
 * @bar0: BAR0 mapped address.
 *
 * Set BAR0 address in the HAL device object.
 */
__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void
xge_hal_device_bar0_set(xge_hal_device_t *hldev, char *bar0)
{
        xge_assert(bar0);
        hldev->bar0 = bar0;
}

/**
 * xge_hal_device_isrbar0_set - Set the BAR0 address used at interrupt time.
 * @hldev: HAL device handle.
 * @isrbar0: BAR0 mapped address for use in interrupt context.
 *
 * Set the BAR0 address that the HAL reads/writes while servicing
 * interrupts (see xge_hal_device_begin_irq() and friends).
 */
__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void
xge_hal_device_isrbar0_set(xge_hal_device_t *hldev, char *isrbar0)
{
        xge_assert(isrbar0);
        hldev->isrbar0 = isrbar0;
}

/**
 * xge_hal_device_bar1_set - Set BAR1 mapped address.
 * @hldev: HAL device handle.
 * @channelh: Channel handle.
 * @bar1: BAR1 mapped address.
 *
 * Set BAR1 address for the given channel.
 */
__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void
xge_hal_device_bar1_set(xge_hal_device_t *hldev, xge_hal_channel_h channelh,
                        char *bar1)
{
        xge_hal_fifo_t *fifo = (xge_hal_fifo_t *)channelh;

        xge_assert(bar1);
        xge_assert(fifo);

        /* Initializing the BAR1 address as the start of
         * the FIFO queue pointer and as a location of FIFO control
         * word. Each FIFO queue gets its own hw_pair slot, offset by
         * post_qid * XGE_HAL_FIFO_HW_PAIR_OFFSET from the BAR1 base. */
        fifo->hw_pair =
                (xge_hal_fifo_hw_pair_t *) (bar1 +
                    (fifo->channel.post_qid * XGE_HAL_FIFO_HW_PAIR_OFFSET));
        hldev->bar1 = bar1;
}


/**
 * xge_hal_device_rev - Get Device revision number.
 * @hldev: HAL device handle.
 *
 * Returns: Device revision number
 */
__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE int
xge_hal_device_rev(xge_hal_device_t *hldev)
{
        return hldev->revision;
}


/**
 * xge_hal_device_begin_irq - Begin IRQ processing.
 * @hldev: HAL device handle.
 * @reason: "Reason" for the interrupt, the value of Xframe's
 *          general_int_status register.
 *
 * The function performs two actions, It first checks whether (shared IRQ) the
 * interrupt was raised by the device. Next, it masks the device interrupts.
 *
 * Note:
 * xge_hal_device_begin_irq() does not flush MMIO writes through the
 * bridge. Therefore, two back-to-back interrupts are potentially possible.
 * It is the responsibility of the ULD to make sure that only one
 * xge_hal_device_continue_irq() runs at a time.
 *
 * Returns: 0, if the interrupt is not "ours" (note that in this case the
 * device remain enabled).
 * Otherwise, xge_hal_device_begin_irq() returns 64bit general adapter
 * status.
 * See also: xge_hal_device_handle_irq()
 */
__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE xge_hal_status_e
xge_hal_device_begin_irq(xge_hal_device_t *hldev, u64 *reason)
{
        u64 val64;
        xge_hal_pci_bar0_t *isrbar0 = (xge_hal_pci_bar0_t *)hldev->isrbar0;

        hldev->stats.sw_dev_info_stats.total_intr_cnt++;

        /* Zero status on a shared line means some other device
         * raised the interrupt. */
        val64 = xge_os_pio_mem_read64(hldev->pdev,
                        hldev->regh0, &isrbar0->general_int_status);
        if (xge_os_unlikely(!val64)) {
                /* not Xframe interrupt */
                hldev->stats.sw_dev_info_stats.not_traffic_intr_cnt++;
                *reason = 0;
                return XGE_HAL_ERR_WRONG_IRQ;
        }

        /* All-ones from a PIO read suggests the adapter dropped off
         * the bus; confirm with a second register before declaring a
         * slot freeze. */
        if (xge_os_unlikely(val64 == XGE_HAL_ALL_FOXES)) {
                u64 adapter_status =
                        xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
                                              &isrbar0->adapter_status);
                if (adapter_status == XGE_HAL_ALL_FOXES) {
                        (void) xge_queue_produce(hldev->queueh,
                                 XGE_HAL_EVENT_SLOT_FREEZE,
                                 hldev,
                                 1, /* critical: slot freeze */
                                 sizeof(u64),
                                 (void*)&adapter_status);
                        *reason = 0;
                        return XGE_HAL_ERR_CRITICAL;
                }
        }

        *reason = val64;

        /* separate fast path, i.e. no errors */
        if (val64 & XGE_HAL_GEN_INTR_RXTRAFFIC) {
                hldev->stats.sw_dev_info_stats.rx_traffic_intr_cnt++;
                return XGE_HAL_OK;
        }
        if (val64 & XGE_HAL_GEN_INTR_TXTRAFFIC) {
                hldev->stats.sw_dev_info_stats.tx_traffic_intr_cnt++;
                return XGE_HAL_OK;
        }

        /* Slow path: dispatch each asserted alarm source to its
         * handler; stop at the first handler reporting a problem. */
        if (xge_os_unlikely(val64 & XGE_HAL_GEN_INTR_TXPIC)) {
                xge_hal_status_e status;
                status = __hal_device_handle_txpic(hldev, val64);
                if (status != XGE_HAL_OK) {
                        return status;
                }
        }

        if (xge_os_unlikely(val64 & XGE_HAL_GEN_INTR_TXDMA)) {
                xge_hal_status_e status;
                status = __hal_device_handle_txdma(hldev, val64);
                if (status != XGE_HAL_OK) {
                        return status;
                }
        }

        if (xge_os_unlikely(val64 & XGE_HAL_GEN_INTR_TXMAC)) {
                xge_hal_status_e status;
                status = __hal_device_handle_txmac(hldev, val64);
                if (status != XGE_HAL_OK) {
                        return status;
                }
        }

        if (xge_os_unlikely(val64 & XGE_HAL_GEN_INTR_TXXGXS)) {
                xge_hal_status_e status;
                status = __hal_device_handle_txxgxs(hldev, val64);
                if (status != XGE_HAL_OK) {
                        return status;
                }
        }

        if (xge_os_unlikely(val64 & XGE_HAL_GEN_INTR_RXPIC)) {
                xge_hal_status_e status;
                status = __hal_device_handle_rxpic(hldev, val64);
                if (status != XGE_HAL_OK) {
                        return status;
                }
        }

        if (xge_os_unlikely(val64 & XGE_HAL_GEN_INTR_RXDMA)) {
                xge_hal_status_e status;
                status = __hal_device_handle_rxdma(hldev, val64);
                if (status != XGE_HAL_OK) {
                        return status;
                }
        }

        if (xge_os_unlikely(val64 & XGE_HAL_GEN_INTR_RXMAC)) {
                xge_hal_status_e status;
                status = __hal_device_handle_rxmac(hldev, val64);
                if (status != XGE_HAL_OK) {
                        return status;
                }
        }

        if (xge_os_unlikely(val64 & XGE_HAL_GEN_INTR_RXXGXS)) {
                xge_hal_status_e status;
                status = __hal_device_handle_rxxgxs(hldev, val64);
                if (status != XGE_HAL_OK) {
                        return status;
                }
        }

        if (xge_os_unlikely(val64 & XGE_HAL_GEN_INTR_MC)) {
                xge_hal_status_e status;
                status = __hal_device_handle_mc(hldev, val64);
                if (status != XGE_HAL_OK) {
                        return status;
                }
        }

        return XGE_HAL_OK;
}

/**
 * xge_hal_device_clear_rx - Acknowledge (that is, clear) the
 * condition that has caused the RX interrupt.
 * @hldev: HAL device handle.
 *
 * Acknowledge (that is, clear) the condition that has caused
 * the Rx interrupt.
 * See also: xge_hal_device_begin_irq(), xge_hal_device_continue_irq(),
 * xge_hal_device_clear_tx(), xge_hal_device_mask_rx().
 */
__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void
xge_hal_device_clear_rx(xge_hal_device_t *hldev)
{
        xge_hal_pci_bar0_t *isrbar0 = (xge_hal_pci_bar0_t *)hldev->isrbar0;

        /* Writing all-ones acknowledges every pending Rx traffic
         * interrupt condition at once. */
        xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
                             0xFFFFFFFFFFFFFFFFULL,
                             &isrbar0->rx_traffic_int);
}

/**
 * xge_hal_device_clear_tx - Acknowledge (that is, clear) the
 * condition that has caused the TX interrupt.
 * @hldev: HAL device handle.
 *
 * Acknowledge (that is, clear) the condition that has caused
 * the Tx interrupt.
 * See also: xge_hal_device_begin_irq(), xge_hal_device_continue_irq(),
 * xge_hal_device_clear_rx(), xge_hal_device_mask_tx().
 */
__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void
xge_hal_device_clear_tx(xge_hal_device_t *hldev)
{
        xge_hal_pci_bar0_t *isrbar0 = (xge_hal_pci_bar0_t *)hldev->isrbar0;

        /* Writing all-ones acknowledges every pending Tx traffic
         * interrupt condition at once. */
        xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
                             0xFFFFFFFFFFFFFFFFULL,
                             &isrbar0->tx_traffic_int);
}

/**
 * xge_hal_device_poll_rx_channels - Poll Rx channels for completed
 * descriptors and process the same.
 * @hldev: HAL device handle.
 *
 * The function polls the Rx channels for the completed descriptors and calls
 * the upper-layer driver (ULD) via supplied completion callback.
 *
 * Returns: XGE_HAL_OK, if the polling is completed successful.
332 * XGE_HAL_COMPLETIONS_REMAIN: There are still more completed 333 * descriptors available which are yet to be processed. 334 * 335 * See also: xge_hal_device_poll_tx_channels(), xge_hal_device_continue_irq(). 336 */ 337 __HAL_STATIC_DEVICE __HAL_INLINE_DEVICE xge_hal_status_e 338 xge_hal_device_poll_rx_channels(xge_hal_device_t *hldev) 339 { 340 xge_list_t *item; 341 xge_hal_channel_t *channel; 342 xge_hal_dtr_h first_dtrh; 343 u8 t_code; 344 345 /* for each opened rx channel */ 346 xge_list_for_each(item, &hldev->ring_channels) { 347 channel = xge_container_of(item, 348 xge_hal_channel_t, item); 349 350 ((xge_hal_ring_t*)channel)->cmpl_cnt = 0; 351 if (xge_hal_ring_dtr_next_completed (channel, &first_dtrh, 352 &t_code) == XGE_HAL_OK) { 353 if (channel->callback(channel, first_dtrh, 354 t_code, channel->userdata) != XGE_HAL_OK) { 355 return XGE_HAL_COMPLETIONS_REMAIN; 356 } 357 } 358 } 359 360 return XGE_HAL_OK; 361 } 362 363 /** 364 * xge_hal_device_poll_tx_channels - Poll Tx channels for completed 365 * descriptors and process the same. 366 * @hldev: HAL device handle. 367 * 368 * The function polls the Tx channels for the completed descriptors and calls 369 * the upper-layer driver (ULD) via supplied completion callback. 370 * 371 * Returns: XGE_HAL_OK, if the polling is completed successful. 372 * XGE_HAL_COMPLETIONS_REMAIN: There are still more completed 373 * descriptors available which are yet to be processed. 374 * 375 * See also: xge_hal_device_poll_rx_channels(), xge_hal_device_continue_irq(). 
376 */ 377 __HAL_STATIC_DEVICE __HAL_INLINE_DEVICE xge_hal_status_e 378 xge_hal_device_poll_tx_channels(xge_hal_device_t *hldev) 379 { 380 xge_list_t *item; 381 xge_hal_channel_t *channel; 382 xge_hal_dtr_h first_dtrh; 383 u8 t_code; 384 385 /* for each opened tx channel */ 386 xge_list_for_each(item, &hldev->fifo_channels) { 387 channel = xge_container_of(item, 388 xge_hal_channel_t, item); 389 390 if (xge_hal_fifo_dtr_next_completed (channel, &first_dtrh, 391 &t_code) == XGE_HAL_OK) { 392 if (channel->callback(channel, first_dtrh, 393 t_code, channel->userdata) != XGE_HAL_OK) { 394 return XGE_HAL_COMPLETIONS_REMAIN; 395 } 396 } 397 } 398 399 return XGE_HAL_OK; 400 } 401 402 /** 403 * xge_hal_device_mask_tx - Mask Tx interrupts. 404 * @hldev: HAL device handle. 405 * 406 * Mask Tx device interrupts. 407 * 408 * See also: xge_hal_device_unmask_tx(), xge_hal_device_mask_rx(), 409 * xge_hal_device_clear_tx(). 410 */ 411 __HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void 412 xge_hal_device_mask_tx(xge_hal_device_t *hldev) 413 { 414 xge_hal_pci_bar0_t *isrbar0 = (xge_hal_pci_bar0_t *)hldev->isrbar0; 415 416 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 417 0xFFFFFFFFFFFFFFFFULL, 418 &isrbar0->tx_traffic_mask); 419 } 420 421 /** 422 * xge_hal_device_mask_rx - Mask Rx interrupts. 423 * @hldev: HAL device handle. 424 * 425 * Mask Rx device interrupts. 426 * 427 * See also: xge_hal_device_unmask_rx(), xge_hal_device_mask_tx(), 428 * xge_hal_device_clear_rx(). 429 */ 430 __HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void 431 xge_hal_device_mask_rx(xge_hal_device_t *hldev) 432 { 433 xge_hal_pci_bar0_t *isrbar0 = (xge_hal_pci_bar0_t *)hldev->isrbar0; 434 435 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 436 0xFFFFFFFFFFFFFFFFULL, 437 &isrbar0->rx_traffic_mask); 438 } 439 440 /** 441 * xge_hal_device_mask_all - Mask all device interrupts. 442 * @hldev: HAL device handle. 443 * 444 * Mask all device interrupts. 
445 * 446 * See also: xge_hal_device_unmask_all() 447 */ 448 __HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void 449 xge_hal_device_mask_all(xge_hal_device_t *hldev) 450 { 451 xge_hal_pci_bar0_t *isrbar0 = (xge_hal_pci_bar0_t *)hldev->isrbar0; 452 453 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 454 0xFFFFFFFFFFFFFFFFULL, 455 &isrbar0->general_int_mask); 456 } 457 458 /** 459 * xge_hal_device_unmask_tx - Unmask Tx interrupts. 460 * @hldev: HAL device handle. 461 * 462 * Unmask Tx device interrupts. 463 * 464 * See also: xge_hal_device_mask_tx(), xge_hal_device_clear_tx(). 465 */ 466 __HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void 467 xge_hal_device_unmask_tx(xge_hal_device_t *hldev) 468 { 469 xge_hal_pci_bar0_t *isrbar0 = (xge_hal_pci_bar0_t *)hldev->isrbar0; 470 471 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 472 0x0ULL, 473 &isrbar0->tx_traffic_mask); 474 } 475 476 /** 477 * xge_hal_device_unmask_rx - Unmask Rx interrupts. 478 * @hldev: HAL device handle. 479 * 480 * Unmask Rx device interrupts. 481 * 482 * See also: xge_hal_device_mask_rx(), xge_hal_device_clear_rx(). 483 */ 484 __HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void 485 xge_hal_device_unmask_rx(xge_hal_device_t *hldev) 486 { 487 xge_hal_pci_bar0_t *isrbar0 = (xge_hal_pci_bar0_t *)hldev->isrbar0; 488 489 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 490 0x0ULL, 491 &isrbar0->rx_traffic_mask); 492 } 493 494 /** 495 * xge_hal_device_unmask_all - Unmask all device interrupts. 496 * @hldev: HAL device handle. 497 * 498 * Unmask all device interrupts. 499 * 500 * See also: xge_hal_device_mask_all() 501 */ 502 __HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void 503 xge_hal_device_unmask_all(xge_hal_device_t *hldev) 504 { 505 xge_hal_pci_bar0_t *isrbar0 = (xge_hal_pci_bar0_t *)hldev->isrbar0; 506 507 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 508 0x0ULL, 509 &isrbar0->general_int_mask); 510 } 511 512 513 /** 514 * xge_hal_device_continue_irq - Continue handling IRQ: process all 515 * completed descriptors. 
 * @hldev: HAL device handle.
 *
 * Process completed descriptors and unmask the device interrupts.
 *
 * The xge_hal_device_continue_irq() walks all open channels
 * and calls upper-layer driver (ULD) via supplied completion
 * callback. Note that the completion callback is specified at channel open
 * time, see xge_hal_channel_open().
 *
 * Note that the xge_hal_device_continue_irq is part of the _fast_ path.
 * To optimize the processing, the function does _not_ check for
 * errors and alarms.
 *
 * The latter is done in a polling fashion, via xge_hal_device_poll().
 *
 * Returns: XGE_HAL_OK.
 *
 * See also: xge_hal_device_handle_irq(), xge_hal_device_poll(),
 * xge_hal_ring_dtr_next_completed(),
 * xge_hal_fifo_dtr_next_completed(), xge_hal_channel_callback_f{}.
 */
__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE xge_hal_status_e
xge_hal_device_continue_irq(xge_hal_device_t *hldev)
{
        xge_list_t *item;
        xge_hal_channel_t *channel;
        xge_hal_dtr_h first_dtrh;
        int got_rx = 0, got_tx = 0;
        /* Number of extra rx+tx walk iterations configured by the ULD. */
        unsigned int isr_polling_cnt = (unsigned int) hldev->config.isr_polling_cnt;
        u8 t_code;

_try_again:

        /* for each opened rx channel */
        xge_list_for_each(item, &hldev->ring_channels) {
                channel = xge_container_of(item,
                                xge_hal_channel_t, item);

                ((xge_hal_ring_t*)channel)->cmpl_cnt = 0;
                if (xge_hal_ring_dtr_next_completed (channel, &first_dtrh,
                                                     &t_code) == XGE_HAL_OK) {
                        channel->callback(channel, first_dtrh,
                                          t_code, channel->userdata);
                        got_rx++;
                }

                /* Device is being closed/reset: stop immediately. */
                if (hldev->terminating)
                        return XGE_HAL_OK;

        }

        /* Note.
         * All interrupts are masked by general_int_status at this point,
         * i.e. no new interrupts going to be produced by the adapter.
         * We intentionally do not mask rx/tx interrupts right after
         * walking to continue processing new descriptors on next
         * interation if configured. */

        /* for each opened tx channel */
        xge_list_for_each(item, &hldev->fifo_channels) {
                channel = xge_container_of(item,
                                xge_hal_channel_t, item);

                if (xge_hal_fifo_dtr_next_completed (channel, &first_dtrh,
                                                     &t_code) == XGE_HAL_OK) {
                        channel->callback(channel, first_dtrh,
                                          t_code, channel->userdata);
                        got_tx++;
                }

                /* Device is being closed/reset: stop immediately. */
                if (hldev->terminating)
                        return XGE_HAL_OK;

        }

        if (got_rx || got_tx) {
                xge_hal_pci_bar0_t *isrbar0 =
                        (xge_hal_pci_bar0_t *)hldev->isrbar0;
                got_tx = got_rx = 0;
                /* Traffic was found this pass: re-walk the channels up
                 * to isr_polling_cnt more times before returning. */
                if (isr_polling_cnt--)
                        goto _try_again;
                /* to avoid interrupt loss, we force bridge to flush cached
                 * writes, in simple case OSDEP needs to just readl(), some
                 * OSes (e.g. M$ Windows) has special bridge flush API */
                (void) xge_os_flush_bridge(hldev->pdev, hldev->regh0,
                                           &isrbar0->general_int_status);
        } else if (isr_polling_cnt == hldev->config.isr_polling_cnt) {
                /* Nothing completed on the very first pass: count this
                 * as a non-traffic interrupt. */
                hldev->stats.sw_dev_info_stats.not_traffic_intr_cnt++;
        }

        return XGE_HAL_OK;
}

/**
 * xge_hal_device_handle_irq - Handle device IRQ.
 * @hldev: HAL device handle.
 *
 * Perform the complete handling of the line interrupt. The function
 * performs two calls.
 * First it uses xge_hal_device_begin_irq() to check the reason for
 * the interrupt and mask the device interrupts.
 * Second, it calls xge_hal_device_continue_irq() to process all
 * completed descriptors and re-enable the interrupts.
 *
 * Returns: XGE_HAL_OK - success;
 * XGE_HAL_ERR_WRONG_IRQ - (shared) IRQ produced by other device.
 *
 * See also: xge_hal_device_begin_irq(), xge_hal_device_continue_irq().
624 */ 625 __HAL_STATIC_DEVICE __HAL_INLINE_DEVICE xge_hal_status_e 626 xge_hal_device_handle_irq(xge_hal_device_t *hldev) 627 { 628 u64 reason; 629 xge_hal_status_e status; 630 631 xge_hal_device_mask_all(hldev); 632 633 status = xge_hal_device_begin_irq(hldev, &reason); 634 if (status != XGE_HAL_OK) { 635 xge_hal_device_unmask_all(hldev); 636 return status; 637 } 638 639 if (reason & XGE_HAL_GEN_INTR_RXTRAFFIC) { 640 xge_hal_device_clear_rx(hldev); 641 } 642 643 status = xge_hal_device_continue_irq(hldev); 644 645 xge_hal_device_clear_tx(hldev); 646 647 xge_hal_device_unmask_all(hldev); 648 649 return status; 650 } 651 652 #if defined(XGE_HAL_CONFIG_LRO) 653 654 /* 655 * __hal_tcp_seg_len: Find the tcp seg len. 656 * @ip: ip header. 657 * @tcp: tcp header. 658 * returns: Tcp seg length. 659 */ 660 __HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL u16 661 __hal_tcp_seg_len(iplro_t *ip, tcplro_t *tcp) 662 { 663 u16 ret; 664 665 ret = (xge_os_ntohs(ip->tot_len) - 666 ((ip->version_ihl & 0x0F)<<2) - 667 ((tcp->doff_res)>>2)); 668 return (ret); 669 } 670 671 /* 672 * __hal_ip_lro_capable: Finds whether ip is lro capable. 673 * @ip: ip header. 674 * @ext_info: descriptor info. 675 */ 676 __HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL xge_hal_status_e 677 __hal_ip_lro_capable(iplro_t *ip, 678 xge_hal_dtr_info_t *ext_info) 679 { 680 681 #ifdef XGE_LL_DEBUG_DUMP_PKT 682 { 683 u16 i; 684 u8 ch, *iph = (u8 *)ip; 685 686 xge_debug_ring(XGE_TRACE, "Dump Ip:" ); 687 for (i =0; i < 40; i++) { 688 ch = ntohs(*((u8 *)(iph + i)) ); 689 printf("i:%d %02x, ",i,ch); 690 } 691 } 692 #endif 693 694 if (ip->version_ihl != IP_FAST_PATH_HDR_MASK) { 695 xge_debug_ring(XGE_ERR, "iphdr !=45 :%d",ip->version_ihl); 696 return XGE_HAL_FAIL; 697 } 698 699 if (ext_info->proto & XGE_HAL_FRAME_PROTO_IP_FRAGMENTED) { 700 xge_debug_ring(XGE_ERR, "IP fragmented"); 701 return XGE_HAL_FAIL; 702 } 703 704 return XGE_HAL_OK; 705 } 706 707 /* 708 * __hal_tcp_lro_capable: Finds whether tcp is lro capable. 
 * @ip: ip header.
 * @tcp: tcp header.
 *
 * Returns XGE_HAL_OK only for the TCP fast path: a plain header length
 * (doff_res matches MASK1) and flags that are either pure ACK (MASK2)
 * or ACK+PSH (MASK3). Anything else (SYN/FIN/RST/URG, options beyond
 * the expected layout) is rejected.
 */
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL xge_hal_status_e
__hal_tcp_lro_capable(iplro_t *ip, tcplro_t *tcp)
{
#ifdef XGE_LL_DEBUG_DUMP_PKT
        {
                u8 ch;
                u16 i;

                xge_debug_ring(XGE_TRACE, "Dump Tcp:" );
                for (i = 0; i < 20; i++) {
                        ch = ntohs(*((u8 *)((u8 *)tcp + i)) );
                        xge_os_printf("i:%d %02x, ",i,ch);
                }
        }
#endif
        if ((TCP_FAST_PATH_HDR_MASK1 != tcp->doff_res) ||
            ((TCP_FAST_PATH_HDR_MASK2 != tcp->ctrl) &&
             (TCP_FAST_PATH_HDR_MASK3 != tcp->ctrl))) {
                xge_debug_ring(XGE_ERR, "tcphdr not fastpth %02x %02x \n", tcp->doff_res, tcp->ctrl);
                return XGE_HAL_FAIL;
        }

        return XGE_HAL_OK;
}

/*
 * __hal_lro_capable: Finds whether frame is lro capable.
 * @buffer: Ethernet frame.
 * @ip: ip frame.
 * @tcp: tcp frame.
 * @ext_info: Descriptor info.
 * @hldev: Hal context.
 *
 * On success, *ip and *tcp are set to point at the IP and TCP headers
 * inside @buffer (offset depends on the L2 frame type and VLAN tag).
 */
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL xge_hal_status_e
__hal_lro_capable( u8 *buffer,
                   iplro_t **ip,
                   tcplro_t **tcp,
                   xge_hal_dtr_info_t *ext_info,
                   xge_hal_device_t *hldev)
{
        u8 ip_off, ip_length;

        if (!(ext_info->proto & XGE_HAL_FRAME_PROTO_TCP)) {
                xge_debug_ring(XGE_ERR, "Cant do lro %d", ext_info->proto);
                return XGE_HAL_FAIL;
        }
#ifdef XGE_LL_DEBUG_DUMP_PKT
        {
                u8 ch;
                u16 i;

                xge_os_printf("Dump Eth:" );
                for (i = 0; i < 60; i++) {
                        ch = ntohs(*((u8 *)(buffer + i)) );
                        xge_os_printf("i:%d %02x, ",i,ch);
                }
        }
#endif

        /* L2 header size determines where the IP header starts. */
        switch (ext_info->frame) {
        case XGE_HAL_FRAME_TYPE_DIX:
                ip_off = XGE_HAL_HEADER_ETHERNET_II_802_3_SIZE;
                break;
        case XGE_HAL_FRAME_TYPE_LLC:
                ip_off = (XGE_HAL_HEADER_ETHERNET_II_802_3_SIZE +
                          XGE_HAL_HEADER_802_2_SIZE);
                break;
        case XGE_HAL_FRAME_TYPE_SNAP:
                ip_off = (XGE_HAL_HEADER_ETHERNET_II_802_3_SIZE +
                          XGE_HAL_HEADER_SNAP_SIZE);
                break;
        default: // XGE_HAL_FRAME_TYPE_IPX, etc.
                return XGE_HAL_FAIL;
        }


        if (ext_info->proto & XGE_HAL_FRAME_PROTO_VLAN_TAGGED) {
                ip_off += XGE_HAL_HEADER_VLAN_SIZE;
        }

        /* Grab ip, tcp headers */
        *ip = (iplro_t *)((char*)buffer + ip_off);

        /* IHL is in 32-bit words; <<2 converts to bytes. */
        ip_length = (u8)((*ip)->version_ihl & 0x0F);
        ip_length = ip_length <<2;
        *tcp = (tcplro_t *)((unsigned long)*ip + ip_length);

        xge_debug_ring(XGE_TRACE, "ip_length:%d ip:%llx tcp:%llx", (int)ip_length,
                       (u64)(unsigned long)*ip, (u64)(unsigned long)*tcp);

        return XGE_HAL_OK;

}

/**
 * xge_hal_lro_free - Used to recycle lro memory.
 * @lro: LRO memory.
 * @hldev: Hal device structure.
 *
 * Marks the session slot unused so __hal_get_lro_session() can
 * re-allocate it.
 */
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL void
xge_hal_lro_free(lro_t *lro, xge_hal_device_t *hldev)
{
        lro->in_use = 0;
#if 1 // For debug.
        xge_os_memzero(lro, sizeof(lro_t));
#endif
}

/*
 * __hal_lro_malloc - Gets LRO from free memory pool.
 * @hldev: Hal device structure.
 *
 * NOTE(review): always hands out the first pool entry; presumably a
 * leftover from the single-session implementation (see the Note on
 * __hal_get_lro_session()).
 */
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL lro_t *
__hal_lro_malloc(xge_hal_device_t *hldev)
{
        hldev->g_lro_pool->in_use = 1;
        return (hldev->g_lro_pool);
}


/*
 * __hal_get_lro_session: Gets matching LRO session or creates one.
 * @buffer: Ethernet frame.
 * @ip: ip header.
 * @tcp: tcp header.
 * @lro: lro pointer
 * @ext_info: Descriptor info.
 * @hldev: Hal context.
 * Note: Current implementation will contain only one LRO session.
 *       Global lro will not exist once more LRO sessions are permitted.
843 */ 844 __HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL xge_hal_status_e 845 __hal_get_lro_session (u8 *buffer, 846 iplro_t *ip, 847 tcplro_t *tcp, 848 lro_t **lro, 849 xge_hal_dtr_info_t *ext_info, 850 xge_hal_device_t *hldev) 851 { 852 xge_hal_status_e ret; 853 lro_t *g_lro; 854 int i, free_slot = -1; 855 856 /*********************************************************** 857 Search in the pool of LROs for the session that matches the incoming 858 frame. 859 ************************************************************/ 860 *lro = g_lro = NULL; 861 for (i = 0; i < XGE_HAL_MAX_LRO_SESSIONS; i++) { 862 g_lro = &hldev->g_lro_pool[i]; 863 864 if (!g_lro->in_use) { 865 if (free_slot == -1) 866 free_slot = i; 867 continue; 868 } 869 870 /* Match Source address field */ 871 if ((g_lro->ip_hdr->saddr != ip->saddr)) 872 continue; 873 874 /* Match Destination address field */ 875 if ((g_lro->ip_hdr->daddr != ip->daddr)) 876 continue; 877 878 879 /* Match Source Port field */ 880 if ((g_lro->tcp_hdr->source != tcp->source)) 881 continue; 882 883 884 /* Match Destination Port field */ 885 if ((g_lro->tcp_hdr->dest != tcp->dest)) 886 continue; 887 888 *lro = g_lro; 889 890 if (g_lro->tcp_next_seq_num != xge_os_ntohl(tcp->seq)) { 891 xge_debug_ring(XGE_ERR, "**retransmit **" 892 "found***"); 893 return XGE_HAL_INF_LRO_END_2; 894 } 895 896 if (XGE_HAL_OK != __hal_ip_lro_capable(ip, ext_info)) 897 return XGE_HAL_INF_LRO_END_2; 898 899 if (XGE_HAL_OK != __hal_tcp_lro_capable(ip, tcp)) 900 return XGE_HAL_INF_LRO_END_2; 901 902 /* 903 * The frame is good, in-sequence, can be LRO-ed; 904 * take its (latest) ACK - unless it is a dupack. 905 * Note: to be exact need to check window size as well.. 
906 */ 907 if (g_lro->tcp_ack_num == tcp->ack_seq && 908 g_lro->tcp_seq_num == tcp->seq) 909 return XGE_HAL_INF_LRO_END_2; 910 911 g_lro->tcp_seq_num = tcp->seq; 912 g_lro->tcp_ack_num = tcp->ack_seq; 913 g_lro->frags_len += __hal_tcp_seg_len(ip, tcp); 914 915 return XGE_HAL_INF_LRO_CONT; 916 } 917 918 if (free_slot == -1) 919 return XGE_HAL_INF_LRO_UNCAPABLE; 920 921 g_lro = &hldev->g_lro_pool[free_slot]; 922 if (XGE_HAL_FAIL == __hal_ip_lro_capable(ip, ext_info)) 923 return XGE_HAL_INF_LRO_UNCAPABLE; 924 925 if (XGE_HAL_FAIL == __hal_tcp_lro_capable(ip, tcp)) 926 return XGE_HAL_INF_LRO_UNCAPABLE; 927 928 *lro = g_lro; 929 xge_debug_ring(XGE_TRACE, "Creating lro session."); 930 931 g_lro->in_use = 1; 932 g_lro->ll_hdr = buffer; 933 g_lro->ip_hdr = ip; 934 g_lro->tcp_hdr = tcp; 935 g_lro->tcp_next_seq_num = __hal_tcp_seg_len(ip, tcp) + 936 xge_os_ntohl(tcp->seq); 937 g_lro->tcp_seq_num = tcp->seq; 938 g_lro->tcp_ack_num = tcp->ack_seq; 939 g_lro->sg_num = 1; 940 g_lro->total_length = xge_os_ntohs(ip->tot_len); 941 g_lro->frags_len = 0; 942 hldev->stats.sw_dev_info_stats.tot_frms_lroised++; 943 hldev->stats.sw_dev_info_stats.tot_lro_sessions++; 944 945 return XGE_HAL_INF_LRO_BEGIN; 946 } 947 948 /* 949 * __hal_lro_under_optimal_thresh: Finds whether combined session is optimal. 950 * @ip: ip header. 951 * @tcp: tcp header. 952 * @lro: lro pointer 953 * @hldev: Hal context. 
954 */ 955 __HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL xge_hal_status_e 956 __hal_lro_under_optimal_thresh (iplro_t *ip, 957 tcplro_t *tcp, 958 lro_t *lro, 959 xge_hal_device_t *hldev) 960 { 961 if (!lro) return XGE_HAL_FAIL; 962 963 if ((lro->total_length + __hal_tcp_seg_len(ip, tcp) ) > 964 CONFIG_LRO_MAX_ACCUM_LENGTH) { 965 xge_debug_ring(XGE_TRACE, "Max accumulation length exceeded: max length %d \n", CONFIG_LRO_MAX_ACCUM_LENGTH); 966 return XGE_HAL_FAIL; 967 } 968 969 if (lro->sg_num == CONFIG_LRO_MAX_SG_NUM) { 970 xge_debug_ring(XGE_TRACE, "Max sg count exceeded: max sg %d \n", CONFIG_LRO_MAX_SG_NUM); 971 return XGE_HAL_FAIL; 972 } 973 974 return XGE_HAL_OK; 975 } 976 977 /* 978 * __hal_collapse_ip_hdr: Collapses ip header. 979 * @ip: ip header. 980 * @tcp: tcp header. 981 * @lro: lro pointer 982 * @hldev: Hal context. 983 */ 984 __HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL xge_hal_status_e 985 __hal_collapse_ip_hdr ( iplro_t *ip, 986 tcplro_t *tcp, 987 lro_t *lro, 988 xge_hal_device_t *hldev) 989 { 990 991 lro->total_length += __hal_tcp_seg_len(ip, tcp); 992 993 /* May be we have to handle time stamps or more options */ 994 995 return XGE_HAL_OK; 996 997 } 998 999 /* 1000 * __hal_collapse_tcp_hdr: Collapses tcp header. 1001 * @ip: ip header. 1002 * @tcp: tcp header. 1003 * @lro: lro pointer 1004 * @hldev: Hal context. 1005 */ 1006 __HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL xge_hal_status_e 1007 __hal_collapse_tcp_hdr ( iplro_t *ip, 1008 tcplro_t *tcp, 1009 lro_t *lro, 1010 xge_hal_device_t *hldev) 1011 { 1012 1013 lro->tcp_next_seq_num += __hal_tcp_seg_len(ip, tcp); 1014 return XGE_HAL_OK; 1015 1016 } 1017 1018 /* 1019 * __hal_append_lro: Appends new frame to existing LRO session. 1020 * @ip: ip header. 1021 * @tcp: tcp header. 1022 * @seg_len: tcp payload length. 1023 * @lro: lro pointer 1024 * @hldev: Hal context. 
 */
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL xge_hal_status_e
__hal_append_lro(iplro_t *ip,
                 tcplro_t **tcp,
                 u32 *seg_len,
                 lro_t *lro,
                 xge_hal_device_t *hldev)
{
        __hal_collapse_ip_hdr(ip, *tcp, lro, hldev);
        __hal_collapse_tcp_hdr(ip, *tcp, lro, hldev);
        // Update mbuf chain will be done in ll driver.
        // xge_hal_accumulate_large_rx on success of appending new frame to
        // lro will return to ll driver tcpdata pointer, and tcp payload length.
        // along with return code lro frame appended.

        lro->sg_num++;
        *seg_len = __hal_tcp_seg_len(ip, *tcp);
        /* Advance *tcp past the TCP header (doff_res>>2 == header size
         * in bytes) so it points at the payload for the LL driver. */
        *tcp = (tcplro_t *)((unsigned long)*tcp + (((*tcp)->doff_res)>>2));

        return XGE_HAL_OK;

}

/**
 * xge_hal_accumulate_large_rx: LRO a given frame
 * frames
 * @buffer: Ethernet frame.
 * @tcp: tcp header.
 * @seglen: packet length.
 * @p_lro: lro pointer.
 * @ext_info: descriptor info, see xge_hal_dtr_info_t{}.
 * @hldev: HAL device.
 *
 * LRO the newly received frame, i.e. attach it (if possible) to the
 * already accumulated (i.e., already LRO-ed) received frames (if any),
 * to form one super-sized frame for the subsequent processing
 * by the stack.
 */
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL xge_hal_status_e
xge_hal_accumulate_large_rx(u8 *buffer,
                            u8 **tcp,
                            u32 *seglen,
                            lro_t **p_lro,
                            xge_hal_dtr_info_t *ext_info,
                            xge_hal_device_t *hldev)
{
        iplro_t *ip;
        xge_hal_status_e ret;
        lro_t *lro;

        xge_debug_ring(XGE_TRACE, "Entered accumu lro. ");
        /* Parse the frame; on success ip/tcp point into buffer. */
        if (XGE_HAL_OK != __hal_lro_capable(buffer, &ip, (tcplro_t **)tcp,
                                            ext_info, hldev))
                return XGE_HAL_INF_LRO_UNCAPABLE;

        /*
         * This function shall get matching LRO or else
         * create one and return it
         */
        ret = __hal_get_lro_session(buffer, ip,
                                    (tcplro_t *)*tcp,
                                    p_lro, ext_info, hldev);
        xge_debug_ring(XGE_TRACE, "ret from get_lro:%d ",ret);
        lro = *p_lro;
        if (XGE_HAL_INF_LRO_CONT == ret) {
                if (XGE_HAL_OK == __hal_lro_under_optimal_thresh(ip,
                                        (tcplro_t *)*tcp, lro, hldev)) {
                        __hal_append_lro(ip,(tcplro_t **) tcp, seglen,
                                         lro,
                                         hldev);
                        hldev->stats.sw_dev_info_stats.tot_frms_lroised++;

                        /* Session is now full: ask ULD to flush it. */
                        if (lro->sg_num >= CONFIG_LRO_MAX_SG_NUM)
                                ret = XGE_HAL_INF_LRO_END_1;

                } else ret = XGE_HAL_INF_LRO_END_2;
        }

        /*
         * Since its time to flush,
         * update ip header so that it can be sent up
         * (refresh total length and recompute the header checksum,
         * and restore the latest accumulated ACK).
         */
        if ((ret == XGE_HAL_INF_LRO_END_1) ||
            (ret == XGE_HAL_INF_LRO_END_2)) {
                lro->ip_hdr->tot_len = xge_os_htons((*p_lro)->total_length);
                lro->ip_hdr->check = xge_os_htons(0);
                lro->ip_hdr->check =
                        XGE_LL_IP_FAST_CSUM(((u8 *)(lro->ip_hdr)),
                                (lro->ip_hdr->version_ihl & 0x0F));
                lro->tcp_hdr->ack_seq = lro->tcp_ack_num;
        }

        return (ret);
}

/**
 * xge_hal_lro_exist: Returns LRO list head if any.
 * @hldev: Hal context.
1123 */ 1124 __HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL lro_t * 1125 xge_hal_lro_exist (xge_hal_device_t *hldev) 1126 { 1127 1128 if (hldev->g_lro_pool->in_use) { 1129 /* Since its time to flush, Update ip header so that it can be sent up*/ 1130 lro_t *lro; 1131 lro = hldev->g_lro_pool; 1132 lro->ip_hdr->tot_len = xge_os_htons(lro->total_length); 1133 lro->ip_hdr->check = xge_os_htons(0); 1134 lro->ip_hdr->check = XGE_LL_IP_FAST_CSUM(((u8 *)(lro->ip_hdr)), 1135 (lro->ip_hdr->version_ihl & 0x0F)); 1136 return (hldev->g_lro_pool); 1137 } 1138 1139 return NULL; 1140 } 1141 #endif 1142