/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 *
 * Copyright (c) 2002-2006 Neterion, Inc.
 */

#include "xgehal-fifo.h"
#include "xgehal-device.h"

static xge_hal_status_e
__hal_fifo_mempool_item_alloc(xge_hal_mempool_h mempoolh,
                              void *memblock,
                              int memblock_index,
                              xge_hal_mempool_dma_t *dma_object,
                              void *item,
                              int index,
                              int is_last,
                              void *userdata)
{
    int memblock_item_idx;
    xge_hal_fifo_txdl_priv_t *txdl_priv;
    xge_hal_fifo_txd_t *txdp = (xge_hal_fifo_txd_t *)item;
    xge_hal_fifo_t *fifo = (xge_hal_fifo_t *)userdata;

    xge_assert(item);
    txdl_priv = (xge_hal_fifo_txdl_priv_t *)
        __hal_mempool_item_priv((xge_hal_mempool_t *) mempoolh,
                                memblock_index,
                                item,
                                &memblock_item_idx);
    xge_assert(txdl_priv);

    /* pre-format HAL's TxDL's private */
    txdl_priv->dma_offset = (char *)item - (char *)memblock;
    txdl_priv->dma_addr = dma_object->addr + txdl_priv->dma_offset;
    txdl_priv->dma_handle = dma_object->handle;
    txdl_priv->memblock = memblock;
    txdl_priv->first_txdp = (xge_hal_fifo_txd_t *)item;
    txdl_priv->next_txdl_priv = NULL;
    txdl_priv->dang_txdl = NULL;
    txdl_priv->dang_frags = 0;
    txdl_priv->alloc_frags = 0;

#ifdef XGE_DEBUG_ASSERT
    txdl_priv->dma_object = dma_object;
#endif
    txdp->host_control = (u64)(ulong_t)txdl_priv;

#ifdef XGE_HAL_ALIGN_XMIT
    txdl_priv->align_vaddr = NULL;
    txdl_priv->align_dma_addr = (dma_addr_t)0;

#ifndef XGE_HAL_ALIGN_XMIT_ALLOC_RT
    {
        xge_hal_status_e status;
        if (fifo->config->alignment_size) {
            status = __hal_fifo_dtr_align_alloc_map(fifo, txdp);
            if (status != XGE_HAL_OK) {
                xge_debug_mm(XGE_ERR,
                    "align buffer[%d] %d bytes, status %d",
                    index, fifo->align_size, status);
                return status;
            }
        }
    }
#endif
#endif

    if (fifo->channel.dtr_init) {
        fifo->channel.dtr_init(fifo, (xge_hal_dtr_h)txdp, index,
            fifo->channel.userdata, XGE_HAL_CHANNEL_OC_NORMAL);
    }

    return XGE_HAL_OK;
}
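
/*
 * Per-item destructor counterpart of __hal_fifo_mempool_item_alloc().
 * item_alloc() stores a back-pointer to the per-TxDL private area in
 * txdp->host_control so the HAL can later recover txdl_priv from a
 * descriptor handle; item_free() below undoes the per-item setup, and
 * with XGE_HAL_ALIGN_XMIT it unmaps and frees the alignment bounce
 * buffer that __hal_fifo_dtr_align_alloc_map() may have attached.
 */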
static xge_hal_status_e
__hal_fifo_mempool_item_free(xge_hal_mempool_h mempoolh,
                             void *memblock,
                             int memblock_index,
                             xge_hal_mempool_dma_t *dma_object,
                             void *item,
                             int index,
                             int is_last,
                             void *userdata)
{
    int memblock_item_idx;
    xge_hal_fifo_txdl_priv_t *txdl_priv;
#ifdef XGE_HAL_ALIGN_XMIT
    xge_hal_fifo_t *fifo = (xge_hal_fifo_t *)userdata;
#endif

    xge_assert(item);

    txdl_priv = (xge_hal_fifo_txdl_priv_t *)
        __hal_mempool_item_priv((xge_hal_mempool_t *) mempoolh,
                                memblock_index,
                                item,
                                &memblock_item_idx);
    xge_assert(txdl_priv);

#ifdef XGE_HAL_ALIGN_XMIT
    if (fifo->config->alignment_size) {
        if (txdl_priv->align_dma_addr != 0) {
            xge_os_dma_unmap(fifo->channel.pdev,
                txdl_priv->align_dma_handle,
                txdl_priv->align_dma_addr,
                fifo->align_size,
                XGE_OS_DMA_DIR_TODEVICE);

            txdl_priv->align_dma_addr = 0;
        }

        if (txdl_priv->align_vaddr != NULL) {
            xge_os_dma_free(fifo->channel.pdev,
                txdl_priv->align_vaddr,
                fifo->align_size,
                &txdl_priv->align_dma_acch,
                &txdl_priv->align_dma_handle);

            txdl_priv->align_vaddr = NULL;
        }
    }
#endif

    return XGE_HAL_OK;
}

xge_hal_status_e
__hal_fifo_open(xge_hal_channel_h channelh, xge_hal_channel_attr_t *attr)
{
    xge_hal_device_t *hldev;
    xge_hal_status_e status;
    xge_hal_fifo_t *fifo = (xge_hal_fifo_t *)channelh;
    xge_hal_fifo_queue_t *queue;
    int i, txdl_size, max_arr_index, mid_point;
    xge_hal_dtr_h dtrh;

    hldev = (xge_hal_device_t *)fifo->channel.devh;
    fifo->config = &hldev->config.fifo;
    queue = &fifo->config->queue[attr->post_qid];

#if defined(XGE_HAL_TX_MULTI_RESERVE)
    xge_os_spin_lock_init(&fifo->channel.reserve_lock, hldev->pdev);
#elif defined(XGE_HAL_TX_MULTI_RESERVE_IRQ)
    xge_os_spin_lock_init_irq(&fifo->channel.reserve_lock, hldev->irqh);
#endif
#if defined(XGE_HAL_TX_MULTI_POST)
    if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA) {
        fifo->post_lock_ptr = &hldev->xena_post_lock;
    } else {
        xge_os_spin_lock_init(&fifo->channel.post_lock, hldev->pdev);
        fifo->post_lock_ptr = &fifo->channel.post_lock;
    }
#elif defined(XGE_HAL_TX_MULTI_POST_IRQ)
    if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA) {
        fifo->post_lock_ptr = &hldev->xena_post_lock;
    } else {
        xge_os_spin_lock_init_irq(&fifo->channel.post_lock,
            hldev->irqh);
        fifo->post_lock_ptr = &fifo->channel.post_lock;
    }
#endif

    fifo->align_size =
        fifo->config->alignment_size * fifo->config->max_aligned_frags;

    /* Initialize the BAR1 address as the start of the FIFO queue
     * pointer and as the location of the FIFO control word. */
    fifo->hw_pair =
        (xge_hal_fifo_hw_pair_t *) (void *)(hldev->bar1 +
        (attr->post_qid * XGE_HAL_FIFO_HW_PAIR_OFFSET));

    /* apply "interrupts per txdl" attribute */
    fifo->interrupt_type = XGE_HAL_TXD_INT_TYPE_UTILZ;
    if (queue->intr) {
        fifo->interrupt_type = XGE_HAL_TXD_INT_TYPE_PER_LIST;
    }
    fifo->no_snoop_bits =
        (int)(XGE_HAL_TX_FIFO_NO_SNOOP(queue->no_snoop_bits));

    /*
     * FIFO memory management strategy:
     *
     * A TxDL is split into three independent parts:
     * - the set of TxDs
     * - the TxD HAL private part
     * - the upper-layer (ULD) private part
     *
     * Adaptive memory allocation is used, i.e. memory is allocated on
     * demand in chunks that fit into one memory block. One memory block
     * may contain more than one TxDL. In the simple case the memory
     * block size equals the CPU page size; on more capable OS's a
     * memory block can be contiguous across several pages.
     *
     * During "reserve" operations more memory can be allocated on
     * demand, for example due to a FIFO-full condition.
     *
     * The pool of memblocks never shrinks; it is released only by the
     * __hal_fifo_close routine, which essentially stops the channel and
     * frees its resources.
     */
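
    /*
     * Rough illustration of the layout described above (the numbers are
     * only an example, not taken from a particular configuration): if a
     * TxDL rounds up to 512 bytes after cacheline alignment and
     * memblock_size is 4096, then txdl_per_memblock = 4096 / 512 = 8,
     * i.e. one DMA-able memblock carries eight TxDLs, each with its own
     * cacheline-aligned private area (HAL private plus per_dtr_space)
     * reachable via __hal_mempool_item_priv().
     */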

    /* TxDL common private size == TxDL private + ULD private */
    fifo->priv_size = sizeof(xge_hal_fifo_txdl_priv_t) +
        attr->per_dtr_space;
    fifo->priv_size = ((fifo->priv_size + __xge_os_cacheline_size - 1) /
        __xge_os_cacheline_size) * __xge_os_cacheline_size;

    /* recompute txdl size to be cacheline aligned */
    fifo->txdl_size = fifo->config->max_frags * sizeof(xge_hal_fifo_txd_t);
    txdl_size = ((fifo->txdl_size + __xge_os_cacheline_size - 1) /
        __xge_os_cacheline_size) * __xge_os_cacheline_size;

    if (fifo->txdl_size != txdl_size)
        xge_debug_fifo(XGE_ERR, "cacheline > 128 ( ?? ): %d, %d, %d, %d",
            fifo->config->max_frags, fifo->txdl_size, txdl_size,
            __xge_os_cacheline_size);

    fifo->txdl_size = txdl_size;

    /* Since the dtr_init() callback is called from item_alloc(), the
     * channel's userdata may likewise be used before
     * channel_initialize() runs; set both up front. */
    fifo->channel.dtr_init = attr->dtr_init;
    fifo->channel.userdata = attr->userdata;
    fifo->txdl_per_memblock = fifo->config->memblock_size /
        fifo->txdl_size;

    fifo->mempool = __hal_mempool_create(hldev->pdev,
        fifo->config->memblock_size,
        fifo->txdl_size,
        fifo->priv_size,
        queue->initial,
        queue->max,
        __hal_fifo_mempool_item_alloc,
        __hal_fifo_mempool_item_free,
        fifo);
    if (fifo->mempool == NULL) {
        return XGE_HAL_ERR_OUT_OF_MEMORY;
    }

    status = __hal_channel_initialize(channelh, attr,
        (void **) __hal_mempool_items_arr(fifo->mempool),
        queue->initial, queue->max,
        fifo->config->reserve_threshold);
    if (status != XGE_HAL_OK) {
        __hal_fifo_close(channelh);
        return status;
    }
    xge_debug_fifo(XGE_TRACE,
        "DTR reserve_length:%d reserve_top:%d\n"
        "max_frags:%d reserve_threshold:%d\n"
        "memblock_size:%d alignment_size:%d max_aligned_frags:%d",
        fifo->channel.reserve_length, fifo->channel.reserve_top,
        fifo->config->max_frags, fifo->config->reserve_threshold,
        fifo->config->memblock_size, fifo->config->alignment_size,
        fifo->config->max_aligned_frags);

#ifdef XGE_DEBUG_ASSERT
    for (i = 0; i < fifo->channel.reserve_length; i++) {
        xge_debug_fifo(XGE_TRACE, "DTR before reversing index:%d"
            " handle:%p", i, fifo->channel.reserve_arr[i]);
    }
#endif

    xge_assert(fifo->channel.reserve_length);
    /* reverse the FIFO dtr array */
    max_arr_index = fifo->channel.reserve_length - 1;
    max_arr_index -= fifo->channel.reserve_top;
    xge_assert(max_arr_index);
    mid_point = (fifo->channel.reserve_length - fifo->channel.reserve_top) / 2;
    for (i = 0; i < mid_point; i++) {
        dtrh = fifo->channel.reserve_arr[i];
        fifo->channel.reserve_arr[i] =
            fifo->channel.reserve_arr[max_arr_index - i];
        fifo->channel.reserve_arr[max_arr_index - i] = dtrh;
    }

#ifdef XGE_DEBUG_ASSERT
    for (i = 0; i < fifo->channel.reserve_length; i++) {
        xge_debug_fifo(XGE_TRACE, "DTR after reversing index:%d"
            " handle:%p", i, fifo->channel.reserve_arr[i]);
    }
#endif

    return XGE_HAL_OK;
}
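
/*
 * Tear down a FIFO channel opened by __hal_fifo_open(): destroy the
 * TxDL mempool, terminate the channel, and release the reserve/post
 * spinlocks created during open.  The per-channel post lock is only
 * destroyed on Herc devices, since Xena-class devices share the
 * device-wide hldev->xena_post_lock instead.
 */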
void
__hal_fifo_close(xge_hal_channel_h channelh)
{
    xge_hal_fifo_t *fifo = (xge_hal_fifo_t *)channelh;
    xge_hal_device_t *hldev = (xge_hal_device_t *)fifo->channel.devh;

    if (fifo->mempool) {
        __hal_mempool_destroy(fifo->mempool);
    }

    __hal_channel_terminate(channelh);

#if defined(XGE_HAL_TX_MULTI_RESERVE)
    xge_os_spin_lock_destroy(&fifo->channel.reserve_lock, hldev->pdev);
#elif defined(XGE_HAL_TX_MULTI_RESERVE_IRQ)
    xge_os_spin_lock_destroy_irq(&fifo->channel.reserve_lock, hldev->pdev);
#endif
    if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
#if defined(XGE_HAL_TX_MULTI_POST)
        xge_os_spin_lock_destroy(&fifo->channel.post_lock, hldev->pdev);
#elif defined(XGE_HAL_TX_MULTI_POST_IRQ)
        xge_os_spin_lock_destroy_irq(&fifo->channel.post_lock,
            hldev->pdev);
#endif
    }
}

void
__hal_fifo_hw_initialize(xge_hal_device_h devh)
{
    xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
    xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
    u64 *tx_fifo_partitions[4];
    u64 *tx_fifo_wrr[5];
    u64 tx_fifo_wrr_value[5];
    u64 val64, part0;
    int i;

    /* Tx DMA Initialization */

    tx_fifo_partitions[0] = &bar0->tx_fifo_partition_0;
    tx_fifo_partitions[1] = &bar0->tx_fifo_partition_1;
    tx_fifo_partitions[2] = &bar0->tx_fifo_partition_2;
    tx_fifo_partitions[3] = &bar0->tx_fifo_partition_3;

    tx_fifo_wrr[0] = &bar0->tx_w_round_robin_0;
    tx_fifo_wrr[1] = &bar0->tx_w_round_robin_1;
    tx_fifo_wrr[2] = &bar0->tx_w_round_robin_2;
    tx_fifo_wrr[3] = &bar0->tx_w_round_robin_3;
    tx_fifo_wrr[4] = &bar0->tx_w_round_robin_4;

    tx_fifo_wrr_value[0] = XGE_HAL_FIFO_WRR_0;
    tx_fifo_wrr_value[1] = XGE_HAL_FIFO_WRR_1;
    tx_fifo_wrr_value[2] = XGE_HAL_FIFO_WRR_2;
    tx_fifo_wrr_value[3] = XGE_HAL_FIFO_WRR_3;
    tx_fifo_wrr_value[4] = XGE_HAL_FIFO_WRR_4;

    /* Note: the WRR calendar must be configured before the transmit
     * FIFOs are enabled (see page 6-77 of the user guide). */

    if (!hldev->config.rts_qos_en) {
        /* all zeroes for Round-Robin */
        for (i = 0; i < XGE_HAL_FIFO_MAX_WRR; i++) {
            xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0,
                tx_fifo_wrr[i]);
        }

        /* reset all of them but '0' */
        for (i = 1; i < XGE_HAL_FIFO_MAX_PARTITION; i++) {
            xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0ULL,
                tx_fifo_partitions[i]);
        }
    } else { /* Change the default settings */

        for (i = 0; i < XGE_HAL_FIFO_MAX_WRR; i++) {
            xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
                tx_fifo_wrr_value[i], tx_fifo_wrr[i]);
        }
    }

    /* configure only configured FIFOs */
    val64 = 0; part0 = 0;
    for (i = 0; i < XGE_HAL_MAX_FIFO_NUM; i++) {
        int reg_half = i % 2;
        int reg_num = i / 2;

        if (hldev->config.fifo.queue[i].configured) {
            int priority = hldev->config.fifo.queue[i].priority;
            val64 |=
                vBIT((hldev->config.fifo.queue[i].max - 1),
                (((reg_half) * 32) + 19), 13) |
                vBIT(priority, (((reg_half) * 32) + 5), 3);
        }

        /* NOTE: do the write operation for each second u64 half,
         * i.e. once both 32-bit halves of a partition register
         * have been accumulated */
        if (reg_half) {
            if (reg_num == 0) {
                /* skip partition '0'; it must be written
                 * once, at the end */
                part0 = val64;
            } else {
                xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
                    val64, tx_fifo_partitions[reg_num]);
                xge_debug_fifo(XGE_TRACE,
                    "fifo partition_%d at: "
                    "0x"XGE_OS_LLXFMT" is: 0x"XGE_OS_LLXFMT,
                    reg_num, (unsigned long long)(ulong_t)
                    tx_fifo_partitions[reg_num],
                    (unsigned long long)val64);
            }
            val64 = 0;
        }
    }

    part0 |= BIT(0); /* to enable the FIFO partition */
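
    /*
     * Partition register layout, as encoded by the vBIT() calls above:
     * each tx_fifo_partition register describes two FIFOs, one per
     * 32-bit half, with the FIFO length (queue max - 1) in a 13-bit
     * field at bit offset 19 and the priority in a 3-bit field at bit
     * offset 5 of that half (offsets as passed to vBIT()).  Partition 0
     * is held back and written last, in two 32-bit halves separated by
     * a write barrier, presumably so that the enable bit set above does
     * not take effect before the remaining partitions and the WRR
     * calendar are programmed.
     */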
    __hal_pio_mem_write32_lower(hldev->pdev, hldev->regh0, (u32)part0,
        tx_fifo_partitions[0]);
    xge_os_wmb();
    __hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0, (u32)(part0 >> 32),
        tx_fifo_partitions[0]);
    xge_debug_fifo(XGE_TRACE, "fifo partition_0 at: "
        "0x"XGE_OS_LLXFMT" is: 0x"XGE_OS_LLXFMT,
        (unsigned long long)(ulong_t)tx_fifo_partitions[0],
        (unsigned long long)part0);

    /*
     * Initialization of the Tx_PA_CONFIG register to ignore packet
     * integrity checking.
     */
    val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
        &bar0->tx_pa_cfg);
    val64 |= XGE_HAL_TX_PA_CFG_IGNORE_FRM_ERR |
        XGE_HAL_TX_PA_CFG_IGNORE_SNAP_OUI |
        XGE_HAL_TX_PA_CFG_IGNORE_LLC_CTRL |
        XGE_HAL_TX_PA_CFG_IGNORE_L2_ERR;
    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
        &bar0->tx_pa_cfg);

    if (hldev->config.intr_mode != XGE_HAL_INTR_MODE_MSIX)
        return;

    /*
     * Assign MSI-X vectors
     */
    for (i = 0; i < XGE_HAL_MAX_FIFO_NUM; i++) {
        xge_list_t *item;
        xge_hal_channel_t *channel = NULL;

        if (!hldev->config.fifo.queue[i].configured ||
            !hldev->config.fifo.queue[i].intr_vector)
            continue;

        /* find channel */
        xge_list_for_each(item, &hldev->free_channels) {
            xge_hal_channel_t *tmp;
            tmp = xge_container_of(item, xge_hal_channel_t,
                item);
            if (tmp->type == XGE_HAL_CHANNEL_TYPE_FIFO &&
                tmp->post_qid == i) {
                channel = tmp;
                break;
            }
        }

        if (channel) {
            (void) xge_hal_channel_msix_set(channel,
                hldev->config.fifo.queue[i].intr_vector);
        }
    }

    xge_debug_fifo(XGE_TRACE, "%s", "fifo channels initialized");
}

#ifdef XGE_HAL_ALIGN_XMIT
void
__hal_fifo_dtr_align_free_unmap(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh)
{
    xge_hal_fifo_txdl_priv_t *txdl_priv;
    xge_hal_fifo_txd_t *txdp = (xge_hal_fifo_txd_t *)dtrh;
    xge_hal_fifo_t *fifo = (xge_hal_fifo_t *)channelh;

    txdl_priv = __hal_fifo_txdl_priv(txdp);

    if (txdl_priv->align_dma_addr != 0) {
        xge_os_dma_unmap(fifo->channel.pdev,
            txdl_priv->align_dma_handle,
            txdl_priv->align_dma_addr,
            fifo->align_size,
            XGE_OS_DMA_DIR_TODEVICE);

        txdl_priv->align_dma_addr = 0;
    }

    if (txdl_priv->align_vaddr != NULL) {
        xge_os_dma_free(fifo->channel.pdev,
            txdl_priv->align_vaddr,
            fifo->align_size,
            &txdl_priv->align_dma_acch,
            &txdl_priv->align_dma_handle);

        txdl_priv->align_vaddr = NULL;
    }
}
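
/*
 * Allocate and DMA-map the alignment bounce buffer for one TxDL.  The
 * buffer is fifo->align_size bytes (alignment_size * max_aligned_frags,
 * as computed in __hal_fifo_open()), cacheline aligned, and mapped for
 * to-device streaming DMA.  If the mapping fails, the buffer is
 * released again via __hal_fifo_dtr_align_free_unmap().
 */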
xge_hal_status_e
__hal_fifo_dtr_align_alloc_map(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh)
{
    xge_hal_fifo_txdl_priv_t *txdl_priv;
    xge_hal_fifo_txd_t *txdp = (xge_hal_fifo_txd_t *)dtrh;
    xge_hal_fifo_t *fifo = (xge_hal_fifo_t *)channelh;

    xge_assert(txdp);

    txdl_priv = __hal_fifo_txdl_priv(txdp);

    /* allocate alignment DMA-buffer */
    txdl_priv->align_vaddr = (char *)xge_os_dma_malloc(fifo->channel.pdev,
        fifo->align_size,
        XGE_OS_DMA_CACHELINE_ALIGNED | XGE_OS_DMA_STREAMING,
        &txdl_priv->align_dma_handle,
        &txdl_priv->align_dma_acch);
    if (txdl_priv->align_vaddr == NULL) {
        return XGE_HAL_ERR_OUT_OF_MEMORY;
    }

    /* map it */
    txdl_priv->align_dma_addr = xge_os_dma_map(fifo->channel.pdev,
        txdl_priv->align_dma_handle, txdl_priv->align_vaddr,
        fifo->align_size,
        XGE_OS_DMA_DIR_TODEVICE, XGE_OS_DMA_STREAMING);

    if (txdl_priv->align_dma_addr == XGE_OS_INVALID_DMA_ADDR) {
        __hal_fifo_dtr_align_free_unmap(channelh, dtrh);
        return XGE_HAL_ERR_OUT_OF_MAPPING;
    }

    return XGE_HAL_OK;
}
#endif