/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2002-2005 Neterion, Inc.
 * All rights reserved.
 *
 * FileName    : xgehal-fifo-fp.c
 *
 * Description : Tx fifo object functionality (fast path)
 *
 * Created     : 10 June 2004
 */

#ifdef XGE_DEBUG_FP
#include "xgehal-fifo.h"
#endif

__HAL_STATIC_FIFO __HAL_INLINE_FIFO xge_hal_fifo_txdl_priv_t*
__hal_fifo_txdl_priv(xge_hal_dtr_h dtrh)
{
    xge_hal_fifo_txd_t *txdp = (xge_hal_fifo_txd_t*)dtrh;
    xge_hal_fifo_txdl_priv_t *txdl_priv;

    xge_assert(txdp);
    txdl_priv = (xge_hal_fifo_txdl_priv_t *)
        (ulong_t)txdp->host_control;

    xge_assert(txdl_priv);
    xge_assert(txdl_priv->dma_object);
    xge_assert(txdl_priv->dma_addr);

    xge_assert(txdl_priv->dma_object->handle == txdl_priv->dma_handle);

    return txdl_priv;
}

__HAL_STATIC_FIFO __HAL_INLINE_FIFO void
__hal_fifo_dtr_post_single(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
    u64 ctrl_1)
{
    xge_hal_fifo_t *fifo = (xge_hal_fifo_t *)channelh;
    xge_hal_fifo_hw_pair_t *hw_pair = fifo->hw_pair;
    xge_hal_fifo_txd_t *txdp = (xge_hal_fifo_txd_t *)dtrh;
    xge_hal_fifo_txdl_priv_t *txdl_priv;
    u64 ctrl;

    txdp->control_1 |= XGE_HAL_TXD_LIST_OWN_XENA;

#ifdef XGE_DEBUG_ASSERT
    /* make sure Xena overwrites the (illegal) t_code value on completion */
    XGE_HAL_SET_TXD_T_CODE(txdp->control_1, XGE_HAL_TXD_T_CODE_UNUSED_5);
#endif

    txdl_priv = __hal_fifo_txdl_priv(dtrh);

#if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_DTR_STREAMING)
    /* sync the TxDL to device */
    xge_os_dma_sync(fifo->channel.pdev,
        txdl_priv->dma_handle,
        txdl_priv->dma_addr,
        txdl_priv->dma_offset,
        txdl_priv->frags << 5 /* sizeof(xge_hal_fifo_txd_t) */,
        XGE_OS_DMA_DIR_TODEVICE);
#endif
    /* write the pointer first */
    xge_os_pio_mem_write64(fifo->channel.pdev,
        fifo->channel.regh1,
        txdl_priv->dma_addr,
        &hw_pair->txdl_pointer);

    /* spec: 0x00 = 1 TxD in the list */
    ctrl = XGE_HAL_TX_FIFO_LAST_TXD_NUM(txdl_priv->frags - 1);
    ctrl |= ctrl_1;
    ctrl |= fifo->no_snoop_bits;

    if (txdp->control_1 & XGE_HAL_TXD_LSO_COF_CTRL(XGE_HAL_TXD_TCP_LSO)) {
        ctrl |= XGE_HAL_TX_FIFO_SPECIAL_FUNC;
    }

    /*
     * according to the XENA spec:
     *
     * It is important to note that pointers and list control words are
     * always written in pairs: in the first write, the host must write a
     * pointer, and in the second write, it must write the list control
     * word. Any other access will result in an error. Also, all 16 bytes
     * of the pointer/control structure must be written, including any
     * reserved bytes.
     */
    xge_os_wmb();

    /*
     * we want to touch work_arr in order, with the ownership bit
     * already set to HW
     */
    __hal_channel_dtr_post(channelh, dtrh);

    xge_os_pio_mem_write64(fifo->channel.pdev, fifo->channel.regh1,
        ctrl, &hw_pair->list_control);

    xge_debug_fifo(XGE_TRACE, "posted txdl 0x%llx ctrl 0x%llx "
        "into 0x%llx", (unsigned long long)txdl_priv->dma_addr,
        (unsigned long long)ctrl,
        (unsigned long long)(ulong_t)&hw_pair->txdl_pointer);

#ifdef XGE_HAL_FIFO_DUMP_TXD
    xge_os_printf("%llx:%llx:%llx:%llx dma %llx",
        txdp->control_1, txdp->control_2, txdp->buffer_pointer,
        txdp->host_control, txdl_priv->dma_addr);
#endif

    fifo->channel.stats.total_posts++;
}
__HAL_STATIC_FIFO __HAL_INLINE_FIFO void
__hal_fifo_txdl_free_many(xge_hal_channel_h channelh,
    xge_hal_fifo_txd_t *txdp, int list_size, int frags)
{
    xge_hal_fifo_txdl_priv_t *current_txdl_priv;
    xge_hal_fifo_txdl_priv_t *next_txdl_priv;
    int invalid_frags = frags % list_size;

    if (invalid_frags) {
        xge_debug_fifo(XGE_ERR,
            "freeing corrupt dtrh %p, fragments %d list size %d",
            txdp, frags, list_size);
        xge_assert(invalid_frags == 0);
    }
    while (txdp) {
        xge_debug_fifo(XGE_TRACE,
            "freeing linked dtrh %p, fragments %d list size %d",
            txdp, frags, list_size);
        current_txdl_priv = __hal_fifo_txdl_priv(txdp);
#if defined(XGE_DEBUG_ASSERT) && defined(XGE_OS_MEMORY_CHECK)
        current_txdl_priv->allocated = 0;
#endif
        __hal_channel_dtr_free(channelh, txdp);
        next_txdl_priv = current_txdl_priv->next_txdl_priv;
        xge_assert(frags);
        frags -= list_size;
        if (next_txdl_priv) {
            current_txdl_priv->next_txdl_priv = NULL;
            txdp = next_txdl_priv->first_txdp;
        } else {
            xge_debug_fifo(XGE_TRACE,
                "freed linked dtrh fragments %d list size %d",
                frags, list_size);
            break;
        }
    }
    xge_assert(frags == 0);
}

__HAL_STATIC_FIFO __HAL_INLINE_FIFO void
__hal_fifo_txdl_restore_many(xge_hal_channel_h channelh,
    xge_hal_fifo_txd_t *txdp, int txdl_count)
{
    xge_hal_fifo_txdl_priv_t *current_txdl_priv;
    xge_hal_fifo_txdl_priv_t *next_txdl_priv;
    int i = txdl_count;

    xge_assert(((xge_hal_channel_t *)channelh)->reserve_length +
        txdl_count <= ((xge_hal_channel_t *)channelh)->reserve_initial);

    current_txdl_priv = __hal_fifo_txdl_priv(txdp);
    do {
        xge_assert(i);
#if defined(XGE_DEBUG_ASSERT) && defined(XGE_OS_MEMORY_CHECK)
        current_txdl_priv->allocated = 0;
#endif
        next_txdl_priv = current_txdl_priv->next_txdl_priv;
        txdp = current_txdl_priv->first_txdp;
        current_txdl_priv->next_txdl_priv = NULL;
        __hal_channel_dtr_restore(channelh, (xge_hal_dtr_h)txdp, --i);
        xge_debug_fifo(XGE_TRACE,
            "dtrh %p restored at offset %d", txdp, i);
        current_txdl_priv = next_txdl_priv;
    } while (current_txdl_priv);
    __hal_channel_dtr_restore(channelh, NULL, txdl_count);
}

/**
 * xge_hal_fifo_dtr_private - Retrieve per-descriptor private data.
 * @dtrh: Descriptor handle.
 *
 * Retrieve per-descriptor private data.
 * Note that the ULD requests per-descriptor space via
 * xge_hal_channel_open().
 *
 * Returns: private ULD data associated with the descriptor.
 * Usage: See ex_xmit{} and ex_tx_compl{}.
 */
__HAL_STATIC_FIFO __HAL_INLINE_FIFO void*
xge_hal_fifo_dtr_private(xge_hal_dtr_h dtrh)
{
    xge_hal_fifo_txd_t *txdp = (xge_hal_fifo_txd_t *)dtrh;

    return ((char *)(ulong_t)txdp->host_control) +
        sizeof(xge_hal_fifo_txdl_priv_t);
}
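/*
 * Usage sketch (not part of the HAL): retrieving the ULD's per-descriptor
 * area. The my_txd_priv_t type and the XGE_HAL_USAGE_EXAMPLES guard are
 * hypothetical; the per-descriptor space itself must have been requested
 * through xge_hal_channel_open(), as noted above.
 */
#ifdef XGE_HAL_USAGE_EXAMPLES
typedef struct my_txd_priv_t {
    void *os_packet; /* OS packet handle to complete on Tx done */
} my_txd_priv_t;

static void
example_tag_dtr(xge_hal_dtr_h dtrh, void *os_packet)
{
    /* points right past xge_hal_fifo_txdl_priv_t in host_control memory */
    my_txd_priv_t *priv = (my_txd_priv_t *)xge_hal_fifo_dtr_private(dtrh);

    priv->os_packet = os_packet;
}
#endif /* XGE_HAL_USAGE_EXAMPLES */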
/**
 * xge_hal_fifo_dtr_buffer_cnt - Get number of buffers carried by the
 * descriptor.
 * @dtrh: Descriptor handle.
 *
 * Returns: Number of buffers stored in the given descriptor. Can be used
 * _after_ the descriptor is set up for posting (see
 * xge_hal_fifo_dtr_post()) and _before_ it is deallocated (see
 * xge_hal_fifo_dtr_free()).
 */
__HAL_STATIC_FIFO __HAL_INLINE_FIFO int
xge_hal_fifo_dtr_buffer_cnt(xge_hal_dtr_h dtrh)
{
    xge_hal_fifo_txdl_priv_t *txdl_priv;

    txdl_priv = __hal_fifo_txdl_priv(dtrh);

    return txdl_priv->frags;
}

/**
 * xge_hal_fifo_dtr_reserve_many - Reserve fifo descriptors which span
 * more than a single txdl.
 * @channelh: Channel handle.
 * @dtrh: Reserved descriptor. On success HAL fills this "out" parameter
 * with a valid handle.
 * @frags: Minimum number of fragments to be reserved.
 *
 * Reserve TxDLs (that is, fifo descriptors)
 * for the subsequent filling-in by the upper-layer driver (ULD)
 * and posting on the corresponding channel (@channelh)
 * via xge_hal_fifo_dtr_post().
 *
 * Returns: XGE_HAL_OK - success;
 * XGE_HAL_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available.
 *
 * See also: xge_hal_fifo_dtr_reserve_sp(), xge_hal_fifo_dtr_free(),
 * xge_hal_ring_dtr_reserve(), xge_hal_status_e{}.
 * Usage: See ex_xmit{}.
 */
__HAL_STATIC_FIFO __HAL_INLINE_FIFO xge_hal_status_e
xge_hal_fifo_dtr_reserve_many(xge_hal_channel_h channelh,
    xge_hal_dtr_h *dtrh, const int frags)
{
    xge_hal_status_e status = XGE_HAL_OK;
    int alloc_frags = 0, dang_frags = 0;
    xge_hal_fifo_txd_t *curr_txdp = NULL;
    xge_hal_fifo_txd_t *next_txdp;
    xge_hal_fifo_txdl_priv_t *next_txdl_priv, *curr_txdl_priv = NULL;
    xge_hal_fifo_t *fifo = (xge_hal_fifo_t *)channelh;
    int max_frags = fifo->config->max_frags;
    xge_hal_dtr_h dang_dtrh = NULL;
#if defined(XGE_HAL_TX_MULTI_RESERVE_IRQ)
    unsigned long flags = 0;
#endif

    xge_debug_fifo(XGE_TRACE, "dtr_reserve_many called for frags %d",
        frags);
    xge_assert(frags < (fifo->txdl_per_memblock * max_frags));
#if defined(XGE_HAL_TX_MULTI_RESERVE)
    xge_os_spin_lock(&fifo->channel.reserve_lock);
#elif defined(XGE_HAL_TX_MULTI_RESERVE_IRQ)
    xge_os_spin_lock_irq(&fifo->channel.reserve_lock, flags);
#endif
    while (alloc_frags < frags) {
        status = __hal_channel_dtr_alloc(channelh,
            (xge_hal_dtr_h *)&next_txdp);
        if (status != XGE_HAL_OK) {
            xge_debug_fifo(XGE_ERR,
                "failed to allocate linked fragments rc %d",
                status);
            xge_assert(status == XGE_HAL_INF_OUT_OF_DESCRIPTORS);
            if (*dtrh) {
                xge_assert(alloc_frags / max_frags);
                __hal_fifo_txdl_restore_many(channelh,
                    *dtrh, alloc_frags / max_frags);
            }
            if (dang_dtrh) {
                xge_assert(dang_frags / max_frags);
                __hal_fifo_txdl_restore_many(channelh,
                    dang_dtrh, dang_frags / max_frags);
            }
            break;
        }
        xge_debug_fifo(XGE_TRACE, "allocated linked dtrh %p"
            " for frags %d", next_txdp, frags);
        next_txdl_priv = __hal_fifo_txdl_priv(next_txdp);
        xge_assert(next_txdl_priv);
        xge_assert(next_txdl_priv->first_txdp == next_txdp);
        next_txdl_priv->dang_txdl = NULL;
        next_txdl_priv->dang_frags = 0;
        next_txdl_priv->next_txdl_priv = NULL;
#if defined(XGE_OS_MEMORY_CHECK)
        next_txdl_priv->allocated = 1;
#endif
        if (!curr_txdp || !curr_txdl_priv) {
            curr_txdp = next_txdp;
            curr_txdl_priv = next_txdl_priv;
            *dtrh = (xge_hal_dtr_h)next_txdp;
            alloc_frags = max_frags;
            continue;
        }
        if (curr_txdl_priv->memblock ==
            next_txdl_priv->memblock) {
            xge_debug_fifo(XGE_TRACE,
                "linking dtrh %p, with %p",
                *dtrh, next_txdp);
            xge_assert(next_txdp ==
                curr_txdp + max_frags);
            alloc_frags += max_frags;
            curr_txdl_priv->next_txdl_priv = next_txdl_priv;
        } else {
            xge_assert(*dtrh);
            xge_assert(dang_dtrh == NULL);
            dang_dtrh = *dtrh;
            dang_frags = alloc_frags;
            xge_debug_fifo(XGE_TRACE,
                "dangling dtrh %p, linked with dtrh %p",
                *dtrh, next_txdp);
            next_txdl_priv->dang_txdl = *dtrh;
            next_txdl_priv->dang_frags = alloc_frags;
            alloc_frags = max_frags;
            *dtrh = next_txdp;
        }
        curr_txdp = next_txdp;
        curr_txdl_priv = next_txdl_priv;
    }

#if defined(XGE_HAL_TX_MULTI_RESERVE)
    xge_os_spin_unlock(&fifo->channel.reserve_lock);
#elif defined(XGE_HAL_TX_MULTI_RESERVE_IRQ)
    xge_os_spin_unlock_irq(&fifo->channel.reserve_lock, flags);
#endif

    if (status == XGE_HAL_OK) {
        xge_hal_fifo_txdl_priv_t *txdl_priv;
        xge_hal_fifo_txd_t *txdp = (xge_hal_fifo_txd_t *)*dtrh;
        xge_hal_stats_channel_info_t *statsp = &fifo->channel.stats;

        txdl_priv = __hal_fifo_txdl_priv(txdp);
        /* reset the TxDL's private */
        txdl_priv->align_dma_offset = 0;
        txdl_priv->align_vaddr_start = txdl_priv->align_vaddr;
        txdl_priv->align_used_frags = 0;
        txdl_priv->frags = 0;
        txdl_priv->alloc_frags = alloc_frags;
        /* reset TxD0 */
        txdp->control_1 = txdp->control_2 = 0;

#if defined(XGE_OS_MEMORY_CHECK)
        txdl_priv->allocated = 1;
#endif
        /* update statistics */
        statsp->total_posts_dtrs_many++;
        statsp->total_posts_frags_many += txdl_priv->alloc_frags;
        if (txdl_priv->dang_frags) {
            statsp->total_posts_dang_dtrs++;
            statsp->total_posts_dang_frags += txdl_priv->dang_frags;
        }
    }

    return status;
}
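/*
 * Usage sketch (not part of the HAL): reserving a multi-TxDL descriptor for
 * a frame whose scatter-gather list exceeds fifo.max_frags. The function
 * name and the XGE_HAL_USAGE_EXAMPLES guard are hypothetical.
 */
#ifdef XGE_HAL_USAGE_EXAMPLES
static xge_hal_status_e
example_reserve_large(xge_hal_channel_h channelh, int sg_entries,
    xge_hal_dtr_h *dtrh)
{
    xge_hal_status_e status;

    /* reserve_many inspects *dtrh on its error path, so start clean */
    *dtrh = NULL;
    status = xge_hal_fifo_dtr_reserve_many(channelh, dtrh, sg_entries);
    if (status != XGE_HAL_OK) {
        /* XGE_HAL_INF_OUT_OF_DESCRIPTORS: queue the packet and
         * retry after the next transmit completion */
        return status;
    }
    return XGE_HAL_OK;
}
#endif /* XGE_HAL_USAGE_EXAMPLES */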
/**
 * xge_hal_fifo_dtr_reserve - Reserve fifo descriptor.
 * @channelh: Channel handle.
 * @dtrh: Reserved descriptor. On success HAL fills this "out" parameter
 * with a valid handle.
 *
 * Reserve a single TxDL (that is, fifo descriptor)
 * for the subsequent filling-in by the upper-layer driver (ULD)
 * and posting on the corresponding channel (@channelh)
 * via xge_hal_fifo_dtr_post().
 *
 * Note: it is the responsibility of the ULD to reserve multiple descriptors
 * for a lengthy (e.g., LSO) transmit operation. A single fifo descriptor
 * carries up to a configured number (fifo.max_frags) of contiguous buffers.
 *
 * Returns: XGE_HAL_OK - success;
 * XGE_HAL_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available.
 *
 * See also: xge_hal_fifo_dtr_reserve_sp(), xge_hal_fifo_dtr_free(),
 * xge_hal_ring_dtr_reserve(), xge_hal_status_e{}.
 * Usage: See ex_xmit{}.
 */
__HAL_STATIC_FIFO __HAL_INLINE_FIFO xge_hal_status_e
xge_hal_fifo_dtr_reserve(xge_hal_channel_h channelh, xge_hal_dtr_h *dtrh)
{
    xge_hal_status_e status;
#if defined(XGE_HAL_TX_MULTI_RESERVE_IRQ)
    unsigned long flags = 0;
#endif

#if defined(XGE_HAL_TX_MULTI_RESERVE)
    xge_os_spin_lock(&((xge_hal_channel_t*)channelh)->reserve_lock);
#elif defined(XGE_HAL_TX_MULTI_RESERVE_IRQ)
    xge_os_spin_lock_irq(&((xge_hal_channel_t*)channelh)->reserve_lock,
        flags);
#endif

    status = __hal_channel_dtr_alloc(channelh, dtrh);

#if defined(XGE_HAL_TX_MULTI_RESERVE)
    xge_os_spin_unlock(&((xge_hal_channel_t*)channelh)->reserve_lock);
#elif defined(XGE_HAL_TX_MULTI_RESERVE_IRQ)
    xge_os_spin_unlock_irq(&((xge_hal_channel_t*)channelh)->reserve_lock,
        flags);
#endif

    if (status == XGE_HAL_OK) {
        xge_hal_fifo_txd_t *txdp = (xge_hal_fifo_txd_t *)*dtrh;
        xge_hal_fifo_txdl_priv_t *txdl_priv;

        txdl_priv = __hal_fifo_txdl_priv(txdp);

        /* reset the TxDL's private */
        txdl_priv->align_dma_offset = 0;
        txdl_priv->align_vaddr_start = txdl_priv->align_vaddr;
        txdl_priv->align_used_frags = 0;
        txdl_priv->frags = 0;
        txdl_priv->alloc_frags =
            ((xge_hal_fifo_t *)channelh)->config->max_frags;
        txdl_priv->dang_txdl = NULL;
        txdl_priv->dang_frags = 0;
        txdl_priv->next_txdl_priv = NULL;

        /* reset TxD0 */
        txdp->control_1 = txdp->control_2 = 0;

#if defined(XGE_OS_MEMORY_CHECK)
        txdl_priv->allocated = 1;
#endif
    }

    return status;
}
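/*
 * Usage sketch (not part of the HAL): releasing a descriptor that was
 * reserved but never posted, e.g., when the ULD fails to DMA-map the packet
 * after the reservation. Free-ing is symmetrical to reserving, as the
 * xge_hal_fifo_dtr_free() documentation below notes. The function name and
 * the XGE_HAL_USAGE_EXAMPLES guard are hypothetical.
 */
#ifdef XGE_HAL_USAGE_EXAMPLES
static void
example_abort_xmit(xge_hal_channel_h channelh)
{
    xge_hal_dtr_h dtrh;

    if (xge_hal_fifo_dtr_reserve(channelh, &dtrh) != XGE_HAL_OK)
        return;
    /* ... DMA mapping failed here: return the TxDL unused ... */
    xge_hal_fifo_dtr_free(channelh, dtrh);
}
#endif /* XGE_HAL_USAGE_EXAMPLES */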
/**
 * xge_hal_fifo_dtr_reserve_sp - Reserve fifo descriptor and store it in
 * the ULD-provided "scratch" memory.
 * @channelh: Channel handle.
 * @dtr_sp_size: Size of the %dtr_sp "scratch pad" that HAL can use for TxDL.
 * @dtr_sp: "Scratch pad" supplied by the upper-layer driver (ULD).
 *
 * Reserve a TxDL and fill in the ULD-supplied "scratch pad". The difference
 * between this API and xge_hal_fifo_dtr_reserve() is (possibly) -
 * performance.
 *
 * If the upper layer uses ULP-defined commands, and if those commands have
 * enough space for HAL/Xframe descriptors - then it is better (read: faster)
 * to fit all the per-command information into one command, which is
 * typically one contiguous block.
 *
 * Note: Unlike xge_hal_fifo_dtr_reserve(), this function can be used to
 * allocate a single descriptor for transmit operation.
 *
 * See also: xge_hal_fifo_dtr_reserve(), xge_hal_fifo_dtr_free(),
 * xge_hal_ring_dtr_reserve(), xge_hal_status_e{}.
 */
__HAL_STATIC_FIFO __HAL_INLINE_FIFO xge_hal_status_e
xge_hal_fifo_dtr_reserve_sp(xge_hal_channel_h channelh, int dtr_sp_size,
    xge_hal_dtr_h dtr_sp)
{
    /* FIXME: implement */
    return XGE_HAL_OK;
}

/**
 * xge_hal_fifo_dtr_post - Post descriptor on the fifo channel.
 * @channelh: Channel handle.
 * @dtrh: Descriptor obtained via xge_hal_fifo_dtr_reserve() or
 * xge_hal_fifo_dtr_reserve_sp().
 *
 * Post descriptor on the 'fifo' type channel for transmission.
 * Prior to posting, the descriptor should be filled in accordance with the
 * Host/Xframe interface specification for a given service (LL, etc.).
 *
 * See also: xge_hal_fifo_dtr_post_many(), xge_hal_ring_dtr_post().
 * Usage: See ex_xmit{}.
 */
__HAL_STATIC_FIFO __HAL_INLINE_FIFO void
xge_hal_fifo_dtr_post(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh)
{
    xge_hal_fifo_t *fifo = (xge_hal_fifo_t *)channelh;
    xge_hal_fifo_txdl_priv_t *txdl_priv;
    xge_hal_fifo_txd_t *txdp_last;
    xge_hal_fifo_txd_t *txdp_first;
#if defined(XGE_HAL_TX_MULTI_POST_IRQ)
    unsigned long flags = 0;
#endif

    txdl_priv = __hal_fifo_txdl_priv(dtrh);

    txdp_first = (xge_hal_fifo_txd_t *)dtrh;
    txdp_first->control_1 |= XGE_HAL_TXD_GATHER_CODE_FIRST;
    txdp_first->control_2 |= fifo->interrupt_type;

    txdp_last = (xge_hal_fifo_txd_t *)dtrh + (txdl_priv->frags - 1);
    txdp_last->control_1 |= XGE_HAL_TXD_GATHER_CODE_LAST;

#if defined(XGE_HAL_TX_MULTI_POST)
    xge_os_spin_lock(fifo->post_lock_ptr);
#elif defined(XGE_HAL_TX_MULTI_POST_IRQ)
    xge_os_spin_lock_irq(fifo->post_lock_ptr, flags);
#endif

    __hal_fifo_dtr_post_single(channelh, dtrh,
        (u64)(XGE_HAL_TX_FIFO_FIRST_LIST | XGE_HAL_TX_FIFO_LAST_LIST));

#if defined(XGE_HAL_TX_MULTI_POST)
    xge_os_spin_unlock(fifo->post_lock_ptr);
#elif defined(XGE_HAL_TX_MULTI_POST_IRQ)
    xge_os_spin_unlock_irq(fifo->post_lock_ptr, flags);
#endif
}
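/*
 * Usage sketch (not part of the HAL): a minimal ex_xmit{}-style transmit
 * path for a frame that fits into a single TxDL. The my_frag_t type, the
 * fragment walk, and the XGE_HAL_USAGE_EXAMPLES guard are hypothetical;
 * DMA mapping of the fragments is assumed to be done by the ULD, and
 * nfrags is assumed to be <= fifo.max_frags (otherwise see
 * xge_hal_fifo_dtr_reserve_many()).
 */
#ifdef XGE_HAL_USAGE_EXAMPLES
typedef struct my_frag_t {
    dma_addr_t dma_addr; /* pre-mapped DMA address of the fragment */
    int size;            /* fragment size in bytes */
} my_frag_t;

static xge_hal_status_e
example_xmit(xge_hal_channel_h channelh, my_frag_t *frags, int nfrags)
{
    xge_hal_dtr_h dtrh;
    xge_hal_status_e status;
    int i;

    status = xge_hal_fifo_dtr_reserve(channelh, &dtrh);
    if (status != XGE_HAL_OK)
        return status; /* out of descriptors: stop the Tx queue */

    for (i = 0; i < nfrags; i++)
        xge_hal_fifo_dtr_buffer_set(channelh, dtrh, i,
            frags[i].dma_addr, frags[i].size);

    /* optional offloads: see xge_hal_fifo_dtr_mss_set() and
     * xge_hal_fifo_dtr_cksum_set_bits() below */
    xge_hal_fifo_dtr_post(channelh, dtrh);
    return XGE_HAL_OK;
}
#endif /* XGE_HAL_USAGE_EXAMPLES */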
/**
 * xge_hal_fifo_dtr_post_many - Post multiple descriptors on fifo
 * channel.
 * @channelh: Channel to post descriptor.
 * @num: Number of descriptors (i.e., fifo TxDLs) in the @dtrs[] array.
 * @dtrs: Descriptors obtained via xge_hal_fifo_dtr_reserve().
 *
 * Post multiple descriptors on the fifo channel. The operation is atomic:
 * all descriptors are posted on the channel "back-to-back" without
 * letting other posts (possibly driven by multiple transmitting threads)
 * interleave.
 *
 * See also: xge_hal_fifo_dtr_post(), xge_hal_ring_dtr_post().
 */
__HAL_STATIC_FIFO __HAL_INLINE_FIFO void
xge_hal_fifo_dtr_post_many(xge_hal_channel_h channelh, int num,
    xge_hal_dtr_h dtrs[])
{
    int i;
    xge_hal_fifo_t *fifo = (xge_hal_fifo_t *)channelh;
    xge_hal_fifo_txd_t *txdp_last;
    xge_hal_fifo_txd_t *txdp_first;
    xge_hal_fifo_txdl_priv_t *txdl_priv_last;
#if defined(XGE_HAL_TX_MULTI_POST_IRQ)
    unsigned long flags = 0;
#endif

    xge_assert(num > 1);

    txdp_first = (xge_hal_fifo_txd_t *)dtrs[0];
    txdp_first->control_1 |= XGE_HAL_TXD_GATHER_CODE_FIRST;
    txdp_first->control_2 |= fifo->interrupt_type;

    txdl_priv_last = __hal_fifo_txdl_priv(dtrs[num-1]);
    txdp_last = (xge_hal_fifo_txd_t *)dtrs[num-1] +
        (txdl_priv_last->frags - 1);
    txdp_last->control_1 |= XGE_HAL_TXD_GATHER_CODE_LAST;

#if defined(XGE_HAL_TX_MULTI_POST)
    xge_os_spin_lock(&((xge_hal_channel_t*)channelh)->post_lock);
#elif defined(XGE_HAL_TX_MULTI_POST_IRQ)
    xge_os_spin_lock_irq(&((xge_hal_channel_t*)channelh)->post_lock,
        flags);
#endif

    for (i = 0; i < num; i++) {
        xge_hal_fifo_txdl_priv_t *txdl_priv;
        u64 val64;
        xge_hal_dtr_h dtrh = dtrs[i];

        txdl_priv = __hal_fifo_txdl_priv(dtrh);
        txdl_priv = txdl_priv; /* Cheat lint */

        val64 = 0;
        if (i == 0) {
            val64 |= XGE_HAL_TX_FIFO_FIRST_LIST;
        } else if (i == num - 1) {
            val64 |= XGE_HAL_TX_FIFO_LAST_LIST;
        }

        val64 |= XGE_HAL_TX_FIFO_SPECIAL_FUNC;
        __hal_fifo_dtr_post_single(channelh, dtrh, val64);
    }

#if defined(XGE_HAL_TX_MULTI_POST)
    xge_os_spin_unlock(&((xge_hal_channel_t*)channelh)->post_lock);
#elif defined(XGE_HAL_TX_MULTI_POST_IRQ)
    xge_os_spin_unlock_irq(&((xge_hal_channel_t*)channelh)->post_lock,
        flags);
#endif

    fifo->channel.stats.total_posts_many++;
}
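/*
 * Usage sketch (not part of the HAL): posting two independently reserved
 * and filled TxDLs back-to-back. The function name and the
 * XGE_HAL_USAGE_EXAMPLES guard are hypothetical; each descriptor is
 * assumed to be fully prepared (buffers, offload bits) before the call.
 */
#ifdef XGE_HAL_USAGE_EXAMPLES
static void
example_post_pair(xge_hal_channel_h channelh, xge_hal_dtr_h dtr0,
    xge_hal_dtr_h dtr1)
{
    xge_hal_dtr_h dtrs[2];

    dtrs[0] = dtr0;
    dtrs[1] = dtr1;
    /* both lists reach the hardware without interleaving posts from
     * other threads */
    xge_hal_fifo_dtr_post_many(channelh, 2, dtrs);
}
#endif /* XGE_HAL_USAGE_EXAMPLES */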
/**
 * xge_hal_fifo_dtr_next_completed - Retrieve next completed descriptor.
 * @channelh: Channel handle.
 * @dtrh: Descriptor handle. Returned by HAL.
 * @t_code: Transfer code, as per Xframe User Guide,
 * Transmit Descriptor Format.
 * Returned by HAL.
 *
 * Retrieve the _next_ completed descriptor.
 * HAL uses the channel callback (*xge_hal_channel_callback_f) to notify
 * the upper-layer driver (ULD) of new completed descriptors. After that
 * the ULD can use xge_hal_fifo_dtr_next_completed to retrieve the rest
 * of the completions (the very first completion is passed by HAL via
 * xge_hal_channel_callback_f).
 *
 * Implementation-wise, the upper-layer driver is free to call
 * xge_hal_fifo_dtr_next_completed either immediately from inside the
 * channel callback, or in a deferred fashion and separate (from HAL)
 * context.
 *
 * Non-zero @t_code means failure to process the descriptor.
 * The failure could happen, for instance, when the link is
 * down, in which case Xframe completes the descriptor because it
 * is not able to send the data out.
 *
 * For details please refer to the Xframe User Guide.
 *
 * Returns: XGE_HAL_OK - success.
 * XGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
 * are currently available for processing.
 *
 * See also: xge_hal_channel_callback_f{},
 * xge_hal_ring_dtr_next_completed().
 * Usage: See ex_tx_compl{}.
 */
__HAL_STATIC_FIFO __HAL_INLINE_FIFO xge_hal_status_e
xge_hal_fifo_dtr_next_completed(xge_hal_channel_h channelh,
    xge_hal_dtr_h *dtrh, u8 *t_code)
{
    xge_hal_fifo_txd_t *txdp;
#if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_DTR_STREAMING)
    xge_hal_fifo_t *fifo = (xge_hal_fifo_t *)channelh;
    xge_hal_fifo_txdl_priv_t *txdl_priv;
#endif

    __hal_channel_dtr_try_complete(channelh, dtrh);
    txdp = (xge_hal_fifo_txd_t *)*dtrh;
    if (txdp == NULL) {
        return XGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS;
    }

#if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_DTR_STREAMING)
    txdl_priv = __hal_fifo_txdl_priv(txdp);

    /* sync TxDL to read the ownership
     *
     * Note: 16 bytes means Control_1 & Control_2 */
    xge_os_dma_sync(fifo->channel.pdev,
        txdl_priv->dma_handle,
        txdl_priv->dma_addr,
        txdl_priv->dma_offset,
        16,
        XGE_OS_DMA_DIR_FROMDEVICE);
#endif

    /* check whether host owns it */
    if (!(txdp->control_1 & XGE_HAL_TXD_LIST_OWN_XENA)) {

        xge_assert(txdp->host_control != 0);

        __hal_channel_dtr_complete(channelh);

        *t_code = (u8)XGE_HAL_GET_TXD_T_CODE(txdp->control_1);

        /* see XGE_HAL_SET_TXD_T_CODE() above.. */
        xge_assert(*t_code != XGE_HAL_TXD_T_CODE_UNUSED_5);

        return XGE_HAL_OK;
    }

    /* no more completions */
    *dtrh = 0;
    return XGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS;
}
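/*
 * Usage sketch (not part of the HAL): an ex_tx_compl{}-style completion
 * handler, e.g., the body of a channel callback. The my_complete_packet()
 * hook, the function name, and the XGE_HAL_USAGE_EXAMPLES guard are
 * hypothetical.
 */
#ifdef XGE_HAL_USAGE_EXAMPLES
extern void my_complete_packet(void *ulp_priv); /* hypothetical ULD hook */

static void
example_tx_compl(xge_hal_channel_h channelh, xge_hal_dtr_h first_dtrh,
    u8 first_t_code)
{
    xge_hal_dtr_h dtrh = first_dtrh;
    u8 t_code = first_t_code;

    /* the very first completion arrives via the callback arguments */
    do {
        if (t_code != 0) {
            /* non-zero t_code: descriptor completed with error,
             * e.g., link down; see the Xframe User Guide */
        }
        my_complete_packet(xge_hal_fifo_dtr_private(dtrh));
        xge_hal_fifo_dtr_free(channelh, dtrh);
    } while (xge_hal_fifo_dtr_next_completed(channelh, &dtrh,
        &t_code) == XGE_HAL_OK);
}
#endif /* XGE_HAL_USAGE_EXAMPLES */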
/**
 * xge_hal_fifo_dtr_free - Free descriptor.
 * @channelh: Channel handle.
 * @dtr: Descriptor handle.
 *
 * Free the reserved descriptor. This operation is "symmetrical" to
 * xge_hal_fifo_dtr_reserve() or xge_hal_fifo_dtr_reserve_sp().
 * The "free-ing" completes the descriptor's lifecycle.
 *
 * After free-ing (see xge_hal_fifo_dtr_free()) the descriptor again can
 * be:
 *
 * - reserved (xge_hal_fifo_dtr_reserve);
 *
 * - posted (xge_hal_fifo_dtr_post);
 *
 * - completed (xge_hal_fifo_dtr_next_completed);
 *
 * - and recycled again (xge_hal_fifo_dtr_free).
 *
 * For alternative state transitions and more details please refer to
 * the design doc.
 *
 * See also: xge_hal_ring_dtr_free(), xge_hal_fifo_dtr_reserve().
 * Usage: See ex_tx_compl{}.
 */
__HAL_STATIC_FIFO __HAL_INLINE_FIFO void
xge_hal_fifo_dtr_free(xge_hal_channel_h channelh, xge_hal_dtr_h dtr)
{
#if defined(XGE_HAL_TX_MULTI_FREE_IRQ)
    unsigned long flags = 0;
#endif
    xge_hal_fifo_txdl_priv_t *txdl_priv = __hal_fifo_txdl_priv(
        (xge_hal_fifo_txd_t *)dtr);
    int max_frags = ((xge_hal_fifo_t *)channelh)->config->max_frags;

#if defined(XGE_HAL_TX_MULTI_FREE)
    xge_os_spin_lock(&((xge_hal_channel_t*)channelh)->free_lock);
#elif defined(XGE_HAL_TX_MULTI_FREE_IRQ)
    xge_os_spin_lock_irq(&((xge_hal_channel_t*)channelh)->free_lock,
        flags);
#endif

    if (txdl_priv->alloc_frags > max_frags) {
        xge_hal_fifo_txd_t *dang_txdp = (xge_hal_fifo_txd_t *)
            txdl_priv->dang_txdl;
        int dang_frags = txdl_priv->dang_frags;
        int alloc_frags = txdl_priv->alloc_frags;

        txdl_priv->dang_txdl = NULL;
        txdl_priv->dang_frags = 0;
        txdl_priv->alloc_frags = 0;
        /* dtrh must have a linked list of dtrh */
        xge_assert(txdl_priv->next_txdl_priv);

        /* free any dangling dtrh first */
        if (dang_txdp) {
            xge_debug_fifo(XGE_TRACE,
                "freeing dangled dtrh %p for %d fragments",
                dang_txdp, dang_frags);
            __hal_fifo_txdl_free_many(channelh, dang_txdp,
                max_frags, dang_frags);
        }

        /* now free the reserved dtrh list */
        xge_debug_fifo(XGE_TRACE,
            "freeing dtrh %p list of %d fragments", dtr,
            alloc_frags);
        __hal_fifo_txdl_free_many(channelh,
            (xge_hal_fifo_txd_t *)dtr, max_frags,
            alloc_frags);
    } else
        __hal_channel_dtr_free(channelh, dtr);

#if defined(XGE_DEBUG_ASSERT) && defined(XGE_OS_MEMORY_CHECK)
    __hal_fifo_txdl_priv(dtr)->allocated = 0;
#endif

#if defined(XGE_HAL_TX_MULTI_FREE)
    xge_os_spin_unlock(&((xge_hal_channel_t*)channelh)->free_lock);
#elif defined(XGE_HAL_TX_MULTI_FREE_IRQ)
    xge_os_spin_unlock_irq(&((xge_hal_channel_t*)channelh)->free_lock,
        flags);
#endif
}
/**
 * xge_hal_fifo_dtr_buffer_set_aligned - Align transmit buffer and fill
 * in fifo descriptor.
 * @channelh: Channel handle.
 * @dtrh: Descriptor handle.
 * @frag_idx: Index of the data buffer in the caller's scatter-gather list
 * (of buffers).
 * @vaddr: Virtual address of the data buffer.
 * @dma_pointer: DMA address of the data buffer referenced by @frag_idx.
 * @size: Size of the data buffer (in bytes).
 * @misaligned_size: Size (in bytes) of the misaligned portion of the
 * data buffer. Calculated by the caller, based on the platform/OS/other
 * specific criteria, which is outside of HAL's domain. See notes below.
 *
 * This API is part of the transmit descriptor preparation for posting
 * (via xge_hal_fifo_dtr_post()). The related "preparation" APIs include
 * xge_hal_fifo_dtr_mss_set() and xge_hal_fifo_dtr_cksum_set_bits().
 * All three APIs fill in the fields of the fifo descriptor,
 * in accordance with the Xframe specification.
 * On PCI-X based systems, aligning transmit data typically provides better
 * transmit performance. The typical alignment granularity is the L2
 * cacheline size. However, HAL does not make assumptions in terms of the
 * alignment granularity; it is specified via the additional
 * @misaligned_size parameter described above.
 * Prior to calling xge_hal_fifo_dtr_buffer_set_aligned(),
 * the ULD is supposed to check the alignment of a given fragment/buffer.
 * For this, HAL provides a separate xge_hal_check_alignment() API
 * sufficient to cover most (but not all) possible alignment criteria.
 * If the buffer appears to be aligned, the ULD calls
 * xge_hal_fifo_dtr_buffer_set().
 * Otherwise, the ULD calls xge_hal_fifo_dtr_buffer_set_aligned().
 *
 * Note: This API is a "superset" of xge_hal_fifo_dtr_buffer_set(). In
 * addition to filling in the specified descriptor it aligns transmit data
 * on the specified boundary.
 * Note: The decision on whether to align a given contiguous transmit
 * buffer is outside of HAL's domain. To this end the ULD can use any
 * programmable criteria, which can help to 1) boost transmit performance,
 * and/or 2) provide a workaround for PCI bridge bugs, if any.
 *
 * See also: xge_hal_fifo_dtr_buffer_set(),
 * xge_hal_check_alignment().
 *
 * See also: xge_hal_fifo_dtr_reserve(), xge_hal_fifo_dtr_post(),
 * xge_hal_fifo_dtr_mss_set(), xge_hal_fifo_dtr_cksum_set_bits()
 */
__HAL_STATIC_FIFO __HAL_INLINE_FIFO xge_hal_status_e
xge_hal_fifo_dtr_buffer_set_aligned(xge_hal_channel_h channelh,
    xge_hal_dtr_h dtrh, int frag_idx, void *vaddr,
    dma_addr_t dma_pointer, int size, int misaligned_size)
{
    xge_hal_fifo_t *fifo = (xge_hal_fifo_t *)channelh;
    xge_hal_fifo_txdl_priv_t *txdl_priv;
    xge_hal_fifo_txd_t *txdp;
    int remaining_size;
    ptrdiff_t prev_boff;

    txdl_priv = __hal_fifo_txdl_priv(dtrh);
    txdp = (xge_hal_fifo_txd_t *)dtrh + txdl_priv->frags;

    if (frag_idx != 0) {
        txdp->control_1 = txdp->control_2 = 0;
    }

    /* On some systems buffer size could be zero.
     * It is the responsibility of ULD and *not HAL* to
     * detect it and skip it. */
    xge_assert(size > 0);
    xge_assert(frag_idx < txdl_priv->alloc_frags);
    xge_assert(misaligned_size != 0 &&
        misaligned_size <= fifo->config->alignment_size);

    remaining_size = size - misaligned_size;
    xge_assert(remaining_size >= 0);

    xge_os_memcpy((char*)txdl_priv->align_vaddr_start,
        vaddr, misaligned_size);

    if (txdl_priv->align_used_frags >= fifo->config->max_aligned_frags) {
        return XGE_HAL_ERR_OUT_ALIGNED_FRAGS;
    }

    /* setup new buffer */
    prev_boff = txdl_priv->align_vaddr_start - txdl_priv->align_vaddr;
    txdp->buffer_pointer = (u64)txdl_priv->align_dma_addr + prev_boff;
    txdp->control_1 |= XGE_HAL_TXD_BUFFER0_SIZE(misaligned_size);
    fifo->channel.stats.total_buffers++;
    txdl_priv->frags++;
    txdl_priv->align_used_frags++;
    txdl_priv->align_vaddr_start += fifo->config->alignment_size;
    txdl_priv->align_dma_offset = 0;

#if defined(XGE_OS_DMA_REQUIRES_SYNC)
    /* sync new buffer */
    xge_os_dma_sync(fifo->channel.pdev,
        txdl_priv->align_dma_handle,
        txdp->buffer_pointer,
        0,
        misaligned_size,
        XGE_OS_DMA_DIR_TODEVICE);
#endif

    if (remaining_size) {
        xge_assert(frag_idx < txdl_priv->alloc_frags);
        txdp++;
        txdp->buffer_pointer = (u64)dma_pointer +
            misaligned_size;
        txdp->control_1 =
            XGE_HAL_TXD_BUFFER0_SIZE(remaining_size);
        txdp->control_2 = 0;
        fifo->channel.stats.total_buffers++;
        txdl_priv->frags++;
    }

    return XGE_HAL_OK;
}
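/*
 * Usage sketch (not part of the HAL): choosing between the aligned and the
 * plain buffer_set variants per fragment. The alignment test below is a
 * simplified stand-in for xge_hal_check_alignment() (whose exact signature
 * is not shown in this file), alignment_size is assumed to be a power of
 * two, and the XGE_HAL_USAGE_EXAMPLES guard is hypothetical.
 */
#ifdef XGE_HAL_USAGE_EXAMPLES
static xge_hal_status_e
example_set_fragment(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
    int frag_idx, void *vaddr, dma_addr_t dma_pointer, int size,
    int alignment_size)
{
    /* bytes by which the fragment start misses the alignment boundary */
    int misaligned = (int)((ulong_t)vaddr & (alignment_size - 1));

    if (misaligned == 0) {
        xge_hal_fifo_dtr_buffer_set(channelh, dtrh, frag_idx,
            dma_pointer, size);
        return XGE_HAL_OK;
    }
    /* copy the misaligned head into the TxDL's bounce area, and DMA
     * the rest directly from the original buffer */
    return xge_hal_fifo_dtr_buffer_set_aligned(channelh, dtrh, frag_idx,
        vaddr, dma_pointer, size, alignment_size - misaligned);
}
#endif /* XGE_HAL_USAGE_EXAMPLES */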
/**
 * xge_hal_fifo_dtr_buffer_append - Append the contents of virtually
 * contiguous data buffers to a single physically contiguous buffer.
 * @channelh: Channel handle.
 * @dtrh: Descriptor handle.
 * @vaddr: Virtual address of the data buffer.
 * @size: Size of the data buffer (in bytes).
 *
 * This API is part of the transmit descriptor preparation for posting
 * (via xge_hal_fifo_dtr_post()).
 * The main difference between this API and
 * xge_hal_fifo_dtr_buffer_set_aligned() is that this API appends the
 * contents of virtually contiguous data buffers received from the
 * upper layer into a single physically contiguous data buffer, and the
 * device will do a DMA from this buffer.
 *
 * See Also: xge_hal_fifo_dtr_buffer_finalize(), xge_hal_fifo_dtr_buffer_set(),
 * xge_hal_fifo_dtr_buffer_set_aligned().
 */
__HAL_STATIC_FIFO __HAL_INLINE_FIFO xge_hal_status_e
xge_hal_fifo_dtr_buffer_append(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
    void *vaddr, int size)
{
    xge_hal_fifo_t *fifo = (xge_hal_fifo_t *)channelh;
    xge_hal_fifo_txdl_priv_t *txdl_priv;

    xge_assert(size > 0);

    txdl_priv = __hal_fifo_txdl_priv(dtrh);

    if (txdl_priv->align_dma_offset + (unsigned int)size >
        (unsigned int)fifo->config->alignment_size)
        return XGE_HAL_ERR_OUT_ALIGNED_FRAGS; /* FIXME */

    if (txdl_priv->align_used_frags >= fifo->config->max_aligned_frags) {
        return XGE_HAL_ERR_OUT_ALIGNED_FRAGS;
    }

    xge_os_memcpy((char*)txdl_priv->align_vaddr_start +
        txdl_priv->align_dma_offset, vaddr, size);

    txdl_priv->align_dma_offset += size;
    return XGE_HAL_OK;
}

/**
 * xge_hal_fifo_dtr_buffer_finalize - Prepare a descriptor that contains a
 * single physically contiguous buffer.
 * @channelh: Channel handle.
 * @dtrh: Descriptor handle.
 * @frag_idx: Index of the data buffer in the Txdl list.
 *
 * This API, in conjunction with xge_hal_fifo_dtr_buffer_append(), prepares
 * a descriptor that consists of a single physically contiguous buffer
 * which in turn contains the contents of one or more virtually contiguous
 * buffers received from the upper layer.
 *
 * See Also: xge_hal_fifo_dtr_buffer_append().
 */
__HAL_STATIC_FIFO __HAL_INLINE_FIFO void
xge_hal_fifo_dtr_buffer_finalize(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
    int frag_idx)
{
    xge_hal_fifo_t *fifo = (xge_hal_fifo_t *)channelh;
    xge_hal_fifo_txdl_priv_t *txdl_priv;
    xge_hal_fifo_txd_t *txdp;
    ptrdiff_t prev_boff;

    txdl_priv = __hal_fifo_txdl_priv(dtrh);
    txdp = (xge_hal_fifo_txd_t *)dtrh + txdl_priv->frags;

    xge_assert(frag_idx < fifo->config->max_frags);

    if (frag_idx != 0) {
        txdp->control_1 = txdp->control_2 = 0;
    }

    prev_boff = txdl_priv->align_vaddr_start - txdl_priv->align_vaddr;
    txdp->buffer_pointer = (u64)txdl_priv->align_dma_addr + prev_boff;
    txdp->control_1 |=
        XGE_HAL_TXD_BUFFER0_SIZE(txdl_priv->align_dma_offset);
    fifo->channel.stats.total_buffers++;
    txdl_priv->frags++;
    txdl_priv->align_used_frags++;

#if defined(XGE_OS_DMA_REQUIRES_SYNC)
    /* sync pre-mapped buffer */
    xge_os_dma_sync(fifo->channel.pdev,
        txdl_priv->align_dma_handle,
        txdp->buffer_pointer,
        0,
        txdl_priv->align_dma_offset,
        XGE_OS_DMA_DIR_TODEVICE);
#endif

    /* increment vaddr_start for the next buffer_append() iteration */
    txdl_priv->align_vaddr_start += txdl_priv->align_dma_offset;
    txdl_priv->align_dma_offset = 0;
}
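/*
 * Usage sketch (not part of the HAL): coalescing several small virtually
 * contiguous pieces (e.g., protocol headers) into one physically contiguous
 * TxD buffer. The function name, the header arguments, and the
 * XGE_HAL_USAGE_EXAMPLES guard are illustrative.
 */
#ifdef XGE_HAL_USAGE_EXAMPLES
static xge_hal_status_e
example_coalesce_headers(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
    void *eth_hdr, int eth_len, void *ip_hdr, int ip_len)
{
    xge_hal_status_e status;

    status = xge_hal_fifo_dtr_buffer_append(channelh, dtrh,
        eth_hdr, eth_len);
    if (status != XGE_HAL_OK)
        return status;
    status = xge_hal_fifo_dtr_buffer_append(channelh, dtrh,
        ip_hdr, ip_len);
    if (status != XGE_HAL_OK)
        return status;
    /* close out the coalesced buffer as fragment 0 of the TxDL */
    xge_hal_fifo_dtr_buffer_finalize(channelh, dtrh, 0);
    return XGE_HAL_OK;
}
#endif /* XGE_HAL_USAGE_EXAMPLES */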
/**
 * xge_hal_fifo_dtr_buffer_set - Set transmit buffer pointer in the
 * descriptor.
 * @channelh: Channel handle.
 * @dtrh: Descriptor handle.
 * @frag_idx: Index of the data buffer in the caller's scatter-gather list
 * (of buffers).
 * @dma_pointer: DMA address of the data buffer referenced by @frag_idx.
 * @size: Size of the data buffer (in bytes).
 *
 * This API is part of the preparation of the transmit descriptor for posting
 * (via xge_hal_fifo_dtr_post()). The related "preparation" APIs include
 * xge_hal_fifo_dtr_mss_set() and xge_hal_fifo_dtr_cksum_set_bits().
 * All three APIs fill in the fields of the fifo descriptor,
 * in accordance with the Xframe specification.
 * Note: Compare with xge_hal_fifo_dtr_buffer_set_aligned().
 *
 * See also: xge_hal_fifo_dtr_buffer_set_aligned(),
 * xge_hal_check_alignment().
 * See also: xge_hal_fifo_dtr_reserve(), xge_hal_fifo_dtr_post(),
 * xge_hal_fifo_dtr_mss_set(), xge_hal_fifo_dtr_cksum_set_bits(),
 * xge_hal_fifo_dtr_vlan_set().
 * Usage: See ex_xmit{}.
 */
__HAL_STATIC_FIFO __HAL_INLINE_FIFO void
xge_hal_fifo_dtr_buffer_set(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
    int frag_idx, dma_addr_t dma_pointer, int size)
{
    xge_hal_fifo_t *fifo = (xge_hal_fifo_t *)channelh;
    xge_hal_fifo_txdl_priv_t *txdl_priv;
    xge_hal_fifo_txd_t *txdp;

    txdl_priv = __hal_fifo_txdl_priv(dtrh);
    txdp = (xge_hal_fifo_txd_t *)dtrh + txdl_priv->frags;

    if (frag_idx != 0) {
        txdp->control_1 = txdp->control_2 = 0;
    }

    /* Note:
     * it is the responsibility of the upper layers, and not HAL,
     * to detect and skip zero-size fragments
     */
    xge_assert(size > 0);
    xge_assert(frag_idx < txdl_priv->alloc_frags);

    txdp->buffer_pointer = (u64)dma_pointer;
    txdp->control_1 |= XGE_HAL_TXD_BUFFER0_SIZE(size);
    fifo->channel.stats.total_buffers++;
    txdl_priv->frags++;
}

/**
 * xge_hal_fifo_dtr_mss_set - Set MSS.
 * @dtrh: Descriptor handle.
 * @mss: MSS size for _this_ TCP connection. Passed by the TCP stack down to
 * the ULD, which in turn inserts the MSS into the @dtrh.
 *
 * This API is part of the preparation of the transmit descriptor for posting
 * (via xge_hal_fifo_dtr_post()). The related "preparation" APIs include
 * xge_hal_fifo_dtr_buffer_set(), xge_hal_fifo_dtr_buffer_set_aligned(),
 * and xge_hal_fifo_dtr_cksum_set_bits().
 * All these APIs fill in the fields of the fifo descriptor,
 * in accordance with the Xframe specification.
 *
 * See also: xge_hal_fifo_dtr_reserve(),
 * xge_hal_fifo_dtr_post(), xge_hal_fifo_dtr_vlan_set().
 * Usage: See ex_xmit{}.
 */
__HAL_STATIC_FIFO __HAL_INLINE_FIFO void
xge_hal_fifo_dtr_mss_set(xge_hal_dtr_h dtrh, int mss)
{
    xge_hal_fifo_txd_t *txdp = (xge_hal_fifo_txd_t *)dtrh;

    txdp->control_1 |= XGE_HAL_TXD_LSO_COF_CTRL(XGE_HAL_TXD_TCP_LSO);
    txdp->control_1 |= XGE_HAL_TXD_TCP_LSO_MSS(mss);
}
/**
 * xge_hal_fifo_dtr_cksum_set_bits - Offload checksum.
 * @dtrh: Descriptor handle.
 * @cksum_bits: Specifies which checksums are to be offloaded: IPv4,
 * and/or TCP and/or UDP.
 *
 * Ask Xframe to calculate IPv4 & transport checksums for _this_ transmit
 * descriptor.
 * This API is part of the preparation of the transmit descriptor for posting
 * (via xge_hal_fifo_dtr_post()). The related "preparation" APIs include
 * xge_hal_fifo_dtr_mss_set(), xge_hal_fifo_dtr_buffer_set_aligned(),
 * and xge_hal_fifo_dtr_buffer_set().
 * All these APIs fill in the fields of the fifo descriptor,
 * in accordance with the Xframe specification.
 *
 * See also: xge_hal_fifo_dtr_reserve(),
 * xge_hal_fifo_dtr_post(), XGE_HAL_TXD_TX_CKO_IPV4_EN,
 * XGE_HAL_TXD_TX_CKO_TCP_EN.
 * Usage: See ex_xmit{}.
 */
__HAL_STATIC_FIFO __HAL_INLINE_FIFO void
xge_hal_fifo_dtr_cksum_set_bits(xge_hal_dtr_h dtrh, u64 cksum_bits)
{
    xge_hal_fifo_txd_t *txdp = (xge_hal_fifo_txd_t *)dtrh;

    txdp->control_2 |= cksum_bits;
}
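/*
 * Usage sketch (not part of the HAL): enabling IPv4 and TCP checksum
 * offload on a prepared descriptor, using the control bits named in the
 * comment above. The function name and the XGE_HAL_USAGE_EXAMPLES guard
 * are hypothetical.
 */
#ifdef XGE_HAL_USAGE_EXAMPLES
static void
example_enable_cksum(xge_hal_dtr_h dtrh)
{
    xge_hal_fifo_dtr_cksum_set_bits(dtrh, XGE_HAL_TXD_TX_CKO_IPV4_EN |
        XGE_HAL_TXD_TX_CKO_TCP_EN);
}
#endif /* XGE_HAL_USAGE_EXAMPLES */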
/**
 * xge_hal_fifo_dtr_vlan_set - Set VLAN tag.
 * @dtrh: Descriptor handle.
 * @vlan_tag: 16bit VLAN tag.
 *
 * Insert VLAN tag into the specified transmit descriptor.
 * The actual insertion of the tag into the outgoing frame is done by the
 * hardware.
 * See also: xge_hal_fifo_dtr_buffer_set(), xge_hal_fifo_dtr_mss_set().
 */
__HAL_STATIC_FIFO __HAL_INLINE_FIFO void
xge_hal_fifo_dtr_vlan_set(xge_hal_dtr_h dtrh, u16 vlan_tag)
{
    xge_hal_fifo_txd_t *txdp = (xge_hal_fifo_txd_t *)dtrh;

    txdp->control_2 |= XGE_HAL_TXD_VLAN_ENABLE;
    txdp->control_2 |= XGE_HAL_TXD_VLAN_TAG(vlan_tag);
}
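/*
 * Usage sketch (not part of the HAL): tagging an outgoing frame. The
 * function name, the example VLAN id, and the XGE_HAL_USAGE_EXAMPLES guard
 * are illustrative.
 */
#ifdef XGE_HAL_USAGE_EXAMPLES
static void
example_vlan_tag(xge_hal_dtr_h dtrh)
{
    /* hardware inserts the tag into the outgoing frame */
    xge_hal_fifo_dtr_vlan_set(dtrh, (u16)100);
}
#endif /* XGE_HAL_USAGE_EXAMPLES */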