/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 *
 * Copyright (c) 2002-2006 Neterion, Inc.
 */

#ifdef XGE_DEBUG_FP
#include "xgehal-ring.h"
#endif

__HAL_STATIC_RING __HAL_INLINE_RING xge_hal_ring_rxd_priv_t*
__hal_ring_rxd_priv(xge_hal_ring_t *ring, xge_hal_dtr_h dtrh)
{
	xge_hal_ring_rxd_1_t *rxdp = (xge_hal_ring_rxd_1_t *)dtrh;
	xge_hal_ring_rxd_priv_t *rxd_priv;

	xge_assert(rxdp);

#if defined(XGE_HAL_USE_5B_MODE)
	xge_assert(ring);
	if (ring->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_5) {
		xge_hal_ring_rxd_5_t *rxdp_5 = (xge_hal_ring_rxd_5_t *)dtrh;
#if defined(XGE_OS_PLATFORM_64BIT)
		int memblock_idx = rxdp_5->host_control >> 16;
		int i = rxdp_5->host_control & 0xFFFF;
		rxd_priv = (xge_hal_ring_rxd_priv_t *)
			((char*)ring->mempool->memblocks_priv_arr[memblock_idx] +
			ring->rxd_priv_size * i);
#else
		/* 32-bit case */
		rxd_priv = (xge_hal_ring_rxd_priv_t *)rxdp_5->host_control;
#endif
	} else
#endif
	{
		rxd_priv = (xge_hal_ring_rxd_priv_t *)
				(ulong_t)rxdp->host_control;
	}

	xge_assert(rxd_priv);
	xge_assert(rxd_priv->dma_object);

	xge_assert(rxd_priv->dma_object->handle == rxd_priv->dma_handle);

	xge_assert(rxd_priv->dma_object->addr + rxd_priv->dma_offset ==
			rxd_priv->dma_addr);

	return rxd_priv;
}

__HAL_STATIC_RING __HAL_INLINE_RING int
__hal_ring_block_memblock_idx(xge_hal_ring_block_t *block)
{
	return (int)*((u64 *)(void *)((char *)block +
			XGE_HAL_RING_MEMBLOCK_IDX_OFFSET));
}

__HAL_STATIC_RING __HAL_INLINE_RING void
__hal_ring_block_memblock_idx_set(xge_hal_ring_block_t *block, int memblock_idx)
{
	*((u64 *)(void *)((char *)block +
			XGE_HAL_RING_MEMBLOCK_IDX_OFFSET)) =
			memblock_idx;
}

__HAL_STATIC_RING __HAL_INLINE_RING dma_addr_t
__hal_ring_block_next_pointer(xge_hal_ring_block_t *block)
{
	return (dma_addr_t)*((u64 *)(void *)((char *)block +
			XGE_HAL_RING_NEXT_BLOCK_POINTER_OFFSET));
}

__HAL_STATIC_RING __HAL_INLINE_RING void
__hal_ring_block_next_pointer_set(xge_hal_ring_block_t *block,
		dma_addr_t dma_next)
{
	*((u64 *)(void *)((char *)block +
			XGE_HAL_RING_NEXT_BLOCK_POINTER_OFFSET)) = dma_next;
}
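/*
 * Note on the 5-buffer-mode host_control encoding used in
 * __hal_ring_rxd_priv() above (a sketch, inferred from the decode logic
 * itself): on 64-bit platforms host_control of a 5-buffer RxD is not a
 * pointer; it packs the private-memblock index in the upper bits and the
 * RxD index within that memblock in the low 16 bits:
 *
 *	host_control = (memblock_idx << 16) | i;
 *
 * which is why the lookup recovers the private area as
 *
 *	memblocks_priv_arr[host_control >> 16] +
 *		rxd_priv_size * (host_control & 0xFFFF);
 */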
/**
 * xge_hal_ring_dtr_private - Get ULD private per-descriptor data.
 * @channelh: Channel handle.
 * @dtrh: Descriptor handle.
 *
 * Returns: private ULD info associated with the descriptor.
 * ULD requests per-descriptor space via xge_hal_channel_open().
 *
 * See also: xge_hal_fifo_dtr_private().
 * Usage: See ex_rx_compl{}.
 */
__HAL_STATIC_RING __HAL_INLINE_RING void*
xge_hal_ring_dtr_private(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh)
{
	return (char *)__hal_ring_rxd_priv((xge_hal_ring_t *) channelh, dtrh) +
			sizeof(xge_hal_ring_rxd_priv_t);
}

/**
 * xge_hal_ring_dtr_reserve - Reserve ring descriptor.
 * @channelh: Channel handle.
 * @dtrh: Reserved descriptor. On success HAL fills this "out" parameter
 *        with a valid handle.
 *
 * Reserve Rx descriptor for the subsequent filling-in (by upper layer
 * driver (ULD)) and posting on the corresponding channel (@channelh)
 * via xge_hal_ring_dtr_post().
 *
 * Returns: XGE_HAL_OK - success.
 * XGE_HAL_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available.
 *
 * See also: xge_hal_fifo_dtr_reserve(), xge_hal_ring_dtr_free(),
 * xge_hal_fifo_dtr_reserve_sp(), xge_hal_status_e{}.
 * Usage: See ex_post_all_rx{}.
 */
__HAL_STATIC_RING __HAL_INLINE_RING xge_hal_status_e
xge_hal_ring_dtr_reserve(xge_hal_channel_h channelh, xge_hal_dtr_h *dtrh)
{
	xge_hal_status_e status;
#if defined(XGE_HAL_RX_MULTI_RESERVE_IRQ)
	unsigned long flags;
#endif

#if defined(XGE_HAL_RX_MULTI_RESERVE)
	xge_os_spin_lock(&((xge_hal_channel_t*)channelh)->reserve_lock);
#elif defined(XGE_HAL_RX_MULTI_RESERVE_IRQ)
	xge_os_spin_lock_irq(&((xge_hal_channel_t*)channelh)->reserve_lock,
			flags);
#endif

	status = __hal_channel_dtr_alloc(channelh, dtrh);

#if defined(XGE_HAL_RX_MULTI_RESERVE)
	xge_os_spin_unlock(&((xge_hal_channel_t*)channelh)->reserve_lock);
#elif defined(XGE_HAL_RX_MULTI_RESERVE_IRQ)
	xge_os_spin_unlock_irq(&((xge_hal_channel_t*)channelh)->reserve_lock,
			flags);
#endif

	if (status == XGE_HAL_OK) {
		xge_hal_ring_rxd_1_t *rxdp = (xge_hal_ring_rxd_1_t *)*dtrh;

		/* instead of memset: reset this RxD */
		rxdp->control_1 = rxdp->control_2 = 0;

#if defined(XGE_OS_MEMORY_CHECK)
		__hal_ring_rxd_priv(channelh, rxdp)->allocated = 1;
#endif
	}

	return status;
}
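/*
 * A minimal sketch of the reserve/fill/post cycle in 1-buffer mode, in the
 * spirit of ex_post_all_rx{} (hypothetical ULD code; buffer allocation and
 * DMA mapping are assumed to be done by the caller):
 *
 *	xge_hal_dtr_h dtrh;
 *
 *	while (xge_hal_ring_dtr_reserve(channelh, &dtrh) == XGE_HAL_OK) {
 *		// dma_pointer must already be mapped to the device
 *		xge_hal_ring_dtr_1b_set(dtrh, dma_pointer, buf_size);
 *		xge_hal_ring_dtr_post(channelh, dtrh);
 *	}
 */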
/**
 * xge_hal_ring_dtr_info_get - Get extended information associated with
 * a completed receive descriptor for 1b mode.
 * @channelh: Channel handle.
 * @dtrh: Descriptor handle.
 * @ext_info: See xge_hal_dtr_info_t{}. Returned by HAL.
 *
 * Retrieve extended information associated with a completed receive
 * descriptor.
 *
 * See also: xge_hal_dtr_info_t{}, xge_hal_ring_dtr_1b_get(),
 * xge_hal_ring_dtr_5b_get().
 */
__HAL_STATIC_RING __HAL_INLINE_RING void
xge_hal_ring_dtr_info_get(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
			xge_hal_dtr_info_t *ext_info)
{
	/* cast to 1-buffer mode RxD: the code below relies on the fact
	 * that control_1 and control_2 are formatted the same way. */
	xge_hal_ring_rxd_1_t *rxdp = (xge_hal_ring_rxd_1_t *)dtrh;

	ext_info->l3_cksum = XGE_HAL_RXD_GET_L3_CKSUM(rxdp->control_1);
	ext_info->l4_cksum = XGE_HAL_RXD_GET_L4_CKSUM(rxdp->control_1);
	ext_info->frame = XGE_HAL_RXD_GET_FRAME_TYPE(rxdp->control_1);
	ext_info->proto = XGE_HAL_RXD_GET_FRAME_PROTO(rxdp->control_1);
	ext_info->vlan = XGE_HAL_RXD_GET_VLAN_TAG(rxdp->control_2);

	/* Herc only; a few extra cycles imposed on Xena and/or
	 * when RTH is not enabled.
	 * Alternatively, could check
	 * xge_hal_device_check_id(), hldev->config.rth_en, queue->rth_en */
	ext_info->rth_it_hit = XGE_HAL_RXD_GET_RTH_IT_HIT(rxdp->control_1);
	ext_info->rth_spdm_hit =
		XGE_HAL_RXD_GET_RTH_SPDM_HIT(rxdp->control_1);
	ext_info->rth_hash_type =
		XGE_HAL_RXD_GET_RTH_HASH_TYPE(rxdp->control_1);
	ext_info->rth_value = XGE_HAL_RXD_1_GET_RTH_VALUE(rxdp->control_2);
}

/**
 * xge_hal_ring_dtr_info_nb_get - Get extended information associated
 * with a completed receive descriptor for 3b or 5b modes.
 * @channelh: Channel handle.
 * @dtrh: Descriptor handle.
 * @ext_info: See xge_hal_dtr_info_t{}. Returned by HAL.
 *
 * Retrieve extended information associated with a completed receive
 * descriptor.
 *
 * See also: xge_hal_dtr_info_t{}, xge_hal_ring_dtr_1b_get(),
 * xge_hal_ring_dtr_5b_get().
 */
__HAL_STATIC_RING __HAL_INLINE_RING void
xge_hal_ring_dtr_info_nb_get(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
			xge_hal_dtr_info_t *ext_info)
{
	/* cast to 1-buffer mode RxD: the code below relies on the fact
	 * that control_1 and control_2 are formatted the same way. */
	xge_hal_ring_rxd_1_t *rxdp = (xge_hal_ring_rxd_1_t *)dtrh;

	ext_info->l3_cksum = XGE_HAL_RXD_GET_L3_CKSUM(rxdp->control_1);
	ext_info->l4_cksum = XGE_HAL_RXD_GET_L4_CKSUM(rxdp->control_1);
	ext_info->frame = XGE_HAL_RXD_GET_FRAME_TYPE(rxdp->control_1);
	ext_info->proto = XGE_HAL_RXD_GET_FRAME_PROTO(rxdp->control_1);
	ext_info->vlan = XGE_HAL_RXD_GET_VLAN_TAG(rxdp->control_2);
	/* Herc only; a few extra cycles imposed on Xena and/or
	 * when RTH is not enabled. Same comment as above. */
	ext_info->rth_it_hit = XGE_HAL_RXD_GET_RTH_IT_HIT(rxdp->control_1);
	ext_info->rth_spdm_hit =
		XGE_HAL_RXD_GET_RTH_SPDM_HIT(rxdp->control_1);
	ext_info->rth_hash_type =
		XGE_HAL_RXD_GET_RTH_HASH_TYPE(rxdp->control_1);
	ext_info->rth_value = (u32)rxdp->buffer0_ptr;
}

/**
 * xge_hal_ring_dtr_1b_set - Prepare 1-buffer-mode descriptor.
 * @dtrh: Descriptor handle.
 * @dma_pointer: DMA address of a single receive buffer _this_ descriptor
 *               should carry. Note that by the time xge_hal_ring_dtr_1b_set
 *               is called, the receive buffer should be already mapped
 *               to the corresponding Xframe device.
 * @size: Size of the receive @dma_pointer buffer.
 *
 * Prepare 1-buffer-mode Rx descriptor for posting
 * (via xge_hal_ring_dtr_post()).
 *
 * This inline helper-function does not return any parameters and always
 * succeeds.
 *
 * See also: xge_hal_ring_dtr_3b_set(), xge_hal_ring_dtr_5b_set().
 * Usage: See ex_post_all_rx{}.
 */
__HAL_STATIC_RING __HAL_INLINE_RING void
xge_hal_ring_dtr_1b_set(xge_hal_dtr_h dtrh, dma_addr_t dma_pointer, int size)
{
	xge_hal_ring_rxd_1_t *rxdp = (xge_hal_ring_rxd_1_t *)dtrh;
	rxdp->buffer0_ptr = dma_pointer;
	rxdp->control_2 &= (~XGE_HAL_RXD_1_MASK_BUFFER0_SIZE);
	rxdp->control_2 |= XGE_HAL_RXD_1_SET_BUFFER0_SIZE(size);
}
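/*
 * A sketch of consuming the extended info after completion (hypothetical
 * ULD code; assumes the XGE_HAL_L3_CKSUM_OK / XGE_HAL_L4_CKSUM_OK and
 * XGE_HAL_FRAME_PROTO_TCP_OR_UDP definitions from the HAL headers):
 *
 *	xge_hal_dtr_info_t ext_info;
 *
 *	xge_hal_ring_dtr_info_get(channelh, dtrh, &ext_info);
 *	if ((ext_info.proto & XGE_HAL_FRAME_PROTO_TCP_OR_UDP) &&
 *	    ext_info.l3_cksum == XGE_HAL_L3_CKSUM_OK &&
 *	    ext_info.l4_cksum == XGE_HAL_L4_CKSUM_OK) {
 *		// checksums verified in hardware; skip the software check
 *	}
 */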
/**
 * xge_hal_ring_dtr_1b_get - Get data from the completed 1-buf
 * descriptor.
 * @channelh: Channel handle.
 * @dtrh: Descriptor handle.
 * @dma_pointer: DMA address of a single receive buffer _this_ descriptor
 *               carries. Returned by HAL.
 * @pkt_length: Length (in bytes) of the data in the buffer pointed by
 *              @dma_pointer. Returned by HAL.
 *
 * Retrieve protocol data from the completed 1-buffer-mode Rx descriptor.
 * This inline helper-function uses the completed descriptor to populate
 * the receive buffer pointer and other "out" parameters. The function
 * always succeeds.
 *
 * See also: xge_hal_ring_dtr_3b_get(), xge_hal_ring_dtr_5b_get().
 * Usage: See ex_rx_compl{}.
 */
__HAL_STATIC_RING __HAL_INLINE_RING void
xge_hal_ring_dtr_1b_get(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
		dma_addr_t *dma_pointer, int *pkt_length)
{
	xge_hal_ring_rxd_1_t *rxdp = (xge_hal_ring_rxd_1_t *)dtrh;

	*pkt_length = XGE_HAL_RXD_1_GET_BUFFER0_SIZE(rxdp->control_2);
	*dma_pointer = rxdp->buffer0_ptr;

	((xge_hal_channel_t *)channelh)->poll_bytes += *pkt_length;
}

/**
 * xge_hal_ring_dtr_3b_set - Prepare 3-buffer-mode descriptor.
 * @dtrh: Descriptor handle.
 * @dma_pointers: Array of DMA addresses. Contains exactly 3 receive buffers
 *                _this_ descriptor should carry.
 *                Note that by the time xge_hal_ring_dtr_3b_set
 *                is called, the receive buffers should be mapped
 *                to the corresponding Xframe device.
 * @sizes: Array of receive buffer sizes. Contains 3 sizes: one size per
 *         buffer from @dma_pointers.
 *
 * Prepare 3-buffer-mode Rx descriptor for posting (via
 * xge_hal_ring_dtr_post()). See the sketch following this function.
 * This inline helper-function does not return any parameters and always
 * succeeds.
 *
 * See also: xge_hal_ring_dtr_1b_set(), xge_hal_ring_dtr_5b_set().
 */
__HAL_STATIC_RING __HAL_INLINE_RING void
xge_hal_ring_dtr_3b_set(xge_hal_dtr_h dtrh, dma_addr_t dma_pointers[],
		int sizes[])
{
	xge_hal_ring_rxd_3_t *rxdp = (xge_hal_ring_rxd_3_t *)dtrh;
	rxdp->buffer0_ptr = dma_pointers[0];
	rxdp->control_2 &= (~XGE_HAL_RXD_3_MASK_BUFFER0_SIZE);
	rxdp->control_2 |= XGE_HAL_RXD_3_SET_BUFFER0_SIZE(sizes[0]);
	rxdp->buffer1_ptr = dma_pointers[1];
	rxdp->control_2 &= (~XGE_HAL_RXD_3_MASK_BUFFER1_SIZE);
	rxdp->control_2 |= XGE_HAL_RXD_3_SET_BUFFER1_SIZE(sizes[1]);
	rxdp->buffer2_ptr = dma_pointers[2];
	rxdp->control_2 &= (~XGE_HAL_RXD_3_MASK_BUFFER2_SIZE);
	rxdp->control_2 |= XGE_HAL_RXD_3_SET_BUFFER2_SIZE(sizes[2]);
}
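/*
 * A sketch of filling a 3-buffer RxD (hypothetical ULD code): buffer 0
 * receives the ethernet header, buffer 1 the IP + transport headers,
 * buffer 2 the payload, per the header/data split described in
 * xge_hal_ring_dtr_3b_get() below:
 *
 *	dma_addr_t ptrs[3] = { hdr_eth_dma, hdr_ip_dma, payload_dma };
 *	int sizes[3] = { hdr_eth_size, hdr_ip_size, payload_size };
 *
 *	xge_hal_ring_dtr_3b_set(dtrh, ptrs, sizes);
 *	xge_hal_ring_dtr_post(channelh, dtrh);
 */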
/**
 * xge_hal_ring_dtr_3b_get - Get data from the completed 3-buf
 * descriptor.
 * @channelh: Channel handle.
 * @dtrh: Descriptor handle.
 * @dma_pointers: DMA addresses of the 3 receive buffers _this_ descriptor
 *                carries. The first two buffers contain ethernet and
 *                (IP + transport) headers. The 3rd buffer contains packet
 *                data. Returned by HAL.
 * @sizes: Array of receive buffer sizes. Contains 3 sizes: one size per
 *         buffer from @dma_pointers. Returned by HAL.
 *
 * Retrieve protocol data from the completed 3-buffer-mode Rx descriptor.
 * This inline helper-function uses the completed descriptor to populate
 * the receive buffer pointers and other "out" parameters. The function
 * always succeeds.
 *
 * See also: xge_hal_ring_dtr_1b_get(), xge_hal_ring_dtr_5b_get().
 */
__HAL_STATIC_RING __HAL_INLINE_RING void
xge_hal_ring_dtr_3b_get(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
		dma_addr_t dma_pointers[], int sizes[])
{
	xge_hal_ring_rxd_3_t *rxdp = (xge_hal_ring_rxd_3_t *)dtrh;

	dma_pointers[0] = rxdp->buffer0_ptr;
	sizes[0] = XGE_HAL_RXD_3_GET_BUFFER0_SIZE(rxdp->control_2);

	dma_pointers[1] = rxdp->buffer1_ptr;
	sizes[1] = XGE_HAL_RXD_3_GET_BUFFER1_SIZE(rxdp->control_2);

	dma_pointers[2] = rxdp->buffer2_ptr;
	sizes[2] = XGE_HAL_RXD_3_GET_BUFFER2_SIZE(rxdp->control_2);

	((xge_hal_channel_t *)channelh)->poll_bytes += sizes[0] + sizes[1] +
			sizes[2];
}

/**
 * xge_hal_ring_dtr_5b_set - Prepare 5-buffer-mode descriptor.
 * @dtrh: Descriptor handle.
 * @dma_pointers: Array of DMA addresses. Contains exactly 5 receive buffers
 *                _this_ descriptor should carry.
 *                Note that by the time xge_hal_ring_dtr_5b_set
 *                is called, the receive buffers should be mapped
 *                to the corresponding Xframe device.
 * @sizes: Array of receive buffer sizes. Contains 5 sizes: one size per
 *         buffer from @dma_pointers.
 *
 * Prepare 5-buffer-mode Rx descriptor for posting (via
 * xge_hal_ring_dtr_post()).
 * This inline helper-function does not return any parameters and always
 * succeeds.
 *
 * See also: xge_hal_ring_dtr_1b_set(), xge_hal_ring_dtr_3b_set().
 */
__HAL_STATIC_RING __HAL_INLINE_RING void
xge_hal_ring_dtr_5b_set(xge_hal_dtr_h dtrh, dma_addr_t dma_pointers[],
		int sizes[])
{
	xge_hal_ring_rxd_5_t *rxdp = (xge_hal_ring_rxd_5_t *)dtrh;
	rxdp->buffer0_ptr = dma_pointers[0];
	rxdp->control_2 &= (~XGE_HAL_RXD_5_MASK_BUFFER0_SIZE);
	rxdp->control_2 |= XGE_HAL_RXD_5_SET_BUFFER0_SIZE(sizes[0]);
	rxdp->buffer1_ptr = dma_pointers[1];
	rxdp->control_2 &= (~XGE_HAL_RXD_5_MASK_BUFFER1_SIZE);
	rxdp->control_2 |= XGE_HAL_RXD_5_SET_BUFFER1_SIZE(sizes[1]);
	rxdp->buffer2_ptr = dma_pointers[2];
	rxdp->control_2 &= (~XGE_HAL_RXD_5_MASK_BUFFER2_SIZE);
	rxdp->control_2 |= XGE_HAL_RXD_5_SET_BUFFER2_SIZE(sizes[2]);
	rxdp->buffer3_ptr = dma_pointers[3];
	rxdp->control_3 &= (~XGE_HAL_RXD_5_MASK_BUFFER3_SIZE);
	rxdp->control_3 |= XGE_HAL_RXD_5_SET_BUFFER3_SIZE(sizes[3]);
	rxdp->buffer4_ptr = dma_pointers[4];
	rxdp->control_3 &= (~XGE_HAL_RXD_5_MASK_BUFFER4_SIZE);
	rxdp->control_3 |= XGE_HAL_RXD_5_SET_BUFFER4_SIZE(sizes[4]);
}
/**
 * xge_hal_ring_dtr_5b_get - Get data from the completed 5-buf
 * descriptor.
 * @channelh: Channel handle.
 * @dtrh: Descriptor handle.
 * @dma_pointers: DMA addresses of the 5 receive buffers _this_ descriptor
 *                carries. The first 4 buffers contain L2 (ethernet) through
 *                L5 headers. The 5th buffer contains the received
 *                (application) data. Returned by HAL.
 * @sizes: Array of receive buffer sizes. Contains 5 sizes: one size per
 *         buffer from @dma_pointers. Returned by HAL.
 *
 * Retrieve protocol data from the completed 5-buffer-mode Rx descriptor.
 * This inline helper-function uses the completed descriptor to populate
 * the receive buffer pointers and other "out" parameters. The function
 * always succeeds.
 *
 * See also: xge_hal_ring_dtr_1b_get(), xge_hal_ring_dtr_3b_get().
 */
__HAL_STATIC_RING __HAL_INLINE_RING void
xge_hal_ring_dtr_5b_get(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
		dma_addr_t dma_pointers[], int sizes[])
{
	xge_hal_ring_rxd_5_t *rxdp = (xge_hal_ring_rxd_5_t *)dtrh;

	dma_pointers[0] = rxdp->buffer0_ptr;
	sizes[0] = XGE_HAL_RXD_5_GET_BUFFER0_SIZE(rxdp->control_2);

	dma_pointers[1] = rxdp->buffer1_ptr;
	sizes[1] = XGE_HAL_RXD_5_GET_BUFFER1_SIZE(rxdp->control_2);

	dma_pointers[2] = rxdp->buffer2_ptr;
	sizes[2] = XGE_HAL_RXD_5_GET_BUFFER2_SIZE(rxdp->control_2);

	dma_pointers[3] = rxdp->buffer3_ptr;
	sizes[3] = XGE_HAL_RXD_5_GET_BUFFER3_SIZE(rxdp->control_3);

	dma_pointers[4] = rxdp->buffer4_ptr;
	sizes[4] = XGE_HAL_RXD_5_GET_BUFFER4_SIZE(rxdp->control_3);

	((xge_hal_channel_t *)channelh)->poll_bytes += sizes[0] + sizes[1] +
			sizes[2] + sizes[3] + sizes[4];
}


/**
 * xge_hal_ring_dtr_pre_post - Prepare the descriptor for posting.
 * @channelh: Channel handle.
 * @dtrh: Descriptor handle.
 *
 * Mark the descriptor as not-completed and add it to the channel's
 * internal work array; the device is not notified at this point.
 * Invoked (together with xge_hal_ring_dtr_post_post()) by
 * xge_hal_ring_dtr_post().
 */
__HAL_STATIC_RING __HAL_INLINE_RING void
xge_hal_ring_dtr_pre_post(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh)
{
	xge_hal_ring_rxd_1_t *rxdp = (xge_hal_ring_rxd_1_t *)dtrh;
#if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_DTR_STREAMING)
	xge_hal_ring_rxd_priv_t *priv;
	xge_hal_ring_t *ring = (xge_hal_ring_t *)channelh;
#endif
#if defined(XGE_HAL_RX_MULTI_POST_IRQ)
	unsigned long flags;
#endif

	rxdp->control_2 |= XGE_HAL_RXD_NOT_COMPLETED;

#ifdef XGE_DEBUG_ASSERT
	/* make sure Xena overwrites the (illegal) t_code on completion */
	XGE_HAL_RXD_SET_T_CODE(rxdp->control_1, XGE_HAL_RXD_T_CODE_UNUSED_C);
#endif

	xge_debug_ring(XGE_TRACE, "posted %d rxd 0x"XGE_OS_LLXFMT" post_qid %d",
			((xge_hal_ring_t *)channelh)->channel.post_index,
			(unsigned long long)(ulong_t)dtrh,
			((xge_hal_ring_t *)channelh)->channel.post_qid);

#if defined(XGE_HAL_RX_MULTI_POST)
	xge_os_spin_lock(&((xge_hal_channel_t*)channelh)->post_lock);
#elif defined(XGE_HAL_RX_MULTI_POST_IRQ)
	xge_os_spin_lock_irq(&((xge_hal_channel_t*)channelh)->post_lock,
			flags);
#endif

#if defined(XGE_DEBUG_ASSERT) && defined(XGE_HAL_RING_ENFORCE_ORDER)
	{
		xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh;

		if (channel->post_index != 0) {
			xge_hal_dtr_h prev_dtrh;
			xge_hal_ring_rxd_priv_t *rxdp_priv;

			rxdp_priv = __hal_ring_rxd_priv(channelh, rxdp);
			prev_dtrh = channel->work_arr[channel->post_index - 1];

			if (prev_dtrh != NULL &&
			    (rxdp_priv->dma_offset & (~0xFFF)) !=
					rxdp_priv->dma_offset) {
				xge_assert((char *)prev_dtrh +
					((xge_hal_ring_t*)channel)->rxd_size == dtrh);
			}
		}
	}
#endif

	__hal_channel_dtr_post(channelh, dtrh);

#if defined(XGE_HAL_RX_MULTI_POST)
	xge_os_spin_unlock(&((xge_hal_channel_t*)channelh)->post_lock);
#elif defined(XGE_HAL_RX_MULTI_POST_IRQ)
	xge_os_spin_unlock_irq(&((xge_hal_channel_t*)channelh)->post_lock,
			flags);
#endif
}
/**
 * xge_hal_ring_dtr_post_post - Complete the descriptor posting.
 * @channelh: Channel handle.
 * @dtrh: Descriptor handle.
 *
 * Transfer ownership of the descriptor to the Xframe device and, on
 * platforms that require it, sync the descriptor to the device.
 * Invoked (after xge_hal_ring_dtr_pre_post()) by xge_hal_ring_dtr_post().
 */
__HAL_STATIC_RING __HAL_INLINE_RING void
xge_hal_ring_dtr_post_post(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh)
{
	xge_hal_ring_rxd_1_t *rxdp = (xge_hal_ring_rxd_1_t *)dtrh;
	xge_hal_ring_t *ring = (xge_hal_ring_t *)channelh;
#if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_DTR_STREAMING)
	xge_hal_ring_rxd_priv_t *priv;
#endif
	/* do POST */
	rxdp->control_1 |= XGE_HAL_RXD_POSTED_4_XFRAME;

#if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_DTR_STREAMING)
	priv = __hal_ring_rxd_priv(ring, rxdp);
	xge_os_dma_sync(ring->channel.pdev,
			priv->dma_handle, priv->dma_addr,
			priv->dma_offset, ring->rxd_size,
			XGE_OS_DMA_DIR_TODEVICE);
#endif
	if (ring->channel.usage_cnt > 0)
		ring->channel.usage_cnt--;
}

/**
 * xge_hal_ring_dtr_post - Post descriptor on the ring channel.
 * @channelh: Channel handle.
 * @dtrh: Descriptor obtained via xge_hal_ring_dtr_reserve().
 *
 * Post descriptor on the 'ring' type channel.
 * Prior to posting, the descriptor should be filled in accordance with
 * the Host/Xframe interface specification for a given service (LL, etc.).
 *
 * See also: xge_hal_fifo_dtr_post_many(), xge_hal_fifo_dtr_post().
 * Usage: See ex_post_all_rx{}.
 */
__HAL_STATIC_RING __HAL_INLINE_RING void
xge_hal_ring_dtr_post(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh)
{
	xge_hal_ring_dtr_pre_post(channelh, dtrh);
	xge_hal_ring_dtr_post_post(channelh, dtrh);
}
/**
 * xge_hal_ring_dtr_next_completed - Get the _next_ completed
 * descriptor.
 * @channelh: Channel handle.
 * @dtrh: Descriptor handle. Returned by HAL.
 * @t_code: Transfer code, as per Xframe User Guide,
 *          Receive Descriptor Format. Returned by HAL.
 *
 * Retrieve the _next_ completed descriptor.
 * HAL uses channel callback (*xge_hal_channel_callback_f) to notify
 * upper-layer driver (ULD) of new completed descriptors. After that
 * the ULD can use xge_hal_ring_dtr_next_completed to retrieve the rest
 * of the completions (the very first completion is passed by HAL via
 * xge_hal_channel_callback_f).
 *
 * Implementation-wise, the upper-layer driver is free to call
 * xge_hal_ring_dtr_next_completed either immediately from inside the
 * channel callback, or in a deferred fashion and separate (from HAL)
 * context.
 *
 * Non-zero @t_code means failure to fill-in receive buffer(s)
 * of the descriptor.
 * For instance, parity error detected during the data transfer.
 * In this case Xframe will complete the descriptor and indicate
 * for the host that the received data is not to be used.
 * For details please refer to Xframe User Guide.
 *
 * Returns: XGE_HAL_OK - success.
 * XGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
 * are currently available for processing.
 * XGE_HAL_COMPLETIONS_REMAIN - The per-call limit (indicate_max_pkts)
 * was reached; more completions remain.
 *
 * See also: xge_hal_channel_callback_f{},
 * xge_hal_fifo_dtr_next_completed(), xge_hal_status_e{}.
 * Usage: See ex_rx_compl{}.
 */
__HAL_STATIC_RING __HAL_INLINE_RING xge_hal_status_e
xge_hal_ring_dtr_next_completed(xge_hal_channel_h channelh, xge_hal_dtr_h *dtrh,
		u8 *t_code)
{
	xge_hal_ring_rxd_1_t *rxdp; /* doesn't matter 1, 3 or 5... */
	xge_hal_ring_t *ring = (xge_hal_ring_t *)channelh;
#if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_DTR_STREAMING)
	xge_hal_ring_rxd_priv_t *priv;
#endif

	__hal_channel_dtr_try_complete(ring, dtrh);
	rxdp = (xge_hal_ring_rxd_1_t *)*dtrh;
	if (rxdp == NULL) {
		return XGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS;
	}

#if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_DTR_STREAMING)
	/* Note: 24 bytes at most covers:
	 * - Control_1 and Control_2, and
	 * - Control_3 in case of 5-buffer mode.
	 *
	 * This is the only length that needs to be invalidated
	 * for ring channels. */
	priv = __hal_ring_rxd_priv(ring, rxdp);
	xge_os_dma_sync(ring->channel.pdev,
			priv->dma_handle, priv->dma_addr,
			priv->dma_offset, 24,
			XGE_OS_DMA_DIR_FROMDEVICE);
#endif

	/* check whether it is not the end */
	if (!(rxdp->control_2 & XGE_HAL_RXD_NOT_COMPLETED) &&
	    !(rxdp->control_1 & XGE_HAL_RXD_POSTED_4_XFRAME)) {
#ifndef XGE_HAL_IRQ_POLLING
		if (++ring->cmpl_cnt > ring->indicate_max_pkts) {
			/* reset it. since we don't want to return
			 * garbage to the ULD */
			*dtrh = 0;
			return XGE_HAL_COMPLETIONS_REMAIN;
		}
#endif

#ifdef XGE_DEBUG_ASSERT
#if defined(XGE_HAL_USE_5B_MODE)
#if !defined(XGE_OS_PLATFORM_64BIT)
		if (ring->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_5) {
			xge_assert(((xge_hal_ring_rxd_5_t *)
					rxdp)->host_control!=0);
		}
#endif
#else
		xge_assert(rxdp->host_control!=0);
#endif
#endif

		__hal_channel_dtr_complete(ring);

		*t_code = (u8)XGE_HAL_RXD_GET_T_CODE(rxdp->control_1);

		/* see XGE_HAL_RXD_SET_T_CODE() above.. */
		xge_assert(*t_code != XGE_HAL_RXD_T_CODE_UNUSED_C);

		xge_debug_ring(XGE_TRACE,
			"compl_index %d post_qid %d rxd 0x"XGE_OS_LLXFMT,
			((xge_hal_channel_t*)ring)->compl_index,
			((xge_hal_channel_t*)ring)->post_qid,
			(unsigned long long)(ulong_t)rxdp);

		ring->channel.usage_cnt++;
		if (ring->channel.stats.usage_max < ring->channel.usage_cnt)
			ring->channel.stats.usage_max = ring->channel.usage_cnt;

		return XGE_HAL_OK;
	}

	/* reset it. since we don't want to return
	 * garbage to the ULD */
	*dtrh = 0;
	return XGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS;
}
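/*
 * A minimal sketch of a completion loop in the spirit of ex_rx_compl{}
 * (hypothetical ULD code, 1-buffer mode assumed):
 *
 *	xge_hal_dtr_h dtrh;
 *	u8 t_code;
 *
 *	while (xge_hal_ring_dtr_next_completed(channelh, &dtrh, &t_code) ==
 *			XGE_HAL_OK) {
 *		dma_addr_t dma_pointer;
 *		int pkt_length;
 *
 *		if (t_code != 0) {
 *			// bad t_code: drop the packet, recycle the RxD
 *			xge_hal_ring_dtr_free(channelh, dtrh);
 *			continue;
 *		}
 *		xge_hal_ring_dtr_1b_get(channelh, dtrh, &dma_pointer,
 *				&pkt_length);
 *		// hand the buffer to the stack, then recycle the RxD
 *		xge_hal_ring_dtr_free(channelh, dtrh);
 *	}
 */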
/**
 * xge_hal_ring_dtr_free - Free descriptor.
 * @channelh: Channel handle.
 * @dtrh: Descriptor handle.
 *
 * Free the reserved descriptor. This operation is "symmetrical" to
 * xge_hal_ring_dtr_reserve. The "free-ing" completes the descriptor's
 * lifecycle.
 *
 * After free-ing (see xge_hal_ring_dtr_free()) the descriptor again can
 * be:
 *
 * - reserved (xge_hal_ring_dtr_reserve);
 *
 * - posted (xge_hal_ring_dtr_post);
 *
 * - completed (xge_hal_ring_dtr_next_completed);
 *
 * - and recycled again (xge_hal_ring_dtr_free).
 *
 * For alternative state transitions and more details please refer to
 * the design doc.
 *
 * See also: xge_hal_ring_dtr_reserve(), xge_hal_fifo_dtr_free().
 * Usage: See ex_rx_compl{}.
 */
__HAL_STATIC_RING __HAL_INLINE_RING void
xge_hal_ring_dtr_free(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh)
{
#if defined(XGE_HAL_RX_MULTI_FREE_IRQ)
	unsigned long flags;
#endif

#if defined(XGE_HAL_RX_MULTI_FREE)
	xge_os_spin_lock(&((xge_hal_channel_t*)channelh)->free_lock);
#elif defined(XGE_HAL_RX_MULTI_FREE_IRQ)
	xge_os_spin_lock_irq(&((xge_hal_channel_t*)channelh)->free_lock,
			flags);
#endif

	__hal_channel_dtr_free(channelh, dtrh);
#if defined(XGE_OS_MEMORY_CHECK)
	__hal_ring_rxd_priv(channelh, dtrh)->allocated = 0;
#endif

#if defined(XGE_HAL_RX_MULTI_FREE)
	xge_os_spin_unlock(&((xge_hal_channel_t*)channelh)->free_lock);
#elif defined(XGE_HAL_RX_MULTI_FREE_IRQ)
	xge_os_spin_unlock_irq(&((xge_hal_channel_t*)channelh)->free_lock,
			flags);
#endif
}

/**
 * xge_hal_ring_is_next_dtr_completed - Check if the next dtr is completed
 * @channelh: Channel handle.
 *
 * Checks if the _next_ completed descriptor is in host memory.
 *
 * Returns: XGE_HAL_OK - success.
 * XGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
 * are currently available for processing.
 */
__HAL_STATIC_RING __HAL_INLINE_RING xge_hal_status_e
xge_hal_ring_is_next_dtr_completed(xge_hal_channel_h channelh)
{
	xge_hal_ring_rxd_1_t *rxdp; /* doesn't matter 1, 3 or 5... */
	xge_hal_ring_t *ring = (xge_hal_ring_t *)channelh;
	xge_hal_dtr_h dtrh;

	__hal_channel_dtr_try_complete(ring, &dtrh);
	rxdp = (xge_hal_ring_rxd_1_t *)dtrh;
	if (rxdp == NULL) {
		return XGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS;
	}

	/* check whether it is not the end */
	if (!(rxdp->control_2 & XGE_HAL_RXD_NOT_COMPLETED) &&
	    !(rxdp->control_1 & XGE_HAL_RXD_POSTED_4_XFRAME)) {

#ifdef XGE_DEBUG_ASSERT
#if defined(XGE_HAL_USE_5B_MODE)
#if !defined(XGE_OS_PLATFORM_64BIT)
		if (ring->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_5) {
			xge_assert(((xge_hal_ring_rxd_5_t *)
					rxdp)->host_control!=0);
		}
#endif
#else
		xge_assert(rxdp->host_control!=0);
#endif
#endif
		return XGE_HAL_OK;
	}

	return XGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS;
}
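/*
 * A sketch of using the check above to poll without dequeuing
 * (hypothetical ULD code):
 *
 *	if (xge_hal_ring_is_next_dtr_completed(channelh) == XGE_HAL_OK) {
 *		// at least one completed RxD is ready in host memory;
 *		// safe to enter the xge_hal_ring_dtr_next_completed() loop
 *	}
 */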