/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 *  Copyright (c) 2002-2005 Neterion, Inc.
 *  All rights reserved.
 *
 *  FileName :    xgehal-ring-fp.c
 *
 *  Description:  HAL Rx ring object functionality (fast path)
 *
 *  Created:      10 June 2004
 */

#ifdef XGE_DEBUG_FP
#include "xgehal-ring.h"
#endif

__HAL_STATIC_RING __HAL_INLINE_RING xge_hal_ring_rxd_priv_t*
__hal_ring_rxd_priv(xge_hal_ring_t *ring, xge_hal_dtr_h dtrh)
{
	xge_hal_ring_rxd_1_t *rxdp = (xge_hal_ring_rxd_1_t *)dtrh;
	xge_hal_ring_rxd_priv_t *rxd_priv;

	xge_assert(rxdp);

#if defined(XGE_HAL_USE_5B_MODE)
	xge_assert(ring);
	if (ring->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_5) {
		xge_hal_ring_rxd_5_t *rxdp_5 = (xge_hal_ring_rxd_5_t *)dtrh;
#if defined (XGE_OS_PLATFORM_64BIT)
		int memblock_idx = rxdp_5->host_control >> 16;
		int i = rxdp_5->host_control & 0xFFFF;
		rxd_priv = (xge_hal_ring_rxd_priv_t *)
			((char*)ring->mempool->memblocks_priv_arr[memblock_idx] +
			ring->rxd_priv_size * i);
#else
		/* 32-bit case */
		rxd_priv = (xge_hal_ring_rxd_priv_t *)rxdp_5->host_control;
#endif
	} else
#endif
	{
		rxd_priv = (xge_hal_ring_rxd_priv_t *)
				(ulong_t)rxdp->host_control;
	}

	xge_assert(rxd_priv);
	xge_assert(rxd_priv->dma_object);

	xge_assert(rxd_priv->dma_object->handle == rxd_priv->dma_handle);

	xge_assert(rxd_priv->dma_object->addr + rxd_priv->dma_offset ==
							rxd_priv->dma_addr);

	return rxd_priv;
}
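
/*
 * Illustration (not part of the build): in 5-buffer mode on 64-bit
 * platforms host_control packs two values, so the private area must be
 * looked up indirectly via the mempool. A hypothetical setup path would
 * encode it as:
 *
 *	rxdp_5->host_control = (memblock_idx << 16) | rxd_idx;
 *
 * which is exactly what __hal_ring_rxd_priv() above decodes. In all
 * other modes host_control simply holds the (possibly truncated)
 * pointer to the per-RxD private area.
 */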

__HAL_STATIC_RING __HAL_INLINE_RING int
__hal_ring_block_memblock_idx(xge_hal_ring_block_t *block)
{
	return (int)*((u64 *)(void *)((char *)block +
			XGE_HAL_RING_MEMBLOCK_IDX_OFFSET));
}

__HAL_STATIC_RING __HAL_INLINE_RING void
__hal_ring_block_memblock_idx_set(xge_hal_ring_block_t *block, int memblock_idx)
{
	*((u64 *)(void *)((char *)block +
			XGE_HAL_RING_MEMBLOCK_IDX_OFFSET)) =
			memblock_idx;
}


__HAL_STATIC_RING __HAL_INLINE_RING dma_addr_t
__hal_ring_block_next_pointer(xge_hal_ring_block_t *block)
{
	return (dma_addr_t)*((u64 *)(void *)((char *)block +
			XGE_HAL_RING_NEXT_BLOCK_POINTER_OFFSET));
}

__HAL_STATIC_RING __HAL_INLINE_RING void
__hal_ring_block_next_pointer_set(xge_hal_ring_block_t *block,
			dma_addr_t dma_next)
{
	*((u64 *)(void *)((char *)block +
			  XGE_HAL_RING_NEXT_BLOCK_POINTER_OFFSET)) = dma_next;
}
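
/*
 * Note on the helpers above: receive descriptors live in fixed-size
 * ring blocks, and each block reserves a few bytes of metadata at known
 * offsets: XGE_HAL_RING_NEXT_BLOCK_POINTER_OFFSET holds the DMA address
 * of the next block in the chain (so the device can follow the ring),
 * and XGE_HAL_RING_MEMBLOCK_IDX_OFFSET records which mempool memblock
 * the block came from (used by __hal_ring_rxd_priv() in 5-buffer/64-bit
 * mode). Sketch, with field positions intentionally left symbolic:
 *
 *	+------------------------------+  <- block
 *	| RxD 0 | RxD 1 | ...  | RxD n |
 *	+------------------------------+
 *	| memblock idx | next block DMA|  <- metadata words
 *	+------------------------------+
 */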

/**
 * xge_hal_ring_dtr_private - Get ULD private per-descriptor data.
 * @channelh: Channel handle.
 * @dtrh: Descriptor handle.
 *
 * Returns: private ULD info associated with the descriptor.
 * ULD requests per-descriptor space via xge_hal_channel_open().
 *
 * See also: xge_hal_fifo_dtr_private().
 * Usage: See ex_rx_compl{}.
 */
__HAL_STATIC_RING __HAL_INLINE_RING void*
xge_hal_ring_dtr_private(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh)
{
	return (char *)__hal_ring_rxd_priv(channelh, dtrh) +
					sizeof(xge_hal_ring_rxd_priv_t);
}
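
/*
 * Usage sketch (hypothetical; my_rxd_ctx_t is an illustrative ULD type,
 * not a HAL symbol, and the per_dtr_space attribute is assumed to be
 * the size requested at xge_hal_channel_open() time):
 *
 *	attr.per_dtr_space = sizeof(my_rxd_ctx_t);
 *	(void) xge_hal_channel_open(devh, &attr, &channelh, ...);
 *	...
 *	my_rxd_ctx_t *ctx = (my_rxd_ctx_t *)
 *		xge_hal_ring_dtr_private(channelh, dtrh);
 */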

/**
 * xge_hal_ring_dtr_reserve - Reserve ring descriptor.
 * @channelh: Channel handle.
 * @dtrh: Reserved descriptor. On success HAL fills this "out" parameter
 *        with a valid handle.
 *
 * Reserve Rx descriptor for the subsequent filling-in (by upper layer
 * driver (ULD)) and posting on the corresponding channel (@channelh)
 * via xge_hal_ring_dtr_post().
 *
 * Returns: XGE_HAL_OK - success.
 * XGE_HAL_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available.
 *
 * See also: xge_hal_fifo_dtr_reserve(), xge_hal_ring_dtr_free(),
 * xge_hal_fifo_dtr_reserve_sp(), xge_hal_status_e{}.
 * Usage: See ex_post_all_rx{}.
 */
__HAL_STATIC_RING __HAL_INLINE_RING xge_hal_status_e
xge_hal_ring_dtr_reserve(xge_hal_channel_h channelh, xge_hal_dtr_h *dtrh)
{
	xge_hal_status_e status;
#if defined(XGE_HAL_RX_MULTI_RESERVE_IRQ)
	unsigned long flags;
#endif

#if defined(XGE_HAL_RX_MULTI_RESERVE)
	xge_os_spin_lock(&((xge_hal_channel_t*)channelh)->reserve_lock);
#elif defined(XGE_HAL_RX_MULTI_RESERVE_IRQ)
	xge_os_spin_lock_irq(&((xge_hal_channel_t*)channelh)->reserve_lock,
			     flags);
#endif

	status = __hal_channel_dtr_alloc(channelh, dtrh);

#if defined(XGE_HAL_RX_MULTI_RESERVE)
	xge_os_spin_unlock(&((xge_hal_channel_t*)channelh)->reserve_lock);
#elif defined(XGE_HAL_RX_MULTI_RESERVE_IRQ)
	xge_os_spin_unlock_irq(&((xge_hal_channel_t*)channelh)->reserve_lock,
			       flags);
#endif

	if (status == XGE_HAL_OK) {
		xge_hal_ring_rxd_1_t *rxdp = (xge_hal_ring_rxd_1_t *)*dtrh;

		/* instead of memset: reset this RxD */
		rxdp->control_1 = rxdp->control_2 = 0;

#if defined(XGE_OS_MEMORY_CHECK)
		__hal_ring_rxd_priv(channelh, rxdp)->allocated = 1;
#endif
	}

	return status;
}
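
/*
 * Usage sketch (hypothetical; mirrors the ex_post_all_rx{} flow cited
 * above). my_map_rx_buffer() and MY_BUF_SIZE are illustrative ULD
 * placeholders, not HAL symbols:
 *
 *	xge_hal_dtr_h dtrh;
 *	while (xge_hal_ring_dtr_reserve(channelh, &dtrh) == XGE_HAL_OK) {
 *		dma_addr_t dma = my_map_rx_buffer(MY_BUF_SIZE);
 *		xge_hal_ring_dtr_1b_set(dtrh, dma, MY_BUF_SIZE);
 *		xge_hal_ring_dtr_post(channelh, dtrh);
 *	}
 */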

/**
 * xge_hal_ring_dtr_info_get - Get extended information associated with
 * a completed receive descriptor for 1b mode.
 * @channelh: Channel handle.
 * @dtrh: Descriptor handle.
 * @ext_info: See xge_hal_dtr_info_t{}. Returned by HAL.
 *
 * Retrieve extended information associated with a completed receive
 * descriptor.
 *
 * See also: xge_hal_dtr_info_t{}, xge_hal_ring_dtr_1b_get(),
 * xge_hal_ring_dtr_5b_get().
 */
__HAL_STATIC_RING __HAL_INLINE_RING void
xge_hal_ring_dtr_info_get(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
			xge_hal_dtr_info_t *ext_info)
{
	/* cast to 1-buffer mode RxD: the code below relies on the fact
	 * that control_1 and control_2 are formatted the same way.. */
	xge_hal_ring_rxd_1_t *rxdp = (xge_hal_ring_rxd_1_t *)dtrh;

	ext_info->l3_cksum = XGE_HAL_RXD_GET_L3_CKSUM(rxdp->control_1);
	ext_info->l4_cksum = XGE_HAL_RXD_GET_L4_CKSUM(rxdp->control_1);
	ext_info->frame = XGE_HAL_RXD_GET_FRAME_TYPE(rxdp->control_1);
	ext_info->proto = XGE_HAL_RXD_GET_FRAME_PROTO(rxdp->control_1);
	ext_info->vlan = XGE_HAL_RXD_GET_VLAN_TAG(rxdp->control_2);

	/* Herc only, a few extra cycles imposed on Xena and/or
	 * when RTH is not enabled.
	 * Alternatively, could check
	 * xge_hal_device_check_id(), hldev->config.rth_en, queue->rth_en */
	ext_info->rth_it_hit = XGE_HAL_RXD_GET_RTH_IT_HIT(rxdp->control_1);
	ext_info->rth_spdm_hit =
		XGE_HAL_RXD_GET_RTH_SPDM_HIT(rxdp->control_1);
	ext_info->rth_hash_type =
		XGE_HAL_RXD_GET_RTH_HASH_TYPE(rxdp->control_1);
	ext_info->rth_value = XGE_HAL_RXD_1_GET_RTH_VALUE(rxdp->control_2);
}
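
/*
 * Usage sketch (hypothetical): validating hardware checksum results in
 * the Rx completion path. XGE_HAL_L3_CKSUM_OK and XGE_HAL_L4_CKSUM_OK
 * are assumed to be the HAL's "checksum correct" values for these
 * fields; MY_HW_CKSUM_OK is an illustrative ULD flag:
 *
 *	xge_hal_dtr_info_t ext_info;
 *	xge_hal_ring_dtr_info_get(channelh, dtrh, &ext_info);
 *	if (ext_info.l3_cksum == XGE_HAL_L3_CKSUM_OK &&
 *	    ext_info.l4_cksum == XGE_HAL_L4_CKSUM_OK)
 *		flags |= MY_HW_CKSUM_OK;
 */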

/**
 * xge_hal_ring_dtr_info_nb_get - Get extended information associated
 * with a completed receive descriptor for 3b or 5b modes.
 * @channelh: Channel handle.
 * @dtrh: Descriptor handle.
 * @ext_info: See xge_hal_dtr_info_t{}. Returned by HAL.
 *
 * Retrieve extended information associated with a completed receive
 * descriptor.
 *
 * See also: xge_hal_dtr_info_t{}, xge_hal_ring_dtr_1b_get(),
 *           xge_hal_ring_dtr_5b_get().
 */
__HAL_STATIC_RING __HAL_INLINE_RING void
xge_hal_ring_dtr_info_nb_get(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
			xge_hal_dtr_info_t *ext_info)
{
	/* cast to 1-buffer mode RxD: the code below relies on the fact
	 * that control_1 and control_2 are formatted the same way.. */
	xge_hal_ring_rxd_1_t *rxdp = (xge_hal_ring_rxd_1_t *)dtrh;

	ext_info->l3_cksum = XGE_HAL_RXD_GET_L3_CKSUM(rxdp->control_1);
	ext_info->l4_cksum = XGE_HAL_RXD_GET_L4_CKSUM(rxdp->control_1);
	ext_info->frame = XGE_HAL_RXD_GET_FRAME_TYPE(rxdp->control_1);
	ext_info->proto = XGE_HAL_RXD_GET_FRAME_PROTO(rxdp->control_1);
	ext_info->vlan = XGE_HAL_RXD_GET_VLAN_TAG(rxdp->control_2);
	/* Herc only, a few extra cycles imposed on Xena and/or
	 * when RTH is not enabled. Same comment as above. */
	ext_info->rth_it_hit = XGE_HAL_RXD_GET_RTH_IT_HIT(rxdp->control_1);
	ext_info->rth_spdm_hit =
		XGE_HAL_RXD_GET_RTH_SPDM_HIT(rxdp->control_1);
	ext_info->rth_hash_type =
		XGE_HAL_RXD_GET_RTH_HASH_TYPE(rxdp->control_1);
	ext_info->rth_value = (u32)rxdp->buffer0_ptr;
}

/**
 * xge_hal_ring_dtr_1b_set - Prepare 1-buffer-mode descriptor.
 * @dtrh: Descriptor handle.
 * @dma_pointer: DMA address of a single receive buffer this descriptor
 *               should carry. Note that by the time
 *               xge_hal_ring_dtr_1b_set is called, the receive buffer
 *               should already be mapped to the corresponding Xframe
 *               device.
 * @size: Size of the receive @dma_pointer buffer.
 *
 * Prepare 1-buffer-mode Rx descriptor for posting
 * (via xge_hal_ring_dtr_post()).
 *
 * This inline helper-function does not return any parameters and always
 * succeeds.
 *
 * See also: xge_hal_ring_dtr_3b_set(), xge_hal_ring_dtr_5b_set().
 * Usage: See ex_post_all_rx{}.
 */
__HAL_STATIC_RING __HAL_INLINE_RING void
xge_hal_ring_dtr_1b_set(xge_hal_dtr_h dtrh, dma_addr_t dma_pointer, int size)
{
	xge_hal_ring_rxd_1_t *rxdp = (xge_hal_ring_rxd_1_t *)dtrh;
	rxdp->buffer0_ptr = dma_pointer;
	rxdp->control_2 &= (~XGE_HAL_RXD_1_MASK_BUFFER0_SIZE);
	rxdp->control_2 |= XGE_HAL_RXD_1_SET_BUFFER0_SIZE(size);
}
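
/*
 * The clear-then-set pair above is the usual idiom for updating a
 * packed field: the mask zeroes the BUFFER0_SIZE bits inside control_2
 * without disturbing neighboring fields, then the SET macro ORs in the
 * new size. Equivalent generic form (illustrative, not a HAL macro):
 *
 *	reg = (reg & ~FIELD_MASK) | FIELD_SET(value);
 */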

/**
 * xge_hal_ring_dtr_1b_get - Get data from the completed 1-buf
 * descriptor.
 * @channelh: Channel handle.
 * @dtrh: Descriptor handle.
 * @dma_pointer: DMA address of a single receive buffer _this_ descriptor
 *               carries. Returned by HAL.
 * @pkt_length: Length (in bytes) of the data in the buffer pointed to by
 *              @dma_pointer. Returned by HAL.
 *
 * Retrieve protocol data from the completed 1-buffer-mode Rx descriptor.
 * This inline helper-function uses the completed descriptor to populate
 * the receive buffer pointer and other "out" parameters. The function
 * always succeeds.
 *
 * See also: xge_hal_ring_dtr_3b_get(), xge_hal_ring_dtr_5b_get().
 * Usage: See ex_rx_compl{}.
 */
__HAL_STATIC_RING __HAL_INLINE_RING void
xge_hal_ring_dtr_1b_get(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
		dma_addr_t *dma_pointer, int *pkt_length)
{
	xge_hal_ring_rxd_1_t *rxdp = (xge_hal_ring_rxd_1_t *)dtrh;

	*pkt_length = XGE_HAL_RXD_1_GET_BUFFER0_SIZE(rxdp->control_2);
	*dma_pointer = rxdp->buffer0_ptr;
}

/**
 * xge_hal_ring_dtr_3b_set - Prepare 3-buffer-mode descriptor.
 * @dtrh: Descriptor handle.
 * @dma_pointers: Array of DMA addresses. Contains exactly 3 receive buffers
 *               _this_ descriptor should carry.
 *               Note that by the time xge_hal_ring_dtr_3b_set
 *               is called, the receive buffers should be mapped
 *               to the corresponding Xframe device.
 * @sizes: Array of receive buffer sizes. Contains 3 sizes: one size per
 *         buffer from @dma_pointers.
 *
 * Prepare 3-buffer-mode Rx descriptor for posting (via
 * xge_hal_ring_dtr_post()).
 * This inline helper-function does not return any parameters and always
 * succeeds.
 *
 * See also: xge_hal_ring_dtr_1b_set(), xge_hal_ring_dtr_5b_set().
 */
__HAL_STATIC_RING __HAL_INLINE_RING void
xge_hal_ring_dtr_3b_set(xge_hal_dtr_h dtrh, dma_addr_t dma_pointers[],
			int sizes[])
{
	xge_hal_ring_rxd_3_t *rxdp = (xge_hal_ring_rxd_3_t *)dtrh;
	rxdp->buffer0_ptr = dma_pointers[0];
	rxdp->control_2 &= (~XGE_HAL_RXD_3_MASK_BUFFER0_SIZE);
	rxdp->control_2 |= XGE_HAL_RXD_3_SET_BUFFER0_SIZE(sizes[0]);
	rxdp->buffer1_ptr = dma_pointers[1];
	rxdp->control_2 &= (~XGE_HAL_RXD_3_MASK_BUFFER1_SIZE);
	rxdp->control_2 |= XGE_HAL_RXD_3_SET_BUFFER1_SIZE(sizes[1]);
	rxdp->buffer2_ptr = dma_pointers[2];
	rxdp->control_2 &= (~XGE_HAL_RXD_3_MASK_BUFFER2_SIZE);
	rxdp->control_2 |= XGE_HAL_RXD_3_SET_BUFFER2_SIZE(sizes[2]);
}
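
/*
 * Usage sketch (hypothetical): in 3-buffer mode the hardware splits the
 * frame so that the first two buffers receive the ethernet and
 * (IP + transport) headers and the third receives the packet data (see
 * the xge_hal_ring_dtr_3b_get() description below). The ULD sizes the
 * buffers accordingly; the sizes shown here are illustrative:
 *
 *	dma_addr_t ptrs[3] = { eth_hdr_dma, l3l4_hdr_dma, payload_dma };
 *	int sizes[3] = { 14, 64, 1500 };
 *	xge_hal_ring_dtr_3b_set(dtrh, ptrs, sizes);
 *	xge_hal_ring_dtr_post(channelh, dtrh);
 */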

/**
 * xge_hal_ring_dtr_3b_get - Get data from the completed 3-buf
 * descriptor.
 * @channelh: Channel handle.
 * @dtrh: Descriptor handle.
 * @dma_pointers: DMA addresses of the 3 receive buffers _this_ descriptor
 *                carries. The first two buffers contain ethernet and
 *                (IP + transport) headers. The 3rd buffer contains packet
 *                data.
 *                Returned by HAL.
 * @sizes: Array of receive buffer sizes. Contains 3 sizes: one size per
 * buffer from @dma_pointers. Returned by HAL.
 *
 * Retrieve protocol data from the completed 3-buffer-mode Rx descriptor.
 * This inline helper-function uses the completed descriptor to populate
 * the receive buffer pointers and other "out" parameters. The function
 * always succeeds.
 *
 * See also: xge_hal_ring_dtr_1b_get(), xge_hal_ring_dtr_5b_get().
 */
__HAL_STATIC_RING __HAL_INLINE_RING void
xge_hal_ring_dtr_3b_get(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
		dma_addr_t dma_pointers[], int sizes[])
{
	xge_hal_ring_rxd_3_t *rxdp = (xge_hal_ring_rxd_3_t *)dtrh;

	dma_pointers[0] = rxdp->buffer0_ptr;
	sizes[0] = XGE_HAL_RXD_3_GET_BUFFER0_SIZE(rxdp->control_2);

	dma_pointers[1] = rxdp->buffer1_ptr;
	sizes[1] = XGE_HAL_RXD_3_GET_BUFFER1_SIZE(rxdp->control_2);

	dma_pointers[2] = rxdp->buffer2_ptr;
	sizes[2] = XGE_HAL_RXD_3_GET_BUFFER2_SIZE(rxdp->control_2);
}

/**
 * xge_hal_ring_dtr_5b_set - Prepare 5-buffer-mode descriptor.
 * @dtrh: Descriptor handle.
 * @dma_pointers: Array of DMA addresses. Contains exactly 5 receive buffers
 *               _this_ descriptor should carry.
 *               Note that by the time xge_hal_ring_dtr_5b_set
 *               is called, the receive buffers should be mapped
 *               to the corresponding Xframe device.
 * @sizes: Array of receive buffer sizes. Contains 5 sizes: one size per
 *         buffer from @dma_pointers.
 *
 * Prepare 5-buffer-mode Rx descriptor for posting (via
 * xge_hal_ring_dtr_post()).
 * This inline helper-function does not return any parameters and always
 * succeeds.
 *
 * See also: xge_hal_ring_dtr_1b_set(), xge_hal_ring_dtr_3b_set().
 */
__HAL_STATIC_RING __HAL_INLINE_RING void
xge_hal_ring_dtr_5b_set(xge_hal_dtr_h dtrh, dma_addr_t dma_pointers[],
			int sizes[])
{
	xge_hal_ring_rxd_5_t *rxdp = (xge_hal_ring_rxd_5_t *)dtrh;
	rxdp->buffer0_ptr = dma_pointers[0];
	rxdp->control_2 &= (~XGE_HAL_RXD_5_MASK_BUFFER0_SIZE);
	rxdp->control_2 |= XGE_HAL_RXD_5_SET_BUFFER0_SIZE(sizes[0]);
	rxdp->buffer1_ptr = dma_pointers[1];
	rxdp->control_2 &= (~XGE_HAL_RXD_5_MASK_BUFFER1_SIZE);
	rxdp->control_2 |= XGE_HAL_RXD_5_SET_BUFFER1_SIZE(sizes[1]);
	rxdp->buffer2_ptr = dma_pointers[2];
	rxdp->control_2 &= (~XGE_HAL_RXD_5_MASK_BUFFER2_SIZE);
	rxdp->control_2 |= XGE_HAL_RXD_5_SET_BUFFER2_SIZE(sizes[2]);
	rxdp->buffer3_ptr = dma_pointers[3];
	rxdp->control_3 &= (~XGE_HAL_RXD_5_MASK_BUFFER3_SIZE);
	rxdp->control_3 |= XGE_HAL_RXD_5_SET_BUFFER3_SIZE(sizes[3]);
	rxdp->buffer4_ptr = dma_pointers[4];
	rxdp->control_3 &= (~XGE_HAL_RXD_5_MASK_BUFFER4_SIZE);
	rxdp->control_3 |= XGE_HAL_RXD_5_SET_BUFFER4_SIZE(sizes[4]);
}

/**
 * xge_hal_ring_dtr_5b_get - Get data from the completed 5-buf
 * descriptor.
 * @channelh: Channel handle.
 * @dtrh: Descriptor handle.
 * @dma_pointers: DMA addresses of the 5 receive buffers _this_ descriptor
 *                carries. The first 4 buffers contain L2 (ethernet) through
 *                L5 headers. The 5th buffer contains received (application)
 *                data. Returned by HAL.
 * @sizes: Array of receive buffer sizes. Contains 5 sizes: one size per
 * buffer from @dma_pointers. Returned by HAL.
 *
 * Retrieve protocol data from the completed 5-buffer-mode Rx descriptor.
 * This inline helper-function uses the completed descriptor to populate
 * the receive buffer pointers and other "out" parameters. The function
 * always succeeds.
 *
 * See also: xge_hal_ring_dtr_1b_get(), xge_hal_ring_dtr_3b_get().
 */
__HAL_STATIC_RING __HAL_INLINE_RING void
xge_hal_ring_dtr_5b_get(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
		dma_addr_t dma_pointers[], int sizes[])
{
	xge_hal_ring_rxd_5_t *rxdp = (xge_hal_ring_rxd_5_t *)dtrh;

	dma_pointers[0] = rxdp->buffer0_ptr;
	sizes[0] = XGE_HAL_RXD_5_GET_BUFFER0_SIZE(rxdp->control_2);

	dma_pointers[1] = rxdp->buffer1_ptr;
	sizes[1] = XGE_HAL_RXD_5_GET_BUFFER1_SIZE(rxdp->control_2);

	dma_pointers[2] = rxdp->buffer2_ptr;
	sizes[2] = XGE_HAL_RXD_5_GET_BUFFER2_SIZE(rxdp->control_2);

	dma_pointers[3] = rxdp->buffer3_ptr;
	sizes[3] = XGE_HAL_RXD_5_GET_BUFFER3_SIZE(rxdp->control_3);

	dma_pointers[4] = rxdp->buffer4_ptr;
	sizes[4] = XGE_HAL_RXD_5_GET_BUFFER4_SIZE(rxdp->control_3);
}


/**
 * xge_hal_ring_dtr_pre_post - Prepare and enqueue a descriptor.
 * @channelh: Channel handle.
 * @dtrh: Descriptor handle.
 *
 * First half of xge_hal_ring_dtr_post(): marks the RxD as not yet
 * completed and adds it to the channel's work array under the post
 * lock. Ownership is transferred to the Xframe device only in
 * xge_hal_ring_dtr_post_post().
 */
__HAL_STATIC_RING __HAL_INLINE_RING void
xge_hal_ring_dtr_pre_post(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh)
{
	xge_hal_ring_rxd_1_t *rxdp = (xge_hal_ring_rxd_1_t *)dtrh;
#if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_DTR_STREAMING)
	xge_hal_ring_rxd_priv_t *priv;
	xge_hal_ring_t *ring = (xge_hal_ring_t *)channelh;
#endif
#if defined(XGE_HAL_RX_MULTI_POST_IRQ)
	unsigned long flags;
#endif

	rxdp->control_2 |= XGE_HAL_RXD_NOT_COMPLETED;

#ifdef XGE_DEBUG_ASSERT
	/* make sure Xena overwrites the (illegal) t_code on completion */
	XGE_HAL_RXD_SET_T_CODE(rxdp->control_1, XGE_HAL_RXD_T_CODE_UNUSED_C);
#endif

	xge_debug_ring(XGE_TRACE, "posted %d rxd 0x%llx post_qid %d",
			((xge_hal_ring_t *)channelh)->channel.post_index,
			(unsigned long long)(ulong_t)dtrh,
			((xge_hal_ring_t *)channelh)->channel.post_qid);

#if defined(XGE_HAL_RX_MULTI_POST)
	xge_os_spin_lock(&((xge_hal_channel_t*)channelh)->post_lock);
#elif defined(XGE_HAL_RX_MULTI_POST_IRQ)
	xge_os_spin_lock_irq(&((xge_hal_channel_t*)channelh)->post_lock,
			     flags);
#endif

#if defined(XGE_DEBUG_ASSERT) && defined(XGE_HAL_RING_ENFORCE_ORDER)
	{
		xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh;

		if (channel->post_index != 0) {
			xge_hal_dtr_h prev_dtrh;
			xge_hal_ring_rxd_priv_t *rxdp_priv;

			rxdp_priv = __hal_ring_rxd_priv(channelh, rxdp);
			prev_dtrh = channel->work_arr[channel->post_index - 1];

			if ((rxdp_priv->dma_offset & (~0xFFF)) !=
						rxdp_priv->dma_offset) {
				xge_assert((char *)prev_dtrh +
				    ((xge_hal_ring_t*)channel)->rxd_size == dtrh);
			}
		}
	}
#endif

	__hal_channel_dtr_post(channelh, dtrh);

#if defined(XGE_HAL_RX_MULTI_POST)
	xge_os_spin_unlock(&((xge_hal_channel_t*)channelh)->post_lock);
#elif defined(XGE_HAL_RX_MULTI_POST_IRQ)
	xge_os_spin_unlock_irq(&((xge_hal_channel_t*)channelh)->post_lock,
			       flags);
#endif
}


/**
 * xge_hal_ring_dtr_post_post - Transfer descriptor ownership to the
 * Xframe device.
 * @channelh: Channel handle.
 * @dtrh: Descriptor handle.
 *
 * Second half of xge_hal_ring_dtr_post(): sets the POSTED_4_XFRAME bit
 * and, where the platform requires it, syncs the RxD to the device.
 */
__HAL_STATIC_RING __HAL_INLINE_RING void
xge_hal_ring_dtr_post_post(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh)
{
	xge_hal_ring_rxd_1_t *rxdp = (xge_hal_ring_rxd_1_t *)dtrh;
#if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_DTR_STREAMING)
	xge_hal_ring_rxd_priv_t *priv;
	xge_hal_ring_t *ring = (xge_hal_ring_t *)channelh;
#endif
	/* do POST */
	rxdp->control_1 |= XGE_HAL_RXD_POSTED_4_XFRAME;

#if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_DTR_STREAMING)
	priv = __hal_ring_rxd_priv(ring, rxdp);
	xge_os_dma_sync(ring->channel.pdev,
			priv->dma_handle, priv->dma_addr,
			priv->dma_offset, ring->rxd_size,
			XGE_OS_DMA_DIR_TODEVICE);
#endif
}

/**
 * xge_hal_ring_dtr_post - Post descriptor on the ring channel.
 * @channelh: Channel handle.
 * @dtrh: Descriptor obtained via xge_hal_ring_dtr_reserve().
 *
 * Post descriptor on the 'ring' type channel.
 * Prior to posting, the descriptor should be filled in accordance with
 * the Host/Xframe interface specification for a given service (LL, etc.).
 *
 * See also: xge_hal_fifo_dtr_post_many(), xge_hal_fifo_dtr_post().
 * Usage: See ex_post_all_rx{}.
 */
__HAL_STATIC_RING __HAL_INLINE_RING void
xge_hal_ring_dtr_post(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh)
{
	xge_hal_ring_dtr_pre_post(channelh, dtrh);
	xge_hal_ring_dtr_post_post(channelh, dtrh);
}
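
/*
 * One possible use of the pre/post split above (a sketch, not a
 * documented HAL pattern): a ULD can do the bookkeeping for a batch of
 * descriptors first and flip the ownership bits afterwards, so the
 * device never sees a half-initialized RxD. dtrs[] and n are
 * illustrative locals:
 *
 *	for (i = 0; i < n; i++)
 *		xge_hal_ring_dtr_pre_post(channelh, dtrs[i]);
 *	for (i = 0; i < n; i++)
 *		xge_hal_ring_dtr_post_post(channelh, dtrs[i]);
 */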

/**
 * xge_hal_ring_dtr_next_completed - Get the _next_ completed
 * descriptor.
 * @channelh: Channel handle.
 * @dtrh: Descriptor handle. Returned by HAL.
 * @t_code: Transfer code, as per Xframe User Guide,
 *          Receive Descriptor Format. Returned by HAL.
 *
 * Retrieve the _next_ completed descriptor.
 * HAL uses the channel callback (*xge_hal_channel_callback_f) to notify
 * the upper-layer driver (ULD) of new completed descriptors. After that
 * the ULD can use xge_hal_ring_dtr_next_completed to retrieve the rest
 * of the completions (the very first completion is passed by HAL via
 * xge_hal_channel_callback_f).
 *
 * Implementation-wise, the upper-layer driver is free to call
 * xge_hal_ring_dtr_next_completed either immediately from inside the
 * channel callback, or in a deferred fashion and separate (from HAL)
 * context.
 *
 * Non-zero @t_code means failure to fill-in receive buffer(s)
 * of the descriptor.
 * For instance, parity error detected during the data transfer.
 * In this case Xframe will complete the descriptor and indicate
 * for the host that the received data is not to be used.
 * For details please refer to the Xframe User Guide.
 *
 * Returns: XGE_HAL_OK - success.
 * XGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
 * are currently available for processing.
 *
 * See also: xge_hal_channel_callback_f{},
 * xge_hal_fifo_dtr_next_completed(), xge_hal_status_e{}.
 * Usage: See ex_rx_compl{}.
 */
__HAL_STATIC_RING __HAL_INLINE_RING xge_hal_status_e
xge_hal_ring_dtr_next_completed(xge_hal_channel_h channelh, xge_hal_dtr_h *dtrh,
				u8 *t_code)
{
	xge_hal_ring_rxd_1_t *rxdp; /* doesn't matter 1, 3 or 5... */
	xge_hal_ring_t *ring = (xge_hal_ring_t *)channelh;
#if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_DTR_STREAMING)
	xge_hal_ring_rxd_priv_t *priv;
#endif

	__hal_channel_dtr_try_complete(ring, dtrh);
	rxdp = (xge_hal_ring_rxd_1_t *)*dtrh;
	if (rxdp == NULL) {
		return XGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS;
	}

#if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_DTR_STREAMING)
	/* Note: 24 bytes at most means:
	 *	- Control_3 in case of 5-buffer mode
	 *	- Control_1 and Control_2
	 *
	 * This is the only length that needs to be invalidated
	 * for this type of channel. */
	priv = __hal_ring_rxd_priv(ring, rxdp);
	xge_os_dma_sync(ring->channel.pdev,
			priv->dma_handle, priv->dma_addr,
			priv->dma_offset, 24,
			XGE_OS_DMA_DIR_FROMDEVICE);
#endif

	/* check whether it is not the end */
	if (!(rxdp->control_2 & XGE_HAL_RXD_NOT_COMPLETED) &&
		!(rxdp->control_1 & XGE_HAL_RXD_POSTED_4_XFRAME)) {
#ifndef XGE_HAL_IRQ_POLLING
		if (++ring->cmpl_cnt > ring->indicate_max_pkts) {
			/* reset it. since we don't want to return
			 * garbage to the ULD */
			*dtrh = 0;
			return XGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS;
		}
#endif

#ifdef XGE_DEBUG_ASSERT
#if defined(XGE_HAL_USE_5B_MODE)
#if !defined(XGE_OS_PLATFORM_64BIT)
		if (ring->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_5) {
			xge_assert(((xge_hal_ring_rxd_5_t *)
					rxdp)->host_control != 0);
		}
#endif

#else
		xge_assert(rxdp->host_control != 0);
#endif
#endif

		__hal_channel_dtr_complete(ring);

		*t_code = (u8)XGE_HAL_RXD_GET_T_CODE(rxdp->control_1);

		/* see XGE_HAL_RXD_SET_T_CODE() above.. */
		xge_assert(*t_code != XGE_HAL_RXD_T_CODE_UNUSED_C);

		xge_debug_ring(XGE_TRACE,
			"compl_index %d post_qid %d rxd 0x%llx",
			((xge_hal_channel_t*)ring)->compl_index,
			((xge_hal_channel_t*)ring)->post_qid,
			(unsigned long long)(ulong_t)rxdp);

		return XGE_HAL_OK;
	}

	/* reset it. since we don't want to return
	 * garbage to the ULD */
	*dtrh = 0;
	return XGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS;
}
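
/*
 * Usage sketch (hypothetical; mirrors the ex_rx_compl{} flow cited
 * above, for 1-buffer mode). Per the description, non-zero t_code means
 * the received data must not be used. my_process_packet() and
 * my_drop_packet() are illustrative ULD helpers, not HAL symbols:
 *
 *	xge_hal_dtr_h dtrh;
 *	u8 t_code;
 *	while (xge_hal_ring_dtr_next_completed(channelh, &dtrh,
 *			&t_code) == XGE_HAL_OK) {
 *		dma_addr_t dma;
 *		int len;
 *		xge_hal_ring_dtr_1b_get(channelh, dtrh, &dma, &len);
 *		if (t_code == 0)
 *			my_process_packet(dma, len);
 *		else
 *			my_drop_packet(dma);
 *		xge_hal_ring_dtr_free(channelh, dtrh);
 *	}
 */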

/**
 * xge_hal_ring_dtr_free - Free descriptor.
 * @channelh: Channel handle.
 * @dtrh: Descriptor handle.
 *
 * Free the reserved descriptor. This operation is "symmetrical" to
 * xge_hal_ring_dtr_reserve. The "free-ing" completes the descriptor's
 * lifecycle.
 *
 * After free-ing (see xge_hal_ring_dtr_free()) the descriptor again can
 * be:
 *
 * - reserved (xge_hal_ring_dtr_reserve);
 *
 * - posted (xge_hal_ring_dtr_post);
 *
 * - completed (xge_hal_ring_dtr_next_completed);
 *
 * - and recycled again (xge_hal_ring_dtr_free).
 *
 * For alternative state transitions and more details please refer to
 * the design doc.
 *
 * See also: xge_hal_ring_dtr_reserve(), xge_hal_fifo_dtr_free().
 * Usage: See ex_rx_compl{}.
 */
__HAL_STATIC_RING __HAL_INLINE_RING void
xge_hal_ring_dtr_free(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh)
{
#if defined(XGE_HAL_RX_MULTI_FREE_IRQ)
	unsigned long flags;
#endif

#if defined(XGE_HAL_RX_MULTI_FREE)
	xge_os_spin_lock(&((xge_hal_channel_t*)channelh)->free_lock);
#elif defined(XGE_HAL_RX_MULTI_FREE_IRQ)
	xge_os_spin_lock_irq(&((xge_hal_channel_t*)channelh)->free_lock,
			     flags);
#endif

	__hal_channel_dtr_free(channelh, dtrh);
#if defined(XGE_OS_MEMORY_CHECK)
	__hal_ring_rxd_priv(channelh, dtrh)->allocated = 0;
#endif

#if defined(XGE_HAL_RX_MULTI_FREE)
	xge_os_spin_unlock(&((xge_hal_channel_t*)channelh)->free_lock);
#elif defined(XGE_HAL_RX_MULTI_FREE_IRQ)
	xge_os_spin_unlock_irq(&((xge_hal_channel_t*)channelh)->free_lock,
			       flags);
#endif
}