/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 *
 * Copyright (c) 2002-2006 Neterion, Inc.
 */

#ifdef XGE_DEBUG_FP
#include "xgehal-ring.h"
#endif

__HAL_STATIC_RING __HAL_INLINE_RING xge_hal_ring_rxd_priv_t*
__hal_ring_rxd_priv(xge_hal_ring_t *ring, xge_hal_dtr_h dtrh)
{
	xge_hal_ring_rxd_1_t *rxdp = (xge_hal_ring_rxd_1_t *)dtrh;
	xge_hal_ring_rxd_priv_t *rxd_priv;

	xge_assert(rxdp);

#if defined(XGE_HAL_USE_5B_MODE)
	xge_assert(ring);
	if (ring->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_5) {
		xge_hal_ring_rxd_5_t *rxdp_5 = (xge_hal_ring_rxd_5_t *)dtrh;
#if defined(XGE_OS_PLATFORM_64BIT)
		int memblock_idx = rxdp_5->host_control >> 16;
		int i = rxdp_5->host_control & 0xFFFF;
		rxd_priv = (xge_hal_ring_rxd_priv_t *)
			((char *)ring->mempool->memblocks_priv_arr[memblock_idx] +
			ring->rxd_priv_size * i);
#else
		/* 32-bit case */
		rxd_priv = (xge_hal_ring_rxd_priv_t *)rxdp_5->host_control;
#endif
	} else
#endif
	{
		rxd_priv = (xge_hal_ring_rxd_priv_t *)
				(ulong_t)rxdp->host_control;
	}

	xge_assert(rxd_priv);
	xge_assert(rxd_priv->dma_object);

	xge_assert(rxd_priv->dma_object->handle == rxd_priv->dma_handle);

	xge_assert(rxd_priv->dma_object->addr + rxd_priv->dma_offset ==
							rxd_priv->dma_addr);

	return rxd_priv;
}

__HAL_STATIC_RING __HAL_INLINE_RING int
__hal_ring_block_memblock_idx(xge_hal_ring_block_t *block)
{
	return (int)*((u64 *)(void *)((char *)block +
			XGE_HAL_RING_MEMBLOCK_IDX_OFFSET));
}

__HAL_STATIC_RING __HAL_INLINE_RING void
__hal_ring_block_memblock_idx_set(xge_hal_ring_block_t *block, int memblock_idx)
{
	*((u64 *)(void *)((char *)block +
			XGE_HAL_RING_MEMBLOCK_IDX_OFFSET)) =
			memblock_idx;
}


__HAL_STATIC_RING __HAL_INLINE_RING dma_addr_t
__hal_ring_block_next_pointer(xge_hal_ring_block_t *block)
{
	return (dma_addr_t)*((u64 *)(void *)((char *)block +
			XGE_HAL_RING_NEXT_BLOCK_POINTER_OFFSET));
}

__HAL_STATIC_RING __HAL_INLINE_RING void
__hal_ring_block_next_pointer_set(xge_hal_ring_block_t *block,
			dma_addr_t dma_next)
{
	*((u64 *)(void *)((char *)block +
			XGE_HAL_RING_NEXT_BLOCK_POINTER_OFFSET)) = dma_next;
}

/**
 * xge_hal_ring_dtr_private - Get ULD private per-descriptor data.
 * @channelh: Channel handle.
 * @dtrh: Descriptor handle.
 *
 * Returns: private ULD info associated with the descriptor.
 * ULD requests per-descriptor space via xge_hal_channel_open().
 *
 * See also: xge_hal_fifo_dtr_private().
 * Usage: See ex_rx_compl{}.
 */
__HAL_STATIC_RING __HAL_INLINE_RING void*
xge_hal_ring_dtr_private(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh)
{
	return (char *)__hal_ring_rxd_priv((xge_hal_ring_t *)channelh, dtrh) +
					sizeof(xge_hal_ring_rxd_priv_t);
}

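/*
 * A minimal usage sketch for xge_hal_ring_dtr_private(). Illustrative
 * only: "my_rxd_ctx_t" and "mp" stand for a hypothetical ULD context
 * structure and OS buffer, not part of this HAL:
 *
 *	my_rxd_ctx_t *ctx = (my_rxd_ctx_t *)
 *		xge_hal_ring_dtr_private(channelh, dtrh);
 *	ctx->mp = mp;	// remember the OS buffer behind this RxD
 *
 * The space is carved out right after xge_hal_ring_rxd_priv_t (see the
 * sizeof() offset above) and sized per the ULD request made at
 * xge_hal_channel_open() time.
 */
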
/**
 * xge_hal_ring_dtr_reserve - Reserve ring descriptor.
 * @channelh: Channel handle.
 * @dtrh: Reserved descriptor. On success HAL fills this "out" parameter
 *        with a valid handle.
 *
 * Reserve an Rx descriptor for subsequent filling-in by the upper-layer
 * driver (ULD) and posting on the corresponding channel (@channelh)
 * via xge_hal_ring_dtr_post().
 *
 * Returns: XGE_HAL_OK - success.
 * XGE_HAL_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available.
 *
 * See also: xge_hal_fifo_dtr_reserve(), xge_hal_ring_dtr_free(),
 * xge_hal_fifo_dtr_reserve_sp(), xge_hal_status_e{}.
 * Usage: See ex_post_all_rx{}.
 */
__HAL_STATIC_RING __HAL_INLINE_RING xge_hal_status_e
xge_hal_ring_dtr_reserve(xge_hal_channel_h channelh, xge_hal_dtr_h *dtrh)
{
	xge_hal_status_e status;
#if defined(XGE_HAL_RX_MULTI_RESERVE_IRQ)
	unsigned long flags;
#endif

#if defined(XGE_HAL_RX_MULTI_RESERVE)
	xge_os_spin_lock(&((xge_hal_channel_t *)channelh)->reserve_lock);
#elif defined(XGE_HAL_RX_MULTI_RESERVE_IRQ)
	xge_os_spin_lock_irq(&((xge_hal_channel_t *)channelh)->reserve_lock,
			flags);
#endif

	status = __hal_channel_dtr_alloc(channelh, dtrh);

#if defined(XGE_HAL_RX_MULTI_RESERVE)
	xge_os_spin_unlock(&((xge_hal_channel_t *)channelh)->reserve_lock);
#elif defined(XGE_HAL_RX_MULTI_RESERVE_IRQ)
	xge_os_spin_unlock_irq(&((xge_hal_channel_t *)channelh)->reserve_lock,
			flags);
#endif

	if (status == XGE_HAL_OK) {
		xge_hal_ring_rxd_1_t *rxdp = (xge_hal_ring_rxd_1_t *)*dtrh;

		/* instead of memset: reset this RxD */
		rxdp->control_1 = rxdp->control_2 = 0;

#if defined(XGE_OS_MEMORY_CHECK)
		__hal_ring_rxd_priv((xge_hal_ring_t *)channelh, rxdp)->allocated = 1;
#endif
	}

	return status;
}

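/*
 * A sketch of the reserve path ("channelh" is the handle supplied by
 * the caller, as in the other examples in this file):
 *
 *	xge_hal_dtr_h dtrh;
 *
 *	if (xge_hal_ring_dtr_reserve(channelh, &dtrh) != XGE_HAL_OK) {
 *		// XGE_HAL_INF_OUT_OF_DESCRIPTORS: ring is full, retry later
 *		return;
 *	}
 *	// fill the descriptor (e.g. xge_hal_ring_dtr_1b_set()) and post it
 *
 * Note that on success the RxD is pre-zeroed (control_1/control_2 only,
 * instead of a full memset) before being handed back to the ULD.
 */
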
/**
 * xge_hal_ring_dtr_info_get - Get extended information associated with
 * a completed receive descriptor for 1b mode.
 * @channelh: Channel handle.
 * @dtrh: Descriptor handle.
 * @ext_info: See xge_hal_dtr_info_t{}. Returned by HAL.
 *
 * Retrieve extended information associated with a completed receive
 * descriptor.
 *
 * See also: xge_hal_dtr_info_t{}, xge_hal_ring_dtr_1b_get(),
 * xge_hal_ring_dtr_5b_get().
 */
__HAL_STATIC_RING __HAL_INLINE_RING void
xge_hal_ring_dtr_info_get(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
			xge_hal_dtr_info_t *ext_info)
{
	/* cast to 1-buffer mode RxD: the code below relies on the fact
	 * that control_1 and control_2 are formatted the same way. */
	xge_hal_ring_rxd_1_t *rxdp = (xge_hal_ring_rxd_1_t *)dtrh;

	ext_info->l3_cksum = XGE_HAL_RXD_GET_L3_CKSUM(rxdp->control_1);
	ext_info->l4_cksum = XGE_HAL_RXD_GET_L4_CKSUM(rxdp->control_1);
	ext_info->frame = XGE_HAL_RXD_GET_FRAME_TYPE(rxdp->control_1);
	ext_info->proto = XGE_HAL_RXD_GET_FRAME_PROTO(rxdp->control_1);
	ext_info->vlan = XGE_HAL_RXD_GET_VLAN_TAG(rxdp->control_2);

	/* Herc only; a few extra cycles imposed on Xena and/or
	 * when RTH is not enabled.
	 * Alternatively, could check
	 * xge_hal_device_check_id(), hldev->config.rth_en, queue->rth_en */
	ext_info->rth_it_hit = XGE_HAL_RXD_GET_RTH_IT_HIT(rxdp->control_1);
	ext_info->rth_spdm_hit =
			XGE_HAL_RXD_GET_RTH_SPDM_HIT(rxdp->control_1);
	ext_info->rth_hash_type =
			XGE_HAL_RXD_GET_RTH_HASH_TYPE(rxdp->control_1);
	ext_info->rth_value = XGE_HAL_RXD_1_GET_RTH_VALUE(rxdp->control_2);
}

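/*
 * An illustrative completion-side use of xge_hal_ring_dtr_info_get().
 * The XGE_HAL_L3_CKSUM_OK/XGE_HAL_L4_CKSUM_OK comparison follows the
 * usual ULD pattern (assumption: those constants come from the HAL
 * headers; they are not defined in this file):
 *
 *	xge_hal_dtr_info_t ext_info;
 *
 *	xge_hal_ring_dtr_info_get(channelh, dtrh, &ext_info);
 *	if (ext_info.l3_cksum == XGE_HAL_L3_CKSUM_OK &&
 *	    ext_info.l4_cksum == XGE_HAL_L4_CKSUM_OK) {
 *		// hardware validated both checksums; skip software cksum
 *	}
 */
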
/**
 * xge_hal_ring_dtr_info_nb_get - Get extended information associated
 * with a completed receive descriptor for 3b or 5b modes.
 * @channelh: Channel handle.
 * @dtrh: Descriptor handle.
 * @ext_info: See xge_hal_dtr_info_t{}. Returned by HAL.
 *
 * Retrieve extended information associated with a completed receive
 * descriptor.
 *
 * See also: xge_hal_dtr_info_t{}, xge_hal_ring_dtr_1b_get(),
 *           xge_hal_ring_dtr_5b_get().
 */
__HAL_STATIC_RING __HAL_INLINE_RING void
xge_hal_ring_dtr_info_nb_get(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
			xge_hal_dtr_info_t *ext_info)
{
	/* cast to 1-buffer mode RxD: the code below relies on the fact
	 * that control_1 and control_2 are formatted the same way. */
	xge_hal_ring_rxd_1_t *rxdp = (xge_hal_ring_rxd_1_t *)dtrh;

	ext_info->l3_cksum = XGE_HAL_RXD_GET_L3_CKSUM(rxdp->control_1);
	ext_info->l4_cksum = XGE_HAL_RXD_GET_L4_CKSUM(rxdp->control_1);
	ext_info->frame = XGE_HAL_RXD_GET_FRAME_TYPE(rxdp->control_1);
	ext_info->proto = XGE_HAL_RXD_GET_FRAME_PROTO(rxdp->control_1);
	ext_info->vlan = XGE_HAL_RXD_GET_VLAN_TAG(rxdp->control_2);
	/* Herc only; a few extra cycles imposed on Xena and/or
	 * when RTH is not enabled. Same comment as above. */
	ext_info->rth_it_hit = XGE_HAL_RXD_GET_RTH_IT_HIT(rxdp->control_1);
	ext_info->rth_spdm_hit =
			XGE_HAL_RXD_GET_RTH_SPDM_HIT(rxdp->control_1);
	ext_info->rth_hash_type =
			XGE_HAL_RXD_GET_RTH_HASH_TYPE(rxdp->control_1);
	ext_info->rth_value = (u32)rxdp->buffer0_ptr;
}

/**
 * xge_hal_ring_dtr_1b_set - Prepare 1-buffer-mode descriptor.
 * @dtrh: Descriptor handle.
 * @dma_pointer: DMA address of a single receive buffer this descriptor
 *               should carry. Note that by the time
 *               xge_hal_ring_dtr_1b_set
 *               is called, the receive buffer should be already mapped
 *               to the corresponding Xframe device.
 * @size: Size of the receive @dma_pointer buffer.
 *
 * Prepare 1-buffer-mode Rx descriptor for posting
 * (via xge_hal_ring_dtr_post()).
 *
 * This inline helper-function does not return any parameters and always
 * succeeds.
 *
 * See also: xge_hal_ring_dtr_3b_set(), xge_hal_ring_dtr_5b_set().
 * Usage: See ex_post_all_rx{}.
 */
__HAL_STATIC_RING __HAL_INLINE_RING void
xge_hal_ring_dtr_1b_set(xge_hal_dtr_h dtrh, dma_addr_t dma_pointer, int size)
{
	xge_hal_ring_rxd_1_t *rxdp = (xge_hal_ring_rxd_1_t *)dtrh;
	rxdp->buffer0_ptr = dma_pointer;
	rxdp->control_2 &= (~XGE_HAL_RXD_1_MASK_BUFFER0_SIZE);
	rxdp->control_2 |= XGE_HAL_RXD_1_SET_BUFFER0_SIZE(size);

	xge_debug_ring(XGE_TRACE,
			"xge_hal_ring_dtr_1b_set: rxdp %p control_2 %p buffer0_ptr %p",
			(xge_hal_ring_rxd_1_t *)dtrh,
			rxdp->control_2,
			rxdp->buffer0_ptr);
}

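/*
 * A minimal end-to-end posting sketch for 1-buffer mode. "dma_addr" and
 * "buf_size" stand for a buffer the ULD has already DMA-mapped to the
 * device; both names are illustrative:
 *
 *	xge_hal_dtr_h dtrh;
 *
 *	if (xge_hal_ring_dtr_reserve(channelh, &dtrh) == XGE_HAL_OK) {
 *		xge_hal_ring_dtr_1b_set(dtrh, dma_addr, buf_size);
 *		xge_hal_ring_dtr_post(channelh, dtrh);
 *	}
 */
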
/**
 * xge_hal_ring_dtr_1b_get - Get data from the completed 1-buf
 * descriptor.
 * @channelh: Channel handle.
 * @dtrh: Descriptor handle.
 * @dma_pointer: DMA address of a single receive buffer _this_ descriptor
 *               carries. Returned by HAL.
 * @pkt_length: Length (in bytes) of the data in the buffer pointed by
 *              @dma_pointer. Returned by HAL.
 *
 * Retrieve protocol data from the completed 1-buffer-mode Rx descriptor.
 * This inline helper-function uses the completed descriptor to populate
 * the receive buffer pointer and other "out" parameters. The function
 * always succeeds.
 *
 * See also: xge_hal_ring_dtr_3b_get(), xge_hal_ring_dtr_5b_get().
 * Usage: See ex_rx_compl{}.
 */
__HAL_STATIC_RING __HAL_INLINE_RING void
xge_hal_ring_dtr_1b_get(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
		dma_addr_t *dma_pointer, int *pkt_length)
{
	xge_hal_ring_rxd_1_t *rxdp = (xge_hal_ring_rxd_1_t *)dtrh;

	*pkt_length = XGE_HAL_RXD_1_GET_BUFFER0_SIZE(rxdp->control_2);
	*dma_pointer = rxdp->buffer0_ptr;

	((xge_hal_channel_t *)channelh)->poll_bytes += *pkt_length;
}

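/*
 * An illustrative completion-side counterpart, typically run from the
 * channel callback or a next_completed loop (see ex_rx_compl{}):
 *
 *	dma_addr_t dma_pointer;
 *	int pkt_length;
 *
 *	xge_hal_ring_dtr_1b_get(channelh, dtrh, &dma_pointer, &pkt_length);
 *	// dma_pointer identifies the posted buffer; pkt_length bytes of
 *	// received data are valid in it
 */
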
/**
 * xge_hal_ring_dtr_3b_set - Prepare 3-buffer-mode descriptor.
 * @dtrh: Descriptor handle.
 * @dma_pointers: Array of DMA addresses. Contains exactly 3 receive buffers
 *                _this_ descriptor should carry.
 *                Note that by the time xge_hal_ring_dtr_3b_set
 *                is called, the receive buffers should be mapped
 *                to the corresponding Xframe device.
 * @sizes: Array of receive buffer sizes. Contains 3 sizes: one size per
 *         buffer from @dma_pointers.
 *
 * Prepare 3-buffer-mode Rx descriptor for posting (via
 * xge_hal_ring_dtr_post()).
 * This inline helper-function does not return any parameters and always
 * succeeds.
 *
 * See also: xge_hal_ring_dtr_1b_set(), xge_hal_ring_dtr_5b_set().
 */
__HAL_STATIC_RING __HAL_INLINE_RING void
xge_hal_ring_dtr_3b_set(xge_hal_dtr_h dtrh, dma_addr_t dma_pointers[],
			int sizes[])
{
	xge_hal_ring_rxd_3_t *rxdp = (xge_hal_ring_rxd_3_t *)dtrh;
	rxdp->buffer0_ptr = dma_pointers[0];
	rxdp->control_2 &= (~XGE_HAL_RXD_3_MASK_BUFFER0_SIZE);
	rxdp->control_2 |= XGE_HAL_RXD_3_SET_BUFFER0_SIZE(sizes[0]);
	rxdp->buffer1_ptr = dma_pointers[1];
	rxdp->control_2 &= (~XGE_HAL_RXD_3_MASK_BUFFER1_SIZE);
	rxdp->control_2 |= XGE_HAL_RXD_3_SET_BUFFER1_SIZE(sizes[1]);
	rxdp->buffer2_ptr = dma_pointers[2];
	rxdp->control_2 &= (~XGE_HAL_RXD_3_MASK_BUFFER2_SIZE);
	rxdp->control_2 |= XGE_HAL_RXD_3_SET_BUFFER2_SIZE(sizes[2]);
}

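/*
 * A hedged 3-buffer-mode sketch; the ULD supplies one mapped buffer per
 * slot (header/header/payload split), all names illustrative:
 *
 *	dma_addr_t dma_pointers[3] = { eth_hdr_dma, ip_hdr_dma, data_dma };
 *	int sizes[3] = { eth_hdr_size, ip_hdr_size, data_size };
 *
 *	xge_hal_ring_dtr_3b_set(dtrh, dma_pointers, sizes);
 *	xge_hal_ring_dtr_post(channelh, dtrh);
 */
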
/**
 * xge_hal_ring_dtr_3b_get - Get data from the completed 3-buf
 * descriptor.
 * @channelh: Channel handle.
 * @dtrh: Descriptor handle.
 * @dma_pointers: DMA addresses of the 3 receive buffers _this_ descriptor
 *                carries. The first two buffers contain ethernet and
 *                (IP + transport) headers. The 3rd buffer contains packet
 *                data.
 *                Returned by HAL.
 * @sizes: Array of receive buffer sizes. Contains 3 sizes: one size per
 * buffer from @dma_pointers. Returned by HAL.
 *
 * Retrieve protocol data from the completed 3-buffer-mode Rx descriptor.
 * This inline helper-function uses the completed descriptor to populate
 * the receive buffer pointers and other "out" parameters. The function
 * always succeeds.
 *
 * See also: xge_hal_ring_dtr_1b_get(), xge_hal_ring_dtr_5b_get().
 */
__HAL_STATIC_RING __HAL_INLINE_RING void
xge_hal_ring_dtr_3b_get(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
		dma_addr_t dma_pointers[], int sizes[])
{
	xge_hal_ring_rxd_3_t *rxdp = (xge_hal_ring_rxd_3_t *)dtrh;

	dma_pointers[0] = rxdp->buffer0_ptr;
	sizes[0] = XGE_HAL_RXD_3_GET_BUFFER0_SIZE(rxdp->control_2);

	dma_pointers[1] = rxdp->buffer1_ptr;
	sizes[1] = XGE_HAL_RXD_3_GET_BUFFER1_SIZE(rxdp->control_2);

	dma_pointers[2] = rxdp->buffer2_ptr;
	sizes[2] = XGE_HAL_RXD_3_GET_BUFFER2_SIZE(rxdp->control_2);

	((xge_hal_channel_t *)channelh)->poll_bytes += sizes[0] + sizes[1] +
		sizes[2];
}

/**
 * xge_hal_ring_dtr_5b_set - Prepare 5-buffer-mode descriptor.
 * @dtrh: Descriptor handle.
 * @dma_pointers: Array of DMA addresses. Contains exactly 5 receive buffers
 *                _this_ descriptor should carry.
 *                Note that by the time xge_hal_ring_dtr_5b_set
 *                is called, the receive buffers should be mapped
 *                to the corresponding Xframe device.
 * @sizes: Array of receive buffer sizes. Contains 5 sizes: one size per
 *         buffer from @dma_pointers.
 *
 * Prepare 5-buffer-mode Rx descriptor for posting (via
 * xge_hal_ring_dtr_post()).
 * This inline helper-function does not return any parameters and always
 * succeeds.
 *
 * See also: xge_hal_ring_dtr_1b_set(), xge_hal_ring_dtr_3b_set().
 */
__HAL_STATIC_RING __HAL_INLINE_RING void
xge_hal_ring_dtr_5b_set(xge_hal_dtr_h dtrh, dma_addr_t dma_pointers[],
			int sizes[])
{
	xge_hal_ring_rxd_5_t *rxdp = (xge_hal_ring_rxd_5_t *)dtrh;
	rxdp->buffer0_ptr = dma_pointers[0];
	rxdp->control_2 &= (~XGE_HAL_RXD_5_MASK_BUFFER0_SIZE);
	rxdp->control_2 |= XGE_HAL_RXD_5_SET_BUFFER0_SIZE(sizes[0]);
	rxdp->buffer1_ptr = dma_pointers[1];
	rxdp->control_2 &= (~XGE_HAL_RXD_5_MASK_BUFFER1_SIZE);
	rxdp->control_2 |= XGE_HAL_RXD_5_SET_BUFFER1_SIZE(sizes[1]);
	rxdp->buffer2_ptr = dma_pointers[2];
	rxdp->control_2 &= (~XGE_HAL_RXD_5_MASK_BUFFER2_SIZE);
	rxdp->control_2 |= XGE_HAL_RXD_5_SET_BUFFER2_SIZE(sizes[2]);
	rxdp->buffer3_ptr = dma_pointers[3];
	rxdp->control_3 &= (~XGE_HAL_RXD_5_MASK_BUFFER3_SIZE);
	rxdp->control_3 |= XGE_HAL_RXD_5_SET_BUFFER3_SIZE(sizes[3]);
	rxdp->buffer4_ptr = dma_pointers[4];
	rxdp->control_3 &= (~XGE_HAL_RXD_5_MASK_BUFFER4_SIZE);
	rxdp->control_3 |= XGE_HAL_RXD_5_SET_BUFFER4_SIZE(sizes[4]);
}

/**
 * xge_hal_ring_dtr_5b_get - Get data from the completed 5-buf
 * descriptor.
 * @channelh: Channel handle.
 * @dtrh: Descriptor handle.
 * @dma_pointers: DMA addresses of the 5 receive buffers _this_ descriptor
 *                carries. The first 4 buffers contain the L2 (ethernet)
 *                through L5 headers. The 5th buffer contains received
 *                (application) data. Returned by HAL.
 * @sizes: Array of receive buffer sizes. Contains 5 sizes: one size per
 * buffer from @dma_pointers. Returned by HAL.
 *
 * Retrieve protocol data from the completed 5-buffer-mode Rx descriptor.
 * This inline helper-function uses the completed descriptor to populate
 * the receive buffer pointers and other "out" parameters. The function
 * always succeeds.
 *
 * See also: xge_hal_ring_dtr_1b_get(), xge_hal_ring_dtr_3b_get().
 */
__HAL_STATIC_RING __HAL_INLINE_RING void
xge_hal_ring_dtr_5b_get(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
		dma_addr_t dma_pointers[], int sizes[])
{
	xge_hal_ring_rxd_5_t *rxdp = (xge_hal_ring_rxd_5_t *)dtrh;

	dma_pointers[0] = rxdp->buffer0_ptr;
	sizes[0] = XGE_HAL_RXD_5_GET_BUFFER0_SIZE(rxdp->control_2);

	dma_pointers[1] = rxdp->buffer1_ptr;
	sizes[1] = XGE_HAL_RXD_5_GET_BUFFER1_SIZE(rxdp->control_2);

	dma_pointers[2] = rxdp->buffer2_ptr;
	sizes[2] = XGE_HAL_RXD_5_GET_BUFFER2_SIZE(rxdp->control_2);

	dma_pointers[3] = rxdp->buffer3_ptr;
	sizes[3] = XGE_HAL_RXD_5_GET_BUFFER3_SIZE(rxdp->control_3);

	dma_pointers[4] = rxdp->buffer4_ptr;
	sizes[4] = XGE_HAL_RXD_5_GET_BUFFER4_SIZE(rxdp->control_3);

	((xge_hal_channel_t *)channelh)->poll_bytes += sizes[0] + sizes[1] +
		sizes[2] + sizes[3] + sizes[4];
}

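/*
 * A sketch of the 5-buffer completion path (buffers 0..3 hold the L2
 * through L5 headers, buffer 4 the application data; array names are
 * illustrative):
 *
 *	dma_addr_t dma_pointers[5];
 *	int sizes[5];
 *
 *	xge_hal_ring_dtr_5b_get(channelh, dtrh, dma_pointers, sizes);
 *	// sizes[0]..sizes[3] are the header portions;
 *	// sizes[4] is the application-data portion
 */
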
/**
 * xge_hal_ring_dtr_pre_post - Prepare descriptor for posting.
 * @channelh: Channel handle.
 * @dtrh: Descriptor handle.
 *
 * Marks the descriptor as not yet completed and queues it on the
 * channel under the post lock, without transferring ownership to the
 * device. See xge_hal_ring_dtr_post_post().
 */
__HAL_STATIC_RING __HAL_INLINE_RING void
xge_hal_ring_dtr_pre_post(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh)
{
	xge_hal_ring_rxd_1_t *rxdp = (xge_hal_ring_rxd_1_t *)dtrh;
#if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_DTR_STREAMING)
	xge_hal_ring_rxd_priv_t *priv;
	xge_hal_ring_t *ring = (xge_hal_ring_t *)channelh;
#endif
#if defined(XGE_HAL_RX_MULTI_POST_IRQ)
	unsigned long flags;
#endif

	rxdp->control_2 |= XGE_HAL_RXD_NOT_COMPLETED;

#ifdef XGE_DEBUG_ASSERT
	/* make sure Xena overwrites the (illegal) t_code on completion */
	XGE_HAL_RXD_SET_T_CODE(rxdp->control_1, XGE_HAL_RXD_T_CODE_UNUSED_C);
#endif

	xge_debug_ring(XGE_TRACE,
			"xge_hal_ring_dtr_pre_post: rxd 0x"XGE_OS_LLXFMT" posted %d post_qid %d",
			(unsigned long long)(ulong_t)dtrh,
			((xge_hal_ring_t *)channelh)->channel.post_index,
			((xge_hal_ring_t *)channelh)->channel.post_qid);

#if defined(XGE_HAL_RX_MULTI_POST)
	xge_os_spin_lock(&((xge_hal_channel_t *)channelh)->post_lock);
#elif defined(XGE_HAL_RX_MULTI_POST_IRQ)
	xge_os_spin_lock_irq(&((xge_hal_channel_t *)channelh)->post_lock,
			flags);
#endif

#if defined(XGE_DEBUG_ASSERT) && defined(XGE_HAL_RING_ENFORCE_ORDER)
	{
		xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh;

		if (channel->post_index != 0) {
			xge_hal_dtr_h prev_dtrh;
			xge_hal_ring_rxd_priv_t *rxdp_priv;

			rxdp_priv = __hal_ring_rxd_priv((xge_hal_ring_t *)channel,
					rxdp);
			prev_dtrh = channel->work_arr[channel->post_index - 1];

			if (prev_dtrh != NULL &&
				(rxdp_priv->dma_offset & (~0xFFF)) !=
						rxdp_priv->dma_offset) {
				xge_assert((char *)prev_dtrh +
					((xge_hal_ring_t *)channel)->rxd_size == dtrh);
			}
		}
	}
#endif

	__hal_channel_dtr_post(channelh, dtrh);

#if defined(XGE_HAL_RX_MULTI_POST)
	xge_os_spin_unlock(&((xge_hal_channel_t *)channelh)->post_lock);
#elif defined(XGE_HAL_RX_MULTI_POST_IRQ)
	xge_os_spin_unlock_irq(&((xge_hal_channel_t *)channelh)->post_lock,
			flags);
#endif
}

/**
 * xge_hal_ring_dtr_post_post - Transfer descriptor ownership to the device.
 * @channelh: Channel handle.
 * @dtrh: Descriptor handle.
 *
 * Completes the posting of a previously pre-posted descriptor (see
 * xge_hal_ring_dtr_pre_post()) by marking it as owned by the Xframe
 * device.
 */
__HAL_STATIC_RING __HAL_INLINE_RING void
xge_hal_ring_dtr_post_post(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh)
{
	xge_hal_ring_rxd_1_t *rxdp = (xge_hal_ring_rxd_1_t *)dtrh;
	xge_hal_ring_t *ring = (xge_hal_ring_t *)channelh;
#if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_DTR_STREAMING)
	xge_hal_ring_rxd_priv_t *priv;
#endif
	/* do POST */
	rxdp->control_1 |= XGE_HAL_RXD_POSTED_4_XFRAME;

#if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_DTR_STREAMING)
	priv = __hal_ring_rxd_priv(ring, rxdp);
	xge_os_dma_sync(ring->channel.pdev,
			priv->dma_handle, priv->dma_addr,
			priv->dma_offset, ring->rxd_size,
			XGE_OS_DMA_DIR_TODEVICE);
#endif

	xge_debug_ring(XGE_TRACE,
			"xge_hal_ring_dtr_post_post: rxdp %p control_1 %p",
			(xge_hal_ring_rxd_1_t *)dtrh,
			rxdp->control_1);

	if (ring->channel.usage_cnt > 0)
		ring->channel.usage_cnt--;
}

/**
 * xge_hal_ring_dtr_post_post_wmb - Post descriptor with a write barrier.
 * @channelh: Channel handle.
 * @dtrh: Descriptor handle.
 *
 * Similar to xge_hal_ring_dtr_post_post(), but additionally issues a
 * memory barrier before changing descriptor ownership.
 */
__HAL_STATIC_RING __HAL_INLINE_RING void
xge_hal_ring_dtr_post_post_wmb(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh)
{
	xge_hal_ring_rxd_1_t *rxdp = (xge_hal_ring_rxd_1_t *)dtrh;
	xge_hal_ring_t *ring = (xge_hal_ring_t *)channelh;
#if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_DTR_STREAMING)
	xge_hal_ring_rxd_priv_t *priv;
#endif
	/* do memory barrier before changing the ownership */
	xge_os_wmb();

	/* do POST */
	rxdp->control_1 |= XGE_HAL_RXD_POSTED_4_XFRAME;

#if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_DTR_STREAMING)
	priv = __hal_ring_rxd_priv(ring, rxdp);
	xge_os_dma_sync(ring->channel.pdev,
			priv->dma_handle, priv->dma_addr,
			priv->dma_offset, ring->rxd_size,
			XGE_OS_DMA_DIR_TODEVICE);
#endif

	if (ring->channel.usage_cnt > 0)
		ring->channel.usage_cnt--;

	xge_debug_ring(XGE_TRACE,
			"xge_hal_ring_dtr_post_post_wmb: rxdp %p control_1 %p rxds_with_host %d",
			(xge_hal_ring_rxd_1_t *)dtrh,
			rxdp->control_1, ring->channel.usage_cnt);
}

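/*
 * A hedged sketch of the split-post pattern the helpers above make
 * possible: queue several descriptors with the pre-post step, then
 * transfer ownership, finishing with a barrier on the last one.
 * Assumption: this mirrors how xge_hal_ring_dtr_post() composes the
 * two calls; "dtrs" and "n" are illustrative:
 *
 *	for (i = 0; i < n; i++)
 *		xge_hal_ring_dtr_pre_post(channelh, dtrs[i]);
 *	for (i = 0; i < n - 1; i++)
 *		xge_hal_ring_dtr_post_post(channelh, dtrs[i]);
 *	xge_hal_ring_dtr_post_post_wmb(channelh, dtrs[n - 1]);
 */
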
/**
 * xge_hal_ring_dtr_post - Post descriptor on the ring channel.
 * @channelh: Channel handle.
 * @dtrh: Descriptor obtained via xge_hal_ring_dtr_reserve().
 *
 * Post descriptor on the 'ring' type channel.
 * Prior to posting, the descriptor should be filled in accordance with
 * the Host/Xframe interface specification for the given service (LL, etc.).
 *
 * See also: xge_hal_fifo_dtr_post_many(), xge_hal_fifo_dtr_post().
 * Usage: See ex_post_all_rx{}.
 */
__HAL_STATIC_RING __HAL_INLINE_RING void
xge_hal_ring_dtr_post(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh)
{
	xge_hal_ring_dtr_pre_post(channelh, dtrh);
	xge_hal_ring_dtr_post_post(channelh, dtrh);
}

/**
 * xge_hal_ring_dtr_next_completed - Get the _next_ completed
 * descriptor.
 * @channelh: Channel handle.
 * @dtrh: Descriptor handle. Returned by HAL.
 * @t_code: Transfer code, as per Xframe User Guide,
 *          Receive Descriptor Format. Returned by HAL.
 *
 * Retrieve the _next_ completed descriptor.
 * HAL uses the channel callback (*xge_hal_channel_callback_f) to notify
 * the upper-layer driver (ULD) of new completed descriptors. After that
 * the ULD can use xge_hal_ring_dtr_next_completed to retrieve the rest
 * of the completions (the very first completion is passed by HAL via
 * xge_hal_channel_callback_f).
 *
 * Implementation-wise, the upper-layer driver is free to call
 * xge_hal_ring_dtr_next_completed either immediately from inside the
 * channel callback, or in a deferred fashion and separate (from HAL)
 * context.
 *
 * Non-zero @t_code means failure to fill-in receive buffer(s)
 * of the descriptor.
 * For instance, parity error detected during the data transfer.
 * In this case Xframe will complete the descriptor and indicate
 * to the host that the received data is not to be used.
 * For details please refer to the Xframe User Guide.
 *
 * Returns: XGE_HAL_OK - success.
 * XGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
 * are currently available for processing.
 *
 * See also: xge_hal_channel_callback_f{},
 * xge_hal_fifo_dtr_next_completed(), xge_hal_status_e{}.
 * Usage: See ex_rx_compl{}.
 */
__HAL_STATIC_RING __HAL_INLINE_RING xge_hal_status_e
xge_hal_ring_dtr_next_completed(xge_hal_channel_h channelh, xge_hal_dtr_h *dtrh,
				u8 *t_code)
{
	xge_hal_ring_rxd_1_t *rxdp; /* doesn't matter 1, 3 or 5... */
	xge_hal_ring_t *ring = (xge_hal_ring_t *)channelh;
#if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_DTR_STREAMING)
	xge_hal_ring_rxd_priv_t *priv;
#endif

	__hal_channel_dtr_try_complete(ring, dtrh);
	rxdp = (xge_hal_ring_rxd_1_t *)*dtrh;
	if (rxdp == NULL) {
		return XGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS;
	}

#if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_DTR_STREAMING)
	/* Note: 24 bytes at most means:
	 *	- Control_3 in case of 5-buffer mode
	 *	- Control_1 and Control_2
	 *
	 * This is the only length that needs to be invalidated for
	 * these types of channels. */
	priv = __hal_ring_rxd_priv(ring, rxdp);
	xge_os_dma_sync(ring->channel.pdev,
			priv->dma_handle, priv->dma_addr,
			priv->dma_offset, 24,
			XGE_OS_DMA_DIR_FROMDEVICE);
#endif

	/* check whether it is not the end */
	if (!(rxdp->control_2 & XGE_HAL_RXD_NOT_COMPLETED) &&
		!(rxdp->control_1 & XGE_HAL_RXD_POSTED_4_XFRAME)) {
#ifndef XGE_HAL_IRQ_POLLING
		if (++ring->cmpl_cnt > ring->indicate_max_pkts) {
			/* reset it. since we don't want to return
			 * garbage to the ULD */
			*dtrh = 0;
			return XGE_HAL_COMPLETIONS_REMAIN;
		}
#endif

#ifdef XGE_DEBUG_ASSERT
#if defined(XGE_HAL_USE_5B_MODE)
#if !defined(XGE_OS_PLATFORM_64BIT)
		if (ring->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_5) {
			xge_assert(((xge_hal_ring_rxd_5_t *)
					rxdp)->host_control != 0);
		}
#endif

#else
		xge_assert(rxdp->host_control != 0);
#endif
#endif

		__hal_channel_dtr_complete(ring);

		*t_code = (u8)XGE_HAL_RXD_GET_T_CODE(rxdp->control_1);

		/* see XGE_HAL_RXD_SET_T_CODE() above.. */
		xge_assert(*t_code != XGE_HAL_RXD_T_CODE_UNUSED_C);

		xge_debug_ring(XGE_TRACE,
			"compl_index %d post_qid %d t_code %d rxd 0x"XGE_OS_LLXFMT,
			((xge_hal_channel_t *)ring)->compl_index,
			((xge_hal_channel_t *)ring)->post_qid, *t_code,
			(unsigned long long)(ulong_t)rxdp);

		ring->channel.usage_cnt++;
		if (ring->channel.stats.usage_max < ring->channel.usage_cnt)
			ring->channel.stats.usage_max = ring->channel.usage_cnt;

		return XGE_HAL_OK;
	}

	/* reset it. since we don't want to return
	 * garbage to the ULD */
	*dtrh = 0;
	return XGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS;
}

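/*
 * A representative polling loop over completed descriptors (the t_code
 * handling is illustrative; non-zero t_code values are defined in the
 * Xframe User Guide):
 *
 *	xge_hal_dtr_h dtrh;
 *	u8 t_code;
 *
 *	while (xge_hal_ring_dtr_next_completed(channelh, &dtrh, &t_code)
 *			== XGE_HAL_OK) {
 *		if (t_code != 0) {
 *			// bad completion, e.g. parity error: drop the data
 *		}
 *		// consume via xge_hal_ring_dtr_1b_get() and/or
 *		// xge_hal_ring_dtr_private(), then recycle:
 *		xge_hal_ring_dtr_free(channelh, dtrh);
 *	}
 */
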
/**
 * xge_hal_ring_dtr_free - Free descriptor.
 * @channelh: Channel handle.
 * @dtrh: Descriptor handle.
 *
 * Free the reserved descriptor. This operation is "symmetrical" to
 * xge_hal_ring_dtr_reserve. The "free-ing" completes the descriptor's
 * lifecycle.
 *
 * After free-ing (see xge_hal_ring_dtr_free()) the descriptor can again
 * be:
 *
 * - reserved (xge_hal_ring_dtr_reserve);
 *
 * - posted (xge_hal_ring_dtr_post);
 *
 * - completed (xge_hal_ring_dtr_next_completed);
 *
 * - and recycled again (xge_hal_ring_dtr_free).
 *
 * For alternative state transitions and more details please refer to
 * the design doc.
 *
 * See also: xge_hal_ring_dtr_reserve(), xge_hal_fifo_dtr_free().
 * Usage: See ex_rx_compl{}.
 */
__HAL_STATIC_RING __HAL_INLINE_RING void
xge_hal_ring_dtr_free(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh)
{
#if defined(XGE_HAL_RX_MULTI_FREE_IRQ)
	unsigned long flags;
#endif

#if defined(XGE_HAL_RX_MULTI_FREE)
	xge_os_spin_lock(&((xge_hal_channel_t *)channelh)->free_lock);
#elif defined(XGE_HAL_RX_MULTI_FREE_IRQ)
	xge_os_spin_lock_irq(&((xge_hal_channel_t *)channelh)->free_lock,
			flags);
#endif

	__hal_channel_dtr_free(channelh, dtrh);
#if defined(XGE_OS_MEMORY_CHECK)
	__hal_ring_rxd_priv((xge_hal_ring_t *)channelh, dtrh)->allocated = 0;
#endif

#if defined(XGE_HAL_RX_MULTI_FREE)
	xge_os_spin_unlock(&((xge_hal_channel_t *)channelh)->free_lock);
#elif defined(XGE_HAL_RX_MULTI_FREE_IRQ)
	xge_os_spin_unlock_irq(&((xge_hal_channel_t *)channelh)->free_lock,
			flags);
#endif
}

/**
 * xge_hal_ring_is_next_dtr_completed - Check if the next dtr is completed.
 * @channelh: Channel handle.
 *
 * Checks whether the _next_ completed descriptor is in host memory.
 *
 * Returns: XGE_HAL_OK - success.
 * XGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
 * are currently available for processing.
 */
__HAL_STATIC_RING __HAL_INLINE_RING xge_hal_status_e
xge_hal_ring_is_next_dtr_completed(xge_hal_channel_h channelh)
{
	xge_hal_ring_rxd_1_t *rxdp; /* doesn't matter 1, 3 or 5... */
	xge_hal_ring_t *ring = (xge_hal_ring_t *)channelh;
	xge_hal_dtr_h dtrh;

	__hal_channel_dtr_try_complete(ring, &dtrh);
	rxdp = (xge_hal_ring_rxd_1_t *)dtrh;
	if (rxdp == NULL) {
		return XGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS;
	}

	/* check whether it is not the end */
	if (!(rxdp->control_2 & XGE_HAL_RXD_NOT_COMPLETED) &&
		!(rxdp->control_1 & XGE_HAL_RXD_POSTED_4_XFRAME)) {

#ifdef XGE_DEBUG_ASSERT
#if defined(XGE_HAL_USE_5B_MODE)
#if !defined(XGE_OS_PLATFORM_64BIT)
		if (ring->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_5) {
			xge_assert(((xge_hal_ring_rxd_5_t *)
					rxdp)->host_control != 0);
		}
#endif

#else
		xge_assert(rxdp->host_control != 0);
#endif
#endif
		return XGE_HAL_OK;
	}

	return XGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS;
}
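
/*
 * An illustrative peek before committing to completion processing,
 * e.g. to decide whether to keep polling without actually consuming
 * a descriptor:
 *
 *	if (xge_hal_ring_is_next_dtr_completed(channelh) == XGE_HAL_OK) {
 *		// at least one completed RxD is already in host memory
 *	}
 */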