xref: /titanic_50/usr/src/uts/common/io/xge/hal/xgehal/xgehal-channel-fp.c (revision 7eced415e5dd557aef2d78483b5a7785f0e13670)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 *
 * Copyright (c) 2002-2006 Neterion, Inc.
 */

#ifdef XGE_DEBUG_FP
#include "xgehal-channel.h"
#endif

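/*
 * __hal_channel_dtr_alloc - Take a descriptor out of the channel reserve.
 *
 * Fast path: if the reserve array still holds more than reserve_threshold
 * descriptors, pop one off its tail without taking any lock. Otherwise,
 * under free_lock, swap the reserve and free arrays (picking up everything
 * __hal_channel_dtr_free() has returned so far) and retry; if the free side
 * is also below the threshold, report XGE_HAL_INF_OUT_OF_DESCRIPTORS.
 * A descriptor taken here is typically posted via __hal_channel_dtr_post(),
 * completed via __hal_channel_dtr_try_complete()/__hal_channel_dtr_complete(),
 * and eventually returned through __hal_channel_dtr_free().
 */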
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL xge_hal_status_e
__hal_channel_dtr_alloc(xge_hal_channel_h channelh, xge_hal_dtr_h *dtrh)
{
	void **tmp_arr;
	xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh;
#if defined(XGE_HAL_RX_MULTI_FREE_IRQ) || defined(XGE_HAL_TX_MULTI_FREE_IRQ)
	unsigned long flags = 0;
#endif
	if (channel->terminating) {
		return XGE_HAL_FAIL;
	}

	if (channel->reserve_length - channel->reserve_top >
					channel->reserve_threshold) {

_alloc_after_swap:
		*dtrh = channel->reserve_arr[--channel->reserve_length];

		xge_debug_channel(XGE_TRACE, "dtrh 0x"XGE_OS_LLXFMT" allocated, "
				   "channel %d:%d:%d, reserve_idx %d",
				   (unsigned long long)(ulong_t)*dtrh,
				   channel->type, channel->post_qid,
				   channel->compl_qid, channel->reserve_length);

		return XGE_HAL_OK;
	}

#if defined(XGE_HAL_RX_MULTI_FREE_IRQ) || defined(XGE_HAL_TX_MULTI_FREE_IRQ)
	xge_os_spin_lock_irq(&channel->free_lock, flags);
#elif defined(XGE_HAL_RX_MULTI_FREE) || defined(XGE_HAL_TX_MULTI_FREE)
	xge_os_spin_lock(&channel->free_lock);
#endif

	/* switch between empty and full arrays */

	/* The idea behind this design is that keeping the free and reserve
	 * arrays separate also separates the irq and non-irq paths, i.e.
	 * no additional locking is needed when a resource is freed. */

	if (channel->reserve_initial - channel->free_length >
					channel->reserve_threshold) {

		tmp_arr = channel->reserve_arr;
		channel->reserve_arr = channel->free_arr;
		channel->reserve_length = channel->reserve_initial;
		channel->free_arr = tmp_arr;
		channel->reserve_top = channel->free_length;
		channel->free_length = channel->reserve_initial;

		channel->stats.reserve_free_swaps_cnt++;

		xge_debug_channel(XGE_TRACE,
			   "switch on channel %d:%d:%d, reserve_length %d, "
			   "free_length %d", channel->type, channel->post_qid,
			   channel->compl_qid, channel->reserve_length,
			   channel->free_length);

#if defined(XGE_HAL_RX_MULTI_FREE_IRQ) || defined(XGE_HAL_TX_MULTI_FREE_IRQ)
		xge_os_spin_unlock_irq(&channel->free_lock, flags);
#elif defined(XGE_HAL_RX_MULTI_FREE) || defined(XGE_HAL_TX_MULTI_FREE)
		xge_os_spin_unlock(&channel->free_lock);
#endif

		goto _alloc_after_swap;
	}

#if defined(XGE_HAL_RX_MULTI_FREE_IRQ) || defined(XGE_HAL_TX_MULTI_FREE_IRQ)
	xge_os_spin_unlock_irq(&channel->free_lock, flags);
#elif defined(XGE_HAL_RX_MULTI_FREE) || defined(XGE_HAL_TX_MULTI_FREE)
	xge_os_spin_unlock(&channel->free_lock);
#endif

	xge_debug_channel(XGE_TRACE, "channel %d:%d:%d is empty!",
			   channel->type, channel->post_qid,
			   channel->compl_qid);

	channel->stats.full_cnt++;

	*dtrh = NULL;
	return XGE_HAL_INF_OUT_OF_DESCRIPTORS;
}

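/*
 * __hal_channel_dtr_restore - Put a previously allocated descriptor back
 * into the reserve array. A non-NULL dtrh is stored at index
 * reserve_length + offset; a NULL dtrh means only the reserve_length
 * bookkeeping itself is adjusted by offset.
 */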
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL void
__hal_channel_dtr_restore(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
			  int offset)
{
	xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh;

	/* restore a previously allocated dtrh at the given offset and update
	 * the available reserve length accordingly. If dtrh is NULL, just
	 * update the reserve length. */

	if (dtrh) {
		channel->reserve_arr[channel->reserve_length + offset] = dtrh;
		xge_debug_channel(XGE_TRACE, "dtrh 0x"XGE_OS_LLXFMT" restored for "
			"channel %d:%d:%d, offset %d at reserve index %d, ",
			(unsigned long long)(ulong_t)dtrh, channel->type,
			channel->post_qid, channel->compl_qid, offset,
			channel->reserve_length + offset);
	}
	else {
		channel->reserve_length += offset;
		xge_debug_channel(XGE_TRACE, "channel %d:%d:%d, restored "
			"for offset %d, new reserve_length %d, free length %d",
			channel->type, channel->post_qid, channel->compl_qid,
			offset, channel->reserve_length, channel->free_length);
	}
}

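/*
 * __hal_channel_dtr_post - Append a descriptor to the channel work array
 * at post_index. work_arr is used as a ring of channel->length entries:
 * post_index wraps back to zero when it reaches the end.
 */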
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL void
__hal_channel_dtr_post(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh)
{
	xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh;

	xge_assert(channel->work_arr[channel->post_index] == NULL);

	channel->work_arr[channel->post_index++] = dtrh;

	/* wrap-around */
	if (channel->post_index == channel->length)
		channel->post_index = 0;
}

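/*
 * __hal_channel_dtr_try_complete - Peek at the descriptor currently at
 * compl_index in the work array without advancing the index. A NULL
 * result means no descriptor is outstanding at that slot.
 */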
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL void
__hal_channel_dtr_try_complete(xge_hal_channel_h channelh, xge_hal_dtr_h *dtrh)
{
	xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh;

	xge_assert(channel->work_arr);
	xge_assert(channel->compl_index < channel->length);

	*dtrh = channel->work_arr[channel->compl_index];
}

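/*
 * __hal_channel_dtr_complete - Retire the descriptor at compl_index: clear
 * the work array slot, advance compl_index (wrapping at channel->length),
 * and count the completion in the channel statistics.
 */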
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL void
__hal_channel_dtr_complete(xge_hal_channel_h channelh)
{
	xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh;

	channel->work_arr[channel->compl_index] = NULL;

	/* wrap-around */
	if (++channel->compl_index == channel->length)
		channel->compl_index = 0;

	channel->stats.total_compl_cnt++;
}

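/*
 * __hal_channel_dtr_free - Return a descriptor to the free array. The
 * free array fills from the top down (free_arr[--free_length] = dtrh), so
 * reserve_initial - free_length is the number of descriptors waiting to
 * be swapped back into the reserve array by __hal_channel_dtr_alloc().
 */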
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL void
__hal_channel_dtr_free(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh)
{
	xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh;

	channel->free_arr[--channel->free_length] = dtrh;

	xge_debug_channel(XGE_TRACE, "dtrh 0x"XGE_OS_LLXFMT" freed, "
			   "channel %d:%d:%d, new free_length %d",
			   (unsigned long long)(ulong_t)dtrh,
			   channel->type, channel->post_qid,
			   channel->compl_qid, channel->free_length);
}

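/*
 * The count computed below is the sum of what is immediately available in
 * the reserve array (reserve_length - reserve_top) and what is parked in
 * the free array awaiting a swap (reserve_initial - free_length), less the
 * reserve_threshold that __hal_channel_dtr_alloc() always holds back.
 */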
/**
 * xge_hal_channel_dtr_count
 * @channelh: Channel handle. Obtained via xge_hal_channel_open().
 *
 * Retrieve the number of DTRs available. This function can not be called
 * from the data path.
 */
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL int
xge_hal_channel_dtr_count(xge_hal_channel_h channelh)
{
	xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh;

	return ((channel->reserve_length - channel->reserve_top) +
		(channel->reserve_initial - channel->free_length) -
					channel->reserve_threshold);
}

/**
 * xge_hal_channel_userdata - Get user-specified channel context.
 * @channelh: Channel handle. Obtained via xge_hal_channel_open().
 *
 * Returns: per-channel "user data", which can be any ULD-defined context.
 * The %userdata is set on the channel at open time
 * (see xge_hal_channel_open()).
 *
 * See also: xge_hal_channel_open().
 */
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL void*
xge_hal_channel_userdata(xge_hal_channel_h channelh)
{
	xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh;

	return channel->userdata;
}

/**
 * xge_hal_channel_id - Get channel ID.
 * @channelh: Channel handle. Obtained via xge_hal_channel_open().
 *
 * Returns: channel ID. For the link layer, the channel id is a number
 * in the range from 0 to 7 that identifies a hardware ring or fifo,
 * depending on the channel type.
 */
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL int
xge_hal_channel_id(xge_hal_channel_h channelh)
{
	xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh;

	return channel->post_qid;
}

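/*
 * Worked example (illustrative values): with @alignment=8 and
 * @copy_size=64, a buffer at DMA address 0x1005 is misaligned
 * (0x1005 & 0x7 == 5). If the buffer is larger than 64 bytes,
 * xge_hal_check_alignment() returns 64 - (0x1005 & 0x3f) = 59, i.e. the
 * number of leading bytes to post separately so that the remainder starts
 * at the 64-byte boundary 0x1040. If the buffer is 64 bytes or smaller,
 * the whole buffer size is returned.
 */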
/**
 * xge_hal_check_alignment - Check buffer alignment and calculate the
 * "misaligned" portion.
 * @dma_pointer: DMA address of the buffer.
 * @size: Buffer size, in bytes.
 * @alignment: Alignment "granularity" (see below), in bytes.
 * @copy_size: Maximum number of bytes to "extract" from the buffer
 * (in order to post it as a separate scatter-gather entry). See below.
 *
 * Check buffer alignment and calculate the "misaligned" portion, if it
 * exists. The buffer is considered aligned if its address is a multiple
 * of the specified @alignment. If this is the case,
 * xge_hal_check_alignment() returns zero.
 * Otherwise, xge_hal_check_alignment() uses the last argument, @copy_size,
 * to calculate the size to "extract" from the buffer. The @copy_size
 * may or may not be equal to @alignment. The difference between these two
 * arguments is that @alignment is used to make the decision: aligned
 * or not aligned, while @copy_size is used to calculate the portion
 * of the buffer to "extract", i.e. to post as a separate entry in the
 * transmit descriptor. For example, the combination
 * @alignment=8 and @copy_size=64 will work okay on AMD Opteron boxes.
 *
 * Note: @copy_size should be a multiple of @alignment. In many practical
 * cases @copy_size and @alignment will probably be equal.
 *
 * See also: xge_hal_fifo_dtr_buffer_set_aligned().
 */
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL int
xge_hal_check_alignment(dma_addr_t dma_pointer, int size, int alignment,
		int copy_size)
{
	int misaligned_size;

	misaligned_size = (int)(dma_pointer & (alignment - 1));
	if (!misaligned_size) {
		return 0;
	}

	if (size > copy_size) {
		misaligned_size = (int)(dma_pointer & (copy_size - 1));
		misaligned_size = copy_size - misaligned_size;
	} else {
		misaligned_size = size;
	}

	return misaligned_size;
}