/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 *
 * Copyright (c) 2002-2006 Neterion, Inc.
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#ifdef XGE_DEBUG_FP
#include "xgehal-device.h"
#endif

#include "xgehal-ring.h"
#include "xgehal-fifo.h"

/**
 * xge_hal_device_bar0 - Get BAR0 mapped address.
 * @hldev: HAL device handle.
 *
 * Returns: BAR0 address of the specified device.
 */
__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE char *
xge_hal_device_bar0(xge_hal_device_t *hldev)
{
	return hldev->bar0;
}

/**
 * xge_hal_device_isrbar0 - Get the BAR0 address mapped for interrupt context.
 * @hldev: HAL device handle.
 *
 * Returns: ISR-time BAR0 address of the specified device.
 */
__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE char *
xge_hal_device_isrbar0(xge_hal_device_t *hldev)
{
	return hldev->isrbar0;
}

/**
 * xge_hal_device_bar1 - Get BAR1 mapped address.
 * @hldev: HAL device handle.
 *
 * Returns: BAR1 address of the specified device.
 */
__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE char *
xge_hal_device_bar1(xge_hal_device_t *hldev)
{
	return hldev->bar1;
}

/**
 * xge_hal_device_bar0_set - Set BAR0 mapped address.
 * @hldev: HAL device handle.
 * @bar0: BAR0 mapped address.
 *
 * Set BAR0 address in the HAL device object.
 */
__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void
xge_hal_device_bar0_set(xge_hal_device_t *hldev, char *bar0)
{
	xge_assert(bar0);
	hldev->bar0 = bar0;
}

/**
 * xge_hal_device_isrbar0_set - Set the BAR0 address used in interrupt context.
 * @hldev: HAL device handle.
 * @isrbar0: BAR0 mapped address.
 *
 * Set the ISR-time BAR0 address in the HAL device object.
 */
__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void
xge_hal_device_isrbar0_set(xge_hal_device_t *hldev, char *isrbar0)
{
	xge_assert(isrbar0);
	hldev->isrbar0 = isrbar0;
}

/**
 * xge_hal_device_bar1_set - Set BAR1 mapped address.
 * @hldev: HAL device handle.
 * @channelh: Channel handle.
 * @bar1: BAR1 mapped address.
 *
 * Set BAR1 address for the given channel.
 */
__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void
xge_hal_device_bar1_set(xge_hal_device_t *hldev, xge_hal_channel_h channelh,
			char *bar1)
{
	xge_hal_fifo_t *fifo = (xge_hal_fifo_t *)channelh;

	xge_assert(bar1);
	xge_assert(fifo);

	/* Initialize the BAR1 address as the start of
	 * the FIFO queue pointer and as a location of the FIFO control
	 * word. */
	fifo->hw_pair =
		(xge_hal_fifo_hw_pair_t *) (bar1 +
			(fifo->channel.post_qid * XGE_HAL_FIFO_HW_PAIR_OFFSET));
	hldev->bar1 = bar1;
}
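
/*
 * A minimal attach-time sketch (not part of the HAL): how a ULD might
 * publish its mapped BARs using the setters above. XGE_HAL_EXAMPLES is a
 * hypothetical guard, and the bar0/bar1 pointers are assumed to come from
 * OS-specific PCI mapping code.
 */
#ifdef XGE_HAL_EXAMPLES
static void
example_publish_bars(xge_hal_device_t *hldev, xge_hal_channel_h channelh,
		char *bar0, char *bar1)
{
	xge_hal_device_bar0_set(hldev, bar0);
	/* the same mapping is commonly reused at interrupt time */
	xge_hal_device_isrbar0_set(hldev, bar0);
	/* BAR1 carries the per-FIFO doorbell (hw_pair) area */
	xge_hal_device_bar1_set(hldev, channelh, bar1);
}
#endif /* XGE_HAL_EXAMPLES */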

/**
 * xge_hal_device_rev - Get device revision number.
 * @hldev: HAL device handle.
 *
 * Returns: Device revision number.
 */
__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE int
xge_hal_device_rev(xge_hal_device_t *hldev)
{
	return hldev->revision;
}

/**
 * xge_hal_device_begin_irq - Begin IRQ processing.
 * @hldev: HAL device handle.
 * @reason: "Reason" for the interrupt, the value of Xframe's
 *          general_int_status register.
 *
 * The function performs two actions: it first checks whether the
 * interrupt was raised by the device (relevant for a shared IRQ), and
 * then it masks the device interrupts.
 *
 * Note:
 * xge_hal_device_begin_irq() does not flush MMIO writes through the
 * bridge. Therefore, two back-to-back interrupts are potentially possible.
 * It is the responsibility of the ULD to make sure that only one
 * xge_hal_device_continue_irq() runs at a time.
 *
 * Returns: XGE_HAL_ERR_WRONG_IRQ, if the interrupt is not "ours" (note
 * that in this case the device interrupts remain enabled and *reason is
 * set to 0).
 * XGE_HAL_ERR_CRITICAL, on a slot-freeze condition (queued as an event).
 * Otherwise, xge_hal_device_begin_irq() returns XGE_HAL_OK and stores the
 * 64-bit general interrupt status in *reason.
 * See also: xge_hal_device_handle_irq()
 */
__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE xge_hal_status_e
xge_hal_device_begin_irq(xge_hal_device_t *hldev, u64 *reason)
{
	u64 val64;
	xge_hal_pci_bar0_t *isrbar0 = (xge_hal_pci_bar0_t *)hldev->isrbar0;

	hldev->stats.sw_dev_info_stats.total_intr_cnt++;

	val64 = xge_os_pio_mem_read64(hldev->pdev,
			hldev->regh0, &isrbar0->general_int_status);
	if (xge_os_unlikely(!val64)) {
		/* not an Xframe interrupt */
		hldev->stats.sw_dev_info_stats.not_xge_intr_cnt++;
		*reason = 0;
		return XGE_HAL_ERR_WRONG_IRQ;
	}

	if (xge_os_unlikely(val64 == XGE_HAL_ALL_FOXES)) {
		u64 adapter_status =
			xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
				&isrbar0->adapter_status);
		if (adapter_status == XGE_HAL_ALL_FOXES) {
			(void) xge_queue_produce(hldev->queueh,
				XGE_HAL_EVENT_SLOT_FREEZE,
				hldev,
				1, /* critical: slot freeze */
				sizeof(u64),
				(void *)&adapter_status);
			*reason = 0;
			return XGE_HAL_ERR_CRITICAL;
		}
	}

	*reason = val64;

	/* separate fast path, i.e. no errors */
	if (val64 & XGE_HAL_GEN_INTR_RXTRAFFIC) {
		hldev->stats.sw_dev_info_stats.rx_traffic_intr_cnt++;
		return XGE_HAL_OK;
	}
	if (val64 & XGE_HAL_GEN_INTR_TXTRAFFIC) {
		hldev->stats.sw_dev_info_stats.tx_traffic_intr_cnt++;
		return XGE_HAL_OK;
	}

	hldev->stats.sw_dev_info_stats.not_traffic_intr_cnt++;
	if (xge_os_unlikely(val64 & XGE_HAL_GEN_INTR_TXPIC)) {
		xge_hal_status_e status;
		hldev->stats.sw_dev_info_stats.txpic_intr_cnt++;
		status = __hal_device_handle_txpic(hldev, val64);
		if (status != XGE_HAL_OK) {
			return status;
		}
	}

	if (xge_os_unlikely(val64 & XGE_HAL_GEN_INTR_TXDMA)) {
		xge_hal_status_e status;
		hldev->stats.sw_dev_info_stats.txdma_intr_cnt++;
		status = __hal_device_handle_txdma(hldev, val64);
		if (status != XGE_HAL_OK) {
			return status;
		}
	}

	if (xge_os_unlikely(val64 & XGE_HAL_GEN_INTR_TXMAC)) {
		xge_hal_status_e status;
		hldev->stats.sw_dev_info_stats.txmac_intr_cnt++;
		status = __hal_device_handle_txmac(hldev, val64);
		if (status != XGE_HAL_OK) {
			return status;
		}
	}

	if (xge_os_unlikely(val64 & XGE_HAL_GEN_INTR_TXXGXS)) {
		xge_hal_status_e status;
		hldev->stats.sw_dev_info_stats.txxgxs_intr_cnt++;
		status = __hal_device_handle_txxgxs(hldev, val64);
		if (status != XGE_HAL_OK) {
			return status;
		}
	}

	if (xge_os_unlikely(val64 & XGE_HAL_GEN_INTR_RXPIC)) {
		xge_hal_status_e status;
		hldev->stats.sw_dev_info_stats.rxpic_intr_cnt++;
		status = __hal_device_handle_rxpic(hldev, val64);
		if (status != XGE_HAL_OK) {
			return status;
		}
	}

	if (xge_os_unlikely(val64 & XGE_HAL_GEN_INTR_RXDMA)) {
		xge_hal_status_e status;
		hldev->stats.sw_dev_info_stats.rxdma_intr_cnt++;
		status = __hal_device_handle_rxdma(hldev, val64);
		if (status != XGE_HAL_OK) {
			return status;
		}
	}

	if (xge_os_unlikely(val64 & XGE_HAL_GEN_INTR_RXMAC)) {
		xge_hal_status_e status;
		hldev->stats.sw_dev_info_stats.rxmac_intr_cnt++;
		status = __hal_device_handle_rxmac(hldev, val64);
		if (status != XGE_HAL_OK) {
			return status;
		}
	}

	if (xge_os_unlikely(val64 & XGE_HAL_GEN_INTR_RXXGXS)) {
		xge_hal_status_e status;
		hldev->stats.sw_dev_info_stats.rxxgxs_intr_cnt++;
		status = __hal_device_handle_rxxgxs(hldev, val64);
		if (status != XGE_HAL_OK) {
			return status;
		}
	}

	if (xge_os_unlikely(val64 & XGE_HAL_GEN_INTR_MC)) {
		xge_hal_status_e status;
		hldev->stats.sw_dev_info_stats.mc_intr_cnt++;
		status = __hal_device_handle_mc(hldev, val64);
		if (status != XGE_HAL_OK) {
			return status;
		}
	}

	return XGE_HAL_OK;
}

/**
 * xge_hal_device_clear_rx - Acknowledge (that is, clear) the
 * condition that has caused the Rx interrupt.
 * @hldev: HAL device handle.
 *
 * Acknowledge (that is, clear) the condition that has caused
 * the Rx interrupt.
 * See also: xge_hal_device_begin_irq(), xge_hal_device_continue_irq(),
 * xge_hal_device_clear_tx(), xge_hal_device_mask_rx().
 */
__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void
xge_hal_device_clear_rx(xge_hal_device_t *hldev)
{
	xge_hal_pci_bar0_t *isrbar0 = (xge_hal_pci_bar0_t *)hldev->isrbar0;

	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
			0xFFFFFFFFFFFFFFFFULL,
			&isrbar0->rx_traffic_int);
}

/**
 * xge_hal_device_clear_tx - Acknowledge (that is, clear) the
 * condition that has caused the Tx interrupt.
 * @hldev: HAL device handle.
 *
 * Acknowledge (that is, clear) the condition that has caused
 * the Tx interrupt.
 * See also: xge_hal_device_begin_irq(), xge_hal_device_continue_irq(),
 * xge_hal_device_clear_rx(), xge_hal_device_mask_tx().
 */
__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void
xge_hal_device_clear_tx(xge_hal_device_t *hldev)
{
	xge_hal_pci_bar0_t *isrbar0 = (xge_hal_pci_bar0_t *)hldev->isrbar0;

	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
			0xFFFFFFFFFFFFFFFFULL,
			&isrbar0->tx_traffic_int);
}

/**
 * xge_hal_device_poll_rx_channel - Poll Rx channel for completed
 * descriptors and process the same.
 * @channel: HAL channel.
 * @got_rx: Buffer to return a flag, set if receive completions were
 * processed.
 *
 * The function polls the Rx channel for the completed descriptors and calls
 * the upper-layer driver (ULD) via the supplied completion callback.
 *
 * Returns: XGE_HAL_OK, if the polling completed successfully.
 * XGE_HAL_COMPLETIONS_REMAIN: There are still more completed
 * descriptors available which are yet to be processed.
 *
 * See also: xge_hal_device_poll_tx_channel()
 */
__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE xge_hal_status_e
xge_hal_device_poll_rx_channel(xge_hal_channel_t *channel, int *got_rx)
{
	xge_hal_status_e ret = XGE_HAL_OK;
	xge_hal_dtr_h first_dtrh;
	xge_hal_device_t *hldev = (xge_hal_device_t *)channel->devh;
	u8 t_code;
	int got_bytes;

	/* for each opened rx channel */
	got_bytes = *got_rx = 0;
	((xge_hal_ring_t *)channel)->cmpl_cnt = 0;
	channel->poll_bytes = 0;
	if ((ret = xge_hal_ring_dtr_next_completed(channel, &first_dtrh,
		&t_code)) == XGE_HAL_OK) {
		if (channel->callback(channel, first_dtrh,
			t_code, channel->userdata) != XGE_HAL_OK) {
			(*got_rx) += ((xge_hal_ring_t *)channel)->cmpl_cnt + 1;
			got_bytes += channel->poll_bytes + 1;
			ret = XGE_HAL_COMPLETIONS_REMAIN;
		} else {
			(*got_rx) += ((xge_hal_ring_t *)channel)->cmpl_cnt + 1;
			got_bytes += channel->poll_bytes + 1;
		}
	}

	if (*got_rx) {
		hldev->irq_workload_rxd[channel->post_qid] += *got_rx;
		hldev->irq_workload_rxcnt[channel->post_qid]++;
	}
	hldev->irq_workload_rxlen[channel->post_qid] += got_bytes;

	return ret;
}

/**
 * xge_hal_device_poll_tx_channel - Poll Tx channel for completed
 * descriptors and process the same.
 * @channel: HAL channel.
 * @got_tx: Buffer to return a flag, set if transmit completions were
 * processed.
 *
 * The function polls the Tx channel for the completed descriptors and calls
 * the upper-layer driver (ULD) via the supplied completion callback.
 *
 * Returns: XGE_HAL_OK, if the polling completed successfully.
 * XGE_HAL_COMPLETIONS_REMAIN: There are still more completed
 * descriptors available which are yet to be processed.
 *
 * See also: xge_hal_device_poll_rx_channel().
 */
__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE xge_hal_status_e
xge_hal_device_poll_tx_channel(xge_hal_channel_t *channel, int *got_tx)
{
	xge_hal_dtr_h first_dtrh;
	xge_hal_device_t *hldev = (xge_hal_device_t *)channel->devh;
	u8 t_code;
	int got_bytes;

	/* for each opened tx channel */
	got_bytes = *got_tx = 0;
	channel->poll_bytes = 0;
	if (xge_hal_fifo_dtr_next_completed(channel, &first_dtrh,
		&t_code) == XGE_HAL_OK) {
		if (channel->callback(channel, first_dtrh,
			t_code, channel->userdata) != XGE_HAL_OK) {
			(*got_tx)++;
			got_bytes += channel->poll_bytes + 1;
			return XGE_HAL_COMPLETIONS_REMAIN;
		}
		(*got_tx)++;
		got_bytes += channel->poll_bytes + 1;
	}

	if (*got_tx) {
		hldev->irq_workload_txd[channel->post_qid] += *got_tx;
		hldev->irq_workload_txcnt[channel->post_qid]++;
	}
	hldev->irq_workload_txlen[channel->post_qid] += got_bytes;

	return XGE_HAL_OK;
}

/**
 * xge_hal_device_poll_rx_channels - Poll Rx channels for completed
 * descriptors and process the same.
 * @hldev: HAL device handle.
 * @got_rx: Buffer to return a flag, set if receive completions were
 * processed.
 *
 * The function polls the Rx channels for the completed descriptors and calls
 * the upper-layer driver (ULD) via the supplied completion callback.
 *
 * Returns: XGE_HAL_OK, if the polling completed successfully.
 * XGE_HAL_COMPLETIONS_REMAIN: There are still more completed
 * descriptors available which are yet to be processed.
 *
 * See also: xge_hal_device_poll_tx_channels(), xge_hal_device_continue_irq().
 */
__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE xge_hal_status_e
xge_hal_device_poll_rx_channels(xge_hal_device_t *hldev, int *got_rx)
{
	xge_list_t *item;
	xge_hal_channel_t *channel;

	/* for each opened rx channel */
	xge_list_for_each(item, &hldev->ring_channels) {
		if (hldev->terminating)
			return XGE_HAL_OK;
		channel = xge_container_of(item, xge_hal_channel_t, item);
		if (!(channel->flags & XGE_HAL_CHANNEL_FLAG_USE_RX_POLLING)) {
			(void) xge_hal_device_poll_rx_channel(channel, got_rx);
		}
	}

	return XGE_HAL_OK;
}

/**
 * xge_hal_device_poll_tx_channels - Poll Tx channels for completed
 * descriptors and process the same.
 * @hldev: HAL device handle.
 * @got_tx: Buffer to return a flag, set if transmit completions were
 * processed.
 *
 * The function polls the Tx channels for the completed descriptors and calls
 * the upper-layer driver (ULD) via the supplied completion callback.
 *
 * Returns: XGE_HAL_OK, if the polling completed successfully.
 * XGE_HAL_COMPLETIONS_REMAIN: There are still more completed
 * descriptors available which are yet to be processed.
 *
 * See also: xge_hal_device_poll_rx_channels(), xge_hal_device_continue_irq().
 */
__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE xge_hal_status_e
xge_hal_device_poll_tx_channels(xge_hal_device_t *hldev, int *got_tx)
{
	xge_list_t *item;
	xge_hal_channel_t *channel;

	/* for each opened tx channel */
	xge_list_for_each(item, &hldev->fifo_channels) {
		if (hldev->terminating)
			return XGE_HAL_OK;
		channel = xge_container_of(item, xge_hal_channel_t, item);
		(void) xge_hal_device_poll_tx_channel(channel, got_tx);
	}

	return XGE_HAL_OK;
}
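
/*
 * A minimal polled-mode sketch (not part of the HAL): servicing both
 * directions from a timer or softirq context instead of a hard interrupt.
 * XGE_HAL_EXAMPLES is a hypothetical guard.
 */
#ifdef XGE_HAL_EXAMPLES
static void
example_poll_all_channels(xge_hal_device_t *hldev)
{
	int got_rx = 0, got_tx = 0;

	(void) xge_hal_device_poll_rx_channels(hldev, &got_rx);
	(void) xge_hal_device_poll_tx_channels(hldev, &got_tx);
	/* got_rx/got_tx are nonzero if any completions were handed to
	 * the ULD callbacks during this pass */
}
#endif /* XGE_HAL_EXAMPLES */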

/**
 * xge_hal_device_rx_channel_enable_polling - Enable Rx polling mode.
 * @channel: HAL channel.
 *
 * Mark the channel as polled by the ULD itself, so that
 * xge_hal_device_poll_rx_channels() skips it.
 */
__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void
xge_hal_device_rx_channel_enable_polling(xge_hal_channel_t *channel)
{
	channel->flags |= XGE_HAL_CHANNEL_FLAG_USE_RX_POLLING;
}

/**
 * xge_hal_device_rx_channel_disable_polling - Disable Rx polling mode.
 * @channel: HAL channel.
 */
__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void
xge_hal_device_rx_channel_disable_polling(xge_hal_channel_t *channel)
{
	channel->flags &= ~XGE_HAL_CHANNEL_FLAG_USE_RX_POLLING;
}

/**
 * xge_hal_device_mask_tx - Mask Tx interrupts.
 * @hldev: HAL device handle.
 *
 * Mask Tx device interrupts.
 *
 * See also: xge_hal_device_unmask_tx(), xge_hal_device_mask_rx(),
 * xge_hal_device_clear_tx().
 */
__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void
xge_hal_device_mask_tx(xge_hal_device_t *hldev)
{
	xge_hal_pci_bar0_t *isrbar0 = (xge_hal_pci_bar0_t *)hldev->isrbar0;

	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
			0xFFFFFFFFFFFFFFFFULL,
			&isrbar0->tx_traffic_mask);
}

/**
 * xge_hal_device_mask_rx - Mask Rx interrupts.
 * @hldev: HAL device handle.
 *
 * Mask Rx device interrupts.
 *
 * See also: xge_hal_device_unmask_rx(), xge_hal_device_mask_tx(),
 * xge_hal_device_clear_rx().
 */
__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void
xge_hal_device_mask_rx(xge_hal_device_t *hldev)
{
	xge_hal_pci_bar0_t *isrbar0 = (xge_hal_pci_bar0_t *)hldev->isrbar0;

	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
			0xFFFFFFFFFFFFFFFFULL,
			&isrbar0->rx_traffic_mask);
}

/**
 * xge_hal_device_mask_all - Mask all device interrupts.
 * @hldev: HAL device handle.
 *
 * Mask all device interrupts.
 *
 * See also: xge_hal_device_unmask_all()
 */
__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void
xge_hal_device_mask_all(xge_hal_device_t *hldev)
{
	xge_hal_pci_bar0_t *isrbar0 = (xge_hal_pci_bar0_t *)hldev->isrbar0;

	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
			0xFFFFFFFFFFFFFFFFULL,
			&isrbar0->general_int_mask);
}

/**
 * xge_hal_device_unmask_tx - Unmask Tx interrupts.
 * @hldev: HAL device handle.
 *
 * Unmask Tx device interrupts.
 *
 * See also: xge_hal_device_mask_tx(), xge_hal_device_clear_tx().
 */
__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void
xge_hal_device_unmask_tx(xge_hal_device_t *hldev)
{
	xge_hal_pci_bar0_t *isrbar0 = (xge_hal_pci_bar0_t *)hldev->isrbar0;

	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
			0x0ULL,
			&isrbar0->tx_traffic_mask);
}

/**
 * xge_hal_device_unmask_rx - Unmask Rx interrupts.
 * @hldev: HAL device handle.
 *
 * Unmask Rx device interrupts.
 *
 * See also: xge_hal_device_mask_rx(), xge_hal_device_clear_rx().
 */
__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void
xge_hal_device_unmask_rx(xge_hal_device_t *hldev)
{
	xge_hal_pci_bar0_t *isrbar0 = (xge_hal_pci_bar0_t *)hldev->isrbar0;

	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
			0x0ULL,
			&isrbar0->rx_traffic_mask);
}

/**
 * xge_hal_device_unmask_all - Unmask all device interrupts.
 * @hldev: HAL device handle.
 *
 * Unmask all device interrupts.
 *
 * See also: xge_hal_device_mask_all()
 */
__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void
xge_hal_device_unmask_all(xge_hal_device_t *hldev)
{
	xge_hal_pci_bar0_t *isrbar0 = (xge_hal_pci_bar0_t *)hldev->isrbar0;

	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
			0x0ULL,
			&isrbar0->general_int_mask);
}

/**
 * xge_hal_device_continue_irq - Continue handling IRQ: process all
 * completed descriptors.
 * @hldev: HAL device handle.
 *
 * Process completed descriptors and unmask the device interrupts.
 *
 * The xge_hal_device_continue_irq() walks all open channels
 * and calls the upper-layer driver (ULD) via the supplied completion
 * callback. Note that the completion callback is specified at channel open
 * time, see xge_hal_channel_open().
 *
 * Note that xge_hal_device_continue_irq() is part of the _fast_ path.
 * To optimize the processing, the function does _not_ check for
 * errors and alarms.
 *
 * The latter is done in a polling fashion, via xge_hal_device_poll().
 *
 * Returns: XGE_HAL_OK.
 *
 * See also: xge_hal_device_handle_irq(), xge_hal_device_poll(),
 * xge_hal_ring_dtr_next_completed(),
 * xge_hal_fifo_dtr_next_completed(), xge_hal_channel_callback_f{}.
 */
__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE xge_hal_status_e
xge_hal_device_continue_irq(xge_hal_device_t *hldev)
{
	int got_rx = 1, got_tx = 1;
	int isr_polling_cnt = hldev->config.isr_polling_cnt;
	int count = 0;

	do {
		if (got_rx)
			(void) xge_hal_device_poll_rx_channels(hldev, &got_rx);
		if (got_tx && hldev->tti_enabled)
			(void) xge_hal_device_poll_tx_channels(hldev, &got_tx);

		if (!got_rx && !got_tx)
			break;

		count += (got_rx + got_tx);
	} while (isr_polling_cnt--);

	if (!count)
		hldev->stats.sw_dev_info_stats.not_traffic_intr_cnt++;

	return XGE_HAL_OK;
}

/**
 * xge_hal_device_handle_irq - Handle device IRQ.
 * @hldev: HAL device handle.
 *
 * Perform the complete handling of the line interrupt. The function
 * performs two calls.
 * First it uses xge_hal_device_begin_irq() to check the reason for
 * the interrupt and mask the device interrupts.
 * Second, it calls xge_hal_device_continue_irq() to process all
 * completed descriptors and re-enable the interrupts.
 *
 * Returns: XGE_HAL_OK - success;
 * XGE_HAL_ERR_WRONG_IRQ - (shared) IRQ produced by another device.
 *
 * See also: xge_hal_device_begin_irq(), xge_hal_device_continue_irq().
 */
__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE xge_hal_status_e
xge_hal_device_handle_irq(xge_hal_device_t *hldev)
{
	u64 reason;
	xge_hal_status_e status;

	xge_hal_device_mask_all(hldev);

	status = xge_hal_device_begin_irq(hldev, &reason);
	if (status != XGE_HAL_OK) {
		xge_hal_device_unmask_all(hldev);
		return status;
	}

	if (reason & XGE_HAL_GEN_INTR_RXTRAFFIC) {
		xge_hal_device_clear_rx(hldev);
	}

	status = xge_hal_device_continue_irq(hldev);

	xge_hal_device_clear_tx(hldev);

	xge_hal_device_unmask_all(hldev);

	return status;
}
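
/*
 * A minimal sketch (not part of the HAL): wiring xge_hal_device_handle_irq()
 * into an OS interrupt vector that follows the usual claimed/unclaimed
 * convention for shared line interrupts. XGE_HAL_EXAMPLES is a hypothetical
 * guard.
 */
#ifdef XGE_HAL_EXAMPLES
static int
example_line_isr(void *arg)
{
	xge_hal_device_t *hldev = (xge_hal_device_t *)arg;

	/* XGE_HAL_ERR_WRONG_IRQ means some other device on the shared
	 * line raised the interrupt */
	return (xge_hal_device_handle_irq(hldev) == XGE_HAL_ERR_WRONG_IRQ) ?
	    0 : 1;
}
#endif /* XGE_HAL_EXAMPLES */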

#if defined(XGE_HAL_CONFIG_LRO)

__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL int
__hal_lro_check_for_session_match(lro_t *lro, tcplro_t *tcp, iplro_t *ip)
{
	/* Match source address field */
	if (lro->ip_hdr->saddr != ip->saddr)
		return XGE_HAL_FAIL;

	/* Match destination address field */
	if (lro->ip_hdr->daddr != ip->daddr)
		return XGE_HAL_FAIL;

	/* Match source port field */
	if (lro->tcp_hdr->source != tcp->source)
		return XGE_HAL_FAIL;

	/* Match destination port field */
	if (lro->tcp_hdr->dest != tcp->dest)
		return XGE_HAL_FAIL;

	return XGE_HAL_OK;
}

/*
 * __hal_tcp_seg_len: Find the TCP segment length.
 * @ip: ip header.
 * @tcp: tcp header.
 * Returns: TCP segment length.
 */
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL u16
__hal_tcp_seg_len(iplro_t *ip, tcplro_t *tcp)
{
	u16 ret;

	ret = (xge_os_ntohs(ip->tot_len) -
	       ((ip->version_ihl & 0x0F) << 2) -
	       ((tcp->doff_res) >> 2));
	return (ret);
}
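
/*
 * Worked example for __hal_tcp_seg_len(): for a plain 1500-byte IP
 * datagram with no options, version_ihl is 0x45 (IHL 5, i.e. 5 * 4 = 20
 * bytes of IP header) and doff_res is 0x50 (data offset 5 in the upper
 * 4 bits, so doff_res >> 2 = 20 bytes of TCP header), which yields
 * 1500 - 20 - 20 = 1460 bytes of TCP payload.
 */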

/*
 * __hal_ip_lro_capable: Finds whether the ip frame is LRO capable.
 * @ip: ip header.
 * @ext_info: descriptor info.
 */
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL xge_hal_status_e
__hal_ip_lro_capable(iplro_t *ip,
		     xge_hal_dtr_info_t *ext_info)
{
#ifdef XGE_LL_DEBUG_DUMP_PKT
	{
		u16 i;
		u8 ch, *iph = (u8 *)ip;

		xge_debug_ring(XGE_TRACE, "Dump Ip:");
		for (i = 0; i < 40; i++) {
			ch = ntohs(*((u8 *)(iph + i)));
			xge_os_printf("i:%d %02x, ", i, ch);
		}
	}
#endif

	if (ip->version_ihl != IP_FAST_PATH_HDR_MASK) {
		xge_debug_ring(XGE_ERR, "iphdr != 45 :%d", ip->version_ihl);
		return XGE_HAL_FAIL;
	}

	if (ext_info->proto & XGE_HAL_FRAME_PROTO_IP_FRAGMENTED) {
		xge_debug_ring(XGE_ERR, "IP fragmented");
		return XGE_HAL_FAIL;
	}

	return XGE_HAL_OK;
}

/*
 * __hal_tcp_lro_capable: Finds whether the tcp frame is LRO capable.
 * @ip: ip header.
 * @tcp: tcp header.
 */
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL xge_hal_status_e
__hal_tcp_lro_capable(iplro_t *ip, tcplro_t *tcp, lro_t *lro, int *ts_off)
{
#ifdef XGE_LL_DEBUG_DUMP_PKT
	{
		u8 ch;
		u16 i;

		xge_debug_ring(XGE_TRACE, "Dump Tcp:");
		for (i = 0; i < 20; i++) {
			ch = ntohs(*((u8 *)((u8 *)tcp + i)));
			xge_os_printf("i:%d %02x, ", i, ch);
		}
	}
#endif
	if ((TCP_FAST_PATH_HDR_MASK2 != tcp->ctrl) &&
	    (TCP_FAST_PATH_HDR_MASK3 != tcp->ctrl))
		goto _exit_fail;

	*ts_off = -1;
	if (TCP_FAST_PATH_HDR_MASK1 != tcp->doff_res) {
		u16 tcp_hdr_len = tcp->doff_res >> 2; /* TCP header len */
		u16 off = 20; /* Start of tcp options */
		int i, diff;

		/* Can the packet carry a time stamp option at all? */
		if (tcp_hdr_len < 32) {
			/*
			 * If the session is not opened, we can consider
			 * this packet for LRO.
			 */
			if (lro == NULL)
				return XGE_HAL_OK;

			goto _exit_fail;
		}

		/* Ignore No-operation 0x1 */
		while (((u8 *)tcp)[off] == 0x1)
			off++;

		/* Next option == Timestamp */
		if (((u8 *)tcp)[off] != 0x8) {
			/*
			 * If the session is not opened, we can consider
			 * this packet for LRO.
			 */
			if (lro == NULL)
				return XGE_HAL_OK;

			goto _exit_fail;
		}

		*ts_off = off;
		if (lro == NULL)
			return XGE_HAL_OK;

		/*
		 * Now the session is opened. If the LRO frame doesn't
		 * have a time stamp, we cannot consider the current
		 * packet for LRO.
		 */
		if (lro->ts_off == -1) {
			xge_debug_ring(XGE_ERR, "Pkt received with time "
			    "stamp after session opened with no time "
			    "stamp : %02x %02x", tcp->doff_res, tcp->ctrl);
			return XGE_HAL_FAIL;
		}

		/*
		 * If the difference is greater than three, then there are
		 * more options possible.
		 * Else, there are two cases:
		 * case 1: remaining are padding bytes.
		 * case 2: remaining can contain options or padding.
		 */
		off += ((u8 *)tcp)[off + 1];
		diff = tcp_hdr_len - off;
		if (diff > 3) {
			/*
			 * Probably contains more options.
			 */
			xge_debug_ring(XGE_ERR, "tcphdr not fastpth : pkt "
			    "received with tcp options in addition to "
			    "time stamp after the session is opened "
			    "%02x %02x", tcp->doff_res, tcp->ctrl);
			return XGE_HAL_FAIL;
		}

		for (i = 0; i < diff; i++) {
			u8 byte = ((u8 *)tcp)[off + i];

			/* Ignore end-of-options 0x0 and no-operation 0x1 */
			if ((byte == 0x0) || (byte == 0x1))
				continue;
			xge_debug_ring(XGE_ERR, "tcphdr not fastpth : pkt "
			    "received with tcp options in addition to "
			    "time stamp after the session is opened "
			    "%02x %02x", tcp->doff_res, tcp->ctrl);
			return XGE_HAL_FAIL;
		}

		/*
		 * Update the time stamp of the LRO frame.
		 */
		xge_os_memcpy(((char *)lro->tcp_hdr + lro->ts_off + 2),
		    (char *)((char *)tcp + (*ts_off) + 2), 8);
	}

	return XGE_HAL_OK;

_exit_fail:
	xge_debug_ring(XGE_TRACE, "tcphdr not fastpth %02x %02x",
	    tcp->doff_res, tcp->ctrl);
	return XGE_HAL_FAIL;
}
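
/*
 * Background for the checks above (standard TCP, not Xframe-specific):
 * the timestamp option is kind 8, length 10 - one kind byte, one length
 * byte, then a 4-byte TSval and a 4-byte TSecr. That is why the code
 * records ts_off at the kind byte and copies 8 bytes starting at
 * ts_off + 2 to refresh the stored session's TSval/TSecr.
 */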

/*
 * __hal_lro_capable: Finds whether the frame is LRO capable.
 * @buffer: Ethernet frame.
 * @ip: ip frame.
 * @tcp: tcp frame.
 * @ext_info: Descriptor info.
 */
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL xge_hal_status_e
__hal_lro_capable(u8 *buffer,
		  iplro_t **ip,
		  tcplro_t **tcp,
		  xge_hal_dtr_info_t *ext_info)
{
	u8 ip_off, ip_length;

	if (!(ext_info->proto & XGE_HAL_FRAME_PROTO_TCP)) {
		xge_debug_ring(XGE_ERR, "Can't do LRO %d", ext_info->proto);
		return XGE_HAL_FAIL;
	}

	if (!*ip) {
#ifdef XGE_LL_DEBUG_DUMP_PKT
		{
			u8 ch;
			u16 i;

			xge_os_printf("Dump Eth:");
			for (i = 0; i < 60; i++) {
				ch = ntohs(*((u8 *)(buffer + i)));
				xge_os_printf("i:%d %02x, ", i, ch);
			}
		}
#endif

		switch (ext_info->frame) {
		case XGE_HAL_FRAME_TYPE_DIX:
			ip_off = XGE_HAL_HEADER_ETHERNET_II_802_3_SIZE;
			break;
		case XGE_HAL_FRAME_TYPE_LLC:
			ip_off = (XGE_HAL_HEADER_ETHERNET_II_802_3_SIZE +
				  XGE_HAL_HEADER_802_2_SIZE);
			break;
		case XGE_HAL_FRAME_TYPE_SNAP:
			ip_off = (XGE_HAL_HEADER_ETHERNET_II_802_3_SIZE +
				  XGE_HAL_HEADER_SNAP_SIZE);
			break;
		default: /* XGE_HAL_FRAME_TYPE_IPX, etc. */
			return XGE_HAL_FAIL;
		}

		if (ext_info->proto & XGE_HAL_FRAME_PROTO_VLAN_TAGGED) {
			ip_off += XGE_HAL_HEADER_VLAN_SIZE;
		}

		/* Grab ip, tcp headers */
		*ip = (iplro_t *)((char *)buffer + ip_off);
	} /* !*ip */

	ip_length = (u8)((*ip)->version_ihl & 0x0F);
	ip_length = ip_length << 2;
	*tcp = (tcplro_t *)((char *)*ip + ip_length);

	xge_debug_ring(XGE_TRACE, "ip_length:%d ip:"XGE_OS_LLXFMT
	    " tcp:"XGE_OS_LLXFMT"", (int)ip_length,
	    (unsigned long long)(ulong_t)*ip, (unsigned long long)(ulong_t)*tcp);

	return XGE_HAL_OK;
}

/*
 * __hal_open_lro_session: Open a new LRO session.
 * @buffer: Ethernet frame.
 * @ip: ip header.
 * @tcp: tcp header.
 * @lro: lro pointer
 * @hldev: Hal context.
 * @ring_lro: LRO descriptor per rx ring.
 * @slot: Bucket no.
 * @tcp_seg_len: Length of tcp segment.
 * @ts_off: time stamp offset in the packet.
 */
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL void
__hal_open_lro_session(u8 *buffer, iplro_t *ip, tcplro_t *tcp, lro_t **lro,
		xge_hal_device_t *hldev, xge_hal_lro_desc_t *ring_lro,
		int slot, u32 tcp_seg_len, int ts_off)
{
	lro_t *lro_new = &ring_lro->lro_pool[slot];

	lro_new->in_use			= 1;
	lro_new->ll_hdr			= buffer;
	lro_new->ip_hdr			= ip;
	lro_new->tcp_hdr		= tcp;
	lro_new->tcp_next_seq_num	= tcp_seg_len +
					  xge_os_ntohl(tcp->seq);
	lro_new->tcp_seq_num		= tcp->seq;
	lro_new->tcp_ack_num		= tcp->ack_seq;
	lro_new->sg_num			= 1;
	lro_new->total_length		= xge_os_ntohs(ip->tot_len);
	lro_new->frags_len		= 0;
	lro_new->ts_off			= ts_off;

	hldev->stats.sw_dev_info_stats.tot_frms_lroised++;
	hldev->stats.sw_dev_info_stats.tot_lro_sessions++;

	*lro = ring_lro->lro_recent = lro_new;
}

/*
 * __hal_lro_get_free_slot: Get a free LRO bucket.
 * @ring_lro: LRO descriptor per ring.
 */
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL int
__hal_lro_get_free_slot(xge_hal_lro_desc_t *ring_lro)
{
	int i;

	for (i = 0; i < XGE_HAL_LRO_MAX_BUCKETS; i++) {
		lro_t *lro_temp = &ring_lro->lro_pool[i];

		if (!lro_temp->in_use)
			return i;
	}
	return -1;
}

/*
 * __hal_get_lro_session: Gets a matching LRO session or creates one.
 * @eth_hdr: Ethernet header.
 * @ip: ip header.
 * @tcp: tcp header.
 * @lro: lro pointer
 * @ext_info: Descriptor info.
 * @hldev: Hal context.
 * @ring_lro: LRO descriptor per rx ring
 */
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL xge_hal_status_e
__hal_get_lro_session(u8 *eth_hdr,
		      iplro_t *ip,
		      tcplro_t *tcp,
		      lro_t **lro,
		      xge_hal_dtr_info_t *ext_info,
		      xge_hal_device_t *hldev,
		      xge_hal_lro_desc_t *ring_lro,
		      lro_t **lro_end3 /* Valid only when ret=END_3 */)
{
	lro_t *lro_match;
	int i, free_slot = -1;
	u32 tcp_seg_len;
	int ts_off = -1;

	*lro = lro_match = NULL;
	/*
	 * Compare the incoming frame with the lro session left from the
	 * previous call.  There is a good chance that this incoming frame
	 * matches the lro session.
	 */
	if (ring_lro->lro_recent && ring_lro->lro_recent->in_use) {
		if (__hal_lro_check_for_session_match(ring_lro->lro_recent,
						      tcp, ip) == XGE_HAL_OK)
			lro_match = ring_lro->lro_recent;
	}

	if (!lro_match) {
		/*
		 * Search in the pool of LROs for the session that matches
		 * the incoming frame.
		 */
		for (i = 0; i < XGE_HAL_LRO_MAX_BUCKETS; i++) {
			lro_t *lro_temp = &ring_lro->lro_pool[i];

			if (!lro_temp->in_use) {
				if (free_slot == -1)
					free_slot = i;
				continue;
			}

			if (__hal_lro_check_for_session_match(lro_temp, tcp,
						ip) == XGE_HAL_OK) {
				lro_match = lro_temp;
				break;
			}
		}
	}

	if (lro_match) {
		/*
		 * Matching LRO session found.
		 */
		*lro = lro_match;

		if (lro_match->tcp_next_seq_num != xge_os_ntohl(tcp->seq)) {
			xge_debug_ring(XGE_ERR, "**retransmit **found***");
			hldev->stats.sw_dev_info_stats.lro_out_of_seq_pkt_cnt++;
			return XGE_HAL_INF_LRO_END_2;
		}

		if (XGE_HAL_OK != __hal_ip_lro_capable(ip, ext_info)) {
			return XGE_HAL_INF_LRO_END_2;
		}

		if (XGE_HAL_OK != __hal_tcp_lro_capable(ip, tcp, lro_match,
							&ts_off)) {
			/*
			 * Close the current session and open a new
			 * LRO session with this packet,
			 * provided it has tcp payload.
			 */
			tcp_seg_len = __hal_tcp_seg_len(ip, tcp);
			if (tcp_seg_len == 0) {
				return XGE_HAL_INF_LRO_END_2;
			}

			/* Get a free bucket */
			free_slot = __hal_lro_get_free_slot(ring_lro);
			if (free_slot == -1) {
				return XGE_HAL_INF_LRO_END_2;
			}

			/*
			 * Open a new LRO session.
			 */
			__hal_open_lro_session(eth_hdr, ip, tcp, lro_end3,
			    hldev, ring_lro, free_slot, tcp_seg_len,
			    ts_off);

			return XGE_HAL_INF_LRO_END_3;
		}

		/*
		 * The frame is good, in-sequence, can be LRO-ed;
		 * take its (latest) ACK - unless it is a dupack.
		 * Note: to be exact need to check window size as well.
		 */
		if (lro_match->tcp_ack_num == tcp->ack_seq &&
		    lro_match->tcp_seq_num == tcp->seq) {
			hldev->stats.sw_dev_info_stats.lro_dup_pkt_cnt++;
			return XGE_HAL_INF_LRO_END_2;
		}

		lro_match->tcp_seq_num = tcp->seq;
		lro_match->tcp_ack_num = tcp->ack_seq;
		lro_match->frags_len += __hal_tcp_seg_len(ip, tcp);

		ring_lro->lro_recent = lro_match;

		return XGE_HAL_INF_LRO_CONT;
	}

	/* ********** New Session *************** */
	if (free_slot == -1)
		return XGE_HAL_INF_LRO_UNCAPABLE;

	if (XGE_HAL_FAIL == __hal_ip_lro_capable(ip, ext_info))
		return XGE_HAL_INF_LRO_UNCAPABLE;

	if (XGE_HAL_FAIL == __hal_tcp_lro_capable(ip, tcp, NULL, &ts_off))
		return XGE_HAL_INF_LRO_UNCAPABLE;

	xge_debug_ring(XGE_TRACE, "Creating lro session.");

	/*
	 * Open a LRO session, provided the packet contains payload.
	 */
	tcp_seg_len = __hal_tcp_seg_len(ip, tcp);
	if (tcp_seg_len == 0)
		return XGE_HAL_INF_LRO_UNCAPABLE;

	__hal_open_lro_session(eth_hdr, ip, tcp, lro, hldev, ring_lro,
	    free_slot, tcp_seg_len, ts_off);

	return XGE_HAL_INF_LRO_BEGIN;
}
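
/*
 * Summary of __hal_get_lro_session() outcomes, as implemented above:
 * XGE_HAL_INF_LRO_BEGIN      - no matching session; a new one was opened
 *                              for this frame (*lro).
 * XGE_HAL_INF_LRO_CONT       - frame matches an open session and may be
 *                              appended to it.
 * XGE_HAL_INF_LRO_END_2      - the matching session must be flushed; the
 *                              frame itself was not merged.
 * XGE_HAL_INF_LRO_END_3      - the matching session must be flushed, and a
 *                              new session was opened with this frame
 *                              (*lro_end3).
 * XGE_HAL_INF_LRO_UNCAPABLE  - the frame cannot be LRO-ed and no session
 *                              state was touched.
 */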

/*
 * __hal_lro_under_optimal_thresh: Finds whether the combined session is
 * still under the configured frame-length and sg-count thresholds.
 * @ip: ip header.
 * @tcp: tcp header.
 * @lro: lro pointer
 * @hldev: Hal context.
 */
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL xge_hal_status_e
__hal_lro_under_optimal_thresh(iplro_t *ip,
			       tcplro_t *tcp,
			       lro_t *lro,
			       xge_hal_device_t *hldev)
{
	if (!lro)
		return XGE_HAL_FAIL;

	if ((lro->total_length + __hal_tcp_seg_len(ip, tcp)) >
	    hldev->config.lro_frm_len) {
		xge_debug_ring(XGE_TRACE, "Max LRO frame len exceeded:"
		    " max length %d ", hldev->config.lro_frm_len);
		hldev->stats.sw_dev_info_stats.lro_frm_len_exceed_cnt++;
		return XGE_HAL_FAIL;
	}

	if (lro->sg_num == hldev->config.lro_sg_size) {
		xge_debug_ring(XGE_TRACE, "Max sg count exceeded:"
		    " max sg %d ", hldev->config.lro_sg_size);
		hldev->stats.sw_dev_info_stats.lro_sg_exceed_cnt++;
		return XGE_HAL_FAIL;
	}

	return XGE_HAL_OK;
}

/*
 * __hal_collapse_ip_hdr: Collapses the ip header.
 * @ip: ip header.
 * @tcp: tcp header.
 * @lro: lro pointer
 * @hldev: Hal context.
 */
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL xge_hal_status_e
__hal_collapse_ip_hdr(iplro_t *ip,
		      tcplro_t *tcp,
		      lro_t *lro,
		      xge_hal_device_t *hldev)
{
	lro->total_length += __hal_tcp_seg_len(ip, tcp);

	/* Maybe we have to handle time stamps or more options */

	return XGE_HAL_OK;
}

/*
 * __hal_collapse_tcp_hdr: Collapses the tcp header.
 * @ip: ip header.
 * @tcp: tcp header.
 * @lro: lro pointer
 * @hldev: Hal context.
 */
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL xge_hal_status_e
__hal_collapse_tcp_hdr(iplro_t *ip,
		       tcplro_t *tcp,
		       lro_t *lro,
		       xge_hal_device_t *hldev)
{
	lro->tcp_next_seq_num += __hal_tcp_seg_len(ip, tcp);
	return XGE_HAL_OK;
}

/*
 * __hal_append_lro: Appends a new frame to the existing LRO session.
 * @ip: ip header.
 * @tcp: IN tcp header, OUT tcp payload.
 * @seg_len: tcp payload length.
 * @lro: lro pointer
 * @hldev: Hal context.
 */
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL xge_hal_status_e
__hal_append_lro(iplro_t *ip,
		 tcplro_t **tcp,
		 u32 *seg_len,
		 lro_t *lro,
		 xge_hal_device_t *hldev)
{
	(void) __hal_collapse_ip_hdr(ip, *tcp, lro, hldev);
	(void) __hal_collapse_tcp_hdr(ip, *tcp, lro, hldev);
	/*
	 * Updating the mbuf chain is done in the LL driver. On successful
	 * append, xge_hal_accumulate_large_rx() hands the LL driver the
	 * tcp payload pointer and the tcp payload length, along with the
	 * "lro frame appended" return code.
	 */
	lro->sg_num++;
	*seg_len = __hal_tcp_seg_len(ip, *tcp);
	*tcp = (tcplro_t *)((char *)*tcp + (((*tcp)->doff_res) >> 2));

	return XGE_HAL_OK;
}

/**
 * xge_hal_lro_process_rx: LRO a given frame.
 * @ring: rx ring number
 * @eth_hdr: ethernet header.
 * @ip_hdr: ip header (optional)
 * @tcp: tcp header.
 * @seglen: packet length.
 * @p_lro: lro pointer.
 * @ext_info: descriptor info, see xge_hal_dtr_info_t{}.
 * @hldev: HAL device.
 * @lro_end3: for lro_end3 output
 *
 * LRO the newly received frame, i.e. attach it (if possible) to the
 * already accumulated (i.e., already LRO-ed) received frames (if any),
 * to form one super-sized frame for the subsequent processing
 * by the stack.
 */
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL xge_hal_status_e
xge_hal_lro_process_rx(int ring, u8 *eth_hdr, u8 *ip_hdr, tcplro_t **tcp,
		       u32 *seglen, lro_t **p_lro,
		       xge_hal_dtr_info_t *ext_info, xge_hal_device_t *hldev,
		       lro_t **lro_end3)
{
	iplro_t *ip = (iplro_t *)ip_hdr;
	xge_hal_status_e ret;
	lro_t *lro;

	xge_debug_ring(XGE_TRACE, "Entered accumu lro. ");
	if (XGE_HAL_OK != __hal_lro_capable(eth_hdr, &ip, (tcplro_t **)tcp,
					    ext_info))
		return XGE_HAL_INF_LRO_UNCAPABLE;

	/*
	 * This function shall get a matching LRO session or else
	 * create one and return it.
	 */
	ret = __hal_get_lro_session(eth_hdr, ip, (tcplro_t *)*tcp,
	    p_lro, ext_info, hldev, &hldev->lro_desc[ring],
	    lro_end3);
	xge_debug_ring(XGE_TRACE, "ret from get_lro:%d ", ret);
	lro = *p_lro;
	if (XGE_HAL_INF_LRO_CONT == ret) {
		if (XGE_HAL_OK == __hal_lro_under_optimal_thresh(ip,
					(tcplro_t *)*tcp, lro, hldev)) {
			(void) __hal_append_lro(ip, (tcplro_t **)tcp, seglen,
						lro, hldev);
			hldev->stats.sw_dev_info_stats.tot_frms_lroised++;

			if (lro->sg_num >= hldev->config.lro_sg_size) {
				hldev->stats.sw_dev_info_stats.lro_sg_exceed_cnt++;
				ret = XGE_HAL_INF_LRO_END_1;
			}

		} else
			ret = XGE_HAL_INF_LRO_END_2;
	}

	/*
	 * Since it is time to flush, update the ip header so that
	 * the frame can be sent up.
	 */
	if ((ret == XGE_HAL_INF_LRO_END_1) ||
	    (ret == XGE_HAL_INF_LRO_END_2) ||
	    (ret == XGE_HAL_INF_LRO_END_3)) {
		lro->ip_hdr->tot_len = xge_os_htons((*p_lro)->total_length);
		lro->ip_hdr->check = xge_os_htons(0);
		lro->ip_hdr->check = XGE_LL_IP_FAST_CSUM(((u8 *)(lro->ip_hdr)),
				(lro->ip_hdr->version_ihl & 0x0F));
		lro->tcp_hdr->ack_seq = lro->tcp_ack_num;
	}

	return (ret);
}
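
/*
 * A minimal receive-path sketch (not part of the HAL): how an LL driver
 * might drive xge_hal_lro_process_rx() per received frame. The frame and
 * delivery plumbing (my_eth_hdr, my_ip_hdr, deliver()) is hypothetical.
 *
 *	lro_t *lro, *lro_end3;
 *	tcplro_t *tcp;
 *	u32 seglen;
 *
 *	switch (xge_hal_lro_process_rx(ring, my_eth_hdr, my_ip_hdr, &tcp,
 *	    &seglen, &lro, ext_info, hldev, &lro_end3)) {
 *	case XGE_HAL_INF_LRO_BEGIN:
 *	case XGE_HAL_INF_LRO_CONT:
 *		break;				// keep accumulating
 *	case XGE_HAL_INF_LRO_END_1:
 *	case XGE_HAL_INF_LRO_END_2:
 *		deliver(lro);			// flush the super-frame
 *		xge_hal_lro_close_session(lro);
 *		break;				// (for END_2 the current
 *						// frame goes up separately)
 *	case XGE_HAL_INF_LRO_END_3:
 *		deliver(lro);			// flush, keep the new session
 *		xge_hal_lro_close_session(lro);
 *		lro = lro_end3;
 *		break;
 *	default:				// XGE_HAL_INF_LRO_UNCAPABLE
 *		break;				// frame goes up unmerged
 *	}
 */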

/**
 * xge_hal_accumulate_large_rx: LRO a given frame.
 * @buffer: Ethernet frame.
 * @tcp: tcp header.
 * @seglen: packet length.
 * @p_lro: lro pointer.
 * @ext_info: descriptor info, see xge_hal_dtr_info_t{}.
 * @hldev: HAL device.
 * @lro_end3: for lro_end3 output
 *
 * LRO the newly received frame, i.e. attach it (if possible) to the
 * already accumulated (i.e., already LRO-ed) received frames (if any),
 * to form one super-sized frame for the subsequent processing
 * by the stack.
 */
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL xge_hal_status_e
xge_hal_accumulate_large_rx(u8 *buffer, tcplro_t **tcp, u32 *seglen,
		lro_t **p_lro, xge_hal_dtr_info_t *ext_info,
		xge_hal_device_t *hldev, lro_t **lro_end3)
{
	int ring = 0;

	return xge_hal_lro_process_rx(ring, buffer, NULL, tcp, seglen, p_lro,
	    ext_info, hldev, lro_end3);
}

/**
 * xge_hal_lro_close_session: Close LRO session.
 * @lro: LRO Session.
 */
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL void
xge_hal_lro_close_session(lro_t *lro)
{
	lro->in_use = 0;
}

/**
 * xge_hal_lro_next_session: Returns the next LRO session in the list,
 * or NULL if none exists.
 * @hldev: HAL Context.
 * @ring: rx ring number.
 */
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL lro_t *
xge_hal_lro_next_session(xge_hal_device_t *hldev, int ring)
{
	xge_hal_lro_desc_t *ring_lro = &hldev->lro_desc[ring];
	int i;
	int start_idx = ring_lro->lro_next_idx;

	for (i = start_idx; i < XGE_HAL_LRO_MAX_BUCKETS; i++) {
		lro_t *lro = &ring_lro->lro_pool[i];

		if (!lro->in_use)
			continue;

		lro->ip_hdr->tot_len = xge_os_htons(lro->total_length);
		lro->ip_hdr->check = xge_os_htons(0);
		lro->ip_hdr->check = XGE_LL_IP_FAST_CSUM(((u8 *)(lro->ip_hdr)),
				(lro->ip_hdr->version_ihl & 0x0F));
		ring_lro->lro_next_idx = i + 1;
		return lro;
	}

	ring_lro->lro_next_idx = 0;
	return NULL;
}
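
/*
 * A minimal flush sketch (not part of the HAL): draining every open LRO
 * session on a ring, e.g. at the end of a poll cycle. XGE_HAL_EXAMPLES is
 * a hypothetical guard; handing the super-frame to the stack is elided.
 */
#ifdef XGE_HAL_EXAMPLES
static void
example_flush_lro_ring(xge_hal_device_t *hldev, int ring)
{
	lro_t *lro;

	while ((lro = xge_hal_lro_next_session(hldev, ring)) != NULL) {
		/* ... deliver the accumulated super-frame here ... */
		xge_hal_lro_close_session(lro);
	}
}
#endif /* XGE_HAL_EXAMPLES */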

__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL lro_t *
xge_hal_lro_get_next_session(xge_hal_device_t *hldev)
{
	int ring = 0; /* assume default ring=0 */

	return xge_hal_lro_next_session(hldev, ring);
}
#endif /* defined(XGE_HAL_CONFIG_LRO) */