xref: /titanic_50/usr/src/uts/common/io/nge/nge_rx.c (revision c0889d7a91fa87e1cb7ef4457629b0cb51d47b50)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include "nge.h"

#undef	NGE_DBG
#define	NGE_DBG		NGE_DBG_RECV

#define	RXD_END		0x20000000
#define	RXD_ERR		0x40000000
#define	RXD_OWN		0x80000000
#define	RXD_CSUM_MSK	0x1C000000
#define	RXD_BCNT_MSK	0x00003FFF

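/*
 * Values of the 3-bit checksum-status field (RXD_CSUM_MSK, bits 28:26)
 * in the receive descriptor status word, reporting the hardware's
 * IP/TCP/UDP checksum verification result for the received frame.
 */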
#define	RXD_CK8G_NO_HSUM	0x0
#define	RXD_CK8G_TCP_SUM_ERR	0x04000000
#define	RXD_CK8G_UDP_SUM_ERR	0x08000000
#define	RXD_CK8G_IP_HSUM_ERR	0x0C000000
#define	RXD_CK8G_IP_HSUM	0x10000000
#define	RXD_CK8G_TCP_SUM	0x14000000
#define	RXD_CK8G_UDP_SUM	0x18000000
#define	RXD_CK8G_RESV		0x1C000000

extern ddi_device_acc_attr_t nge_data_accattr;

/*
 * Callback code invoked from STREAMS when the receive data buffer is free
 * for recycling.
 *
 * The following table describes the function's behaviour:
 *
 *                      | mac stopped | mac running
 * ---------------------------------------------------
 * buffer delivered     | free buffer | recycle buffer
 * buffer not delivered | do nothing  | recycle buffer (*)
 *
 * Note (*):
 *   Recycle the buffer only if the mac state did not change while this
 *   function was executing.  If the mac state did change, mark the buffer
 *   as delivered and re-enter this function by calling freemsg().
 */

void
nge_recv_recycle(caddr_t arg)
{
	boolean_t val;
	boolean_t valid;
	nge_t *ngep;
	dma_area_t *bufp;
	buff_ring_t *brp;
	nge_sw_statistics_t *sw_stp;

	bufp = (dma_area_t *)arg;
	ngep = (nge_t *)bufp->private;
	brp = ngep->buff;
	sw_stp = &ngep->statistics.sw_statistics;

	/*
	 * Free the buffer directly if it was allocated for a previous
	 * generation of the ring (stale signature) or the mac was stopped.
	 */
	if (bufp->signature != brp->buf_sign) {
		if (bufp->rx_delivered == B_TRUE) {
			nge_free_dma_mem(bufp);
			kmem_free(bufp, sizeof (dma_area_t));
			val = nge_atomic_decrease(&brp->rx_hold, 1);
			ASSERT(val == B_TRUE);
		}
		return;
	}

	/*
	 * Recycle the data buffer: rewrap it in a new mblk and return it
	 * to the ring's recycle list.
	 */
	bufp->rx_recycle.free_func = nge_recv_recycle;
	bufp->rx_recycle.free_arg = (caddr_t)bufp;

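	/*
	 * Rewrap the DMA buffer in a fresh mblk; when the upper layers
	 * eventually free that mblk, this callback (nge_recv_recycle) is
	 * invoked again via the registered free routine.
	 */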
	bufp->mp = desballoc(DMA_VPTR(*bufp),
	    ngep->buf_size + NGE_HEADROOM, 0, &bufp->rx_recycle);

	if (bufp->mp == NULL) {
		sw_stp->mp_alloc_err++;
		sw_stp->recy_free++;
		nge_free_dma_mem(bufp);
		kmem_free(bufp, sizeof (dma_area_t));
		val = nge_atomic_decrease(&brp->rx_hold, 1);
		ASSERT(val == B_TRUE);
	} else {

		mutex_enter(brp->recycle_lock);
		if (bufp->signature != brp->buf_sign)
			valid = B_TRUE;
		else
			valid = B_FALSE;
		bufp->rx_delivered = valid;
		if (bufp->rx_delivered == B_FALSE)  {
			bufp->next = brp->recycle_list;
			brp->recycle_list = bufp;
		}
		mutex_exit(brp->recycle_lock);
		if (valid == B_TRUE)
			/* call nge_recv_recycle() again to free it */
			freemsg(bufp->mp);
		else {
			val = nge_atomic_decrease(&brp->rx_hold, 1);
			ASSERT(val == B_TRUE);
		}
	}
}

/*
 * Examine the receive BD(s) (one or more) holding one complete packet
 * and build an mblk for it.
 * start_index: index of the first BD of the packet.
 * len: length of the packet in bytes.
 */
static mblk_t *nge_recv_packet(nge_t *ngep, uint32_t start_index, size_t len);
#pragma	inline(nge_recv_packet)

static mblk_t *
nge_recv_packet(nge_t *ngep, uint32_t start_index, size_t len)
{
	uint8_t *rptr;
	uint32_t minsize;
	uint32_t maxsize;
	mblk_t *mp;
	buff_ring_t *brp;
	sw_rx_sbd_t *srbdp;
	dma_area_t *bufp;
	nge_sw_statistics_t *sw_stp;
	void *hw_bd_p;

	brp = ngep->buff;
	minsize = ETHERMIN;
	maxsize = ngep->max_sdu;
	sw_stp = &ngep->statistics.sw_statistics;
	mp = NULL;

	srbdp = &brp->sw_rbds[start_index];
	DMA_SYNC(*srbdp->bufp, DDI_DMA_SYNC_FORKERNEL);
	hw_bd_p = DMA_VPTR(srbdp->desc);

	/*
	 * First check the free list; if it is empty, move the
	 * recycle list over to become the new free list.
	 */
	if (brp->free_list == NULL) {
		mutex_enter(brp->recycle_lock);
		brp->free_list = brp->recycle_list;
		brp->recycle_list = NULL;
		mutex_exit(brp->recycle_lock);
	}
	bufp = brp->free_list;
	/* If the packet length is out of range, drop it */
	if (len > maxsize || len < minsize) {
		ngep->desc_attr.rxd_fill(hw_bd_p, &srbdp->bufp->cookie,
		    srbdp->bufp->alength);
		srbdp->flags = CONTROLER_OWN;
		return (NULL);
	}

	/*
	 * If the received packet is no larger than the RX bcopy threshold,
	 * or no buffer is available from the free or recycle lists,
	 * use bcopy directly.
	 */
	if (len <= ngep->param_rxbcopy_threshold || bufp == NULL)
		brp->rx_bcopy = B_TRUE;
	else
		brp->rx_bcopy = B_FALSE;

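	/*
	 * bcopy path: allocate a fresh mblk and copy the frame out of the
	 * DMA buffer, leaving the original buffer bound to this descriptor.
	 */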
	if (brp->rx_bcopy) {
		mp = allocb(len + NGE_HEADROOM, 0);
		if (mp == NULL) {
			sw_stp->mp_alloc_err++;
			ngep->desc_attr.rxd_fill(hw_bd_p, &srbdp->bufp->cookie,
			    srbdp->bufp->alength);
			srbdp->flags = CONTROLER_OWN;
			return (NULL);
		}
		rptr = DMA_VPTR(*srbdp->bufp);
		mp->b_rptr = mp->b_rptr + NGE_HEADROOM;
		bcopy(rptr + NGE_HEADROOM, mp->b_rptr, len);
		mp->b_wptr = mp->b_rptr + len;
	} else {
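		/*
		 * Zero-copy path: pass the DMA buffer's own mblk upstream
		 * and rebind this descriptor to a buffer taken from the
		 * free list.
		 */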
		mp = srbdp->bufp->mp;
		/*
		 * Make sure the packet *contents* are 4-byte aligned
		 */
		mp->b_rptr += NGE_HEADROOM;
		mp->b_wptr = mp->b_rptr + len;
		mp->b_next = mp->b_cont = NULL;
		srbdp->bufp->rx_delivered = B_TRUE;
		srbdp->bufp = NULL;
		nge_atomic_increase(&brp->rx_hold, 1);

		/* Refill the descriptor with a buffer from the free list */
		srbdp->bufp = bufp;
		brp->free_list = bufp->next;
		bufp->next = NULL;
	}

	/* replenish the buffer for the hardware descriptor */
	ngep->desc_attr.rxd_fill(hw_bd_p, &srbdp->bufp->cookie,
	    srbdp->bufp->alength);
	srbdp->flags = CONTROLER_OWN;
	sw_stp->rbytes += len;
	sw_stp->recv_count++;

	return (mp);
}


#define	RX_HW_ERR	0x01
#define	RX_SUM_NO	0x02
#define	RX_SUM_ERR	0x04

/*
 * Account for receive errors in the software statistics and generate a
 * log message for them.
 * Note:
 * RXE, parity, symbol and CRC errors are already recorded by nvidia's
 * hardware statistics block (nge_statistics), so it is unnecessary for
 * the driver to record them here.
 */
static uint32_t
nge_rxsta_handle(nge_t *ngep, uint32_t stflag, uint32_t *pflags);
#pragma	inline(nge_rxsta_handle)

static uint32_t
nge_rxsta_handle(nge_t *ngep, uint32_t stflag, uint32_t *pflags)
{
	uint32_t errors;
	uint32_t err_flag;
	nge_sw_statistics_t *sw_stp;

	err_flag = 0;
	sw_stp = &ngep->statistics.sw_statistics;

	if ((RXD_END & stflag) == 0)
		return (RX_HW_ERR);

	errors = stflag & RXD_CSUM_MSK;
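	/* Map the hardware checksum status onto the MAC-layer hcksum flags */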
	switch (errors) {
	default:
		break;

	case RXD_CK8G_TCP_SUM:
	case RXD_CK8G_UDP_SUM:
		*pflags |= HCK_FULLCKSUM;
		*pflags |= HCK_IPV4_HDRCKSUM;
		*pflags |= HCK_FULLCKSUM_OK;
		break;

	case RXD_CK8G_TCP_SUM_ERR:
	case RXD_CK8G_UDP_SUM_ERR:
		sw_stp->tcp_hwsum_err++;
		*pflags |= HCK_IPV4_HDRCKSUM;
		break;

	case RXD_CK8G_IP_HSUM:
		*pflags |= HCK_IPV4_HDRCKSUM;
		break;

	case RXD_CK8G_NO_HSUM:
		err_flag |= RX_SUM_NO;
		break;

	case RXD_CK8G_IP_HSUM_ERR:
		sw_stp->ip_hwsum_err++;
		err_flag |= RX_SUM_ERR;
		break;
	}

	if ((stflag & RXD_ERR) != 0) {

		err_flag |= RX_HW_ERR;
		NGE_DEBUG(("Receive desc error, status: 0x%x", stflag));
	}

	return (err_flag);
}

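/*
 * Gather completed packets from the receive ring (up to
 * param_recv_max_packet per call) into an mblk chain and return the head
 * of the chain (NULL if nothing was received).
 */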
static mblk_t *
nge_recv_ring(nge_t *ngep)
{
	uint32_t stflag;
	uint32_t flag_err;
	uint32_t sum_flags;
	size_t len;
	uint64_t end_index;
	uint64_t sync_start;
	mblk_t *mp;
	mblk_t **tail;
	mblk_t *head;
	recv_ring_t *rrp;
	buff_ring_t *brp;
	sw_rx_sbd_t *srbdp;
	void *hw_bd_p;
	nge_mode_cntl mode_cntl;

	mp = NULL;
	head = NULL;
	tail = &head;
	rrp = ngep->recv;
	brp = ngep->buff;

	end_index = sync_start = rrp->prod_index;
	/* Sync the descriptors for the kernel */
	if (sync_start + ngep->param_recv_max_packet <= ngep->rx_desc) {
		(void) ddi_dma_sync(rrp->desc.dma_hdl,
		    sync_start * ngep->desc_attr.rxd_size,
		    ngep->param_recv_max_packet * ngep->desc_attr.rxd_size,
		    DDI_DMA_SYNC_FORKERNEL);
	} else {
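		/*
		 * The descriptors to sync wrap past the end of the ring;
		 * sync from sync_start to the end of the mapping, then the
		 * remainder from the start of the ring.
		 */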
		(void) ddi_dma_sync(rrp->desc.dma_hdl,
		    sync_start * ngep->desc_attr.rxd_size,
		    0,
		    DDI_DMA_SYNC_FORKERNEL);
		(void) ddi_dma_sync(rrp->desc.dma_hdl,
		    0,
		    (ngep->param_recv_max_packet + sync_start - ngep->rx_desc) *
		    ngep->desc_attr.rxd_size,
		    DDI_DMA_SYNC_FORKERNEL);
	}

	/*
	 * Walk the receive ring looking for completed packets, receiving
	 * as many as possible in one pass.
	 */
	for (;;) {
		sum_flags = 0;
		flag_err = 0;
		end_index = rrp->prod_index;
		srbdp = &brp->sw_rbds[end_index];
		hw_bd_p = DMA_VPTR(srbdp->desc);
		stflag = ngep->desc_attr.rxd_check(hw_bd_p, &len);
		/*
		 * If there are no more packets in the receive ring,
		 * break out of the loop.
		 */
		if ((stflag & RXD_OWN) != 0 || HOST_OWN == srbdp->flags)
			break;

		ngep->recv_count++;
		flag_err = nge_rxsta_handle(ngep, stflag, &sum_flags);
		if ((flag_err & RX_HW_ERR) == 0) {
			srbdp->flags = NGE_END_PACKET;
			mp = nge_recv_packet(ngep, end_index, len);
		} else {
			/* Hardware error, re-use the buffer */
			ngep->desc_attr.rxd_fill(hw_bd_p, &srbdp->bufp->cookie,
			    srbdp->bufp->alength);
			srbdp->flags = CONTROLER_OWN;
		}
		if (mp != NULL) {
			if (!(flag_err & (RX_SUM_NO | RX_SUM_ERR))) {
				(void) hcksum_assoc(mp, NULL, NULL,
				    0, 0, 0, 0, sum_flags, 0);
			}
			*tail = mp;
			tail = &mp->b_next;
			mp = NULL;
		}
		rrp->prod_index = NEXT(end_index, rrp->desc.nslots);
		if (ngep->recv_count > ngep->param_recv_max_packet)
			break;
	}

	/* Sync the descriptors for the device */
	if (sync_start + ngep->recv_count <= ngep->rx_desc) {
		(void) ddi_dma_sync(rrp->desc.dma_hdl,
		    sync_start * ngep->desc_attr.rxd_size,
		    ngep->recv_count * ngep->desc_attr.rxd_size,
		    DDI_DMA_SYNC_FORDEV);
	} else {
		(void) ddi_dma_sync(rrp->desc.dma_hdl,
		    sync_start * ngep->desc_attr.rxd_size,
		    0,
		    DDI_DMA_SYNC_FORDEV);
		(void) ddi_dma_sync(rrp->desc.dma_hdl,
		    0,
		    (ngep->recv_count + sync_start - ngep->rx_desc) *
		    ngep->desc_attr.rxd_size,
		    DDI_DMA_SYNC_FORDEV);
	}
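	/* Write the mode control register back with rxdm and tx_rcom_en set */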
	mode_cntl.mode_val = nge_reg_get32(ngep, NGE_MODE_CNTL);
	mode_cntl.mode_bits.rxdm = NGE_SET;
	mode_cntl.mode_bits.tx_rcom_en = NGE_SET;
	nge_reg_put32(ngep, NGE_MODE_CNTL, mode_cntl.mode_val);

	return (head);
}

void
nge_receive(nge_t *ngep)
{
	mblk_t *mp;
	recv_ring_t *rrp;
	rrp = ngep->recv;

	mp = nge_recv_ring(ngep);
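	/* Drop genlock while the packet chain is handed to the MAC layer */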
	mutex_exit(ngep->genlock);
	if (mp != NULL)
		mac_rx(ngep->mh, rrp->handle, mp);
	mutex_enter(ngep->genlock);
}

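/*
 * Fill a "hot" (64-bit address) receive buffer descriptor with the DMA
 * address and length of the data area (past the software headroom) and
 * hand it to the hardware by setting the OWN bit.
 */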
void
nge_hot_rxd_fill(void *hwd, const ddi_dma_cookie_t *cookie, size_t len)
{
	uint64_t dmac_addr;
	hot_rx_bd *hw_bd_p;

	hw_bd_p = (hot_rx_bd *)hwd;
	dmac_addr = cookie->dmac_laddress + NGE_HEADROOM;

	hw_bd_p->cntl_status.cntl_val = 0;

	hw_bd_p->host_buf_addr_hi = dmac_addr >> 32;
	hw_bd_p->host_buf_addr_lo = dmac_addr;
	hw_bd_p->cntl_status.control_bits.bcnt = len - 1;

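	/* Make address and length visible before ownership passes to the chip */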
	membar_producer();
	hw_bd_p->cntl_status.control_bits.own = NGE_SET;
}

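/*
 * As above, but for the "sum" descriptor format, which carries a single
 * 32-bit buffer address.
 */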
void
nge_sum_rxd_fill(void *hwd, const ddi_dma_cookie_t *cookie, size_t len)
{
	uint64_t dmac_addr;
	sum_rx_bd *hw_bd_p;

	hw_bd_p = hwd;
	dmac_addr = cookie->dmac_address + NGE_HEADROOM;

	hw_bd_p->cntl_status.cntl_val = 0;

	hw_bd_p->host_buf_addr = dmac_addr;
	hw_bd_p->cntl_status.control_bits.bcnt = len - 1;

	membar_producer();
	hw_bd_p->cntl_status.control_bits.own = NGE_SET;
}

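/*
 * Return the status/error bits of a completed hot receive descriptor and
 * store the received byte count through *len.
 */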
uint32_t
nge_hot_rxd_check(const void *hwd, size_t *len)
{
	uint32_t err_flag;
	const hot_rx_bd *hrbdp;

	hrbdp = hwd;

	err_flag = hrbdp->cntl_status.cntl_val & ~RXD_BCNT_MSK;
	*len = hrbdp->cntl_status.status_bits_legacy.bcnt;

	return (err_flag);
}

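/*
 * As above, but for the "sum" descriptor format.
 */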
uint32_t
nge_sum_rxd_check(const void *hwd, size_t *len)
{
	uint32_t err_flag;
	const sum_rx_bd *hrbdp;

	hrbdp = hwd;

	err_flag = hrbdp->cntl_status.cntl_val & ~RXD_BCNT_MSK;
	*len = hrbdp->cntl_status.status_bits.bcnt;

	return (err_flag);
}
499