xref: /titanic_51/usr/src/uts/common/io/nge/nge_rx.c (revision 002c70ff32f5df6f93c15f88d351ce26443e6ee7)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include "nge.h"

#undef	NGE_DBG
#define	NGE_DBG		NGE_DBG_RECV

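/*
 * Bits in the receive descriptor control/status word, as returned by
 * the rxd_check routines below.
 */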
#define	RXD_END		0x20000000
#define	RXD_ERR		0x40000000
#define	RXD_OWN		0x80000000
#define	RXD_CSUM_MSK	0x1C000000
#define	RXD_BCNT_MSK	0x00003FFF

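/*
 * Values of the RXD_CSUM_MSK field: the hardware's checksum verdict
 * for the received frame (see nge_rxsta_handle() below).
 */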
#define	RXD_CK8G_NO_HSUM	0x0
#define	RXD_CK8G_TCP_SUM_ERR	0x04000000
#define	RXD_CK8G_UDP_SUM_ERR	0x08000000
#define	RXD_CK8G_IP_HSUM_ERR	0x0C000000
#define	RXD_CK8G_IP_HSUM	0x10000000
#define	RXD_CK8G_TCP_SUM	0x14000000
#define	RXD_CK8G_UDP_SUM	0x18000000
#define	RXD_CK8G_RESV		0x1C000000

extern ddi_device_acc_attr_t nge_data_accattr;

/*
 * Callback invoked from STREAMS when a loaned-up receive data buffer
 * is freed and can be recycled.
 */

void
nge_recv_recycle(caddr_t arg)
{
	boolean_t val;
	boolean_t valid;
	nge_t *ngep;
	dma_area_t *bufp;
	buff_ring_t *brp;
	nge_sw_statistics_t *sw_stp;

	bufp = (dma_area_t *)arg;
	ngep = (nge_t *)bufp->private;
	brp = ngep->buff;
	sw_stp = &ngep->statistics.sw_statistics;

	/*
	 * Free the buffer directly if it was allocated before the current
	 * ring setup (stale signature) or the MAC has been stopped.
	 */
	if (bufp->signature != brp->buf_sign) {
		if (bufp->rx_delivered == B_TRUE) {
			nge_free_dma_mem(bufp);
			kmem_free(bufp, sizeof (dma_area_t));
			val = nge_atomic_decrease(&brp->rx_hold, 1);
			ASSERT(val == B_TRUE);
		}
		return;
	}

	/*
	 * Otherwise recycle the data buffer and return it to the free ring.
	 */
	bufp->rx_recycle.free_func = nge_recv_recycle;
	bufp->rx_recycle.free_arg = (caddr_t)bufp;

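	/*
	 * Wrap the same DMA buffer in a fresh mblk, with nge_recv_recycle()
	 * registered as its free routine, so it can be loaned up again.
	 */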
	bufp->mp = desballoc(DMA_VPTR(*bufp),
	    ngep->buf_size + NGE_HEADROOM, 0, &bufp->rx_recycle);

	if (bufp->mp == NULL) {
		sw_stp->mp_alloc_err++;
		sw_stp->recy_free++;
		nge_free_dma_mem(bufp);
		kmem_free(bufp, sizeof (dma_area_t));
		val = nge_atomic_decrease(&brp->rx_hold, 1);
		ASSERT(val == B_TRUE);
	} else {

		mutex_enter(brp->recycle_lock);
		if (bufp->signature != brp->buf_sign)
			valid = B_TRUE;
		else
			valid = B_FALSE;
		bufp->rx_delivered = valid;
		if (bufp->rx_delivered == B_FALSE) {
			bufp->next = brp->recycle_list;
			brp->recycle_list = bufp;
		}
		mutex_exit(brp->recycle_lock);
		if (valid == B_TRUE)
			/* freemsg() calls nge_recv_recycle() again to free it */
			freemsg(bufp->mp);
		else {
			val = nge_atomic_decrease(&brp->rx_hold, 1);
			ASSERT(val == B_TRUE);
		}
	}
}

/*
 * Check one or more receive BDs to pass up one complete packet.
 * start_index: index of the first BD for this packet.
 * len: length of the received packet.
 */
static mblk_t *nge_recv_packet(nge_t *ngep, uint32_t start_index, size_t len);
#pragma	inline(nge_recv_packet)

static mblk_t *
nge_recv_packet(nge_t *ngep, uint32_t start_index, size_t len)
{
	uint8_t *rptr;
	uint32_t minsize;
	uint32_t maxsize;
	mblk_t *mp;
	buff_ring_t *brp;
	sw_rx_sbd_t *srbdp;
	dma_area_t *bufp;
	nge_sw_statistics_t *sw_stp;
	void *hw_bd_p;

	brp = ngep->buff;
	minsize = ETHERMIN;
	maxsize = ngep->max_sdu;
	sw_stp = &ngep->statistics.sw_statistics;
	mp = NULL;

	srbdp = &brp->sw_rbds[start_index];
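	/* Make the received data visible to the CPU before it is used */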
	DMA_SYNC(*srbdp->bufp, DDI_DMA_SYNC_FORKERNEL);
	hw_bd_p = DMA_VPTR(srbdp->desc);

	/*
	 * First check the free list; if it is empty, move the
	 * recycle list over to it.
	 */
	if (brp->free_list == NULL) {
		mutex_enter(brp->recycle_lock);
		brp->free_list = brp->recycle_list;
		brp->recycle_list = NULL;
		mutex_exit(brp->recycle_lock);
	}
	bufp = brp->free_list;
	/* If the packet size is out of range, drop it and return the BD to the hardware */
	if (len > maxsize || len < minsize) {
		ngep->desc_attr.rxd_fill(hw_bd_p, &srbdp->bufp->cookie,
		    srbdp->bufp->alength);
		srbdp->flags = CONTROLER_OWN;
		return (NULL);
	}

	/*
	 * If the received packet is smaller than the RX bcopy threshold,
	 * or no buffer is available on the free or recycle lists,
	 * copy the data with bcopy() instead of loaning the buffer up.
	 */
	if (len <= ngep->param_rxbcopy_threshold || bufp == NULL)
		brp->rx_bcopy = B_TRUE;
	else
		brp->rx_bcopy = B_FALSE;

	if (brp->rx_bcopy) {
		mp = allocb(len + NGE_HEADROOM, 0);
		if (mp == NULL) {
			sw_stp->mp_alloc_err++;
			ngep->desc_attr.rxd_fill(hw_bd_p, &srbdp->bufp->cookie,
			    srbdp->bufp->alength);
			srbdp->flags = CONTROLER_OWN;
			return (NULL);
		}
		rptr = DMA_VPTR(*srbdp->bufp);
		mp->b_rptr = mp->b_rptr + NGE_HEADROOM;
		bcopy(rptr + NGE_HEADROOM, mp->b_rptr, len);
		mp->b_wptr = mp->b_rptr + len;
	} else {
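		/*
		 * Loan the filled buffer up to the stack and put a buffer
		 * from the free list into this ring slot in its place.
		 */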
		mp = srbdp->bufp->mp;
		/*
		 * Make sure the packet *contents* are 4-byte aligned
		 */
		mp->b_rptr += NGE_HEADROOM;
		mp->b_wptr = mp->b_rptr + len;
		mp->b_next = mp->b_cont = NULL;
		srbdp->bufp->rx_delivered = B_TRUE;
		srbdp->bufp = NULL;
		nge_atomic_increase(&brp->rx_hold, 1);

		/* Fill the buffer from free_list */
		srbdp->bufp = bufp;
		brp->free_list = bufp->next;
		bufp->next = NULL;
	}

	/* Replenish the hardware descriptor with the buffer */
	ngep->desc_attr.rxd_fill(hw_bd_p, &srbdp->bufp->cookie,
	    srbdp->bufp->alength);
	srbdp->flags = CONTROLER_OWN;
	sw_stp->rbytes += len;
	sw_stp->recv_count++;

	return (mp);
}


#define	RX_HW_ERR	0x01
#define	RX_SUM_NO	0x02
#define	RX_SUM_ERR	0x04

/*
 * Record receive error statistics and log them.
 * Note:
 * RXE, parity, symbol and CRC errors have already been recorded by the
 * NVIDIA hardware statistics block (nge_statistics), so there is no
 * need for the driver to record them again here.
 */
static uint32_t
nge_rxsta_handle(nge_t *ngep, uint32_t stflag, uint32_t *pflags);
#pragma	inline(nge_rxsta_handle)

static uint32_t
nge_rxsta_handle(nge_t *ngep, uint32_t stflag, uint32_t *pflags)
{
	uint32_t errors;
	uint32_t err_flag;
	nge_sw_statistics_t *sw_stp;

	err_flag = 0;
	sw_stp = &ngep->statistics.sw_statistics;

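	/* Descriptors without the END bit set do not complete a packet */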
	if ((RXD_END & stflag) == 0)
		return (RX_HW_ERR);

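	/* Map the hardware checksum status onto MAC-layer hcksum flags */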
	errors = stflag & RXD_CSUM_MSK;
	switch (errors) {
	default:
		break;

	case RXD_CK8G_TCP_SUM:
	case RXD_CK8G_UDP_SUM:
		*pflags |= HCK_FULLCKSUM;
		*pflags |= HCK_IPV4_HDRCKSUM;
		*pflags |= HCK_FULLCKSUM_OK;
		break;

	case RXD_CK8G_TCP_SUM_ERR:
	case RXD_CK8G_UDP_SUM_ERR:
		sw_stp->tcp_hwsum_err++;
		*pflags |= HCK_IPV4_HDRCKSUM;
		break;

	case RXD_CK8G_IP_HSUM:
		*pflags |= HCK_IPV4_HDRCKSUM;
		break;

	case RXD_CK8G_NO_HSUM:
		err_flag |= RX_SUM_NO;
		break;

	case RXD_CK8G_IP_HSUM_ERR:
		sw_stp->ip_hwsum_err++;
		err_flag |= RX_SUM_ERR;
		break;
	}

	if ((stflag & RXD_ERR) != 0) {

		err_flag |= RX_HW_ERR;
		NGE_DEBUG(("Receive desc error, status: 0x%x", stflag));
	}

	return (err_flag);
}

static mblk_t *
nge_recv_ring(nge_t *ngep)
{
	uint32_t stflag;
	uint32_t flag_err;
	uint32_t sum_flags;
	uint32_t count;
	size_t len;
	uint64_t end_index;
	uint64_t sync_start;
	mblk_t *mp;
	mblk_t **tail;
	mblk_t *head;
	recv_ring_t *rrp;
	buff_ring_t *brp;
	sw_rx_sbd_t *srbdp;
	void *hw_bd_p;
	nge_mode_cntl mode_cntl;

	mp = NULL;
	head = NULL;
	count = 0;
	tail = &head;
	rrp = ngep->recv;
	brp = ngep->buff;

	end_index = sync_start = rrp->prod_index;
	/* Sync the descriptor for kernel */
	if (sync_start + ngep->param_recv_max_packet <= ngep->rx_desc) {
		(void) ddi_dma_sync(rrp->desc.dma_hdl,
		    sync_start * ngep->desc_attr.rxd_size,
		    ngep->param_recv_max_packet * ngep->desc_attr.rxd_size,
		    DDI_DMA_SYNC_FORKERNEL);
	} else {
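		/*
		 * The window of descriptors wraps past the end of the
		 * ring, so sync it in two pieces.
		 */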
		(void) ddi_dma_sync(rrp->desc.dma_hdl,
		    sync_start * ngep->desc_attr.rxd_size,
		    0,
		    DDI_DMA_SYNC_FORKERNEL);
		(void) ddi_dma_sync(rrp->desc.dma_hdl,
		    0,
		    (ngep->param_recv_max_packet + sync_start - ngep->rx_desc) *
		    ngep->desc_attr.rxd_size,
		    DDI_DMA_SYNC_FORKERNEL);
	}

	/*
	 * Walk the receive ring looking for good packets and
	 * receive as many of them as possible in this pass.
	 */
	for (;;) {
		sum_flags = 0;
		flag_err = 0;
		end_index = rrp->prod_index;
		srbdp = &brp->sw_rbds[end_index];
		hw_bd_p = DMA_VPTR(srbdp->desc);
		stflag = ngep->desc_attr.rxd_check(hw_bd_p, &len);
		/*
		 * If there is no completed packet in the receive ring,
		 * break out of the loop.
		 */
		if ((stflag & RXD_OWN) != 0 || HOST_OWN == srbdp->flags)
			break;

		ngep->recv_count++;
		flag_err = nge_rxsta_handle(ngep, stflag, &sum_flags);
		if ((flag_err & RX_HW_ERR) == 0) {
			srbdp->flags = NGE_END_PACKET;
			mp = nge_recv_packet(ngep, end_index, len);
		} else {
			/* Hardware error, re-use the buffer */
			ngep->desc_attr.rxd_fill(hw_bd_p, &srbdp->bufp->cookie,
			    srbdp->bufp->alength);
			srbdp->flags = CONTROLER_OWN;
		}
		count++;
		if (mp != NULL) {
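			/*
			 * Attach the hardware checksum result unless the
			 * hardware reported no checksum or a checksum error.
			 */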
			if (!(flag_err & (RX_SUM_NO | RX_SUM_ERR))) {
				(void) hcksum_assoc(mp, NULL, NULL,
				    0, 0, 0, 0, sum_flags, 0);
			}
			*tail = mp;
			tail = &mp->b_next;
			mp = NULL;
		}
		rrp->prod_index = NEXT(end_index, rrp->desc.nslots);
		if (count > ngep->param_recv_max_packet)
			break;
	}

	/* Sync the descriptors for device */
	if (sync_start + count <= ngep->rx_desc) {
		(void) ddi_dma_sync(rrp->desc.dma_hdl,
		    sync_start * ngep->desc_attr.rxd_size,
		    count * ngep->desc_attr.rxd_size,
		    DDI_DMA_SYNC_FORDEV);
	} else {
		(void) ddi_dma_sync(rrp->desc.dma_hdl,
		    sync_start * ngep->desc_attr.rxd_size,
		    0,
		    DDI_DMA_SYNC_FORDEV);
		(void) ddi_dma_sync(rrp->desc.dma_hdl,
		    0,
		    (count + sync_start - ngep->rx_desc) *
		    ngep->desc_attr.rxd_size,
		    DDI_DMA_SYNC_FORDEV);
	}
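	/*
	 * Update the mode control register so the hardware continues
	 * receive DMA on the replenished descriptors.
	 */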
	mode_cntl.mode_val = nge_reg_get32(ngep, NGE_MODE_CNTL);
	mode_cntl.mode_bits.rxdm = NGE_SET;
	mode_cntl.mode_bits.tx_rcom_en = NGE_SET;
	nge_reg_put32(ngep, NGE_MODE_CNTL, mode_cntl.mode_val);

	return (head);
}

void
nge_receive(nge_t *ngep)
{
	mblk_t *mp;
	recv_ring_t *rrp;
	rrp = ngep->recv;

	mp = nge_recv_ring(ngep);
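	/*
	 * Drop genlock across the call into the MAC layer so packets
	 * are delivered without holding the driver lock.
	 */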
	mutex_exit(ngep->genlock);
	if (mp != NULL)
		mac_rx(ngep->mh, rrp->handle, mp);
	mutex_enter(ngep->genlock);
}

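/*
 * Descriptor fill routines: the "hot" format carries a 64-bit buffer
 * address split across two words, while the "sum" format carries a
 * single 32-bit address.  Both point the hardware past NGE_HEADROOM
 * bytes of reserved headroom in the data buffer.
 */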
void
nge_hot_rxd_fill(void *hwd, const ddi_dma_cookie_t *cookie, size_t len)
{
	uint64_t dmac_addr;
	hot_rx_bd *hw_bd_p;

	hw_bd_p = (hot_rx_bd *)hwd;
	dmac_addr = cookie->dmac_laddress + NGE_HEADROOM;

	hw_bd_p->cntl_status.cntl_val = 0;

	hw_bd_p->host_buf_addr_hi = dmac_addr >> 32;
	hw_bd_p->host_buf_addr_lo = dmac_addr;
	hw_bd_p->cntl_status.control_bits.bcnt = len - 1;

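	/*
	 * Make sure all descriptor fields are written before ownership
	 * is handed back to the hardware.
	 */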
	membar_producer();
	hw_bd_p->cntl_status.control_bits.own = NGE_SET;
}

void
nge_sum_rxd_fill(void *hwd, const ddi_dma_cookie_t *cookie, size_t len)
{
	uint64_t dmac_addr;
	sum_rx_bd *hw_bd_p;

	hw_bd_p = hwd;
	dmac_addr = cookie->dmac_address + NGE_HEADROOM;

	hw_bd_p->cntl_status.cntl_val = 0;

	hw_bd_p->host_buf_addr = dmac_addr;
	hw_bd_p->cntl_status.control_bits.bcnt = len - 1;

	membar_producer();
	hw_bd_p->cntl_status.control_bits.own = NGE_SET;
}

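/*
 * Descriptor check routines: return the control/status bits of the
 * descriptor (with the byte count masked off) and report the length
 * of the received frame through *len.
 */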
uint32_t
nge_hot_rxd_check(const void *hwd, size_t *len)
{
	uint32_t err_flag;
	const hot_rx_bd *hrbdp;

	hrbdp = hwd;

	err_flag = hrbdp->cntl_status.cntl_val & ~RXD_BCNT_MSK;
	*len = hrbdp->cntl_status.status_bits_legacy.bcnt;

	return (err_flag);
}

uint32_t
nge_sum_rxd_check(const void *hwd, size_t *len)
{
	uint32_t err_flag;
	const sum_rx_bd *hrbdp;

	hrbdp = hwd;

	err_flag = hrbdp->cntl_status.cntl_val & ~RXD_BCNT_MSK;
	*len = hrbdp->cntl_status.status_bits.bcnt;

	return (err_flag);
}