/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#include "nge.h"

#undef	NGE_DBG
#define	NGE_DBG		NGE_DBG_RECV

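/*
 * Receive descriptor status bits, as read from the descriptor's
 * cntl_status word: end-of-packet, error summary, hardware ownership,
 * the checksum status field and the received byte count.
 */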
#define	RXD_END		0x20000000
#define	RXD_ERR		0x40000000
#define	RXD_OWN		0x80000000
#define	RXD_CSUM_MSK	0x1C000000
#define	RXD_BCNT_MSK	0x00003FFF

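/*
 * Values of the checksum status field (RXD_CSUM_MSK): no checksum
 * computed, a TCP/UDP/IP checksum error, or a successfully verified
 * checksum.
 */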
#define	RXD_CK8G_NO_HSUM	0x0
#define	RXD_CK8G_TCP_SUM_ERR	0x04000000
#define	RXD_CK8G_UDP_SUM_ERR	0x08000000
#define	RXD_CK8G_IP_HSUM_ERR	0x0C000000
#define	RXD_CK8G_IP_HSUM	0x10000000
#define	RXD_CK8G_TCP_SUM	0x14000000
#define	RXD_CK8G_UDP_SUM	0x18000000
#define	RXD_CK8G_RESV		0x1C000000

extern ddi_device_acc_attr_t nge_data_accattr;

/*
 * Callback code invoked from STREAMS when the receive data buffer is
 * free for recycling.
 *
 * The following table describes the function's behaviour:
 *
 *			| mac stopped	| mac running
 * ---------------------------------------------------
 * buffer delivered	| free buffer	| recycle buffer
 * buffer not delivered	| do nothing	| recycle buffer (*)
 *
 * Note (*):
 *	Recycle the buffer only if the mac state did not change during
 *	execution of this function. If the mac state changed, mark the
 *	buffer delivered and re-enter this function via freemsg().
 */

void
nge_recv_recycle(caddr_t arg)
{
	boolean_t val;
	boolean_t valid;
	nge_t *ngep;
	dma_area_t *bufp;
	buff_ring_t *brp;
	nge_sw_statistics_t *sw_stp;

	bufp = (dma_area_t *)arg;
	ngep = (nge_t *)bufp->private;
	brp = ngep->buff;
	sw_stp = &ngep->statistics.sw_statistics;

	/*
	 * If the buffer's signature does not match the ring's current
	 * signature, the buffer was allocated for an earlier generation
	 * or the mac was stopped: free it directly.
	 */
	if (bufp->signature != brp->buf_sign) {
		if (bufp->rx_delivered == B_TRUE) {
			nge_free_dma_mem(bufp);
			kmem_free(bufp, sizeof (dma_area_t));
			val = nge_atomic_decrease(&brp->rx_hold, 1);
			ASSERT(val == B_TRUE);
		}
		return;
	}

	/*
	 * Recycle the data buffer and return it to the free ring.
	 */
	bufp->rx_recycle.free_func = nge_recv_recycle;
	bufp->rx_recycle.free_arg = (caddr_t)bufp;

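	/*
	 * Wrap the DMA buffer in a fresh mblk; when that mblk is
	 * eventually freed, the rx_recycle free routine brings us
	 * back into this function.
	 */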
	bufp->mp = desballoc(DMA_VPTR(*bufp),
	    ngep->buf_size + NGE_HEADROOM, 0, &bufp->rx_recycle);

	if (bufp->mp == NULL) {
		sw_stp->mp_alloc_err++;
		sw_stp->recy_free++;
		nge_free_dma_mem(bufp);
		kmem_free(bufp, sizeof (dma_area_t));
		val = nge_atomic_decrease(&brp->rx_hold, 1);
		ASSERT(val == B_TRUE);
	} else {
		mutex_enter(brp->recycle_lock);
		if (bufp->signature != brp->buf_sign)
			valid = B_TRUE;
		else
			valid = B_FALSE;
		bufp->rx_delivered = valid;
		if (bufp->rx_delivered == B_FALSE) {
			bufp->next = brp->recycle_list;
			brp->recycle_list = bufp;
		}
		mutex_exit(brp->recycle_lock);
		if (valid == B_TRUE) {
			/* call nge_recv_recycle again to free it */
			freemsg(bufp->mp);
		} else {
			val = nge_atomic_decrease(&brp->rx_hold, 1);
			ASSERT(val == B_TRUE);
		}
	}
}

/*
 * Check the rx BDs (one or more) that make up a single complete
 * packet.
 * start_index: index of the first BD of the packet.
 * len: the packet's length in bytes.
 */
static mblk_t *nge_recv_packet(nge_t *ngep, uint32_t start_index, size_t len);

static mblk_t *
nge_recv_packet(nge_t *ngep, uint32_t start_index, size_t len)
{
	uint8_t *rptr;
	uint32_t minsize;
	uint32_t maxsize;
	mblk_t *mp;
	buff_ring_t *brp;
	sw_rx_sbd_t *srbdp;
	dma_area_t *bufp;
	nge_sw_statistics_t *sw_stp;
	void *hw_bd_p;

	brp = ngep->buff;
	minsize = ETHERMIN;
	maxsize = ngep->max_sdu;
	sw_stp = &ngep->statistics.sw_statistics;
	mp = NULL;

	srbdp = &brp->sw_rbds[start_index];
	DMA_SYNC(*srbdp->bufp, DDI_DMA_SYNC_FORKERNEL);
	hw_bd_p = DMA_VPTR(srbdp->desc);

	/*
	 * First check the free_list; if it is empty, move the
	 * recycle_list over to become the free_list.
	 */
	if (brp->free_list == NULL) {
		mutex_enter(brp->recycle_lock);
		brp->free_list = brp->recycle_list;
		brp->recycle_list = NULL;
		mutex_exit(brp->recycle_lock);
	}
	bufp = brp->free_list;
	/* If the packet size is out of range, drop it */
	if (len > maxsize || len < minsize) {
		ngep->desc_attr.rxd_fill(hw_bd_p, &srbdp->bufp->cookie,
		    srbdp->bufp->alength);
		srbdp->flags = CONTROLER_OWN;
		return (NULL);
	}

	/*
	 * If the received packet is smaller than the rx bcopy
	 * threshold, or there is no available buffer in the free or
	 * recycle list, copy the packet with bcopy() instead.
	 */
	if (len <= ngep->param_rxbcopy_threshold || bufp == NULL)
		brp->rx_bcopy = B_TRUE;
	else
		brp->rx_bcopy = B_FALSE;

	if (brp->rx_bcopy) {
		mp = allocb(len + NGE_HEADROOM, 0);
		if (mp == NULL) {
			sw_stp->mp_alloc_err++;
			ngep->desc_attr.rxd_fill(hw_bd_p, &srbdp->bufp->cookie,
			    srbdp->bufp->alength);
			srbdp->flags = CONTROLER_OWN;
			return (NULL);
		}
		rptr = DMA_VPTR(*srbdp->bufp);
		mp->b_rptr = mp->b_rptr + NGE_HEADROOM;
		bcopy(rptr + NGE_HEADROOM, mp->b_rptr, len);
		mp->b_wptr = mp->b_rptr + len;
	} else {
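		/*
		 * Loan the DMA buffer up to the stack instead of
		 * copying; rx_hold counts the buffers currently held
		 * above the driver.
		 */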
		mp = srbdp->bufp->mp;
		/*
		 * Make sure the packet *contents* are 4-byte aligned
		 */
		mp->b_rptr += NGE_HEADROOM;
		mp->b_wptr = mp->b_rptr + len;
		mp->b_next = mp->b_cont = NULL;
		srbdp->bufp->rx_delivered = B_TRUE;
		srbdp->bufp = NULL;
		nge_atomic_increase(&brp->rx_hold, 1);

		/* Refill the slot with a buffer from the free_list */
		srbdp->bufp = bufp;
		brp->free_list = bufp->next;
		bufp->next = NULL;
	}

	/* replenish the buffer for the hardware descriptor */
	ngep->desc_attr.rxd_fill(hw_bd_p, &srbdp->bufp->cookie,
	    srbdp->bufp->alength);
	srbdp->flags = CONTROLER_OWN;
	sw_stp->rbytes += len;
	sw_stp->recv_count++;

	return (mp);
}


#define	RX_HW_ERR	0x01
#define	RX_SUM_NO	0x02
#define	RX_SUM_ERR	0x04

/*
 * Record statistics for rx errors and generate a log message for
 * them.
 * Note:
 *	RXE, parity, symbol and CRC errors have already been recorded
 *	by the nvidia hardware statistics block (nge_statistics), so
 *	it is unnecessary for the driver to record them again here.
 */
static uint32_t
nge_rxsta_handle(nge_t *ngep, uint32_t stflag, uint32_t *pflags);

static uint32_t
nge_rxsta_handle(nge_t *ngep, uint32_t stflag, uint32_t *pflags)
{
	uint32_t errors;
	uint32_t err_flag;
	nge_sw_statistics_t *sw_stp;

	err_flag = 0;
	sw_stp = &ngep->statistics.sw_statistics;

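	/*
	 * A descriptor without the end-of-packet bit set does not
	 * describe a complete packet; treat it as a hardware error.
	 */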
	if ((RXD_END & stflag) == 0)
		return (RX_HW_ERR);

	errors = stflag & RXD_CSUM_MSK;
	switch (errors) {
	default:
		break;

	case RXD_CK8G_TCP_SUM:
	case RXD_CK8G_UDP_SUM:
		*pflags |= HCK_IPV4_HDRCKSUM_OK;
		*pflags |= HCK_FULLCKSUM_OK;
		break;

	case RXD_CK8G_TCP_SUM_ERR:
	case RXD_CK8G_UDP_SUM_ERR:
		sw_stp->tcp_hwsum_err++;
		*pflags |= HCK_IPV4_HDRCKSUM_OK;
		break;

	case RXD_CK8G_IP_HSUM:
		*pflags |= HCK_IPV4_HDRCKSUM_OK;
		break;

	case RXD_CK8G_NO_HSUM:
		err_flag |= RX_SUM_NO;
		break;

	case RXD_CK8G_IP_HSUM_ERR:
		sw_stp->ip_hwsum_err++;
		err_flag |= RX_SUM_ERR;
		break;
	}

	if ((stflag & RXD_ERR) != 0) {
		err_flag |= RX_HW_ERR;
		NGE_DEBUG(("Receive desc error, status: 0x%x", stflag));
	}

	return (err_flag);
}

static mblk_t *
nge_recv_ring(nge_t *ngep)
{
	uint32_t stflag;
	uint32_t flag_err;
	uint32_t sum_flags;
	size_t len;
	uint64_t end_index;
	uint64_t sync_start;
	mblk_t *mp;
	mblk_t **tail;
	mblk_t *head;
	recv_ring_t *rrp;
	buff_ring_t *brp;
	sw_rx_sbd_t *srbdp;
	void *hw_bd_p;
	nge_mode_cntl mode_cntl;

	mp = NULL;
	head = NULL;
	tail = &head;
	rrp = ngep->recv;
	brp = ngep->buff;

	end_index = sync_start = rrp->prod_index;
	/* Sync the descriptors for the kernel */
	if (sync_start + ngep->param_recv_max_packet <= ngep->rx_desc) {
		(void) ddi_dma_sync(rrp->desc.dma_hdl,
		    sync_start * ngep->desc_attr.rxd_size,
		    ngep->param_recv_max_packet * ngep->desc_attr.rxd_size,
		    DDI_DMA_SYNC_FORKERNEL);
	} else {
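		/*
		 * The sync range wraps past the end of the ring, so
		 * sync it in two pieces; a zero length syncs from the
		 * offset through to the end of the mapping.
		 */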
		(void) ddi_dma_sync(rrp->desc.dma_hdl,
		    sync_start * ngep->desc_attr.rxd_size,
		    0,
		    DDI_DMA_SYNC_FORKERNEL);
		(void) ddi_dma_sync(rrp->desc.dma_hdl,
		    0,
		    (ngep->param_recv_max_packet + sync_start - ngep->rx_desc) *
		    ngep->desc_attr.rxd_size,
		    DDI_DMA_SYNC_FORKERNEL);
	}

	/*
	 * Walk the rx ring, collecting as many completed packets as
	 * possible.
	 */
	for (;;) {
		sum_flags = 0;
		flag_err = 0;
		end_index = rrp->prod_index;
		srbdp = &brp->sw_rbds[end_index];
		hw_bd_p = DMA_VPTR(srbdp->desc);
		stflag = ngep->desc_attr.rxd_check(hw_bd_p, &len);
		/*
		 * If there is no packet left in the receive ring,
		 * break out of the loop
		 */
		if ((stflag & RXD_OWN) != 0 || HOST_OWN == srbdp->flags)
			break;

		ngep->recv_count++;
		flag_err = nge_rxsta_handle(ngep, stflag, &sum_flags);
		if ((flag_err & RX_HW_ERR) == 0) {
			srbdp->flags = NGE_END_PACKET;
			mp = nge_recv_packet(ngep, end_index, len);
		} else {
			/* Hardware error, re-use the buffer */
			ngep->desc_attr.rxd_fill(hw_bd_p, &srbdp->bufp->cookie,
			    srbdp->bufp->alength);
			srbdp->flags = CONTROLER_OWN;
		}
		if (mp != NULL) {
			if (!(flag_err & (RX_SUM_NO | RX_SUM_ERR))) {
				mac_hcksum_set(mp, 0, 0, 0, 0, sum_flags);
			}
			*tail = mp;
			tail = &mp->b_next;
			mp = NULL;
		}
		rrp->prod_index = NEXT(end_index, rrp->desc.nslots);
		if (ngep->recv_count >= ngep->param_recv_max_packet)
			break;
	}

	/* Sync the descriptors for the device */
	if (sync_start + ngep->recv_count <= ngep->rx_desc) {
		(void) ddi_dma_sync(rrp->desc.dma_hdl,
		    sync_start * ngep->desc_attr.rxd_size,
		    ngep->recv_count * ngep->desc_attr.rxd_size,
		    DDI_DMA_SYNC_FORDEV);
	} else {
		(void) ddi_dma_sync(rrp->desc.dma_hdl,
		    sync_start * ngep->desc_attr.rxd_size,
		    0,
		    DDI_DMA_SYNC_FORDEV);
		(void) ddi_dma_sync(rrp->desc.dma_hdl,
		    0,
		    (ngep->recv_count + sync_start - ngep->rx_desc) *
		    ngep->desc_attr.rxd_size,
		    DDI_DMA_SYNC_FORDEV);
	}
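	/*
	 * Write the mode control register back with the rxdm and
	 * tx_rcom_en bits set so that receive DMA continues to run.
	 */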
	mode_cntl.mode_val = nge_reg_get32(ngep, NGE_MODE_CNTL);
	mode_cntl.mode_bits.rxdm = NGE_SET;
	mode_cntl.mode_bits.tx_rcom_en = NGE_SET;
	nge_reg_put32(ngep, NGE_MODE_CNTL, mode_cntl.mode_val);

	return (head);
}

void
nge_receive(nge_t *ngep)
{
	mblk_t *mp;
	recv_ring_t *rrp;
	rrp = ngep->recv;

	mp = nge_recv_ring(ngep);
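	/*
	 * Drop genlock across the upcall; mac_rx() may re-enter the
	 * driver.
	 */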
	mutex_exit(ngep->genlock);
	if (mp != NULL)
		mac_rx(ngep->mh, rrp->handle, mp);
	mutex_enter(ngep->genlock);
}

void
nge_hot_rxd_fill(void *hwd, const ddi_dma_cookie_t *cookie, size_t len)
{
	uint64_t dmac_addr;
	hot_rx_bd *hw_bd_p;

	hw_bd_p = (hot_rx_bd *)hwd;
	dmac_addr = cookie->dmac_laddress + NGE_HEADROOM;

	hw_bd_p->cntl_status.cntl_val = 0;

	hw_bd_p->host_buf_addr_hi = dmac_addr >> 32;
	hw_bd_p->host_buf_addr_lo = (uint32_t)dmac_addr;
	hw_bd_p->cntl_status.control_bits.bcnt = len - 1;

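	/*
	 * Make sure all descriptor fields are visible before handing
	 * ownership of the descriptor to the hardware.
	 */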
	membar_producer();
	hw_bd_p->cntl_status.control_bits.own = NGE_SET;
}

void
nge_sum_rxd_fill(void *hwd, const ddi_dma_cookie_t *cookie, size_t len)
{
	sum_rx_bd *hw_bd_p;

	hw_bd_p = hwd;

	hw_bd_p->cntl_status.cntl_val = 0;

	hw_bd_p->host_buf_addr =
	    (uint32_t)(cookie->dmac_address + NGE_HEADROOM);
	hw_bd_p->cntl_status.control_bits.bcnt = len - 1;

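	/*
	 * As above, make the descriptor fields visible before handing
	 * ownership to the hardware.
	 */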
	membar_producer();
	hw_bd_p->cntl_status.control_bits.own = NGE_SET;
}

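/*
 * Read back a descriptor's status word and extract the received byte
 * count for the caller.
 */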
uint32_t
nge_hot_rxd_check(const void *hwd, size_t *len)
{
	uint32_t err_flag;
	const hot_rx_bd *hrbdp;

	hrbdp = hwd;
	err_flag = hrbdp->cntl_status.cntl_val;
	*len = err_flag & RXD_BCNT_MSK;
	return (err_flag);
}

uint32_t
nge_sum_rxd_check(const void *hwd, size_t *len)
{
	uint32_t err_flag;
	const sum_rx_bd *hrbdp;

	hrbdp = hwd;

	err_flag = hrbdp->cntl_status.cntl_val;
	*len = err_flag & RXD_BCNT_MSK;
	return (err_flag);
}