/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include "nge.h"

#undef	NGE_DBG
#define	NGE_DBG		NGE_DBG_RECV

#define	RXD_END		0x20000000
#define	RXD_ERR		0x40000000
#define	RXD_OWN		0x80000000
#define	RXD_CSUM_MSK	0x1C000000
#define	RXD_BCNT_MSK	0x00003FFF

#define	RXD_CK8G_NO_HSUM	0x0
#define	RXD_CK8G_TCP_SUM_ERR	0x04000000
#define	RXD_CK8G_UDP_SUM_ERR	0x08000000
#define	RXD_CK8G_IP_HSUM_ERR	0x0C000000
#define	RXD_CK8G_IP_HSUM	0x10000000
#define	RXD_CK8G_TCP_SUM	0x14000000
#define	RXD_CK8G_UDP_SUM	0x18000000
#define	RXD_CK8G_RESV		0x1C000000

extern ddi_device_acc_attr_t nge_data_accattr;

/*
 * Callback code invoked from STREAMS when the recv data buffer is free
 * for recycling.
 */
void
nge_recv_recycle(caddr_t arg)
{
	boolean_t val;
	boolean_t valid;
	nge_t *ngep;
	dma_area_t *bufp;
	buff_ring_t *brp;
	nge_sw_statistics_t *sw_stp;

	bufp = (dma_area_t *)arg;
	ngep = (nge_t *)bufp->private;
	brp = ngep->buff;
	sw_stp = &ngep->statistics.sw_statistics;

	/*
	 * Free the buffer directly if it belongs to a previous
	 * ring generation or the MAC has been stopped.
	 */
	if (bufp->signature != brp->buf_sign) {
		if (bufp->rx_delivered == B_TRUE) {
			nge_free_dma_mem(bufp);
			kmem_free(bufp, sizeof (dma_area_t));
			val = nge_atomic_decrease(&brp->rx_hold, 1);
			ASSERT(val == B_TRUE);
		}
		return;
	}

	/*
	 * Recycle the data buffer: re-attach an mblk to it and put it
	 * back on the free ring.
	 */
	bufp->rx_recycle.free_func = nge_recv_recycle;
	bufp->rx_recycle.free_arg = (caddr_t)bufp;

	bufp->mp = desballoc(DMA_VPTR(*bufp),
	    ngep->buf_size + NGE_HEADROOM, 0, &bufp->rx_recycle);

	if (bufp->mp == NULL) {
		sw_stp->mp_alloc_err++;
		sw_stp->recy_free++;
		nge_free_dma_mem(bufp);
		kmem_free(bufp, sizeof (dma_area_t));
		val = nge_atomic_decrease(&brp->rx_hold, 1);
		ASSERT(val == B_TRUE);
	} else {
		mutex_enter(brp->recycle_lock);
		if (bufp->signature != brp->buf_sign)
			valid = B_TRUE;
		else
			valid = B_FALSE;
		bufp->rx_delivered = valid;
		if (bufp->rx_delivered == B_FALSE) {
			bufp->next = brp->recycle_list;
			brp->recycle_list = bufp;
		}
		mutex_exit(brp->recycle_lock);
		if (valid == B_TRUE) {
			/* call nge_recv_recycle() again to free it */
			freemsg(bufp->mp);
		} else {
			val = nge_atomic_decrease(&brp->rx_hold, 1);
			ASSERT(val == B_TRUE);
		}
	}
}
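/*
 * Note on the recycle path above (as read from the code, not a spec):
 * receive buffers are loaned upstream via desballoc(9F), so when the
 * stack finally calls freemsg() on such an mblk, STREAMS invokes
 * nge_recv_recycle() through rx_recycle.free_func instead of freeing
 * the memory.  The bufp->signature vs. brp->buf_sign generation check
 * is what allows a stopped or re-sized ring to drain buffers that the
 * stack still holds: stale buffers are freed, current ones recycled.
 */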
/*
 * Check the rx BDs (one or more) making up one complete packet.
 * start_index: index of the first BD for this packet.
 * len: packet length as reported by the descriptor.
 */
static mblk_t *nge_recv_packet(nge_t *ngep, uint32_t start_index, size_t len);
#pragma	inline(nge_recv_packet)

static mblk_t *
nge_recv_packet(nge_t *ngep, uint32_t start_index, size_t len)
{
	uint8_t *rptr;
	uint32_t minsize;
	uint32_t maxsize;
	mblk_t *mp;
	buff_ring_t *brp;
	sw_rx_sbd_t *srbdp;
	dma_area_t *bufp;
	nge_sw_statistics_t *sw_stp;
	void *hw_bd_p;

	brp = ngep->buff;
	minsize = ETHERMIN;
	maxsize = ngep->max_sdu;
	sw_stp = &ngep->statistics.sw_statistics;
	mp = NULL;

	srbdp = &brp->sw_rbds[start_index];
	DMA_SYNC(*srbdp->bufp, DDI_DMA_SYNC_FORKERNEL);
	hw_bd_p = DMA_VPTR(srbdp->desc);

	/*
	 * First check the free_list; if it is empty, move the
	 * recycle_list over to become the new free_list.
	 */
	if (brp->free_list == NULL) {
		mutex_enter(brp->recycle_lock);
		brp->free_list = brp->recycle_list;
		brp->recycle_list = NULL;
		mutex_exit(brp->recycle_lock);
	}
	bufp = brp->free_list;

	/* Drop the packet if its length is out of range */
	if (len > maxsize || len < minsize) {
		ngep->desc_attr.rxd_fill(hw_bd_p, &srbdp->bufp->cookie,
		    srbdp->bufp->alength);
		srbdp->flags = CONTROLER_OWN;
		return (NULL);
	}

	/*
	 * If the received packet is smaller than the RX bcopy threshold,
	 * or there is no available buffer on the free or recycle lists,
	 * copy the data out with bcopy() instead of loaning the buffer.
	 */
	if (len <= ngep->param_rxbcopy_threshold || bufp == NULL)
		brp->rx_bcopy = B_TRUE;
	else
		brp->rx_bcopy = B_FALSE;

	if (brp->rx_bcopy) {
		mp = allocb(len + NGE_HEADROOM, 0);
		if (mp == NULL) {
			sw_stp->mp_alloc_err++;
			ngep->desc_attr.rxd_fill(hw_bd_p, &srbdp->bufp->cookie,
			    srbdp->bufp->alength);
			srbdp->flags = CONTROLER_OWN;
			return (NULL);
		}
		rptr = DMA_VPTR(*srbdp->bufp);
		mp->b_rptr = mp->b_rptr + NGE_HEADROOM;
		bcopy(rptr + NGE_HEADROOM, mp->b_rptr, len);
		mp->b_wptr = mp->b_rptr + len;
	} else {
		mp = srbdp->bufp->mp;
		/*
		 * Make sure the packet *contents* are 4-byte aligned
		 */
		mp->b_rptr += NGE_HEADROOM;
		mp->b_wptr = mp->b_rptr + len;
		mp->b_next = mp->b_cont = NULL;
		srbdp->bufp->rx_delivered = B_TRUE;
		srbdp->bufp = NULL;
		nge_atomic_increase(&brp->rx_hold, 1);

		/* Refill the slot with a buffer from the free_list */
		srbdp->bufp = bufp;
		brp->free_list = bufp->next;
		bufp->next = NULL;
	}

	/* Replenish the hardware descriptor with a buffer */
	ngep->desc_attr.rxd_fill(hw_bd_p, &srbdp->bufp->cookie,
	    srbdp->bufp->alength);
	srbdp->flags = CONTROLER_OWN;
	sw_stp->rbytes += len;
	sw_stp->recv_count++;

	return (mp);
}


#define	RX_HW_ERR	0x01
#define	RX_SUM_NO	0x02
#define	RX_SUM_ERR	0x04
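/*
 * The RX_* values above form the bitmask returned by nge_rxsta_handle()
 * below: RX_HW_ERR marks a descriptor the hardware flagged as bad (or
 * one missing RXD_END), RX_SUM_NO means no hardware checksum was
 * computed, and RX_SUM_ERR means the IP header checksum failed.
 * nge_recv_ring() uses the mask to decide whether to deliver the
 * packet at all (RX_HW_ERR) and whether to attach checksum-offload
 * flags (RX_SUM_NO | RX_SUM_ERR).
 */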
/*
 * Record receive errors in the statistics
 * and generate a log message for them.
 * Note:
 * RXE, parity, symbol and CRC errors
 * are already recorded by NVIDIA's hardware
 * statistics block (nge_statistics), so it is unnecessary for the
 * driver to record them again here.
 */
static uint32_t
nge_rxsta_handle(nge_t *ngep, uint32_t stflag, uint32_t *pflags);
#pragma	inline(nge_rxsta_handle)

static uint32_t
nge_rxsta_handle(nge_t *ngep, uint32_t stflag, uint32_t *pflags)
{
	uint32_t errors;
	uint32_t err_flag;
	nge_sw_statistics_t *sw_stp;

	err_flag = 0;
	sw_stp = &ngep->statistics.sw_statistics;

	if ((RXD_END & stflag) == 0)
		return (RX_HW_ERR);

	errors = stflag & RXD_CSUM_MSK;
	switch (errors) {
	default:
		break;

	case RXD_CK8G_TCP_SUM:
	case RXD_CK8G_UDP_SUM:
		*pflags |= HCK_FULLCKSUM;
		*pflags |= HCK_IPV4_HDRCKSUM;
		*pflags |= HCK_FULLCKSUM_OK;
		break;

	case RXD_CK8G_TCP_SUM_ERR:
	case RXD_CK8G_UDP_SUM_ERR:
		sw_stp->tcp_hwsum_err++;
		*pflags |= HCK_IPV4_HDRCKSUM;
		break;

	case RXD_CK8G_IP_HSUM:
		*pflags |= HCK_IPV4_HDRCKSUM;
		break;

	case RXD_CK8G_NO_HSUM:
		err_flag |= RX_SUM_NO;
		break;

	case RXD_CK8G_IP_HSUM_ERR:
		sw_stp->ip_hwsum_err++;
		err_flag |= RX_SUM_ERR;
		break;
	}

	if ((stflag & RXD_ERR) != 0) {
		err_flag |= RX_HW_ERR;
		NGE_DEBUG(("Receive desc error, status: 0x%x", stflag));
	}

	return (err_flag);
}
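/*
 * Worked example (hypothetical status word, for illustration only):
 * stflag == 0x34000000 has RXD_END set, and
 * (stflag & RXD_CSUM_MSK) == 0x14000000 == RXD_CK8G_TCP_SUM, so the
 * function ORs HCK_FULLCKSUM | HCK_IPV4_HDRCKSUM | HCK_FULLCKSUM_OK
 * into *pflags; RXD_ERR (0x40000000) is clear, so it returns 0 and
 * the packet is delivered with its checksum already verified.
 */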
static mblk_t *
nge_recv_ring(nge_t *ngep)
{
	uint32_t stflag;
	uint32_t flag_err;
	uint32_t sum_flags;
	size_t len;
	uint64_t end_index;
	uint64_t sync_start;
	mblk_t *mp;
	mblk_t **tail;
	mblk_t *head;
	recv_ring_t *rrp;
	buff_ring_t *brp;
	sw_rx_sbd_t *srbdp;
	void *hw_bd_p;
	nge_mode_cntl mode_cntl;

	mp = NULL;
	head = NULL;
	tail = &head;
	rrp = ngep->recv;
	brp = ngep->buff;

	end_index = sync_start = rrp->prod_index;
	/* Sync the descriptors for the kernel */
	if (sync_start + ngep->param_recv_max_packet <= ngep->rx_desc) {
		(void) ddi_dma_sync(rrp->desc.dma_hdl,
		    sync_start * ngep->desc_attr.rxd_size,
		    ngep->param_recv_max_packet * ngep->desc_attr.rxd_size,
		    DDI_DMA_SYNC_FORKERNEL);
	} else {
		(void) ddi_dma_sync(rrp->desc.dma_hdl,
		    sync_start * ngep->desc_attr.rxd_size,
		    0,
		    DDI_DMA_SYNC_FORKERNEL);
		(void) ddi_dma_sync(rrp->desc.dma_hdl,
		    0,
		    (ngep->param_recv_max_packet + sync_start - ngep->rx_desc) *
		    ngep->desc_attr.rxd_size,
		    DDI_DMA_SYNC_FORKERNEL);
	}

	/*
	 * Walk the receive ring, gathering as many good packets
	 * as possible in a single pass.
	 */
	for (;;) {
		sum_flags = 0;
		flag_err = 0;
		end_index = rrp->prod_index;
		srbdp = &brp->sw_rbds[end_index];
		hw_bd_p = DMA_VPTR(srbdp->desc);
		stflag = ngep->desc_attr.rxd_check(hw_bd_p, &len);
		/*
		 * Break out of the loop when there are no more
		 * packets in the receive ring.
		 */
		if ((stflag & RXD_OWN) != 0 || HOST_OWN == srbdp->flags)
			break;

		ngep->recv_count++;
		flag_err = nge_rxsta_handle(ngep, stflag, &sum_flags);
		if ((flag_err & RX_HW_ERR) == 0) {
			srbdp->flags = NGE_END_PACKET;
			mp = nge_recv_packet(ngep, end_index, len);
		} else {
			/* Hardware error; reuse the buffer */
			ngep->desc_attr.rxd_fill(hw_bd_p, &srbdp->bufp->cookie,
			    srbdp->bufp->alength);
			srbdp->flags = CONTROLER_OWN;
		}
		if (mp != NULL) {
			if (!(flag_err & (RX_SUM_NO | RX_SUM_ERR))) {
				(void) hcksum_assoc(mp, NULL, NULL,
				    0, 0, 0, 0, sum_flags, 0);
			}
			*tail = mp;
			tail = &mp->b_next;
			mp = NULL;
		}
		rrp->prod_index = NEXT(end_index, rrp->desc.nslots);
		if (ngep->recv_count > ngep->param_recv_max_packet)
			break;
	}

	/* Sync the descriptors for the device */
	if (sync_start + ngep->recv_count <= ngep->rx_desc) {
		(void) ddi_dma_sync(rrp->desc.dma_hdl,
		    sync_start * ngep->desc_attr.rxd_size,
		    ngep->recv_count * ngep->desc_attr.rxd_size,
		    DDI_DMA_SYNC_FORDEV);
	} else {
		(void) ddi_dma_sync(rrp->desc.dma_hdl,
		    sync_start * ngep->desc_attr.rxd_size,
		    0,
		    DDI_DMA_SYNC_FORDEV);
		(void) ddi_dma_sync(rrp->desc.dma_hdl,
		    0,
		    (ngep->recv_count + sync_start - ngep->rx_desc) *
		    ngep->desc_attr.rxd_size,
		    DDI_DMA_SYNC_FORDEV);
	}
	mode_cntl.mode_val = nge_reg_get32(ngep, NGE_MODE_CNTL);
	mode_cntl.mode_bits.rxdm = NGE_SET;
	mode_cntl.mode_bits.tx_rcom_en = NGE_SET;
	nge_reg_put32(ngep, NGE_MODE_CNTL, mode_cntl.mode_val);

	return (head);
}

void
nge_receive(nge_t *ngep)
{
	mblk_t *mp;
	recv_ring_t *rrp;
	rrp = ngep->recv;

	mp = nge_recv_ring(ngep);
	mutex_exit(ngep->genlock);
	if (mp != NULL)
		mac_rx(ngep->mh, rrp->handle, mp);
	mutex_enter(ngep->genlock);
}

void
nge_hot_rxd_fill(void *hwd, const ddi_dma_cookie_t *cookie, size_t len)
{
	uint64_t dmac_addr;
	hot_rx_bd *hw_bd_p;

	hw_bd_p = (hot_rx_bd *)hwd;
	dmac_addr = cookie->dmac_laddress + NGE_HEADROOM;

	hw_bd_p->cntl_status.cntl_val = 0;

	hw_bd_p->host_buf_addr_hi = dmac_addr >> 32;
	hw_bd_p->host_buf_addr_lo = dmac_addr;
	hw_bd_p->cntl_status.control_bits.bcnt = len - 1;

	membar_producer();
	hw_bd_p->cntl_status.control_bits.own = NGE_SET;
}

void
nge_sum_rxd_fill(void *hwd, const ddi_dma_cookie_t *cookie, size_t len)
{
	uint64_t dmac_addr;
	sum_rx_bd *hw_bd_p;

	hw_bd_p = hwd;
	dmac_addr = cookie->dmac_address + NGE_HEADROOM;

	hw_bd_p->cntl_status.cntl_val = 0;

	hw_bd_p->host_buf_addr = dmac_addr;
	hw_bd_p->cntl_status.control_bits.bcnt = len - 1;

	membar_producer();
	hw_bd_p->cntl_status.control_bits.own = NGE_SET;
}

uint32_t
nge_hot_rxd_check(const void *hwd, size_t *len)
{
	uint32_t err_flag;
	const hot_rx_bd *hrbdp;

	hrbdp = hwd;

	err_flag = hrbdp->cntl_status.cntl_val & ~RXD_BCNT_MSK;
	*len = hrbdp->cntl_status.status_bits_legacy.bcnt;

	return (err_flag);
}

uint32_t
nge_sum_rxd_check(const void *hwd, size_t *len)
{
	uint32_t err_flag;
	const sum_rx_bd *hrbdp;

	hrbdp = hwd;

	err_flag = hrbdp->cntl_status.cntl_val & ~RXD_BCNT_MSK;
	*len = hrbdp->cntl_status.status_bits.bcnt;

	return (err_flag);
}
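/*
 * Usage sketch (hypothetical wiring, inferred from the calls above, not
 * shown in this file): the fill/check pairs are the two descriptor
 * flavors that the rest of this file reaches through ngep->desc_attr,
 * e.g.
 *
 *	ngep->desc_attr.rxd_fill = nge_hot_rxd_fill;
 *	ngep->desc_attr.rxd_check = nge_hot_rxd_check;
 *
 * with nge_sum_rxd_fill/nge_sum_rxd_check substituted for chipsets that
 * use the 32-bit "sum" descriptor layout.  Keeping the dispatch behind
 * desc_attr lets nge_recv_ring() and nge_recv_packet() stay independent
 * of the descriptor format; note how the fill routines only set the
 * descriptor's own bit after membar_producer(), so the device never
 * sees a half-written descriptor.
 */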