/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#include "rge.h"

#define U32TOPTR(x)     ((void *)(uintptr_t)(uint32_t)(x))
#define PTRTOU32(x)     ((uint32_t)(uintptr_t)(void *)(x))

/*
 * ========== RX side routines ==========
 */

#define RGE_DBG         RGE_DBG_RECV    /* debug flag for this code */

/*
 * Atomically decrement a counter, refusing to take it to (or below) zero;
 * returns the new value on success, or 0 if no resources are left.
 */
static uint32_t rge_atomic_reserve(uint32_t *count_p, uint32_t n);
#pragma inline(rge_atomic_reserve)

static uint32_t
rge_atomic_reserve(uint32_t *count_p, uint32_t n)
{
        uint32_t oldval;
        uint32_t newval;

        /* ATOMICALLY */
        do {
                oldval = *count_p;
                newval = oldval - n;
                if (oldval <= n)
                        return (0);     /* no resources left */
        } while (atomic_cas_32(count_p, oldval, newval) != oldval);

        return (newval);
}

/*
 * Atomically increment a counter
 */
static void rge_atomic_renounce(uint32_t *count_p, uint32_t n);
#pragma inline(rge_atomic_renounce)

static void
rge_atomic_renounce(uint32_t *count_p, uint32_t n)
{
        uint32_t oldval;
        uint32_t newval;

        /* ATOMICALLY */
        do {
                oldval = *count_p;
                newval = oldval + n;
        } while (atomic_cas_32(count_p, oldval, newval) != oldval);
}

/*
 * Callback code invoked from STREAMS when the recv data buffer is free
 * for recycling.
 */
void
rge_rx_recycle(caddr_t arg)
{
        rge_t *rgep;
        dma_buf_t *rx_buf;
        sw_rbd_t *free_srbdp;
        uint32_t slot_recy;

        rx_buf = (dma_buf_t *)arg;
        rgep = (rge_t *)rx_buf->private;

        /*
         * In rge_unattach() and rge_attach(), this callback function will
         * also be called to free mp in rge_fini_rings() and rge_init_rings().
         * In those cases we shouldn't do the desballoc() below; otherwise
         * there'll be a memory leak.
         */
        if (rgep->rge_mac_state == RGE_MAC_UNATTACH ||
            rgep->rge_mac_state == RGE_MAC_ATTACH)
                return;

        /*
         * Recycle the data buffer and return it to the free ring
         */
        rx_buf->mp = desballoc(DMA_VPTR(rx_buf->pbuf),
            rgep->rxbuf_size, 0, &rx_buf->rx_recycle);
        if (rx_buf->mp == NULL) {
                rge_problem(rgep, "rge_rx_recycle: desballoc() failed");
                return;
        }
        mutex_enter(rgep->rc_lock);
        slot_recy = rgep->rc_next;
        free_srbdp = &rgep->free_srbds[slot_recy];

        ASSERT(free_srbdp->rx_buf == NULL);
        free_srbdp->rx_buf = rx_buf;
        rgep->rc_next = NEXT(slot_recy, RGE_BUF_SLOTS);
        rge_atomic_renounce(&rgep->rx_free, 1);
        if (rgep->rx_bcopy && rgep->rx_free == RGE_BUF_SLOTS)
                rgep->rx_bcopy = B_FALSE;
        ASSERT(rgep->rx_free <= RGE_BUF_SLOTS);

        mutex_exit(rgep->rc_lock);
}

static int rge_rx_refill(rge_t *rgep, uint32_t slot);
#pragma inline(rge_rx_refill)

static int
rge_rx_refill(rge_t *rgep, uint32_t slot)
{
        dma_buf_t *free_buf;
        rge_bd_t *hw_rbd_p;
        sw_rbd_t *srbdp;
        uint32_t free_slot;

        srbdp = &rgep->sw_rbds[slot];
        hw_rbd_p = &rgep->rx_ring[slot];
        free_slot = rgep->rf_next;
        free_buf = rgep->free_srbds[free_slot].rx_buf;
        if (free_buf != NULL) {
                srbdp->rx_buf = free_buf;
                rgep->free_srbds[free_slot].rx_buf = NULL;
                hw_rbd_p->host_buf_addr = RGE_BSWAP_32(rgep->head_room +
                    free_buf->pbuf.cookie.dmac_laddress);
                hw_rbd_p->host_buf_addr_hi =
                    RGE_BSWAP_32(free_buf->pbuf.cookie.dmac_laddress >> 32);
                rgep->rf_next = NEXT(free_slot, RGE_BUF_SLOTS);
                return (1);
        } else {
                /*
                 * This situation shouldn't happen
                 */
                rge_problem(rgep, "rge_rx_refill: free buffer %d is NULL",
                    free_slot);
                rgep->rx_bcopy = B_TRUE;
                return (0);
        }
}

static mblk_t *rge_receive_packet(rge_t *rgep, uint32_t slot);
#pragma inline(rge_receive_packet)

static mblk_t *
rge_receive_packet(rge_t *rgep, uint32_t slot)
{
        rge_bd_t *hw_rbd_p;
        sw_rbd_t *srbdp;
        uchar_t *dp;
        mblk_t *mp;
        uint8_t *rx_ptr;
        uint32_t rx_status;
        uint_t packet_len;
        uint_t minsize;
        uint_t maxsize;
        uint32_t proto;
        uint32_t pflags;
        struct ether_vlan_header *ehp;
        uint16_t vtag = 0;

        hw_rbd_p = &rgep->rx_ring[slot];
        srbdp = &rgep->sw_rbds[slot];

        /*
         * Read receive status
         */
        rx_status = RGE_BSWAP_32(hw_rbd_p->flags_len) & RBD_FLAGS_MASK;

        /*
         * Handle error packet
         */
        if (!(rx_status & BD_FLAG_PKT_END)) {
                RGE_DEBUG(("rge_receive_packet: not a complete packet"));
                return (NULL);
        }
        if (rx_status & RBD_FLAG_ERROR) {
                if (rx_status & RBD_FLAG_CRC_ERR)
                        rgep->stats.crc_err++;
                if (rx_status & RBD_FLAG_RUNT)
                        rgep->stats.in_short++;
                /*
                 * Set chip_error flag to reset chip:
                 * (suggested in Realtek programming guide.)
                 */
                RGE_DEBUG(("rge_receive_packet: error packet, status = %x",
                    rx_status));
                mutex_enter(rgep->genlock);
                rgep->rge_chip_state = RGE_CHIP_ERROR;
                mutex_exit(rgep->genlock);
                return (NULL);
        }

        /*
         * Handle size-error packets
         */
        packet_len = RGE_BSWAP_32(hw_rbd_p->flags_len) & RBD_LEN_MASK;
        packet_len -= ETHERFCSL;
        minsize = ETHERMIN;
        pflags = RGE_BSWAP_32(hw_rbd_p->vlan_tag);
        if (pflags & RBD_VLAN_PKT)
                minsize -= VLAN_TAGSZ;
        maxsize = rgep->ethmax_size;
        if (packet_len < minsize || packet_len > maxsize) {
                RGE_DEBUG(("rge_receive_packet: len err = %d", packet_len));
                return (NULL);
        }

        DMA_SYNC(srbdp->rx_buf->pbuf, DDI_DMA_SYNC_FORKERNEL);
        if (rgep->rx_bcopy || packet_len <= RGE_RECV_COPY_SIZE ||
            !rge_atomic_reserve(&rgep->rx_free, 1)) {
                /*
                 * Allocate a buffer to receive this good packet
                 */
                mp = allocb(packet_len + RGE_HEADROOM, 0);
                if (mp == NULL) {
                        RGE_DEBUG(("rge_receive_packet: allocate buffer fail"));
                        rgep->stats.no_rcvbuf++;
                        return (NULL);
                }

                /*
                 * Copy the data found into the new cluster
                 */
                rx_ptr = DMA_VPTR(srbdp->rx_buf->pbuf);
                mp->b_rptr = dp = mp->b_rptr + RGE_HEADROOM;
                bcopy(rx_ptr + rgep->head_room, dp, packet_len);
                mp->b_wptr = dp + packet_len;
        } else {
                mp = srbdp->rx_buf->mp;
                mp->b_rptr += rgep->head_room;
                mp->b_wptr = mp->b_rptr + packet_len;
                mp->b_next = mp->b_cont = NULL;
                /*
                 * Refill the current receive bd buffer;
                 * if this fails, just keep the mp.
                 */
                if (!rge_rx_refill(rgep, slot))
                        return (NULL);
        }
        rgep->stats.rbytes += packet_len;
        rgep->stats.rpackets++;

        /*
         * VLAN packet ?
         */
        if (pflags & RBD_VLAN_PKT)
                vtag = pflags & RBD_VLAN_TAG;
        if (vtag) {
                vtag = TCI_CHIP2OS(vtag);
                /*
                 * As the h/w strips the VLAN tag from the incoming packet,
                 * we need to insert the tag back before sending it up.
                 */
                (void) memmove(mp->b_rptr - VLAN_TAGSZ, mp->b_rptr,
                    2 * ETHERADDRL);
                mp->b_rptr -= VLAN_TAGSZ;
                ehp = (struct ether_vlan_header *)mp->b_rptr;
                ehp->ether_tpid = htons(ETHERTYPE_VLAN);
                ehp->ether_tci = htons(vtag);
                rgep->stats.rbytes += VLAN_TAGSZ;
        }

        /*
         * Check h/w checksum offload status
         */
        pflags = 0;
        proto = rx_status & RBD_FLAG_PROTOCOL;
        if ((proto == RBD_FLAG_TCP && !(rx_status & RBD_TCP_CKSUM_ERR)) ||
            (proto == RBD_FLAG_UDP && !(rx_status & RBD_UDP_CKSUM_ERR)))
                pflags |= HCK_FULLCKSUM_OK;
        if (proto != RBD_FLAG_NONE_IP && !(rx_status & RBD_IP_CKSUM_ERR))
                pflags |= HCK_IPV4_HDRCKSUM_OK;
        if (pflags != 0) {
                mac_hcksum_set(mp, 0, 0, 0, 0, pflags);
        }

        return (mp);
}

/*
 * Accept the packets received in the rx ring.
 *
 * Returns a chain of mblks containing the received data, to be
 * passed up to mac_rx().
 * The routine returns only when a complete scan has been performed
 * without finding any packets to receive.
 * This function must SET the OWN bit of BD to indicate the packets
 * it has accepted from the ring.
 */
static mblk_t *rge_receive_ring(rge_t *rgep);
#pragma inline(rge_receive_ring)

static mblk_t *
rge_receive_ring(rge_t *rgep)
{
        rge_bd_t *hw_rbd_p;
        mblk_t *head;
        mblk_t **tail;
        mblk_t *mp;
        uint32_t slot;

        ASSERT(mutex_owned(rgep->rx_lock));

        /*
         * Sync (all) the receive ring descriptors
         * before accepting the packets they describe
         */
        DMA_SYNC(rgep->rx_desc, DDI_DMA_SYNC_FORKERNEL);
        slot = rgep->rx_next;
        hw_rbd_p = &rgep->rx_ring[slot];
        head = NULL;
        tail = &head;

        while (!(hw_rbd_p->flags_len & RGE_BSWAP_32(BD_FLAG_HW_OWN))) {
                if ((mp = rge_receive_packet(rgep, slot)) != NULL) {
                        *tail = mp;
                        tail = &mp->b_next;
                }

                /*
                 * Clear RBD flags
                 */
                hw_rbd_p->flags_len =
                    RGE_BSWAP_32(rgep->rxbuf_size - rgep->head_room);
                HW_RBD_INIT(hw_rbd_p, slot);
                slot = NEXT(slot, RGE_RECV_SLOTS);
                hw_rbd_p = &rgep->rx_ring[slot];
        }

        rgep->rx_next = slot;
        return (head);
}

/*
 * Receive all ready packets.
 */
void rge_receive(rge_t *rgep);
#pragma no_inline(rge_receive)

void
rge_receive(rge_t *rgep)
{
        mblk_t *mp;

        mutex_enter(rgep->rx_lock);
        mp = rge_receive_ring(rgep);
        mutex_exit(rgep->rx_lock);

        if (mp != NULL)
                mac_rx(rgep->mh, NULL, mp);
}


#undef RGE_DBG
#define RGE_DBG         RGE_DBG_SEND    /* debug flag for this code */


/*
 * ========== Send-side recycle routines ==========
 */
static uint32_t rge_send_claim(rge_t *rgep);
#pragma inline(rge_send_claim)

static uint32_t
rge_send_claim(rge_t *rgep)
{
        uint32_t slot;
        uint32_t next;

        mutex_enter(rgep->tx_lock);
        slot = rgep->tx_next;
        next = NEXT(slot, RGE_SEND_SLOTS);
        rgep->tx_next = next;
        rgep->tx_flow++;
        mutex_exit(rgep->tx_lock);

        /*
         * We check that our invariants still hold:
         * +    the slot and next indexes are in range
         * +    the slot must not be the last one (i.e. the *next*
         *      index must not match the next-recycle index), 'cos
         *      there must always be at least one free slot in a ring
         */
        ASSERT(slot < RGE_SEND_SLOTS);
        ASSERT(next < RGE_SEND_SLOTS);
        ASSERT(next != rgep->tc_next);

        return (slot);
}

/*
 * We don't want to call this function after every successful h/w
 * transmit completion in the ISR. Instead, we call it from rge_send()
 * when there are few or no free tx BDs remaining.
 */
void rge_send_recycle(rge_t *rgep);
#pragma inline(rge_send_recycle)

void
rge_send_recycle(rge_t *rgep)
{
        rge_bd_t *hw_sbd_p;
        uint32_t tc_tail;
        uint32_t tc_head;
        uint32_t n;

        mutex_enter(rgep->tc_lock);
        tc_head = rgep->tc_next;
        tc_tail = rgep->tc_tail;
        if (tc_head == tc_tail)
                goto resched;

        do {
                tc_tail = LAST(tc_tail, RGE_SEND_SLOTS);
                hw_sbd_p = &rgep->tx_ring[tc_tail];
                if (tc_tail == tc_head) {
                        if (hw_sbd_p->flags_len &
                            RGE_BSWAP_32(BD_FLAG_HW_OWN)) {
                                /*
                                 * Recycled nothing: bump the watchdog counter,
                                 * thus guaranteeing that it's nonzero
                                 * (watchdog activated).
                                 */
                                if (rgep->watchdog == 0)
                                        rgep->watchdog = 1;
                                mutex_exit(rgep->tc_lock);
                                return;
                        }
                        break;
                }
        } while (hw_sbd_p->flags_len & RGE_BSWAP_32(BD_FLAG_HW_OWN));

        /*
         * Recycled something :-)
         */
        rgep->tc_next = NEXT(tc_tail, RGE_SEND_SLOTS);
        n = rgep->tc_next - tc_head;
        if (rgep->tc_next < tc_head)
                n += RGE_SEND_SLOTS;
        rge_atomic_renounce(&rgep->tx_free, n);
        rgep->watchdog = 0;
        ASSERT(rgep->tx_free <= RGE_SEND_SLOTS);

resched:
        mutex_exit(rgep->tc_lock);
        if (rgep->resched_needed &&
            rgep->rge_mac_state == RGE_MAC_STARTED) {
                rgep->resched_needed = B_FALSE;
                mac_tx_update(rgep->mh);
        }
}

/*
 * Send a message by copying it into a preallocated (and premapped) buffer
 */
static void rge_send_copy(rge_t *rgep, mblk_t *mp, uint16_t tci);
#pragma inline(rge_send_copy)

static void
rge_send_copy(rge_t *rgep, mblk_t *mp, uint16_t tci)
{
        rge_bd_t *hw_sbd_p;
        sw_sbd_t *ssbdp;
        mblk_t *bp;
        char *txb;
        uint32_t slot;
        size_t totlen;
        size_t mblen;
        uint32_t pflags;
        struct ether_header *ethhdr;
        struct ip *ip_hdr;

        /*
         * IMPORTANT:
         *      Up to the point where it claims a place, a send_msg()
         *      routine can indicate failure by returning B_FALSE. Once it's
         *      claimed a place, it mustn't fail.
         *
         * In this version, there's no setup to be done here, and there's
         * nothing that can fail, so we can go straight to claiming our
         * already-reserved place on the train.
         *
         * This is the point of no return!
         */
        slot = rge_send_claim(rgep);
        ssbdp = &rgep->sw_sbds[slot];

        /*
         * Copy the data into a pre-mapped buffer, which avoids the
         * overhead (and complication) of mapping/unmapping STREAMS
         * buffers and keeping hold of them until the DMA has completed.
         *
         * Because all buffers are the same size, and larger than the
         * longest single valid message, we don't have to bother about
         * splitting the message across multiple buffers either.
         */
        txb = DMA_VPTR(ssbdp->pbuf);
        totlen = 0;
        bp = mp;
        if (tci != 0) {
                /*
                 * Do not copy the VLAN tag
                 */
                bcopy(bp->b_rptr, txb, 2 * ETHERADDRL);
                txb += 2 * ETHERADDRL;
                totlen += 2 * ETHERADDRL;
                mblen = MBLKL(bp);
                ASSERT(mblen >= 2 * ETHERADDRL + VLAN_TAGSZ);
                mblen -= 2 * ETHERADDRL + VLAN_TAGSZ;
                if ((totlen += mblen) <= rgep->ethmax_size) {
                        bcopy(bp->b_rptr + 2 * ETHERADDRL + VLAN_TAGSZ,
                            txb, mblen);
                        txb += mblen;
                }
                bp = bp->b_cont;
                rgep->stats.obytes += VLAN_TAGSZ;
        }
        for (; bp != NULL; bp = bp->b_cont) {
                mblen = MBLKL(bp);
                if ((totlen += mblen) <= rgep->ethmax_size) {
                        bcopy(bp->b_rptr, txb, mblen);
                        txb += mblen;
                }
        }
        rgep->stats.obytes += totlen;
        rgep->stats.tx_pre_ismax = rgep->stats.tx_cur_ismax;
        if (totlen == rgep->ethmax_size)
                rgep->stats.tx_cur_ismax = B_TRUE;
        else
                rgep->stats.tx_cur_ismax = B_FALSE;

        /*
         * We've reached the end of the chain; and we should have
         * collected no more than ETHERMAX bytes into our buffer.
         */
        ASSERT(bp == NULL);
        ASSERT(totlen <= rgep->ethmax_size);
        DMA_SYNC(ssbdp->pbuf, DDI_DMA_SYNC_FORDEV);

        /*
         * Update the hardware send buffer descriptor flags
         */
        hw_sbd_p = &rgep->tx_ring[slot];
        ASSERT(hw_sbd_p == ssbdp->desc.mem_va);
        hw_sbd_p->flags_len = RGE_BSWAP_32(totlen & SBD_LEN_MASK);
        if (tci != 0) {
                tci = TCI_OS2CHIP(tci);
                hw_sbd_p->vlan_tag = RGE_BSWAP_32(tci);
                hw_sbd_p->vlan_tag |= RGE_BSWAP_32(SBD_VLAN_PKT);
        } else {
                hw_sbd_p->vlan_tag = 0;
        }

        /*
         * h/w checksum offload flags
         */
        mac_hcksum_get(mp, NULL, NULL, NULL, NULL, &pflags);
        if (pflags & HCK_FULLCKSUM) {
                ASSERT(totlen >= sizeof (struct ether_header) +
                    sizeof (struct ip));
                ethhdr = (struct ether_header *)(DMA_VPTR(ssbdp->pbuf));
                /*
                 * Is the packet an IP(v4) packet?
                 */
                if (ntohs(ethhdr->ether_type) == ETHERTYPE_IP) {
                        ip_hdr = (struct ip *)
                            ((uint8_t *)DMA_VPTR(ssbdp->pbuf) +
                            sizeof (struct ether_header));
                        if (ip_hdr->ip_p == IPPROTO_TCP)
                                hw_sbd_p->flags_len |=
                                    RGE_BSWAP_32(SBD_FLAG_TCP_CKSUM);
                        else if (ip_hdr->ip_p == IPPROTO_UDP)
                                hw_sbd_p->flags_len |=
                                    RGE_BSWAP_32(SBD_FLAG_UDP_CKSUM);
                }
        }
        if (pflags & HCK_IPV4_HDRCKSUM)
                hw_sbd_p->flags_len |= RGE_BSWAP_32(SBD_FLAG_IP_CKSUM);

        HW_SBD_SET(hw_sbd_p, slot);

        /*
         * We're done.
         * The message can be freed right away, as we've already
         * copied the contents ...
         */
        freemsg(mp);
}

static boolean_t
rge_send(rge_t *rgep, mblk_t *mp)
{
        struct ether_vlan_header *ehp;
        uint16_t tci;

        ASSERT(mp->b_next == NULL);

        /*
         * Try to reserve a place in the transmit ring.
         */
        if (!rge_atomic_reserve(&rgep->tx_free, 1)) {
                RGE_DEBUG(("rge_send: no free slots"));
                rgep->stats.defer++;
                rgep->resched_needed = B_TRUE;
                return (B_FALSE);
        }

        /*
         * Determine if the packet is VLAN tagged.
         */
        ASSERT(MBLKL(mp) >= sizeof (struct ether_header));
        tci = 0;
        ehp = (struct ether_vlan_header *)mp->b_rptr;
        if (ehp->ether_tpid == htons(ETHERTYPE_VLAN))
                tci = ntohs(ehp->ether_tci);

        /*
         * We've reserved a place :-)
         * These ASSERTions check that our invariants still hold:
         *      there must still be at least one free place
         *      there must be at least one place NOT free (ours!)
         */
        ASSERT(rgep->tx_free < RGE_SEND_SLOTS);
        rge_send_copy(rgep, mp, tci);

        /*
         * Trigger chip h/w transmit ...
         */
        mutex_enter(rgep->tx_lock);
        if (--rgep->tx_flow == 0) {
                DMA_SYNC(rgep->tx_desc, DDI_DMA_SYNC_FORDEV);
                rgep->tc_tail = rgep->tx_next;
        }
        rgep->stats.opackets++;
        mutex_exit(rgep->tx_lock);

        return (B_TRUE);
}

uint_t
rge_reschedule(caddr_t arg1, caddr_t arg2)
{
        rge_t *rgep;

        rgep = (rge_t *)arg1;
        _NOTE(ARGUNUSED(arg2))

        rge_send_recycle(rgep);

        if (rgep->chipid.is_pcie && rgep->tx_free != RGE_SEND_SLOTS) {
                /*
                 * It has been observed that on current Realtek PCI-E chips,
                 * the tx request for the second fragment of an upper-layer
                 * packet is ignored if a hardware transmission is already in
                 * progress, and is not processed once the tx engine becomes
                 * idle. So one solution is to re-issue the requests if there
                 * are untransmitted packets after tx interrupts occur.
                 */
                rge_tx_trigger(rgep);
        }

        return (DDI_INTR_CLAIMED);
}

/*
 * rge_m_tx() - send a chain of packets
 */
mblk_t *
rge_m_tx(void *arg, mblk_t *mp)
{
        rge_t *rgep = arg;              /* private device info */
        mblk_t *next;
        mblk_t *mp_org = mp;

        ASSERT(mp != NULL);

        rw_enter(rgep->errlock, RW_READER);
        if ((rgep->rge_mac_state != RGE_MAC_STARTED) ||
            (rgep->rge_chip_state != RGE_CHIP_RUNNING) ||
            (rgep->param_link_up != LINK_STATE_UP)) {
                rw_exit(rgep->errlock);
                RGE_DEBUG(("rge_m_tx: tx doesn't work"));
                freemsgchain(mp);
                return (NULL);
        }

        while (mp != NULL) {
                next = mp->b_next;
                mp->b_next = NULL;

                if (!rge_send(rgep, mp)) {
                        mp->b_next = next;
                        break;
                }

                mp = next;
        }
        if (mp != mp_org) {
                rge_tx_trigger(rgep);
        }
        rw_exit(rgep->errlock);

        return (mp);
}