/*
 * CDDL HEADER START
 *
 * Copyright(c) 2007-2009 Intel Corporation. All rights reserved.
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include "ixgbe_sw.h"

/* function prototypes */
static mblk_t *ixgbe_rx_bind(ixgbe_rx_data_t *, uint32_t, uint32_t);
static mblk_t *ixgbe_rx_copy(ixgbe_rx_data_t *, uint32_t, uint32_t);
static void ixgbe_rx_assoc_hcksum(mblk_t *, uint32_t);

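/*
 * On non-DEBUG builds, ask the compiler to inline the checksum
 * association helper, since it runs for every received packet on the
 * hot rx path.
 */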
#ifndef IXGBE_DEBUG
#pragma inline(ixgbe_rx_assoc_hcksum)
#endif

/*
 * ixgbe_rx_recycle - The call-back function to reclaim rx buffer.
 *
 * This function is called when an mp is freed by the user through a
 * freeb call (only for mblks constructed through desballoc).
 * It returns the freed buffer to the free list.
 */
void
ixgbe_rx_recycle(caddr_t arg)
{
	ixgbe_t *ixgbe;
	ixgbe_rx_ring_t *rx_ring;
	ixgbe_rx_data_t	*rx_data;
	rx_control_block_t *recycle_rcb;
	uint32_t free_index;
	uint32_t ref_cnt;

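	/*
	 * The argument is the rx control block that was registered with
	 * desballoc() via free_rtn when the mblk was created, so a
	 * freeb() on that mblk lands here.
	 */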
	recycle_rcb = (rx_control_block_t *)(uintptr_t)arg;
	rx_data = recycle_rcb->rx_data;
	rx_ring = rx_data->rx_ring;
	ixgbe = rx_ring->ixgbe;

	if (recycle_rcb->ref_cnt == 0) {
		/*
		 * This case only happens when rx buffers are being freed
		 * in ixgbe_stop() and freemsg() is called.
		 */
		return;
	}

	ASSERT(recycle_rcb->mp == NULL);

	/*
	 * Using the recycled data buffer to generate a new mblk
	 */
	recycle_rcb->mp = desballoc((unsigned char *)
	    recycle_rcb->rx_buf.address,
	    recycle_rcb->rx_buf.size,
	    0, &recycle_rcb->free_rtn);
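	/*
	 * desballoc() can fail under memory pressure; mp then stays NULL
	 * and ixgbe_rx_bind() will retry the allocation before the
	 * buffer is loaned upstream again.
	 */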

	/*
	 * Put the recycled rx control block into the free list
	 */
	mutex_enter(&rx_data->recycle_lock);

	free_index = rx_data->rcb_tail;
	ASSERT(rx_data->free_list[free_index] == NULL);

	rx_data->free_list[free_index] = recycle_rcb;
	rx_data->rcb_tail = NEXT_INDEX(free_index, 1, rx_data->free_list_size);

	mutex_exit(&rx_data->recycle_lock);

	/*
	 * The atomic operation on the number of available rx control
	 * blocks in the free list is used to make recycling mutually
	 * exclusive with receiving.
	 */
	atomic_inc_32(&rx_data->rcb_free);
	ASSERT(rx_data->rcb_free <= rx_data->free_list_size);

	/*
	 * Consider the case where the interface is unplumbed while
	 * some buffers are still held by the upper layer. When such
	 * a buffer is finally returned, we need to free it.
	 */
	ref_cnt = atomic_dec_32_nv(&recycle_rcb->ref_cnt);
	if (ref_cnt == 0) {
		if (recycle_rcb->mp != NULL) {
			freemsg(recycle_rcb->mp);
			recycle_rcb->mp = NULL;
		}

		ixgbe_free_dma_buffer(&recycle_rcb->rx_buf);

		mutex_enter(&ixgbe->rx_pending_lock);
		atomic_dec_32(&rx_data->rcb_pending);
		atomic_dec_32(&ixgbe->rcb_pending);

		/*
		 * When no buffer belonging to this rx_data is still
		 * held by the upper layer, the rx_data can be freed.
		 */
		if ((rx_data->flag & IXGBE_RX_STOPPED) &&
		    (rx_data->rcb_pending == 0))
			ixgbe_free_rx_ring_data(rx_data);

		mutex_exit(&ixgbe->rx_pending_lock);
	}
}

/*
 * ixgbe_rx_copy - Use copy to process the received packet.
 *
 * This function will use bcopy to process the packet
 * and send the copied packet upstream.
 */
static mblk_t *
ixgbe_rx_copy(ixgbe_rx_data_t *rx_data, uint32_t index, uint32_t pkt_len)
{
	ixgbe_t *ixgbe;
	rx_control_block_t *current_rcb;
	mblk_t *mp;

	ixgbe = rx_data->rx_ring->ixgbe;
	current_rcb = rx_data->work_list[index];

	DMA_SYNC(&current_rcb->rx_buf, DDI_DMA_SYNC_FORKERNEL);

	if (ixgbe_check_dma_handle(current_rcb->rx_buf.dma_handle) !=
	    DDI_FM_OK) {
		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
		atomic_or_32(&ixgbe->ixgbe_state, IXGBE_ERROR);
		return (NULL);
	}

	/*
	 * Allocate buffer to receive this packet
	 */
	mp = allocb(pkt_len + IPHDR_ALIGN_ROOM, 0);
	if (mp == NULL) {
		ixgbe_log(ixgbe, "ixgbe_rx_copy: allocate buffer failed");
		return (NULL);
	}

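	/*
	 * Advancing b_rptr by IPHDR_ALIGN_ROOM leaves headroom so that
	 * the IP header following the 14-byte Ethernet header lands on
	 * an aligned boundary; the extra room was reserved in the
	 * allocb() above.
	 */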
	/*
	 * Copy the data received into the new cluster
	 */
	mp->b_rptr += IPHDR_ALIGN_ROOM;
	bcopy(current_rcb->rx_buf.address, mp->b_rptr, pkt_len);
	mp->b_wptr = mp->b_rptr + pkt_len;

	return (mp);
}

/*
 * ixgbe_rx_bind - Use an existing DMA buffer to build an mblk for receiving.
 *
 * This function uses the pre-bound DMA buffer to receive the packet
 * and builds an mblk that will be sent upstream.
 */
static mblk_t *
ixgbe_rx_bind(ixgbe_rx_data_t *rx_data, uint32_t index, uint32_t pkt_len)
{
	rx_control_block_t *current_rcb;
	rx_control_block_t *free_rcb;
	uint32_t free_index;
	mblk_t *mp;
	ixgbe_t	*ixgbe = rx_data->rx_ring->ixgbe;

	/*
	 * If the free list is empty, we cannot proceed to send
	 * the current DMA buffer upstream. We'll have to return
	 * and use bcopy to process the packet.
	 */
	if (ixgbe_atomic_reserve(&rx_data->rcb_free, 1) < 0)
		return (NULL);

	current_rcb = rx_data->work_list[index];
	/*
	 * If the mp of the rx control block is NULL, try to do
	 * desballoc again.
	 */
	if (current_rcb->mp == NULL) {
		current_rcb->mp = desballoc((unsigned char *)
		    current_rcb->rx_buf.address,
		    current_rcb->rx_buf.size,
		    0, &current_rcb->free_rtn);
		/*
		 * If building an mblk from the current DMA buffer fails,
		 * we have to return and use bcopy to process the packet.
		 */
		if (current_rcb->mp == NULL) {
			atomic_inc_32(&rx_data->rcb_free);
			return (NULL);
		}
	}
	/*
	 * Sync up the data received
	 */
	DMA_SYNC(&current_rcb->rx_buf, DDI_DMA_SYNC_FORKERNEL);

	if (ixgbe_check_dma_handle(current_rcb->rx_buf.dma_handle) !=
	    DDI_FM_OK) {
		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
		atomic_inc_32(&rx_data->rcb_free);
		atomic_or_32(&ixgbe->ixgbe_state, IXGBE_ERROR);
		return (NULL);
	}

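	/*
	 * Loan the mblk upstream. The extra reference keeps the rx
	 * control block alive until ixgbe_rx_recycle() drops it when
	 * the upper layer frees the mblk.
	 */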
	mp = current_rcb->mp;
	current_rcb->mp = NULL;
	atomic_inc_32(&current_rcb->ref_cnt);

	mp->b_wptr = mp->b_rptr + pkt_len;
	mp->b_next = mp->b_cont = NULL;

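	/*
	 * Backfill the work list slot of the loaned-out buffer with a
	 * spare rx control block so the descriptor ring stays fully
	 * populated; the loaned block re-enters the free list through
	 * ixgbe_rx_recycle().
	 */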
	/*
	 * Strip off one free rx control block from the free list
	 */
	free_index = rx_data->rcb_head;
	free_rcb = rx_data->free_list[free_index];
	ASSERT(free_rcb != NULL);
	rx_data->free_list[free_index] = NULL;
	rx_data->rcb_head = NEXT_INDEX(free_index, 1, rx_data->free_list_size);

	/*
	 * Put the rx control block into the work list
	 */
	rx_data->work_list[index] = free_rcb;

	return (mp);
}

/*
 * ixgbe_rx_assoc_hcksum - Check the rx hardware checksum status and associate
 * the hcksum flags.
 */
static void
ixgbe_rx_assoc_hcksum(mblk_t *mp, uint32_t status_error)
{
	uint32_t hcksum_flags = 0;

	/*
	 * Check TCP/UDP checksum
	 */
	if ((status_error & IXGBE_RXD_STAT_L4CS) &&
	    !(status_error & IXGBE_RXDADV_ERR_TCPE))
		hcksum_flags |= HCK_FULLCKSUM | HCK_FULLCKSUM_OK;

	/*
	 * Check IP Checksum
	 */
	if ((status_error & IXGBE_RXD_STAT_IPCS) &&
	    !(status_error & IXGBE_RXDADV_ERR_IPE))
		hcksum_flags |= HCK_IPV4_HDRCKSUM;

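	/*
	 * Attach the checksum flags to the mblk so the upper layers
	 * know the hardware already verified these checksums and can
	 * skip the software verification.
	 */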
	if (hcksum_flags != 0) {
		(void) hcksum_assoc(mp,
		    NULL, NULL, 0, 0, 0, 0, hcksum_flags, 0);
	}
}

/*
 * ixgbe_ring_rx - Receive the data of one ring.
 *
 * This function walks through the h/w descriptors of the specified
 * rx ring, and receives the data if a descriptor's status shows that
 * the data is ready. It returns a chain of mblks containing the
 * received data, to be passed up to mac_rx().
 */
mblk_t *
ixgbe_ring_rx(ixgbe_rx_ring_t *rx_ring, int poll_bytes)
{
	union ixgbe_adv_rx_desc *current_rbd;
	rx_control_block_t *current_rcb;
	mblk_t *mp;
	mblk_t *mblk_head;
	mblk_t **mblk_tail;
	uint32_t rx_next;
	uint32_t rx_tail;
	uint32_t pkt_len;
	uint32_t status_error;
	uint32_t pkt_num;
	uint32_t received_bytes;
	ixgbe_t *ixgbe = rx_ring->ixgbe;
	ixgbe_rx_data_t *rx_data = rx_ring->rx_data;

	if ((ixgbe->ixgbe_state & IXGBE_SUSPENDED) ||
	    (ixgbe->ixgbe_state & IXGBE_ERROR) ||
	    !(ixgbe->ixgbe_state & IXGBE_STARTED))
		return (NULL);

	mblk_head = NULL;
	mblk_tail = &mblk_head;

	/*
	 * Sync the receive descriptors before accepting the packets
	 */
	DMA_SYNC(&rx_data->rbd_area, DDI_DMA_SYNC_FORKERNEL);

	if (ixgbe_check_dma_handle(rx_data->rbd_area.dma_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
		atomic_or_32(&ixgbe->ixgbe_state, IXGBE_ERROR);
		return (NULL);
	}

	/*
	 * Get the start point of the rx bd ring to be examined
	 * during this cycle.
	 */
	rx_next = rx_data->rbd_next;

	current_rbd = &rx_data->rbd_ring[rx_next];
	received_bytes = 0;
	pkt_num = 0;
	status_error = current_rbd->wb.upper.status_error;
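	/*
	 * IXGBE_RXD_STAT_DD (descriptor done) is set by the hardware
	 * write-back once a descriptor has been filled with received
	 * data, so the loop runs until it reaches a descriptor the
	 * hardware has not completed yet.
	 */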
	while (status_error & IXGBE_RXD_STAT_DD) {
		/*
		 * If the adapter has found errors, discard the packet,
		 * unless the error is a hardware checksum error: in
		 * that case keep the packet and let the upper layer
		 * compute the checksum. Incomplete (non-EOP) packets
		 * are discarded as well.
		 */
		if ((status_error & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) ||
		    !(status_error & IXGBE_RXD_STAT_EOP)) {
			IXGBE_DEBUG_STAT(rx_ring->stat_frame_error);
			goto rx_discard;
		}

		IXGBE_DEBUG_STAT_COND(rx_ring->stat_cksum_error,
		    (status_error & IXGBE_RXDADV_ERR_TCPE) ||
		    (status_error & IXGBE_RXDADV_ERR_IPE));

		pkt_len = current_rbd->wb.upper.length;

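		/*
		 * In polling mode, stop once accepting this packet would
		 * exceed the byte budget requested by the caller;
		 * IXGBE_POLL_NULL indicates there is no byte budget, as
		 * on the interrupt path.
		 */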
		if ((poll_bytes != IXGBE_POLL_NULL) &&
		    ((received_bytes + pkt_len) > poll_bytes))
			break;

		received_bytes += pkt_len;

		mp = NULL;
		/*
		 * For packets with length more than the copy threshold,
		 * we'll first try to use the existing DMA buffer to build
		 * an mblk and send the mblk upstream.
		 *
		 * If the first method fails, or the packet length is less
		 * than the copy threshold, we'll allocate a new mblk and
		 * copy the packet data to the new mblk.
		 */
		if (pkt_len > ixgbe->rx_copy_thresh)
			mp = ixgbe_rx_bind(rx_data, rx_next, pkt_len);

		if (mp == NULL)
			mp = ixgbe_rx_copy(rx_data, rx_next, pkt_len);

		if (mp != NULL) {
			/*
			 * Check h/w checksum offload status
			 */
			if (ixgbe->rx_hcksum_enable)
				ixgbe_rx_assoc_hcksum(mp, status_error);

			*mblk_tail = mp;
			mblk_tail = &mp->b_next;
		}

rx_discard:
		/*
		 * Reset rx descriptor read bits
		 */
		current_rcb = rx_data->work_list[rx_next];
		current_rbd->read.pkt_addr = current_rcb->rx_buf.dma_address;
		current_rbd->read.hdr_addr = 0;

		rx_next = NEXT_INDEX(rx_next, 1, rx_data->ring_size);

		/*
		 * The receive function is in interrupt context, so here
		 * rx_limit_per_intr is used to avoid spending too long
		 * receiving in a single interrupt.
		 */
		if (++pkt_num > ixgbe->rx_limit_per_intr) {
			IXGBE_DEBUG_STAT(rx_ring->stat_exceed_pkt);
			break;
		}

		current_rbd = &rx_data->rbd_ring[rx_next];
		status_error = current_rbd->wb.upper.status_error;
	}

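	/*
	 * Flush the rewritten descriptors (read-format addresses) out of
	 * the CPU caches so they are visible to the device before the
	 * tail pointer is advanced.
	 */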
	DMA_SYNC(&rx_data->rbd_area, DDI_DMA_SYNC_FORDEV);

	rx_data->rbd_next = rx_next;

	/*
	 * Update the h/w tail accordingly: RDT points to the last
	 * descriptor made available to the hardware, one behind
	 * rbd_next, handing the recycled descriptors back to it.
	 */
	rx_tail = PREV_INDEX(rx_next, 1, rx_data->ring_size);
	IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_RDT(rx_ring->index), rx_tail);

	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
		atomic_or_32(&ixgbe->ixgbe_state, IXGBE_ERROR);
	}

	return (mblk_head);
}

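/*
 * ixgbe_ring_rx_poll - Receive the data of one ring in polling mode.
 *
 * This is the ring's poll entry point for the mac layer; n_bytes is
 * the byte budget for this call and is passed to ixgbe_ring_rx() as
 * poll_bytes to cap how much data is accepted.
 */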
mblk_t *
ixgbe_ring_rx_poll(void *arg, int n_bytes)
{
	ixgbe_rx_ring_t *rx_ring = (ixgbe_rx_ring_t *)arg;
	mblk_t *mp = NULL;

	ASSERT(n_bytes >= 0);

	if (n_bytes == 0)
		return (NULL);

	mutex_enter(&rx_ring->rx_lock);
	mp = ixgbe_ring_rx(rx_ring, n_bytes);
	mutex_exit(&rx_ring->rx_lock);

	return (mp);
}
448