xref: /linux/drivers/net/ethernet/amd/xgbe/xgbe-desc.c (revision 3932b9ca55b0be314a36d3e84faff3e823c081f5)
1 /*
2  * AMD 10Gb Ethernet driver
3  *
4  * This file is available to you under your choice of the following two
5  * licenses:
6  *
7  * License 1: GPLv2
8  *
9  * Copyright (c) 2014 Advanced Micro Devices, Inc.
10  *
11  * This file is free software; you may copy, redistribute and/or modify
12  * it under the terms of the GNU General Public License as published by
13  * the Free Software Foundation, either version 2 of the License, or (at
14  * your option) any later version.
15  *
16  * This file is distributed in the hope that it will be useful, but
17  * WITHOUT ANY WARRANTY; without even the implied warranty of
18  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19  * General Public License for more details.
20  *
21  * You should have received a copy of the GNU General Public License
22  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
23  *
24  * This file incorporates work covered by the following copyright and
25  * permission notice:
26  *     The Synopsys DWC ETHER XGMAC Software Driver and documentation
27  *     (hereinafter "Software") is an unsupported proprietary work of Synopsys,
28  *     Inc. unless otherwise expressly agreed to in writing between Synopsys
29  *     and you.
30  *
31  *     The Software IS NOT an item of Licensed Software or Licensed Product
32  *     under any End User Software License Agreement or Agreement for Licensed
33  *     Product with Synopsys or any supplement thereto.  Permission is hereby
34  *     granted, free of charge, to any person obtaining a copy of this software
35  *     annotated with this license and the Software, to deal in the Software
36  *     without restriction, including without limitation the rights to use,
37  *     copy, modify, merge, publish, distribute, sublicense, and/or sell copies
38  *     of the Software, and to permit persons to whom the Software is furnished
39  *     to do so, subject to the following conditions:
40  *
41  *     The above copyright notice and this permission notice shall be included
42  *     in all copies or substantial portions of the Software.
43  *
44  *     THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
45  *     BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
46  *     TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
47  *     PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
48  *     BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
49  *     CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
50  *     SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
51  *     INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
52  *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
53  *     ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
54  *     THE POSSIBILITY OF SUCH DAMAGE.
55  *
56  *
57  * License 2: Modified BSD
58  *
59  * Copyright (c) 2014 Advanced Micro Devices, Inc.
60  * All rights reserved.
61  *
62  * Redistribution and use in source and binary forms, with or without
63  * modification, are permitted provided that the following conditions are met:
64  *     * Redistributions of source code must retain the above copyright
65  *       notice, this list of conditions and the following disclaimer.
66  *     * Redistributions in binary form must reproduce the above copyright
67  *       notice, this list of conditions and the following disclaimer in the
68  *       documentation and/or other materials provided with the distribution.
69  *     * Neither the name of Advanced Micro Devices, Inc. nor the
70  *       names of its contributors may be used to endorse or promote products
71  *       derived from this software without specific prior written permission.
72  *
73  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
74  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
75  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
76  * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
77  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
78  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
79  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
80  * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
81  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
82  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
83  *
84  * This file incorporates work covered by the following copyright and
85  * permission notice:
86  *     The Synopsys DWC ETHER XGMAC Software Driver and documentation
87  *     (hereinafter "Software") is an unsupported proprietary work of Synopsys,
88  *     Inc. unless otherwise expressly agreed to in writing between Synopsys
89  *     and you.
90  *
91  *     The Software IS NOT an item of Licensed Software or Licensed Product
92  *     under any End User Software License Agreement or Agreement for Licensed
93  *     Product with Synopsys or any supplement thereto.  Permission is hereby
94  *     granted, free of charge, to any person obtaining a copy of this software
95  *     annotated with this license and the Software, to deal in the Software
96  *     without restriction, including without limitation the rights to use,
97  *     copy, modify, merge, publish, distribute, sublicense, and/or sell copies
98  *     of the Software, and to permit persons to whom the Software is furnished
99  *     to do so, subject to the following conditions:
100  *
101  *     The above copyright notice and this permission notice shall be included
102  *     in all copies or substantial portions of the Software.
103  *
104  *     THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
105  *     BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
106  *     TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
107  *     PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
108  *     BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
109  *     CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
110  *     SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
111  *     INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
112  *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
113  *     ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
114  *     THE POSSIBILITY OF SUCH DAMAGE.
115  */
116 
117 #include "xgbe.h"
118 #include "xgbe-common.h"
119 
120 
121 static void xgbe_unmap_skb(struct xgbe_prv_data *, struct xgbe_ring_data *);
122 
123 static void xgbe_free_ring(struct xgbe_prv_data *pdata,
124 			   struct xgbe_ring *ring)
125 {
126 	struct xgbe_ring_data *rdata;
127 	unsigned int i;
128 
129 	if (!ring)
130 		return;
131 
132 	if (ring->rdata) {
133 		for (i = 0; i < ring->rdesc_count; i++) {
134 			rdata = XGBE_GET_DESC_DATA(ring, i);
135 			xgbe_unmap_skb(pdata, rdata);
136 		}
137 
138 		kfree(ring->rdata);
139 		ring->rdata = NULL;
140 	}
141 
142 	if (ring->rdesc) {
143 		dma_free_coherent(pdata->dev,
144 				  (sizeof(struct xgbe_ring_desc) *
145 				   ring->rdesc_count),
146 				  ring->rdesc, ring->rdesc_dma);
147 		ring->rdesc = NULL;
148 	}
149 }
150 
151 static void xgbe_free_ring_resources(struct xgbe_prv_data *pdata)
152 {
153 	struct xgbe_channel *channel;
154 	unsigned int i;
155 
156 	DBGPR("-->xgbe_free_ring_resources\n");
157 
158 	channel = pdata->channel;
159 	for (i = 0; i < pdata->channel_count; i++, channel++) {
160 		xgbe_free_ring(pdata, channel->tx_ring);
161 		xgbe_free_ring(pdata, channel->rx_ring);
162 	}
163 
164 	DBGPR("<--xgbe_free_ring_resources\n");
165 }
166 
167 static int xgbe_init_ring(struct xgbe_prv_data *pdata,
168 			  struct xgbe_ring *ring, unsigned int rdesc_count)
169 {
170 	DBGPR("-->xgbe_init_ring\n");
171 
172 	if (!ring)
173 		return 0;
174 
175 	/* Descriptors */
176 	ring->rdesc_count = rdesc_count;
177 	ring->rdesc = dma_alloc_coherent(pdata->dev,
178 					 (sizeof(struct xgbe_ring_desc) *
179 					  rdesc_count), &ring->rdesc_dma,
180 					 GFP_KERNEL);
181 	if (!ring->rdesc)
182 		return -ENOMEM;
183 
184 	/* Descriptor information */
185 	ring->rdata = kcalloc(rdesc_count, sizeof(struct xgbe_ring_data),
186 			      GFP_KERNEL);
187 	if (!ring->rdata)
188 		return -ENOMEM;
189 
190 	DBGPR("    rdesc=0x%p, rdesc_dma=0x%llx, rdata=0x%p\n",
191 	      ring->rdesc, ring->rdesc_dma, ring->rdata);
192 
193 	DBGPR("<--xgbe_init_ring\n");
194 
195 	return 0;
196 }
197 
198 static int xgbe_alloc_ring_resources(struct xgbe_prv_data *pdata)
199 {
200 	struct xgbe_channel *channel;
201 	unsigned int i;
202 	int ret;
203 
204 	DBGPR("-->xgbe_alloc_ring_resources\n");
205 
206 	channel = pdata->channel;
207 	for (i = 0; i < pdata->channel_count; i++, channel++) {
208 		DBGPR("  %s - tx_ring:\n", channel->name);
209 		ret = xgbe_init_ring(pdata, channel->tx_ring,
210 				     pdata->tx_desc_count);
211 		if (ret) {
212 			netdev_alert(pdata->netdev,
213 				     "error initializing Tx ring\n");
214 			goto err_ring;
215 		}
216 
217 		DBGPR("  %s - rx_ring:\n", channel->name);
218 		ret = xgbe_init_ring(pdata, channel->rx_ring,
219 				     pdata->rx_desc_count);
220 		if (ret) {
221 			netdev_alert(pdata->netdev,
222 				     "error initializing Tx ring\n");
223 			goto err_ring;
224 		}
225 	}
226 
227 	DBGPR("<--xgbe_alloc_ring_resources\n");
228 
229 	return 0;
230 
231 err_ring:
232 	xgbe_free_ring_resources(pdata);
233 
234 	return ret;
235 }
236 
237 static void xgbe_wrapper_tx_descriptor_init(struct xgbe_prv_data *pdata)
238 {
239 	struct xgbe_hw_if *hw_if = &pdata->hw_if;
240 	struct xgbe_channel *channel;
241 	struct xgbe_ring *ring;
242 	struct xgbe_ring_data *rdata;
243 	struct xgbe_ring_desc *rdesc;
244 	dma_addr_t rdesc_dma;
245 	unsigned int i, j;
246 
247 	DBGPR("-->xgbe_wrapper_tx_descriptor_init\n");
248 
249 	channel = pdata->channel;
250 	for (i = 0; i < pdata->channel_count; i++, channel++) {
251 		ring = channel->tx_ring;
252 		if (!ring)
253 			break;
254 
255 		rdesc = ring->rdesc;
256 		rdesc_dma = ring->rdesc_dma;
257 
258 		for (j = 0; j < ring->rdesc_count; j++) {
259 			rdata = XGBE_GET_DESC_DATA(ring, j);
260 
261 			rdata->rdesc = rdesc;
262 			rdata->rdesc_dma = rdesc_dma;
263 
264 			rdesc++;
265 			rdesc_dma += sizeof(struct xgbe_ring_desc);
266 		}
267 
268 		ring->cur = 0;
269 		ring->dirty = 0;
270 		ring->tx.queue_stopped = 0;
271 
272 		hw_if->tx_desc_init(channel);
273 	}
274 
275 	DBGPR("<--xgbe_wrapper_tx_descriptor_init\n");
276 }
277 
278 static void xgbe_wrapper_rx_descriptor_init(struct xgbe_prv_data *pdata)
279 {
280 	struct xgbe_hw_if *hw_if = &pdata->hw_if;
281 	struct xgbe_channel *channel;
282 	struct xgbe_ring *ring;
283 	struct xgbe_ring_desc *rdesc;
284 	struct xgbe_ring_data *rdata;
285 	dma_addr_t rdesc_dma, skb_dma;
286 	struct sk_buff *skb = NULL;
287 	unsigned int i, j;
288 
289 	DBGPR("-->xgbe_wrapper_rx_descriptor_init\n");
290 
291 	channel = pdata->channel;
292 	for (i = 0; i < pdata->channel_count; i++, channel++) {
293 		ring = channel->rx_ring;
294 		if (!ring)
295 			break;
296 
297 		rdesc = ring->rdesc;
298 		rdesc_dma = ring->rdesc_dma;
299 
300 		for (j = 0; j < ring->rdesc_count; j++) {
301 			rdata = XGBE_GET_DESC_DATA(ring, j);
302 
303 			rdata->rdesc = rdesc;
304 			rdata->rdesc_dma = rdesc_dma;
305 
306 			/* Allocate skb & assign to each rdesc */
307 			skb = dev_alloc_skb(pdata->rx_buf_size);
308 			if (skb == NULL)
309 				break;
310 			skb_dma = dma_map_single(pdata->dev, skb->data,
311 						 pdata->rx_buf_size,
312 						 DMA_FROM_DEVICE);
313 			if (dma_mapping_error(pdata->dev, skb_dma)) {
314 				netdev_alert(pdata->netdev,
315 					     "failed to do the dma map\n");
316 				dev_kfree_skb_any(skb);
317 				break;
318 			}
319 			rdata->skb = skb;
320 			rdata->skb_dma = skb_dma;
321 			rdata->skb_dma_len = pdata->rx_buf_size;
322 
323 			rdesc++;
324 			rdesc_dma += sizeof(struct xgbe_ring_desc);
325 		}
326 
327 		ring->cur = 0;
328 		ring->dirty = 0;
329 		ring->rx.realloc_index = 0;
330 		ring->rx.realloc_threshold = 0;
331 
332 		hw_if->rx_desc_init(channel);
333 	}
334 
335 	DBGPR("<--xgbe_wrapper_rx_descriptor_init\n");
336 }
337 
/* Release all DMA mappings and skb state held by a single descriptor's
 * bookkeeping entry, returning it to a clean, reusable state.
 *
 * Called for Tx descriptors after completion or a failed mapping
 * (xgbe_map_tx_skb err_out path), for Rx descriptors before refill
 * (xgbe_realloc_skb), and during ring teardown (xgbe_free_ring).
 */
static void xgbe_unmap_skb(struct xgbe_prv_data *pdata,
			   struct xgbe_ring_data *rdata)
{
	if (rdata->skb_dma) {
		if (rdata->mapped_as_page) {
			/* Fragment pages were mapped via skb_frag_dma_map() */
			dma_unmap_page(pdata->dev, rdata->skb_dma,
				       rdata->skb_dma_len, DMA_TO_DEVICE);
		} else {
			/* NOTE(review): DMA_TO_DEVICE is used
			 * unconditionally, but xgbe_realloc_skb() reaches
			 * here with Rx buffers that were mapped
			 * DMA_FROM_DEVICE - confirm this direction
			 * mismatch on unmap is intentional/harmless.
			 */
			dma_unmap_single(pdata->dev, rdata->skb_dma,
					 rdata->skb_dma_len, DMA_TO_DEVICE);
		}
		rdata->skb_dma = 0;
		rdata->skb_dma_len = 0;
	}

	if (rdata->skb) {
		dev_kfree_skb_any(rdata->skb);
		rdata->skb = NULL;
	}

	/* Clear per-descriptor Tx bookkeeping flags */
	rdata->tso_header = 0;
	rdata->len = 0;
	rdata->interrupt = 0;
	rdata->mapped_as_page = 0;

	/* Drop any saved Rx reassembly state from a partial packet */
	if (rdata->state_saved) {
		rdata->state_saved = 0;
		rdata->state.incomplete = 0;
		rdata->state.context_next = 0;
		rdata->state.skb = NULL;
		rdata->state.len = 0;
		rdata->state.error = 0;
	}
}
372 
/* Map a transmit skb for DMA across one or more Tx descriptors.
 *
 * Descriptor layout, in ring order starting at ring->cur:
 *   - one slot reserved (skipped, not filled here) for a context
 *     descriptor when the packet's MSS (TSO) or VLAN tag differs from
 *     the ring's current values,
 *   - for TSO: one descriptor holding the mapped TSO header,
 *   - the linear (head) data, split into XGBE_TX_MAX_BUF_SIZE chunks,
 *   - each page fragment, likewise split into chunks.
 *
 * Fills in the per-descriptor xgbe_ring_data entries and the ring's
 * packet_data totals (length, rdesc_count).  Returns the number of
 * descriptors consumed, or 0 after unwinding all mappings made so far
 * if any DMA mapping fails.
 */
static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
{
	struct xgbe_prv_data *pdata = channel->pdata;
	struct xgbe_ring *ring = channel->tx_ring;
	struct xgbe_ring_data *rdata;
	struct xgbe_packet_data *packet;
	struct skb_frag_struct *frag;
	dma_addr_t skb_dma;
	unsigned int start_index, cur_index;
	unsigned int offset, tso, vlan, datalen, len;
	unsigned int i;

	DBGPR("-->xgbe_map_tx_skb: cur = %d\n", ring->cur);

	offset = 0;
	start_index = ring->cur;
	cur_index = ring->cur;

	packet = &ring->packet_data;
	packet->rdesc_count = 0;
	packet->length = 0;

	tso = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			     TSO_ENABLE);
	vlan = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			      VLAN_CTAG);

	/* Save space for a context descriptor if needed */
	if ((tso && (packet->mss != ring->tx.cur_mss)) ||
	    (vlan && (packet->vlan_ctag != ring->tx.cur_vlan_ctag)))
		cur_index++;
	rdata = XGBE_GET_DESC_DATA(ring, cur_index);

	if (tso) {
		DBGPR("  TSO packet\n");

		/* Map the TSO header */
		skb_dma = dma_map_single(pdata->dev, skb->data,
					 packet->header_len, DMA_TO_DEVICE);
		if (dma_mapping_error(pdata->dev, skb_dma)) {
			netdev_alert(pdata->netdev, "dma_map_single failed\n");
			goto err_out;
		}
		rdata->skb_dma = skb_dma;
		rdata->skb_dma_len = packet->header_len;
		rdata->tso_header = 1;

		/* The header came out of the linear data; map the rest
		 * of the head starting after it.
		 */
		offset = packet->header_len;

		packet->length += packet->header_len;

		cur_index++;
		rdata = XGBE_GET_DESC_DATA(ring, cur_index);
	}

	/* Map the (remainder of the) packet */
	for (datalen = skb_headlen(skb) - offset; datalen; ) {
		/* Each descriptor carries at most XGBE_TX_MAX_BUF_SIZE */
		len = min_t(unsigned int, datalen, XGBE_TX_MAX_BUF_SIZE);

		skb_dma = dma_map_single(pdata->dev, skb->data + offset, len,
					 DMA_TO_DEVICE);
		if (dma_mapping_error(pdata->dev, skb_dma)) {
			netdev_alert(pdata->netdev, "dma_map_single failed\n");
			goto err_out;
		}
		rdata->skb_dma = skb_dma;
		rdata->skb_dma_len = len;
		DBGPR("  skb data: index=%u, dma=0x%llx, len=%u\n",
		      cur_index, skb_dma, len);

		datalen -= len;
		offset += len;

		packet->length += len;

		cur_index++;
		rdata = XGBE_GET_DESC_DATA(ring, cur_index);
	}

	/* Map each page fragment, also in XGBE_TX_MAX_BUF_SIZE chunks */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		DBGPR("  mapping frag %u\n", i);

		frag = &skb_shinfo(skb)->frags[i];
		offset = 0;

		for (datalen = skb_frag_size(frag); datalen; ) {
			len = min_t(unsigned int, datalen,
				    XGBE_TX_MAX_BUF_SIZE);

			skb_dma = skb_frag_dma_map(pdata->dev, frag, offset,
						   len, DMA_TO_DEVICE);
			if (dma_mapping_error(pdata->dev, skb_dma)) {
				netdev_alert(pdata->netdev,
					     "skb_frag_dma_map failed\n");
				goto err_out;
			}
			rdata->skb_dma = skb_dma;
			rdata->skb_dma_len = len;
			/* Page mappings need dma_unmap_page() on release */
			rdata->mapped_as_page = 1;
			DBGPR("  skb data: index=%u, dma=0x%llx, len=%u\n",
			      cur_index, skb_dma, len);

			datalen -= len;
			offset += len;

			packet->length += len;

			cur_index++;
			rdata = XGBE_GET_DESC_DATA(ring, cur_index);
		}
	}

	/* Save the skb address in the last entry */
	/* NOTE(review): cur_index was incremented past the final mapped
	 * descriptor above, so the skb lands in the slot one beyond the
	 * rdesc_count range - confirm the Tx completion path expects to
	 * find (and free) the skb there.
	 */
	rdata->skb = skb;

	/* Save the number of descriptor entries used */
	packet->rdesc_count = cur_index - start_index;

	DBGPR("<--xgbe_map_tx_skb: count=%u\n", packet->rdesc_count);

	return packet->rdesc_count;

err_out:
	/* Unwind every entry touched so far; the skipped context slot,
	 * if any, holds no mapping and is harmless to pass through
	 * xgbe_unmap_skb().
	 */
	while (start_index < cur_index) {
		rdata = XGBE_GET_DESC_DATA(ring, start_index++);
		xgbe_unmap_skb(pdata, rdata);
	}

	DBGPR("<--xgbe_map_tx_skb: count=0\n");

	return 0;
}
505 
506 static void xgbe_realloc_skb(struct xgbe_channel *channel)
507 {
508 	struct xgbe_prv_data *pdata = channel->pdata;
509 	struct xgbe_hw_if *hw_if = &pdata->hw_if;
510 	struct xgbe_ring *ring = channel->rx_ring;
511 	struct xgbe_ring_data *rdata;
512 	struct sk_buff *skb = NULL;
513 	dma_addr_t skb_dma;
514 	int i;
515 
516 	DBGPR("-->xgbe_realloc_skb: rx_ring->rx.realloc_index = %u\n",
517 	      ring->rx.realloc_index);
518 
519 	for (i = 0; i < ring->dirty; i++) {
520 		rdata = XGBE_GET_DESC_DATA(ring, ring->rx.realloc_index);
521 
522 		/* Reset rdata values */
523 		xgbe_unmap_skb(pdata, rdata);
524 
525 		/* Allocate skb & assign to each rdesc */
526 		skb = dev_alloc_skb(pdata->rx_buf_size);
527 		if (skb == NULL) {
528 			netdev_alert(pdata->netdev,
529 				     "failed to allocate skb\n");
530 			break;
531 		}
532 		skb_dma = dma_map_single(pdata->dev, skb->data,
533 					 pdata->rx_buf_size, DMA_FROM_DEVICE);
534 		if (dma_mapping_error(pdata->dev, skb_dma)) {
535 			netdev_alert(pdata->netdev,
536 				     "failed to do the dma map\n");
537 			dev_kfree_skb_any(skb);
538 			break;
539 		}
540 		rdata->skb = skb;
541 		rdata->skb_dma = skb_dma;
542 		rdata->skb_dma_len = pdata->rx_buf_size;
543 
544 		hw_if->rx_desc_reset(rdata);
545 
546 		ring->rx.realloc_index++;
547 	}
548 	ring->dirty = 0;
549 
550 	DBGPR("<--xgbe_realloc_skb\n");
551 }
552 
553 void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *desc_if)
554 {
555 	DBGPR("-->xgbe_init_function_ptrs_desc\n");
556 
557 	desc_if->alloc_ring_resources = xgbe_alloc_ring_resources;
558 	desc_if->free_ring_resources = xgbe_free_ring_resources;
559 	desc_if->map_tx_skb = xgbe_map_tx_skb;
560 	desc_if->realloc_skb = xgbe_realloc_skb;
561 	desc_if->unmap_skb = xgbe_unmap_skb;
562 	desc_if->wrapper_tx_desc_init = xgbe_wrapper_tx_descriptor_init;
563 	desc_if->wrapper_rx_desc_init = xgbe_wrapper_rx_descriptor_init;
564 
565 	DBGPR("<--xgbe_init_function_ptrs_desc\n");
566 }
567