/* Synopsys DesignWare Core Enterprise Ethernet (XLGMAC) Driver
 *
 * Copyright (c) 2017 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is dual-licensed; you may select either version 2 of
 * the GNU General Public License ("GPL") or BSD license ("BSD").
 *
 * This Synopsys DWC XLGMAC software driver and associated documentation
 * (hereinafter the "Software") is an unsupported proprietary work of
 * Synopsys, Inc. unless otherwise expressly agreed to in writing between
 * Synopsys and you. The Software IS NOT an item of Licensed Software or a
 * Licensed Product under any End User Software License Agreement or
 * Agreement for Licensed Products with Synopsys or any supplement thereto.
 * Synopsys is a registered trademark of Synopsys, Inc. Other names included
 * in the SOFTWARE may be the trademarks of their respective owners.
 */

#include "dwc-xlgmac.h"
#include "dwc-xlgmac-reg.h"

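/* Release everything held by one descriptor data entry: unmap the Tx
 * DMA buffer, free the skb, drop the Rx header/buffer page references
 * (unmapping the pages where this entry owns the mapping) and clear
 * the cached Tx/Rx state.
 */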
static void xlgmac_unmap_desc_data(struct xlgmac_pdata *pdata,
				   struct xlgmac_desc_data *desc_data)
{
	if (desc_data->skb_dma) {
		if (desc_data->mapped_as_page) {
			dma_unmap_page(pdata->dev, desc_data->skb_dma,
				       desc_data->skb_dma_len, DMA_TO_DEVICE);
		} else {
			dma_unmap_single(pdata->dev, desc_data->skb_dma,
					 desc_data->skb_dma_len, DMA_TO_DEVICE);
		}
		desc_data->skb_dma = 0;
		desc_data->skb_dma_len = 0;
	}

	if (desc_data->skb) {
		dev_kfree_skb_any(desc_data->skb);
		desc_data->skb = NULL;
	}

	if (desc_data->rx.hdr.pa.pages)
		put_page(desc_data->rx.hdr.pa.pages);

	if (desc_data->rx.hdr.pa_unmap.pages) {
		dma_unmap_page(pdata->dev, desc_data->rx.hdr.pa_unmap.pages_dma,
			       desc_data->rx.hdr.pa_unmap.pages_len,
			       DMA_FROM_DEVICE);
		put_page(desc_data->rx.hdr.pa_unmap.pages);
	}

	if (desc_data->rx.buf.pa.pages)
		put_page(desc_data->rx.buf.pa.pages);

	if (desc_data->rx.buf.pa_unmap.pages) {
		dma_unmap_page(pdata->dev, desc_data->rx.buf.pa_unmap.pages_dma,
			       desc_data->rx.buf.pa_unmap.pages_len,
			       DMA_FROM_DEVICE);
		put_page(desc_data->rx.buf.pa_unmap.pages);
	}

	memset(&desc_data->tx, 0, sizeof(desc_data->tx));
	memset(&desc_data->rx, 0, sizeof(desc_data->rx));

	desc_data->mapped_as_page = 0;

	if (desc_data->state_saved) {
		desc_data->state_saved = 0;
		desc_data->state.skb = NULL;
		desc_data->state.len = 0;
		desc_data->state.error = 0;
	}
}

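/* Tear down a single ring: release every descriptor data entry, free
 * the descriptor data array, drop the ring's shared Rx page
 * allocations and free the coherent descriptor memory.
 */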
static void xlgmac_free_ring(struct xlgmac_pdata *pdata,
			     struct xlgmac_ring *ring)
{
	struct xlgmac_desc_data *desc_data;
	unsigned int i;

	if (!ring)
		return;

	if (ring->desc_data_head) {
		for (i = 0; i < ring->dma_desc_count; i++) {
			desc_data = XLGMAC_GET_DESC_DATA(ring, i);
			xlgmac_unmap_desc_data(pdata, desc_data);
		}

		kfree(ring->desc_data_head);
		ring->desc_data_head = NULL;
	}

	if (ring->rx_hdr_pa.pages) {
		dma_unmap_page(pdata->dev, ring->rx_hdr_pa.pages_dma,
			       ring->rx_hdr_pa.pages_len, DMA_FROM_DEVICE);
		put_page(ring->rx_hdr_pa.pages);

		ring->rx_hdr_pa.pages = NULL;
		ring->rx_hdr_pa.pages_len = 0;
		ring->rx_hdr_pa.pages_offset = 0;
		ring->rx_hdr_pa.pages_dma = 0;
	}

	if (ring->rx_buf_pa.pages) {
		dma_unmap_page(pdata->dev, ring->rx_buf_pa.pages_dma,
			       ring->rx_buf_pa.pages_len, DMA_FROM_DEVICE);
		put_page(ring->rx_buf_pa.pages);

		ring->rx_buf_pa.pages = NULL;
		ring->rx_buf_pa.pages_len = 0;
		ring->rx_buf_pa.pages_offset = 0;
		ring->rx_buf_pa.pages_dma = 0;
	}

	if (ring->dma_desc_head) {
		dma_free_coherent(pdata->dev,
				  (sizeof(struct xlgmac_dma_desc) *
				  ring->dma_desc_count),
				  ring->dma_desc_head,
				  ring->dma_desc_head_addr);
		ring->dma_desc_head = NULL;
	}
}

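/* Allocate the coherent DMA descriptor area and the descriptor data
 * array for one ring. Partial allocations are left for
 * xlgmac_free_ring() to clean up on the caller's error path.
 */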
static int xlgmac_init_ring(struct xlgmac_pdata *pdata,
			    struct xlgmac_ring *ring,
			    unsigned int dma_desc_count)
{
	if (!ring)
		return 0;

	/* Descriptors */
	ring->dma_desc_count = dma_desc_count;
	ring->dma_desc_head = dma_alloc_coherent(pdata->dev,
					(sizeof(struct xlgmac_dma_desc) *
					 dma_desc_count),
					&ring->dma_desc_head_addr,
					GFP_KERNEL);
	if (!ring->dma_desc_head)
		return -ENOMEM;

	/* Array of descriptor data */
	ring->desc_data_head = kcalloc(dma_desc_count,
				       sizeof(struct xlgmac_desc_data),
				       GFP_KERNEL);
	if (!ring->desc_data_head)
		return -ENOMEM;

	netif_dbg(pdata, drv, pdata->netdev,
		  "dma_desc_head=%p, dma_desc_head_addr=%pad, desc_data_head=%p\n",
		  ring->dma_desc_head,
		  &ring->dma_desc_head_addr,
		  ring->desc_data_head);

	return 0;
}

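/* Free the Tx and Rx rings of every channel. */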
static void xlgmac_free_rings(struct xlgmac_pdata *pdata)
{
	struct xlgmac_channel *channel;
	unsigned int i;

	if (!pdata->channel_head)
		return;

	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		xlgmac_free_ring(pdata, channel->tx_ring);
		xlgmac_free_ring(pdata, channel->rx_ring);
	}
}

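/* Initialize the Tx and Rx rings of every channel, unwinding all
 * rings if any allocation fails.
 */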
static int xlgmac_alloc_rings(struct xlgmac_pdata *pdata)
{
	struct xlgmac_channel *channel;
	unsigned int i;
	int ret;

	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		netif_dbg(pdata, drv, pdata->netdev, "%s - Tx ring:\n",
			  channel->name);

		ret = xlgmac_init_ring(pdata, channel->tx_ring,
				       pdata->tx_desc_count);
		if (ret) {
			netdev_alert(pdata->netdev,
				     "error initializing Tx ring\n");
			goto err_init_ring;
		}

		netif_dbg(pdata, drv, pdata->netdev, "%s - Rx ring:\n",
			  channel->name);

		ret = xlgmac_init_ring(pdata, channel->rx_ring,
				       pdata->rx_desc_count);
		if (ret) {
			netdev_alert(pdata->netdev,
				     "error initializing Rx ring\n");
			goto err_init_ring;
		}
	}

	return 0;

err_init_ring:
	xlgmac_free_rings(pdata);

	return ret;
}

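/* Free the channel array and the Tx/Rx ring arrays. The ring arrays
 * are single allocations shared by all channels, so only the pointers
 * held by the first channel need to be freed.
 */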
static void xlgmac_free_channels(struct xlgmac_pdata *pdata)
{
	if (!pdata->channel_head)
		return;

	kfree(pdata->channel_head->tx_ring);
	pdata->channel_head->tx_ring = NULL;

	kfree(pdata->channel_head->rx_ring);
	pdata->channel_head->rx_ring = NULL;

	kfree(pdata->channel_head);

	pdata->channel_head = NULL;
	pdata->channel_count = 0;
}

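/* Allocate the channel array plus one block of Tx rings and one block
 * of Rx rings, then set up each channel's name, DMA register base,
 * optional per-channel IRQ and ring pointers.
 */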
static int xlgmac_alloc_channels(struct xlgmac_pdata *pdata)
{
	struct xlgmac_channel *channel_head, *channel;
	struct xlgmac_ring *tx_ring, *rx_ring;
	int ret = -ENOMEM;
	unsigned int i;

	channel_head = kcalloc(pdata->channel_count,
			       sizeof(struct xlgmac_channel), GFP_KERNEL);
	if (!channel_head)
		return ret;

	netif_dbg(pdata, drv, pdata->netdev,
		  "channel_head=%p\n", channel_head);

	tx_ring = kcalloc(pdata->tx_ring_count, sizeof(struct xlgmac_ring),
			  GFP_KERNEL);
	if (!tx_ring)
		goto err_tx_ring;

	rx_ring = kcalloc(pdata->rx_ring_count, sizeof(struct xlgmac_ring),
			  GFP_KERNEL);
	if (!rx_ring)
		goto err_rx_ring;

	for (i = 0, channel = channel_head; i < pdata->channel_count;
		i++, channel++) {
		snprintf(channel->name, sizeof(channel->name), "channel-%u", i);
		channel->pdata = pdata;
		channel->queue_index = i;
		channel->dma_regs = pdata->mac_regs + DMA_CH_BASE +
				    (DMA_CH_INC * i);

		if (pdata->per_channel_irq) {
			/* Get the per DMA interrupt */
			ret = pdata->channel_irq[i];
			if (ret < 0) {
				netdev_err(pdata->netdev,
					   "get_irq %u failed\n",
					   i + 1);
				goto err_irq;
			}
			channel->dma_irq = ret;
		}

		/* Index from the ring array bases so tx_ring/rx_ring keep
		 * pointing at the original allocations for the error path
		 * kfree()s.
		 */
		if (i < pdata->tx_ring_count)
			channel->tx_ring = tx_ring + i;

		if (i < pdata->rx_ring_count)
			channel->rx_ring = rx_ring + i;

		netif_dbg(pdata, drv, pdata->netdev,
			  "%s: dma_regs=%p, tx_ring=%p, rx_ring=%p\n",
			  channel->name, channel->dma_regs,
			  channel->tx_ring, channel->rx_ring);
	}

	pdata->channel_head = channel_head;

	return 0;

err_irq:
	kfree(rx_ring);

err_rx_ring:
	kfree(tx_ring);

err_tx_ring:
	kfree(channel_head);

	return ret;
}

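/* Free the rings first, then the channels that reference them. */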
static void xlgmac_free_channels_and_rings(struct xlgmac_pdata *pdata)
{
	xlgmac_free_rings(pdata);

	xlgmac_free_channels(pdata);
}

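/* Allocate the channels and then their rings, undoing both on failure. */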
static int xlgmac_alloc_channels_and_rings(struct xlgmac_pdata *pdata)
{
	int ret;

	ret = xlgmac_alloc_channels(pdata);
	if (ret)
		goto err_alloc;

	ret = xlgmac_alloc_rings(pdata);
	if (ret)
		goto err_alloc;

	return 0;

err_alloc:
	xlgmac_free_channels_and_rings(pdata);

	return ret;
}

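/* Allocate a (possibly compound) page block for Rx buffers, reducing
 * the order until the allocation succeeds, and map it for the device
 * to write into.
 */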
static int xlgmac_alloc_pages(struct xlgmac_pdata *pdata,
			      struct xlgmac_page_alloc *pa,
			      gfp_t gfp, int order)
{
	struct page *pages = NULL;
	dma_addr_t pages_dma;

	/* Try to obtain pages, decreasing order if necessary */
	gfp |= __GFP_COMP | __GFP_NOWARN;
	while (order >= 0) {
		pages = alloc_pages(gfp, order);
		if (pages)
			break;

		order--;
	}
	if (!pages)
		return -ENOMEM;

	/* Map the pages */
	pages_dma = dma_map_page(pdata->dev, pages, 0,
				 PAGE_SIZE << order, DMA_FROM_DEVICE);
	if (dma_mapping_error(pdata->dev, pages_dma)) {
		put_page(pages);
		return -ENOMEM;
	}

	pa->pages = pages;
	pa->pages_len = PAGE_SIZE << order;
	pa->pages_offset = 0;
	pa->pages_dma = pages_dma;

	return 0;
}

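/* Carve a buffer of 'len' bytes out of the current page allocation and
 * take a page reference for it. Once the allocation cannot supply
 * another buffer of the same size, this buffer inherits responsibility
 * for the DMA unmap and a fresh allocation is requested next time.
 */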
static void xlgmac_set_buffer_data(struct xlgmac_buffer_data *bd,
				   struct xlgmac_page_alloc *pa,
				   unsigned int len)
{
	get_page(pa->pages);
	bd->pa = *pa;

	bd->dma_base = pa->pages_dma;
	bd->dma_off = pa->pages_offset;
	bd->dma_len = len;

	pa->pages_offset += len;
	if ((pa->pages_offset + len) > pa->pages_len) {
		/* This data descriptor is responsible for unmapping page(s) */
		bd->pa_unmap = *pa;

		/* Get a new allocation next time */
		pa->pages = NULL;
		pa->pages_len = 0;
		pa->pages_offset = 0;
		pa->pages_dma = 0;
	}
}

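/* Attach Rx header and data buffers to one descriptor data entry,
 * allocating new page blocks for the ring when the current ones are
 * used up.
 */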
static int xlgmac_map_rx_buffer(struct xlgmac_pdata *pdata,
				struct xlgmac_ring *ring,
				struct xlgmac_desc_data *desc_data)
{
	int order, ret;

	if (!ring->rx_hdr_pa.pages) {
		ret = xlgmac_alloc_pages(pdata, &ring->rx_hdr_pa,
					 GFP_ATOMIC, 0);
		if (ret)
			return ret;
	}

	if (!ring->rx_buf_pa.pages) {
		order = max_t(int, PAGE_ALLOC_COSTLY_ORDER - 1, 0);
		ret = xlgmac_alloc_pages(pdata, &ring->rx_buf_pa,
					 GFP_ATOMIC, order);
		if (ret)
			return ret;
	}

	/* Set up the header page info */
	xlgmac_set_buffer_data(&desc_data->rx.hdr, &ring->rx_hdr_pa,
			       XLGMAC_SKB_ALLOC_SIZE);

	/* Set up the buffer page info */
	xlgmac_set_buffer_data(&desc_data->rx.buf, &ring->rx_buf_pa,
			       pdata->rx_buf_size);

	return 0;
}

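/* Associate every Tx descriptor data entry with its hardware
 * descriptor, reset the ring indexes and hand the ring to the
 * hardware ops for programming.
 */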
static void xlgmac_tx_desc_init(struct xlgmac_pdata *pdata)
{
	struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
	struct xlgmac_desc_data *desc_data;
	struct xlgmac_dma_desc *dma_desc;
	struct xlgmac_channel *channel;
	struct xlgmac_ring *ring;
	dma_addr_t dma_desc_addr;
	unsigned int i, j;

	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		ring = channel->tx_ring;
		if (!ring)
			break;

		dma_desc = ring->dma_desc_head;
		dma_desc_addr = ring->dma_desc_head_addr;

		for (j = 0; j < ring->dma_desc_count; j++) {
			desc_data = XLGMAC_GET_DESC_DATA(ring, j);

			desc_data->dma_desc = dma_desc;
			desc_data->dma_desc_addr = dma_desc_addr;

			dma_desc++;
			dma_desc_addr += sizeof(struct xlgmac_dma_desc);
		}

		ring->cur = 0;
		ring->dirty = 0;
		memset(&ring->tx, 0, sizeof(ring->tx));

		hw_ops->tx_desc_init(channel);
	}
}

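/* Associate every Rx descriptor data entry with its hardware
 * descriptor, attach Rx buffers, reset the ring indexes and hand the
 * ring to the hardware ops for programming.
 */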
static void xlgmac_rx_desc_init(struct xlgmac_pdata *pdata)
{
	struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
	struct xlgmac_desc_data *desc_data;
	struct xlgmac_dma_desc *dma_desc;
	struct xlgmac_channel *channel;
	struct xlgmac_ring *ring;
	dma_addr_t dma_desc_addr;
	unsigned int i, j;

	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		ring = channel->rx_ring;
		if (!ring)
			break;

		dma_desc = ring->dma_desc_head;
		dma_desc_addr = ring->dma_desc_head_addr;

		for (j = 0; j < ring->dma_desc_count; j++) {
			desc_data = XLGMAC_GET_DESC_DATA(ring, j);

			desc_data->dma_desc = dma_desc;
			desc_data->dma_desc_addr = dma_desc_addr;

			if (xlgmac_map_rx_buffer(pdata, ring, desc_data))
				break;

			dma_desc++;
			dma_desc_addr += sizeof(struct xlgmac_dma_desc);
		}

		ring->cur = 0;
		ring->dirty = 0;

		hw_ops->rx_desc_init(channel);
	}
}

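/* DMA-map an skb for transmission. A slot is reserved for a context
 * descriptor when the MSS or VLAN tag changes; the TSO header, the
 * linear data and each fragment are mapped in pieces of at most
 * XLGMAC_TX_MAX_BUF_SIZE bytes. Returns the number of descriptors
 * used, or 0 after unwinding the mappings on error.
 */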
static int xlgmac_map_tx_skb(struct xlgmac_channel *channel,
			     struct sk_buff *skb)
{
	struct xlgmac_pdata *pdata = channel->pdata;
	struct xlgmac_ring *ring = channel->tx_ring;
	unsigned int start_index, cur_index;
	struct xlgmac_desc_data *desc_data;
	unsigned int offset, datalen, len;
	struct xlgmac_pkt_info *pkt_info;
	skb_frag_t *frag;
	unsigned int tso, vlan;
	dma_addr_t skb_dma;
	unsigned int i;

	offset = 0;
	start_index = ring->cur;
	cur_index = ring->cur;

	pkt_info = &ring->pkt_info;
	pkt_info->desc_count = 0;
	pkt_info->length = 0;

	tso = XLGMAC_GET_REG_BITS(pkt_info->attributes,
				  TX_PACKET_ATTRIBUTES_TSO_ENABLE_POS,
				  TX_PACKET_ATTRIBUTES_TSO_ENABLE_LEN);
	vlan = XLGMAC_GET_REG_BITS(pkt_info->attributes,
				   TX_PACKET_ATTRIBUTES_VLAN_CTAG_POS,
				   TX_PACKET_ATTRIBUTES_VLAN_CTAG_LEN);

	/* Save space for a context descriptor if needed */
	if ((tso && (pkt_info->mss != ring->tx.cur_mss)) ||
	    (vlan && (pkt_info->vlan_ctag != ring->tx.cur_vlan_ctag)))
		cur_index++;
	desc_data = XLGMAC_GET_DESC_DATA(ring, cur_index);

	if (tso) {
		/* Map the TSO header */
		skb_dma = dma_map_single(pdata->dev, skb->data,
					 pkt_info->header_len, DMA_TO_DEVICE);
		if (dma_mapping_error(pdata->dev, skb_dma)) {
			netdev_alert(pdata->netdev, "dma_map_single failed\n");
			goto err_out;
		}
		desc_data->skb_dma = skb_dma;
		desc_data->skb_dma_len = pkt_info->header_len;
		netif_dbg(pdata, tx_queued, pdata->netdev,
			  "skb header: index=%u, dma=%pad, len=%u\n",
			  cur_index, &skb_dma, pkt_info->header_len);

		offset = pkt_info->header_len;

		pkt_info->length += pkt_info->header_len;

		cur_index++;
		desc_data = XLGMAC_GET_DESC_DATA(ring, cur_index);
	}

	/* Map the (remainder of the) packet */
	for (datalen = skb_headlen(skb) - offset; datalen; ) {
		len = min_t(unsigned int, datalen, XLGMAC_TX_MAX_BUF_SIZE);

		skb_dma = dma_map_single(pdata->dev, skb->data + offset, len,
					 DMA_TO_DEVICE);
		if (dma_mapping_error(pdata->dev, skb_dma)) {
			netdev_alert(pdata->netdev, "dma_map_single failed\n");
			goto err_out;
		}
		desc_data->skb_dma = skb_dma;
		desc_data->skb_dma_len = len;
		netif_dbg(pdata, tx_queued, pdata->netdev,
			  "skb data: index=%u, dma=%pad, len=%u\n",
			  cur_index, &skb_dma, len);

		datalen -= len;
		offset += len;

		pkt_info->length += len;

		cur_index++;
		desc_data = XLGMAC_GET_DESC_DATA(ring, cur_index);
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		netif_dbg(pdata, tx_queued, pdata->netdev,
			  "mapping frag %u\n", i);

		frag = &skb_shinfo(skb)->frags[i];
		offset = 0;

		for (datalen = skb_frag_size(frag); datalen; ) {
			len = min_t(unsigned int, datalen,
				    XLGMAC_TX_MAX_BUF_SIZE);

			skb_dma = skb_frag_dma_map(pdata->dev, frag, offset,
						   len, DMA_TO_DEVICE);
			if (dma_mapping_error(pdata->dev, skb_dma)) {
				netdev_alert(pdata->netdev,
					     "skb_frag_dma_map failed\n");
				goto err_out;
			}
			desc_data->skb_dma = skb_dma;
			desc_data->skb_dma_len = len;
			desc_data->mapped_as_page = 1;
			netif_dbg(pdata, tx_queued, pdata->netdev,
				  "skb frag: index=%u, dma=%pad, len=%u\n",
				  cur_index, &skb_dma, len);

			datalen -= len;
			offset += len;

			pkt_info->length += len;

			cur_index++;
			desc_data = XLGMAC_GET_DESC_DATA(ring, cur_index);
		}
	}

	/* Save the skb address in the last entry. We always have some data
	 * that has been mapped so desc_data is always advanced past the last
	 * piece of mapped data - use the entry pointed to by cur_index - 1.
	 */
	desc_data = XLGMAC_GET_DESC_DATA(ring, cur_index - 1);
	desc_data->skb = skb;

	/* Save the number of descriptor entries used */
	pkt_info->desc_count = cur_index - start_index;

	return pkt_info->desc_count;

err_out:
	while (start_index < cur_index) {
		desc_data = XLGMAC_GET_DESC_DATA(ring, start_index++);
		xlgmac_unmap_desc_data(pdata, desc_data);
	}

	return 0;
}

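/* Populate the descriptor operations used by the rest of the driver. */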
void xlgmac_init_desc_ops(struct xlgmac_desc_ops *desc_ops)
{
	desc_ops->alloc_channles_and_rings = xlgmac_alloc_channels_and_rings;
	desc_ops->free_channels_and_rings = xlgmac_free_channels_and_rings;
	desc_ops->map_tx_skb = xlgmac_map_tx_skb;
	desc_ops->map_rx_buffer = xlgmac_map_rx_buffer;
	desc_ops->unmap_desc_data = xlgmac_unmap_desc_data;
	desc_ops->tx_desc_init = xlgmac_tx_desc_init;
	desc_ops->rx_desc_init = xlgmac_rx_desc_init;
}
645