drivers/net/ethernet/amd/xgbe/xgbe-desc.c
// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
/*
 * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
 * Copyright (c) 2014, Synopsys, Inc.
 * All rights reserved
 */

#include "xgbe.h"
#include "xgbe-common.h"

static void xgbe_unmap_rdata(struct xgbe_prv_data *, struct xgbe_ring_data *);

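/*
 * Release everything associated with a single descriptor ring: unmap and
 * free the per-descriptor software data, drop any cached Rx header/buffer
 * page allocations, and free the coherent descriptor memory itself.
 */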
static void xgbe_free_ring(struct xgbe_prv_data *pdata,
			   struct xgbe_ring *ring)
{
	struct xgbe_ring_data *rdata;
	unsigned int i;

	if (!ring)
		return;

	if (ring->rdata) {
		for (i = 0; i < ring->rdesc_count; i++) {
			rdata = XGBE_GET_DESC_DATA(ring, i);
			xgbe_unmap_rdata(pdata, rdata);
		}

		kfree(ring->rdata);
		ring->rdata = NULL;
	}

	if (ring->rx_hdr_pa.pages) {
		dma_unmap_page(pdata->dev, ring->rx_hdr_pa.pages_dma,
			       ring->rx_hdr_pa.pages_len, DMA_FROM_DEVICE);
		put_page(ring->rx_hdr_pa.pages);

		ring->rx_hdr_pa.pages = NULL;
		ring->rx_hdr_pa.pages_len = 0;
		ring->rx_hdr_pa.pages_offset = 0;
		ring->rx_hdr_pa.pages_dma = 0;
	}

	if (ring->rx_buf_pa.pages) {
		dma_unmap_page(pdata->dev, ring->rx_buf_pa.pages_dma,
			       ring->rx_buf_pa.pages_len, DMA_FROM_DEVICE);
		put_page(ring->rx_buf_pa.pages);

		ring->rx_buf_pa.pages = NULL;
		ring->rx_buf_pa.pages_len = 0;
		ring->rx_buf_pa.pages_offset = 0;
		ring->rx_buf_pa.pages_dma = 0;
	}

	if (ring->rdesc) {
		dma_free_coherent(pdata->dev,
				  (sizeof(struct xgbe_ring_desc) *
				   ring->rdesc_count),
				  ring->rdesc, ring->rdesc_dma);
		ring->rdesc = NULL;
	}
}

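/* Free the Tx and Rx descriptor rings of every channel. */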
static void xgbe_free_ring_resources(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	DBGPR("-->xgbe_free_ring_resources\n");

	for (i = 0; i < pdata->channel_count; i++) {
		channel = pdata->channel[i];
		xgbe_free_ring(pdata, channel->tx_ring);
		xgbe_free_ring(pdata, channel->rx_ring);
	}

	DBGPR("<--xgbe_free_ring_resources\n");
}

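/*
 * Allocate zeroed memory, preferring the requested NUMA node and falling
 * back to an allocation from any node if the node-local attempt fails.
 */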
static void *xgbe_alloc_node(size_t size, int node)
{
	void *mem;

	mem = kzalloc_node(size, GFP_KERNEL, node);
	if (!mem)
		mem = kzalloc(size, GFP_KERNEL);

	return mem;
}

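/*
 * Allocate coherent DMA memory, temporarily overriding the device's NUMA
 * node so the allocation is attempted node-locally first, then retrying
 * without the override if that attempt fails.
 */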
static void *xgbe_dma_alloc_node(struct device *dev, size_t size,
				 dma_addr_t *dma, int node)
{
	void *mem;
	int cur_node = dev_to_node(dev);

	set_dev_node(dev, node);
	mem = dma_alloc_coherent(dev, size, dma, GFP_KERNEL);
	set_dev_node(dev, cur_node);

	if (!mem)
		mem = dma_alloc_coherent(dev, size, dma, GFP_KERNEL);

	return mem;
}

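/*
 * Allocate one ring's worth of resources: the hardware descriptors in
 * coherent DMA memory and the accompanying software ring data array,
 * both sized for rdesc_count descriptors.
 */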
static int xgbe_init_ring(struct xgbe_prv_data *pdata,
			  struct xgbe_ring *ring, unsigned int rdesc_count)
{
	size_t size;

	if (!ring)
		return 0;

	/* Descriptors */
	size = rdesc_count * sizeof(struct xgbe_ring_desc);

	ring->rdesc_count = rdesc_count;
	ring->rdesc = xgbe_dma_alloc_node(pdata->dev, size, &ring->rdesc_dma,
					  ring->node);
	if (!ring->rdesc)
		return -ENOMEM;

	/* Descriptor information */
	size = rdesc_count * sizeof(struct xgbe_ring_data);

	ring->rdata = xgbe_alloc_node(size, ring->node);
	if (!ring->rdata)
		return -ENOMEM;

	netif_dbg(pdata, drv, pdata->netdev,
		  "rdesc=%p, rdesc_dma=%pad, rdata=%p, node=%d\n",
		  ring->rdesc, &ring->rdesc_dma, ring->rdata, ring->node);

	return 0;
}

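/*
 * Allocate the Tx and Rx rings for every channel; on failure, unwind any
 * rings that were already set up.
 */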
static int xgbe_alloc_ring_resources(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;
	int ret;

	for (i = 0; i < pdata->channel_count; i++) {
		channel = pdata->channel[i];
		netif_dbg(pdata, drv, pdata->netdev, "%s - Tx ring:\n",
			  channel->name);

		ret = xgbe_init_ring(pdata, channel->tx_ring,
				     pdata->tx_desc_count);
		if (ret) {
			netdev_alert(pdata->netdev,
				     "error initializing Tx ring\n");
			goto err_ring;
		}

		netif_dbg(pdata, drv, pdata->netdev, "%s - Rx ring:\n",
			  channel->name);

		ret = xgbe_init_ring(pdata, channel->rx_ring,
				     pdata->rx_desc_count);
		if (ret) {
			netdev_alert(pdata->netdev,
				     "error initializing Rx ring\n");
			goto err_ring;
		}
	}

	return 0;

err_ring:
	xgbe_free_ring_resources(pdata);

	return ret;
}

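/*
 * Allocate and DMA-map a (possibly multi-page) Rx buffer allocation,
 * reducing the allocation order and finally dropping the NUMA node
 * preference if node-local, higher-order pages are not available.
 */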
static int xgbe_alloc_pages(struct xgbe_prv_data *pdata,
			    struct xgbe_page_alloc *pa, int alloc_order,
			    int node)
{
	struct page *pages = NULL;
	dma_addr_t pages_dma;
	gfp_t gfp;
	int order;

again:
	order = alloc_order;

	/* Try to obtain pages, decreasing order if necessary */
	gfp = GFP_ATOMIC | __GFP_COMP | __GFP_NOWARN;
	while (order >= 0) {
		pages = alloc_pages_node(node, gfp, order);
		if (pages)
			break;

		order--;
	}

	/* If we couldn't get local pages, try getting from anywhere */
	if (!pages && (node != NUMA_NO_NODE)) {
		node = NUMA_NO_NODE;
		goto again;
	}

	if (!pages)
		return -ENOMEM;

	/* Map the pages */
	pages_dma = dma_map_page(pdata->dev, pages, 0,
				 PAGE_SIZE << order, DMA_FROM_DEVICE);
	if (dma_mapping_error(pdata->dev, pages_dma)) {
		put_page(pages);
		return -ENOMEM;
	}

	pa->pages = pages;
	pa->pages_len = PAGE_SIZE << order;
	pa->pages_offset = 0;
	pa->pages_dma = pages_dma;

	return 0;
}

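/*
 * Carve a buffer of 'len' bytes out of the current page allocation and
 * record it in the buffer descriptor.  When the allocation cannot satisfy
 * another buffer of the same size, ownership of the unmap is handed to
 * this descriptor and a fresh allocation will be obtained next time.
 */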
static void xgbe_set_buffer_data(struct xgbe_buffer_data *bd,
				 struct xgbe_page_alloc *pa,
				 unsigned int len)
{
	get_page(pa->pages);
	bd->pa = *pa;

	bd->dma_base = pa->pages_dma;
	bd->dma_off = pa->pages_offset;
	bd->dma_len = len;

	pa->pages_offset += len;
	if ((pa->pages_offset + len) > pa->pages_len) {
		/* This data descriptor is responsible for unmapping page(s) */
		bd->pa_unmap = *pa;

		/* Get a new allocation next time */
		pa->pages = NULL;
		pa->pages_len = 0;
		pa->pages_offset = 0;
		pa->pages_dma = 0;
	}
}

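/*
 * Attach header and data buffers to an Rx descriptor, replenishing the
 * ring's cached page allocations as needed.  When Rx checksum offload is
 * not enabled, the header buffer is sized for a full Rx buffer rather
 * than the smaller SKB allocation size.
 */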
static int xgbe_map_rx_buffer(struct xgbe_prv_data *pdata,
			      struct xgbe_ring *ring,
			      struct xgbe_ring_data *rdata)
{
	int ret;

	if (!ring->rx_hdr_pa.pages) {
		ret = xgbe_alloc_pages(pdata, &ring->rx_hdr_pa, 0, ring->node);
		if (ret)
			return ret;
	}

	if (!ring->rx_buf_pa.pages) {
		ret = xgbe_alloc_pages(pdata, &ring->rx_buf_pa,
				       PAGE_ALLOC_COSTLY_ORDER, ring->node);
		if (ret)
			return ret;
	}

	/* Set up the header page info */
	if (pdata->netdev->features & NETIF_F_RXCSUM) {
		xgbe_set_buffer_data(&rdata->rx.hdr, &ring->rx_hdr_pa,
				     XGBE_SKB_ALLOC_SIZE);
	} else {
		xgbe_set_buffer_data(&rdata->rx.hdr, &ring->rx_hdr_pa,
				     pdata->rx_buf_size);
	}

	/* Set up the buffer page info */
	xgbe_set_buffer_data(&rdata->rx.buf, &ring->rx_buf_pa,
			     pdata->rx_buf_size);

	return 0;
}

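/*
 * Wire each Tx ring's software descriptor data to its hardware descriptor
 * (virtual and DMA addresses), reset the ring state and let the hardware
 * interface program the Tx descriptor registers.
 */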
static void xgbe_wrapper_tx_descriptor_init(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_channel *channel;
	struct xgbe_ring *ring;
	struct xgbe_ring_data *rdata;
	struct xgbe_ring_desc *rdesc;
	dma_addr_t rdesc_dma;
	unsigned int i, j;

	DBGPR("-->xgbe_wrapper_tx_descriptor_init\n");

	for (i = 0; i < pdata->channel_count; i++) {
		channel = pdata->channel[i];
		ring = channel->tx_ring;
		if (!ring)
			break;

		rdesc = ring->rdesc;
		rdesc_dma = ring->rdesc_dma;

		for (j = 0; j < ring->rdesc_count; j++) {
			rdata = XGBE_GET_DESC_DATA(ring, j);

			rdata->rdesc = rdesc;
			rdata->rdesc_dma = rdesc_dma;

			rdesc++;
			rdesc_dma += sizeof(struct xgbe_ring_desc);
		}

		ring->cur = 0;
		ring->dirty = 0;
		memset(&ring->tx, 0, sizeof(ring->tx));

		hw_if->tx_desc_init(channel);
	}

	DBGPR("<--xgbe_wrapper_tx_descriptor_init\n");
}

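/*
 * As above, but for the Rx rings: in addition to linking software and
 * hardware descriptors, each descriptor is populated with mapped Rx
 * header/data buffers before the hardware interface initializes the ring.
 */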
static void xgbe_wrapper_rx_descriptor_init(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_channel *channel;
	struct xgbe_ring *ring;
	struct xgbe_ring_desc *rdesc;
	struct xgbe_ring_data *rdata;
	dma_addr_t rdesc_dma;
	unsigned int i, j;

	DBGPR("-->xgbe_wrapper_rx_descriptor_init\n");

	for (i = 0; i < pdata->channel_count; i++) {
		channel = pdata->channel[i];
		ring = channel->rx_ring;
		if (!ring)
			break;

		rdesc = ring->rdesc;
		rdesc_dma = ring->rdesc_dma;

		for (j = 0; j < ring->rdesc_count; j++) {
			rdata = XGBE_GET_DESC_DATA(ring, j);

			rdata->rdesc = rdesc;
			rdata->rdesc_dma = rdesc_dma;

			if (xgbe_map_rx_buffer(pdata, ring, rdata))
				break;

			rdesc++;
			rdesc_dma += sizeof(struct xgbe_ring_desc);
		}

		ring->cur = 0;
		ring->dirty = 0;

		hw_if->rx_desc_init(channel);
	}

	DBGPR("<--xgbe_wrapper_rx_descriptor_init\n");
}

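/*
 * Undo whatever mapping work was done for one descriptor: unmap and free
 * any Tx skb data, release Rx page references, unmap any Rx pages this
 * descriptor owns, and clear the per-descriptor state.
 */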
static void xgbe_unmap_rdata(struct xgbe_prv_data *pdata,
			     struct xgbe_ring_data *rdata)
{
	if (rdata->skb_dma) {
		if (rdata->mapped_as_page) {
			dma_unmap_page(pdata->dev, rdata->skb_dma,
				       rdata->skb_dma_len, DMA_TO_DEVICE);
		} else {
			dma_unmap_single(pdata->dev, rdata->skb_dma,
					 rdata->skb_dma_len, DMA_TO_DEVICE);
		}
		rdata->skb_dma = 0;
		rdata->skb_dma_len = 0;
	}

	if (rdata->skb) {
		dev_kfree_skb_any(rdata->skb);
		rdata->skb = NULL;
	}

	if (rdata->rx.hdr.pa.pages)
		put_page(rdata->rx.hdr.pa.pages);

	if (rdata->rx.hdr.pa_unmap.pages) {
		dma_unmap_page(pdata->dev, rdata->rx.hdr.pa_unmap.pages_dma,
			       rdata->rx.hdr.pa_unmap.pages_len,
			       DMA_FROM_DEVICE);
		put_page(rdata->rx.hdr.pa_unmap.pages);
	}

	if (rdata->rx.buf.pa.pages)
		put_page(rdata->rx.buf.pa.pages);

	if (rdata->rx.buf.pa_unmap.pages) {
		dma_unmap_page(pdata->dev, rdata->rx.buf.pa_unmap.pages_dma,
			       rdata->rx.buf.pa_unmap.pages_len,
			       DMA_FROM_DEVICE);
		put_page(rdata->rx.buf.pa_unmap.pages);
	}

	memset(&rdata->tx, 0, sizeof(rdata->tx));
	memset(&rdata->rx, 0, sizeof(rdata->rx));

	rdata->mapped_as_page = 0;

	if (rdata->state_saved) {
		rdata->state_saved = 0;
		rdata->state.skb = NULL;
		rdata->state.len = 0;
		rdata->state.error = 0;
	}
}

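/*
 * Map an skb for transmission: optionally reserve a slot for a context
 * descriptor (when the TSO MSS or VLAN tag changes), map the TSO header,
 * the linear data and all fragments in XGBE_TX_MAX_BUF_SIZE chunks, and
 * record the skb in the last entry used.  Returns the number of descriptor
 * entries consumed, or 0 after unmapping everything on failure.
 */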
static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
{
	struct xgbe_prv_data *pdata = channel->pdata;
	struct xgbe_ring *ring = channel->tx_ring;
	struct xgbe_ring_data *rdata;
	struct xgbe_packet_data *packet;
	skb_frag_t *frag;
	dma_addr_t skb_dma;
	unsigned int start_index, cur_index;
	unsigned int offset, tso, vlan, datalen, len;
	unsigned int i;

	DBGPR("-->xgbe_map_tx_skb: cur = %d\n", ring->cur);

	offset = 0;
	start_index = ring->cur;
	cur_index = ring->cur;

	packet = &ring->packet_data;
	packet->rdesc_count = 0;
	packet->length = 0;

	tso = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			     TSO_ENABLE);
	vlan = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			      VLAN_CTAG);

	/* Save space for a context descriptor if needed */
	if ((tso && (packet->mss != ring->tx.cur_mss)) ||
	    (vlan && (packet->vlan_ctag != ring->tx.cur_vlan_ctag)))
		cur_index++;
	rdata = XGBE_GET_DESC_DATA(ring, cur_index);

	if (tso) {
		/* Map the TSO header */
		skb_dma = dma_map_single(pdata->dev, skb->data,
					 packet->header_len, DMA_TO_DEVICE);
		if (dma_mapping_error(pdata->dev, skb_dma)) {
			netdev_alert(pdata->netdev, "dma_map_single failed\n");
			goto err_out;
		}
		rdata->skb_dma = skb_dma;
		rdata->skb_dma_len = packet->header_len;
		netif_dbg(pdata, tx_queued, pdata->netdev,
			  "skb header: index=%u, dma=%pad, len=%u\n",
			  cur_index, &skb_dma, packet->header_len);

		offset = packet->header_len;

		packet->length += packet->header_len;

		cur_index++;
		rdata = XGBE_GET_DESC_DATA(ring, cur_index);
	}

	/* Map the (remainder of the) packet */
	for (datalen = skb_headlen(skb) - offset; datalen; ) {
		len = min_t(unsigned int, datalen, XGBE_TX_MAX_BUF_SIZE);

		skb_dma = dma_map_single(pdata->dev, skb->data + offset, len,
					 DMA_TO_DEVICE);
		if (dma_mapping_error(pdata->dev, skb_dma)) {
			netdev_alert(pdata->netdev, "dma_map_single failed\n");
			goto err_out;
		}
		rdata->skb_dma = skb_dma;
		rdata->skb_dma_len = len;
		netif_dbg(pdata, tx_queued, pdata->netdev,
			  "skb data: index=%u, dma=%pad, len=%u\n",
			  cur_index, &skb_dma, len);

		datalen -= len;
		offset += len;

		packet->length += len;

		cur_index++;
		rdata = XGBE_GET_DESC_DATA(ring, cur_index);
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		netif_dbg(pdata, tx_queued, pdata->netdev,
			  "mapping frag %u\n", i);

		frag = &skb_shinfo(skb)->frags[i];
		offset = 0;

		for (datalen = skb_frag_size(frag); datalen; ) {
			len = min_t(unsigned int, datalen,
				    XGBE_TX_MAX_BUF_SIZE);

			skb_dma = skb_frag_dma_map(pdata->dev, frag, offset,
						   len, DMA_TO_DEVICE);
			if (dma_mapping_error(pdata->dev, skb_dma)) {
				netdev_alert(pdata->netdev,
					     "skb_frag_dma_map failed\n");
				goto err_out;
			}
			rdata->skb_dma = skb_dma;
			rdata->skb_dma_len = len;
			rdata->mapped_as_page = 1;
			netif_dbg(pdata, tx_queued, pdata->netdev,
				  "skb frag: index=%u, dma=%pad, len=%u\n",
				  cur_index, &skb_dma, len);

			datalen -= len;
			offset += len;

			packet->length += len;

			cur_index++;
			rdata = XGBE_GET_DESC_DATA(ring, cur_index);
		}
	}

	/* Save the skb address in the last entry. We always have some data
	 * that has been mapped so rdata is always advanced past the last
	 * piece of mapped data - use the entry pointed to by cur_index - 1.
	 */
	rdata = XGBE_GET_DESC_DATA(ring, cur_index - 1);
	rdata->skb = skb;

	/* Save the number of descriptor entries used */
	packet->rdesc_count = cur_index - start_index;

	DBGPR("<--xgbe_map_tx_skb: count=%u\n", packet->rdesc_count);

	return packet->rdesc_count;

err_out:
	while (start_index < cur_index) {
		rdata = XGBE_GET_DESC_DATA(ring, start_index++);
		xgbe_unmap_rdata(pdata, rdata);
	}

	DBGPR("<--xgbe_map_tx_skb: count=0\n");

	return 0;
}

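/*
 * Publish the descriptor-handling routines above through the xgbe_desc_if
 * callback table.  A caller outside this file would use it roughly like
 * the sketch below (illustrative only; the actual wiring lives in the
 * driver core, not in this file):
 *
 *	struct xgbe_desc_if *desc_if = &pdata->desc_if;
 *	int ret;
 *
 *	xgbe_init_function_ptrs_desc(desc_if);
 *	ret = desc_if->alloc_ring_resources(pdata);
 *	if (ret)
 *		return ret;
 *	desc_if->wrapper_tx_desc_init(pdata);
 *	desc_if->wrapper_rx_desc_init(pdata);
 */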
void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *desc_if)
{
	DBGPR("-->xgbe_init_function_ptrs_desc\n");

	desc_if->alloc_ring_resources = xgbe_alloc_ring_resources;
	desc_if->free_ring_resources = xgbe_free_ring_resources;
	desc_if->map_tx_skb = xgbe_map_tx_skb;
	desc_if->map_rx_buffer = xgbe_map_rx_buffer;
	desc_if->unmap_rdata = xgbe_unmap_rdata;
	desc_if->wrapper_tx_desc_init = xgbe_wrapper_tx_descriptor_init;
	desc_if->wrapper_rx_desc_init = xgbe_wrapper_rx_descriptor_init;

	DBGPR("<--xgbe_init_function_ptrs_desc\n");
}