xref: /linux/drivers/net/ethernet/amd/xgbe/xgbe-desc.c (revision a0285236ab93fdfdd1008afaa04561d142d6c276)
// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
/*
 * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
 * Copyright (c) 2014, Synopsys, Inc.
 * All rights reserved
 */

#include "xgbe.h"
#include "xgbe-common.h"

static void xgbe_unmap_rdata(struct xgbe_prv_data *, struct xgbe_ring_data *);

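/* Tear down a single ring: unmap and free every descriptor data entry,
 * release any cached Rx header/buffer pages, and free the coherent
 * descriptor memory itself.
 */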
static void xgbe_free_ring(struct xgbe_prv_data *pdata,
			   struct xgbe_ring *ring)
{
	struct xgbe_ring_data *rdata;
	unsigned int i;

	if (!ring)
		return;

	if (ring->rdata) {
		for (i = 0; i < ring->rdesc_count; i++) {
			rdata = XGBE_GET_DESC_DATA(ring, i);
			xgbe_unmap_rdata(pdata, rdata);
		}

		kfree(ring->rdata);
		ring->rdata = NULL;
	}

	if (ring->rx_hdr_pa.pages) {
		dma_unmap_page(pdata->dev, ring->rx_hdr_pa.pages_dma,
			       ring->rx_hdr_pa.pages_len, DMA_FROM_DEVICE);
		put_page(ring->rx_hdr_pa.pages);

		ring->rx_hdr_pa.pages = NULL;
		ring->rx_hdr_pa.pages_len = 0;
		ring->rx_hdr_pa.pages_offset = 0;
		ring->rx_hdr_pa.pages_dma = 0;
	}

	if (ring->rx_buf_pa.pages) {
		dma_unmap_page(pdata->dev, ring->rx_buf_pa.pages_dma,
			       ring->rx_buf_pa.pages_len, DMA_FROM_DEVICE);
		put_page(ring->rx_buf_pa.pages);

		ring->rx_buf_pa.pages = NULL;
		ring->rx_buf_pa.pages_len = 0;
		ring->rx_buf_pa.pages_offset = 0;
		ring->rx_buf_pa.pages_dma = 0;
	}

	if (ring->rdesc) {
		dma_free_coherent(pdata->dev,
				  (sizeof(struct xgbe_ring_desc) *
				   ring->rdesc_count),
				  ring->rdesc, ring->rdesc_dma);
		ring->rdesc = NULL;
	}
}

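/* Free the Tx and Rx rings of every channel */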
static void xgbe_free_ring_resources(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	DBGPR("-->xgbe_free_ring_resources\n");

	for (i = 0; i < pdata->channel_count; i++) {
		channel = pdata->channel[i];
		xgbe_free_ring(pdata, channel->tx_ring);
		xgbe_free_ring(pdata, channel->rx_ring);
	}

	DBGPR("<--xgbe_free_ring_resources\n");
}

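/* Allocate zeroed memory on the requested NUMA node, falling back to an
 * allocation without node preference if that fails.
 */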
static void *xgbe_alloc_node(size_t size, int node)
{
	void *mem;

	mem = kzalloc_node(size, GFP_KERNEL, node);
	if (!mem)
		mem = kzalloc(size, GFP_KERNEL);

	return mem;
}

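/* Allocate coherent DMA memory, temporarily steering the allocation to the
 * requested NUMA node and retrying without the node preference on failure.
 */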
static void *xgbe_dma_alloc_node(struct device *dev, size_t size,
				 dma_addr_t *dma, int node)
{
	void *mem;
	int cur_node = dev_to_node(dev);

	set_dev_node(dev, node);
	mem = dma_alloc_coherent(dev, size, dma, GFP_KERNEL);
	set_dev_node(dev, cur_node);

	if (!mem)
		mem = dma_alloc_coherent(dev, size, dma, GFP_KERNEL);

	return mem;
}

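/* Allocate the coherent descriptor memory and the per-descriptor tracking
 * array (struct xgbe_ring_data) for one ring, preferring the ring's node.
 */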
static int xgbe_init_ring(struct xgbe_prv_data *pdata,
			  struct xgbe_ring *ring, unsigned int rdesc_count)
{
	size_t size;

	if (!ring)
		return 0;

	/* Descriptors */
	size = rdesc_count * sizeof(struct xgbe_ring_desc);

	ring->rdesc_count = rdesc_count;
	ring->rdesc = xgbe_dma_alloc_node(pdata->dev, size, &ring->rdesc_dma,
					  ring->node);
	if (!ring->rdesc)
		return -ENOMEM;

	/* Descriptor information */
	size = rdesc_count * sizeof(struct xgbe_ring_data);

	ring->rdata = xgbe_alloc_node(size, ring->node);
	if (!ring->rdata)
		return -ENOMEM;

	netif_dbg(pdata, drv, pdata->netdev,
		  "rdesc=%p, rdesc_dma=%pad, rdata=%p, node=%d\n",
		  ring->rdesc, &ring->rdesc_dma, ring->rdata, ring->node);

	return 0;
}

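/* Allocate descriptor resources for the Tx and Rx rings of every channel;
 * on failure, everything allocated so far is released.
 */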
static int xgbe_alloc_ring_resources(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;
	int ret;

	for (i = 0; i < pdata->channel_count; i++) {
		channel = pdata->channel[i];
		netif_dbg(pdata, drv, pdata->netdev, "%s - Tx ring:\n",
			  channel->name);

		ret = xgbe_init_ring(pdata, channel->tx_ring,
				     pdata->tx_desc_count);
		if (ret) {
			netdev_alert(pdata->netdev,
				     "error initializing Tx ring\n");
			goto err_ring;
		}

		netif_dbg(pdata, drv, pdata->netdev, "%s - Rx ring:\n",
			  channel->name);

		ret = xgbe_init_ring(pdata, channel->rx_ring,
				     pdata->rx_desc_count);
		if (ret) {
			netdev_alert(pdata->netdev,
				     "error initializing Rx ring\n");
			goto err_ring;
		}
	}

	return 0;

err_ring:
	xgbe_free_ring_resources(pdata);

	return ret;
}

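/* Allocate a (possibly multi-page) block for Rx buffers and map it for DMA.
 * The order is reduced until the allocation succeeds, first on the requested
 * node and then anywhere.
 */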
static int xgbe_alloc_pages(struct xgbe_prv_data *pdata,
			    struct xgbe_page_alloc *pa, int alloc_order,
			    int node)
{
	struct page *pages = NULL;
	dma_addr_t pages_dma;
	gfp_t gfp;
	int order;

again:
	order = alloc_order;

	/* Try to obtain pages, decreasing order if necessary */
	gfp = GFP_ATOMIC | __GFP_COMP | __GFP_NOWARN;
	while (order >= 0) {
		pages = alloc_pages_node(node, gfp, order);
		if (pages)
			break;

		order--;
	}

	/* If we couldn't get local pages, try getting from anywhere */
	if (!pages && (node != NUMA_NO_NODE)) {
		node = NUMA_NO_NODE;
		goto again;
	}

	if (!pages)
		return -ENOMEM;

	/* Map the pages */
	pages_dma = dma_map_page(pdata->dev, pages, 0,
				 PAGE_SIZE << order, DMA_FROM_DEVICE);
	if (dma_mapping_error(pdata->dev, pages_dma)) {
		put_page(pages);
		return -ENOMEM;
	}

	pa->pages = pages;
	pa->pages_len = PAGE_SIZE << order;
	pa->pages_offset = 0;
	pa->pages_dma = pages_dma;

	return 0;
}

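/* Carve the next Rx buffer of 'len' bytes out of the shared page allocation,
 * taking a page reference for the descriptor. When the allocation cannot
 * satisfy another buffer of this size, responsibility for the final unmap is
 * handed to this descriptor and a fresh allocation will be made next time.
 */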
static void xgbe_set_buffer_data(struct xgbe_buffer_data *bd,
				 struct xgbe_page_alloc *pa,
				 unsigned int len)
{
	get_page(pa->pages);
	bd->pa = *pa;

	bd->dma_base = pa->pages_dma;
	bd->dma_off = pa->pages_offset;
	bd->dma_len = len;

	pa->pages_offset += len;
	if ((pa->pages_offset + len) > pa->pages_len) {
		/* This data descriptor is responsible for unmapping page(s) */
		bd->pa_unmap = *pa;

		/* Get a new allocation next time */
		pa->pages = NULL;
		pa->pages_len = 0;
		pa->pages_offset = 0;
		pa->pages_dma = 0;
	}
}

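/* Attach header and data Rx buffers to a descriptor data entry, replenishing
 * the ring's page caches first if they are empty.
 */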
static int xgbe_map_rx_buffer(struct xgbe_prv_data *pdata,
			      struct xgbe_ring *ring,
			      struct xgbe_ring_data *rdata)
{
	int ret;

	if (!ring->rx_hdr_pa.pages) {
		ret = xgbe_alloc_pages(pdata, &ring->rx_hdr_pa, 0, ring->node);
		if (ret)
			return ret;
	}

	if (!ring->rx_buf_pa.pages) {
		ret = xgbe_alloc_pages(pdata, &ring->rx_buf_pa,
				       PAGE_ALLOC_COSTLY_ORDER, ring->node);
		if (ret)
			return ret;
	}

	/* Set up the header page info */
	xgbe_set_buffer_data(&rdata->rx.hdr, &ring->rx_hdr_pa,
			     XGBE_SKB_ALLOC_SIZE);

	/* Set up the buffer page info */
	xgbe_set_buffer_data(&rdata->rx.buf, &ring->rx_buf_pa,
			     pdata->rx_buf_size);

	return 0;
}

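/* Point each Tx descriptor data entry at its hardware descriptor, reset the
 * ring state and let the hardware interface initialize the Tx descriptors.
 */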
static void xgbe_wrapper_tx_descriptor_init(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_channel *channel;
	struct xgbe_ring *ring;
	struct xgbe_ring_data *rdata;
	struct xgbe_ring_desc *rdesc;
	dma_addr_t rdesc_dma;
	unsigned int i, j;

	DBGPR("-->xgbe_wrapper_tx_descriptor_init\n");

	for (i = 0; i < pdata->channel_count; i++) {
		channel = pdata->channel[i];
		ring = channel->tx_ring;
		if (!ring)
			break;

		rdesc = ring->rdesc;
		rdesc_dma = ring->rdesc_dma;

		for (j = 0; j < ring->rdesc_count; j++) {
			rdata = XGBE_GET_DESC_DATA(ring, j);

			rdata->rdesc = rdesc;
			rdata->rdesc_dma = rdesc_dma;

			rdesc++;
			rdesc_dma += sizeof(struct xgbe_ring_desc);
		}

		ring->cur = 0;
		ring->dirty = 0;
		memset(&ring->tx, 0, sizeof(ring->tx));

		hw_if->tx_desc_init(channel);
	}

	DBGPR("<--xgbe_wrapper_tx_descriptor_init\n");
}

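/* Point each Rx descriptor data entry at its hardware descriptor, map the Rx
 * buffers, reset the ring state and let the hardware interface initialize
 * the Rx descriptors.
 */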
static void xgbe_wrapper_rx_descriptor_init(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_channel *channel;
	struct xgbe_ring *ring;
	struct xgbe_ring_desc *rdesc;
	struct xgbe_ring_data *rdata;
	dma_addr_t rdesc_dma;
	unsigned int i, j;

	DBGPR("-->xgbe_wrapper_rx_descriptor_init\n");

	for (i = 0; i < pdata->channel_count; i++) {
		channel = pdata->channel[i];
		ring = channel->rx_ring;
		if (!ring)
			break;

		rdesc = ring->rdesc;
		rdesc_dma = ring->rdesc_dma;

		for (j = 0; j < ring->rdesc_count; j++) {
			rdata = XGBE_GET_DESC_DATA(ring, j);

			rdata->rdesc = rdesc;
			rdata->rdesc_dma = rdesc_dma;

			if (xgbe_map_rx_buffer(pdata, ring, rdata))
				break;

			rdesc++;
			rdesc_dma += sizeof(struct xgbe_ring_desc);
		}

		ring->cur = 0;
		ring->dirty = 0;

		hw_if->rx_desc_init(channel);
	}

	DBGPR("<--xgbe_wrapper_rx_descriptor_init\n");
}

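/* Release everything attached to a descriptor data entry: DMA mappings,
 * the skb, Rx page references and any saved receive state.
 */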
static void xgbe_unmap_rdata(struct xgbe_prv_data *pdata,
			     struct xgbe_ring_data *rdata)
{
	if (rdata->skb_dma) {
		if (rdata->mapped_as_page) {
			dma_unmap_page(pdata->dev, rdata->skb_dma,
				       rdata->skb_dma_len, DMA_TO_DEVICE);
		} else {
			dma_unmap_single(pdata->dev, rdata->skb_dma,
					 rdata->skb_dma_len, DMA_TO_DEVICE);
		}
		rdata->skb_dma = 0;
		rdata->skb_dma_len = 0;
	}

	if (rdata->skb) {
		dev_kfree_skb_any(rdata->skb);
		rdata->skb = NULL;
	}

	if (rdata->rx.hdr.pa.pages)
		put_page(rdata->rx.hdr.pa.pages);

	if (rdata->rx.hdr.pa_unmap.pages) {
		dma_unmap_page(pdata->dev, rdata->rx.hdr.pa_unmap.pages_dma,
			       rdata->rx.hdr.pa_unmap.pages_len,
			       DMA_FROM_DEVICE);
		put_page(rdata->rx.hdr.pa_unmap.pages);
	}

	if (rdata->rx.buf.pa.pages)
		put_page(rdata->rx.buf.pa.pages);

	if (rdata->rx.buf.pa_unmap.pages) {
		dma_unmap_page(pdata->dev, rdata->rx.buf.pa_unmap.pages_dma,
			       rdata->rx.buf.pa_unmap.pages_len,
			       DMA_FROM_DEVICE);
		put_page(rdata->rx.buf.pa_unmap.pages);
	}

	memset(&rdata->tx, 0, sizeof(rdata->tx));
	memset(&rdata->rx, 0, sizeof(rdata->rx));

	rdata->mapped_as_page = 0;

	if (rdata->state_saved) {
		rdata->state_saved = 0;
		rdata->state.skb = NULL;
		rdata->state.len = 0;
		rdata->state.error = 0;
	}
}

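/* Map an skb for transmission: reserve room for a context descriptor when
 * the MSS or VLAN tag changes, map the TSO header, the linear data and all
 * fragments in XGBE_TX_MAX_BUF_SIZE chunks, and return the number of
 * descriptor entries used (0 on mapping failure, after unwinding).
 */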
static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
{
	struct xgbe_prv_data *pdata = channel->pdata;
	struct xgbe_ring *ring = channel->tx_ring;
	struct xgbe_ring_data *rdata;
	struct xgbe_packet_data *packet;
	skb_frag_t *frag;
	dma_addr_t skb_dma;
	unsigned int start_index, cur_index;
	unsigned int offset, tso, vlan, datalen, len;
	unsigned int i;

	DBGPR("-->xgbe_map_tx_skb: cur = %d\n", ring->cur);

	offset = 0;
	start_index = ring->cur;
	cur_index = ring->cur;

	packet = &ring->packet_data;
	packet->rdesc_count = 0;
	packet->length = 0;

	tso = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			     TSO_ENABLE);
	vlan = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			      VLAN_CTAG);

	/* Save space for a context descriptor if needed */
	if ((tso && (packet->mss != ring->tx.cur_mss)) ||
	    (vlan && (packet->vlan_ctag != ring->tx.cur_vlan_ctag)))
		cur_index++;
	rdata = XGBE_GET_DESC_DATA(ring, cur_index);

	if (tso) {
		/* Map the TSO header */
		skb_dma = dma_map_single(pdata->dev, skb->data,
					 packet->header_len, DMA_TO_DEVICE);
		if (dma_mapping_error(pdata->dev, skb_dma)) {
			netdev_alert(pdata->netdev, "dma_map_single failed\n");
			goto err_out;
		}
		rdata->skb_dma = skb_dma;
		rdata->skb_dma_len = packet->header_len;
		netif_dbg(pdata, tx_queued, pdata->netdev,
			  "skb header: index=%u, dma=%pad, len=%u\n",
			  cur_index, &skb_dma, packet->header_len);

		offset = packet->header_len;

		packet->length += packet->header_len;

		cur_index++;
		rdata = XGBE_GET_DESC_DATA(ring, cur_index);
	}

	/* Map the (remainder of the) packet */
	for (datalen = skb_headlen(skb) - offset; datalen; ) {
		len = min_t(unsigned int, datalen, XGBE_TX_MAX_BUF_SIZE);

		skb_dma = dma_map_single(pdata->dev, skb->data + offset, len,
					 DMA_TO_DEVICE);
		if (dma_mapping_error(pdata->dev, skb_dma)) {
			netdev_alert(pdata->netdev, "dma_map_single failed\n");
			goto err_out;
		}
		rdata->skb_dma = skb_dma;
		rdata->skb_dma_len = len;
		netif_dbg(pdata, tx_queued, pdata->netdev,
			  "skb data: index=%u, dma=%pad, len=%u\n",
			  cur_index, &skb_dma, len);

		datalen -= len;
		offset += len;

		packet->length += len;

		cur_index++;
		rdata = XGBE_GET_DESC_DATA(ring, cur_index);
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		netif_dbg(pdata, tx_queued, pdata->netdev,
			  "mapping frag %u\n", i);

		frag = &skb_shinfo(skb)->frags[i];
		offset = 0;

		for (datalen = skb_frag_size(frag); datalen; ) {
			len = min_t(unsigned int, datalen,
				    XGBE_TX_MAX_BUF_SIZE);

			skb_dma = skb_frag_dma_map(pdata->dev, frag, offset,
						   len, DMA_TO_DEVICE);
			if (dma_mapping_error(pdata->dev, skb_dma)) {
				netdev_alert(pdata->netdev,
					     "skb_frag_dma_map failed\n");
				goto err_out;
			}
			rdata->skb_dma = skb_dma;
			rdata->skb_dma_len = len;
			rdata->mapped_as_page = 1;
			netif_dbg(pdata, tx_queued, pdata->netdev,
				  "skb frag: index=%u, dma=%pad, len=%u\n",
				  cur_index, &skb_dma, len);

			datalen -= len;
			offset += len;

			packet->length += len;

			cur_index++;
			rdata = XGBE_GET_DESC_DATA(ring, cur_index);
		}
	}

	/* Save the skb address in the last entry. We always have some data
	 * that has been mapped so rdata is always advanced past the last
	 * piece of mapped data - use the entry pointed to by cur_index - 1.
	 */
	rdata = XGBE_GET_DESC_DATA(ring, cur_index - 1);
	rdata->skb = skb;

	/* Save the number of descriptor entries used */
	packet->rdesc_count = cur_index - start_index;

	DBGPR("<--xgbe_map_tx_skb: count=%u\n", packet->rdesc_count);

	return packet->rdesc_count;

err_out:
	while (start_index < cur_index) {
		rdata = XGBE_GET_DESC_DATA(ring, start_index++);
		xgbe_unmap_rdata(pdata, rdata);
	}

	DBGPR("<--xgbe_map_tx_skb: count=0\n");

	return 0;
}

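/* Populate the descriptor interface function pointers */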
void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *desc_if)
{
	DBGPR("-->xgbe_init_function_ptrs_desc\n");

	desc_if->alloc_ring_resources = xgbe_alloc_ring_resources;
	desc_if->free_ring_resources = xgbe_free_ring_resources;
	desc_if->map_tx_skb = xgbe_map_tx_skb;
	desc_if->map_rx_buffer = xgbe_map_rx_buffer;
	desc_if->unmap_rdata = xgbe_unmap_rdata;
	desc_if->wrapper_tx_desc_init = xgbe_wrapper_tx_descriptor_init;
	desc_if->wrapper_rx_desc_init = xgbe_wrapper_rx_descriptor_init;

	DBGPR("<--xgbe_init_function_ptrs_desc\n");
}