xref: /linux/drivers/net/ethernet/stmicro/stmmac/chain_mode.c (revision 6aac2aa2dfae38b60f22c3dfe4103ceefbe2d761)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*******************************************************************************
3   Specialised functions for managing Chained mode
4 
5   Copyright(C) 2011  STMicroelectronics Ltd
6 
7   It defines all the functions used to handle the normal/enhanced
8   descriptors in case of the DMA is configured to work in chained or
9   in ring mode.
10 
11 
12   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
13 *******************************************************************************/
14 
15 #include "stmmac.h"
16 
/* jumbo_frm - spread an oversized linear skb across chained TX descriptors
 * @tx_q: TX queue the frame is being queued on
 * @skb: socket buffer whose linear (non-paged) data exceeds one buffer
 * @csum: checksum-insertion flag, passed through to descriptor setup
 *
 * Splits skb_headlen() into bmax-sized slices (8KiB with enhanced
 * descriptors, 2KiB otherwise), DMA-maps each slice and programs one
 * descriptor per slice.  The first descriptor is prepared without the OWN
 * bit so the caller can hand the whole chain to the hardware atomically;
 * only the final descriptor is marked as last segment.
 *
 * Returns the index of the last descriptor used, or -1 on a DMA mapping
 * error.  NOTE(review): on a mapping failure part-way through the loop,
 * slices mapped by earlier iterations are not unmapped here — presumably
 * the caller's error path cleans them up; confirm.
 */
static int jumbo_frm(struct stmmac_tx_queue *tx_q, struct sk_buff *skb,
		     int csum)
{
	unsigned int nopaged_len = skb_headlen(skb);
	struct stmmac_priv *priv = tx_q->priv_data;
	unsigned int entry = tx_q->cur_tx;
	unsigned int bmax, des2;
	unsigned int i = 1, len;
	struct dma_desc *desc;

	desc = tx_q->dma_tx + entry;

	/* Maximum buffer size a single descriptor can carry */
	if (priv->plat->enh_desc)
		bmax = BUF_SIZE_8KiB;
	else
		bmax = BUF_SIZE_2KiB;

	/* Bytes still to map after the first bmax-sized slice */
	len = nopaged_len - bmax;

	des2 = dma_map_single(priv->device, skb->data,
			      bmax, DMA_TO_DEVICE);
	desc->des2 = cpu_to_le32(des2);
	if (dma_mapping_error(priv->device, des2))
		return -1;
	tx_q->tx_skbuff_dma[entry].buf = des2;
	tx_q->tx_skbuff_dma[entry].len = bmax;
	/* do not close the descriptor and do not set own bit */
	stmmac_prepare_tx_desc(priv, desc, 1, bmax, csum, STMMAC_CHAIN_MODE,
			0, false, skb->len);

	while (len != 0) {
		tx_q->tx_skbuff[entry] = NULL;
		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
		desc = tx_q->dma_tx + entry;

		if (len > bmax) {
			/* Intermediate slice: full bmax buffer, OWN set,
			 * not the last segment.
			 */
			des2 = dma_map_single(priv->device,
					      (skb->data + bmax * i),
					      bmax, DMA_TO_DEVICE);
			desc->des2 = cpu_to_le32(des2);
			if (dma_mapping_error(priv->device, des2))
				return -1;
			tx_q->tx_skbuff_dma[entry].buf = des2;
			tx_q->tx_skbuff_dma[entry].len = bmax;
			stmmac_prepare_tx_desc(priv, desc, 0, bmax, csum,
					STMMAC_CHAIN_MODE, 1, false, skb->len);
			len -= bmax;
			i++;
		} else {
			/* Final slice: map the remainder and close the chain */
			des2 = dma_map_single(priv->device,
					      (skb->data + bmax * i), len,
					      DMA_TO_DEVICE);
			desc->des2 = cpu_to_le32(des2);
			if (dma_mapping_error(priv->device, des2))
				return -1;
			tx_q->tx_skbuff_dma[entry].buf = des2;
			tx_q->tx_skbuff_dma[entry].len = len;
			/* last descriptor can be set now */
			stmmac_prepare_tx_desc(priv, desc, 0, len, csum,
					STMMAC_CHAIN_MODE, 1, true, skb->len);
			len = 0;
		}
	}

	tx_q->cur_tx = entry;

	return entry;
}
85 
86 static bool is_jumbo_frm(unsigned int len, bool enh_desc)
87 {
88 	bool ret = false;
89 
90 	if ((enh_desc && (len > BUF_SIZE_8KiB)) ||
91 	    (!enh_desc && (len > BUF_SIZE_2KiB)))
92 		ret = true;
93 
94 	return ret;
95 }
96 
97 static void init_dma_chain(void *des, dma_addr_t phy_addr,
98 				  unsigned int size, unsigned int extend_desc)
99 {
100 	/*
101 	 * In chained mode the des3 points to the next element in the ring.
102 	 * The latest element has to point to the head.
103 	 */
104 	int i;
105 	dma_addr_t dma_phy = phy_addr;
106 
107 	if (extend_desc) {
108 		struct dma_extended_desc *p = (struct dma_extended_desc *)des;
109 		for (i = 0; i < (size - 1); i++) {
110 			dma_phy += sizeof(struct dma_extended_desc);
111 			p->basic.des3 = cpu_to_le32((unsigned int)dma_phy);
112 			p++;
113 		}
114 		p->basic.des3 = cpu_to_le32((unsigned int)phy_addr);
115 
116 	} else {
117 		struct dma_desc *p = (struct dma_desc *)des;
118 		for (i = 0; i < (size - 1); i++) {
119 			dma_phy += sizeof(struct dma_desc);
120 			p->des3 = cpu_to_le32((unsigned int)dma_phy);
121 			p++;
122 		}
123 		p->des3 = cpu_to_le32((unsigned int)phy_addr);
124 	}
125 }
126 
127 static void refill_desc3(struct stmmac_rx_queue *rx_q, struct dma_desc *p)
128 {
129 	struct stmmac_priv *priv = rx_q->priv_data;
130 
131 	if (priv->hwts_rx_en && !priv->extend_desc)
132 		/* NOTE: Device will overwrite des3 with timestamp value if
133 		 * 1588-2002 time stamping is enabled, hence reinitialize it
134 		 * to keep explicit chaining in the descriptor.
135 		 */
136 		p->des3 = cpu_to_le32((unsigned int)(rx_q->dma_rx_phy +
137 				      (((rx_q->dirty_rx) + 1) %
138 				       priv->dma_conf.dma_rx_size) *
139 				      sizeof(struct dma_desc)));
140 }
141 
142 static void clean_desc3(struct stmmac_tx_queue *tx_q, struct dma_desc *p)
143 {
144 	struct stmmac_priv *priv = tx_q->priv_data;
145 	unsigned int entry = tx_q->dirty_tx;
146 
147 	if (tx_q->tx_skbuff_dma[entry].last_segment && !priv->extend_desc &&
148 	    priv->hwts_tx_en)
149 		/* NOTE: Device will overwrite des3 with timestamp value if
150 		 * 1588-2002 time stamping is enabled, hence reinitialize it
151 		 * to keep explicit chaining in the descriptor.
152 		 */
153 		p->des3 = cpu_to_le32((unsigned int)((tx_q->dma_tx_phy +
154 				      ((tx_q->dirty_tx + 1) %
155 				       priv->dma_conf.dma_tx_size))
156 				      * sizeof(struct dma_desc)));
157 }
158 
/* Chain-mode callback table plugged into the stmmac core; ring mode
 * provides its own stmmac_mode_ops counterpart.
 */
const struct stmmac_mode_ops chain_mode_ops = {
	.init = init_dma_chain,
	.is_jumbo_frm = is_jumbo_frm,
	.jumbo_frm = jumbo_frm,
	.refill_desc3 = refill_desc3,
	.clean_desc3 = clean_desc3,
};
166