// SPDX-License-Identifier: GPL-2.0-only
/*******************************************************************************
  Specialised functions for managing Chained mode

  Copyright(C) 2011 STMicroelectronics Ltd

  It defines all the functions used to handle the normal/enhanced
  descriptors when the DMA is configured to work in chained or
  ring mode.

  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/

#include "stmmac.h"

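/**
 * jumbo_frm - map a jumbo frame onto several chained descriptors
 * @tx_q: TX queue the frame is transmitted on
 * @skb: socket buffer holding the frame
 * @csum: checksum insertion setting for the descriptors
 *
 * The linear part of a jumbo skb exceeds what one buffer can hold, so it
 * is sliced into bmax-sized chunks (8KiB with enhanced descriptors, 2KiB
 * otherwise), each mapped into its own descriptor; only the descriptor
 * carrying the final chunk is closed as the last segment.
 *
 * Returns the last ring index used, or -1 on a DMA mapping error.
 */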
static int jumbo_frm(struct stmmac_tx_queue *tx_q, struct sk_buff *skb,
		     int csum)
{
	unsigned int nopaged_len = skb_headlen(skb);
	struct stmmac_priv *priv = tx_q->priv_data;
	unsigned int entry = tx_q->cur_tx;
	unsigned int bmax, des2;
	unsigned int i = 1, len;
	struct dma_desc *desc;

	desc = tx_q->dma_tx + entry;

	if (priv->plat->enh_desc)
		bmax = BUF_SIZE_8KiB;
	else
		bmax = BUF_SIZE_2KiB;

	len = nopaged_len - bmax;

	des2 = dma_map_single(priv->device, skb->data,
			      bmax, DMA_TO_DEVICE);
	desc->des2 = cpu_to_le32(des2);
	if (dma_mapping_error(priv->device, des2))
		return -1;
	tx_q->tx_skbuff_dma[entry].buf = des2;
	tx_q->tx_skbuff_dma[entry].len = bmax;
	/* do not close the descriptor and do not set own bit */
	stmmac_prepare_tx_desc(priv, desc, 1, bmax, csum, STMMAC_CHAIN_MODE,
			       0, false, skb->len);

	while (len != 0) {
		tx_q->tx_skbuff[entry] = NULL;
		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
		desc = tx_q->dma_tx + entry;

		if (len > bmax) {
			des2 = dma_map_single(priv->device,
					      (skb->data + bmax * i),
					      bmax, DMA_TO_DEVICE);
			desc->des2 = cpu_to_le32(des2);
			if (dma_mapping_error(priv->device, des2))
				return -1;
			tx_q->tx_skbuff_dma[entry].buf = des2;
			tx_q->tx_skbuff_dma[entry].len = bmax;
			stmmac_prepare_tx_desc(priv, desc, 0, bmax, csum,
					       STMMAC_CHAIN_MODE, 1, false,
					       skb->len);
			len -= bmax;
			i++;
		} else {
			des2 = dma_map_single(priv->device,
					      (skb->data + bmax * i), len,
					      DMA_TO_DEVICE);
			desc->des2 = cpu_to_le32(des2);
			if (dma_mapping_error(priv->device, des2))
				return -1;
			tx_q->tx_skbuff_dma[entry].buf = des2;
			tx_q->tx_skbuff_dma[entry].len = len;
			/* last descriptor can be set now */
			stmmac_prepare_tx_desc(priv, desc, 0, len, csum,
					       STMMAC_CHAIN_MODE, 1, true,
					       skb->len);
			len = 0;
		}
	}

	tx_q->cur_tx = entry;

	return entry;
}

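/**
 * is_jumbo_frm - check whether a frame must take the jumbo TX path
 * @len: frame length in bytes
 * @enh_desc: non-zero when enhanced descriptors are in use
 *
 * Returns 1 when @len does not fit in a single descriptor buffer
 * (8KiB enhanced, 2KiB normal), 0 otherwise.
 */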
static unsigned int is_jumbo_frm(int len, int enh_desc)
{
	unsigned int ret = 0;

	if ((enh_desc && (len > BUF_SIZE_8KiB)) ||
	    (!enh_desc && (len > BUF_SIZE_2KiB))) {
		ret = 1;
	}

	return ret;
}

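/**
 * init_dma_chain - link the descriptors of a ring into an explicit chain
 * @des: virtual address of the first descriptor
 * @phy_addr: DMA address of the first descriptor
 * @size: number of descriptors in the ring
 * @extend_desc: non-zero when extended descriptors are in use
 *
 * Stores the DMA address of descriptor i + 1 in the des3 field of
 * descriptor i, with the last descriptor pointing back to @phy_addr:
 *
 *	desc[0].des3 -> desc[1] ... desc[size - 1].des3 -> desc[0]
 */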
static void init_dma_chain(void *des, dma_addr_t phy_addr,
			   unsigned int size, unsigned int extend_desc)
{
	/*
	 * In chained mode des3 points to the next element in the ring.
	 * The last element has to point back to the head.
	 */
	int i;
	dma_addr_t dma_phy = phy_addr;

	if (extend_desc) {
		struct dma_extended_desc *p = (struct dma_extended_desc *)des;

		for (i = 0; i < (size - 1); i++) {
			dma_phy += sizeof(struct dma_extended_desc);
			p->basic.des3 = cpu_to_le32((unsigned int)dma_phy);
			p++;
		}
		p->basic.des3 = cpu_to_le32((unsigned int)phy_addr);
	} else {
		struct dma_desc *p = (struct dma_desc *)des;

		for (i = 0; i < (size - 1); i++) {
			dma_phy += sizeof(struct dma_desc);
			p->des3 = cpu_to_le32((unsigned int)dma_phy);
			p++;
		}
		p->des3 = cpu_to_le32((unsigned int)phy_addr);
	}
}

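/**
 * refill_desc3 - restore the next-descriptor pointer of an RX descriptor
 * @rx_q: RX queue owning the descriptor
 * @p: descriptor being refilled
 *
 * With RX timestamping on normal descriptors the hardware overwrites des3
 * with the timestamp, so the link to the next descriptor must be rebuilt
 * before the descriptor is handed back to the DMA.
 */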
static void refill_desc3(struct stmmac_rx_queue *rx_q, struct dma_desc *p)
{
	struct stmmac_priv *priv = rx_q->priv_data;

	if (priv->hwts_rx_en && !priv->extend_desc)
		/* NOTE: Device will overwrite des3 with timestamp value if
		 * 1588-2002 time stamping is enabled, hence reinitialize it
		 * to keep explicit chaining in the descriptor.
		 */
		p->des3 = cpu_to_le32((unsigned int)(rx_q->dma_rx_phy +
				      (((rx_q->dirty_rx) + 1) %
				       priv->dma_conf.dma_rx_size) *
				      sizeof(struct dma_desc)));
}

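/**
 * clean_desc3 - restore the next-descriptor pointer of a TX descriptor
 * @tx_q: TX queue owning the descriptor
 * @p: descriptor being cleaned after transmission
 *
 * The TX-side counterpart of refill_desc3(): when the last segment of a
 * frame was timestamped, the hardware has replaced des3 with the
 * timestamp, so the chain pointer is rebuilt before the descriptor is
 * reused.
 */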
static void clean_desc3(struct stmmac_tx_queue *tx_q, struct dma_desc *p)
{
	struct stmmac_priv *priv = tx_q->priv_data;
	unsigned int entry = tx_q->dirty_tx;

	if (tx_q->tx_skbuff_dma[entry].last_segment && !priv->extend_desc &&
	    priv->hwts_tx_en)
		/* NOTE: Device will overwrite des3 with timestamp value if
		 * 1588-2002 time stamping is enabled, hence reinitialize it
		 * to keep explicit chaining in the descriptor.
		 */
		p->des3 = cpu_to_le32((unsigned int)((tx_q->dma_tx_phy +
				      ((tx_q->dirty_tx + 1) %
				       priv->dma_conf.dma_tx_size))
				      * sizeof(struct dma_desc)));
}

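/* Chain-mode callbacks, plugged into the core through priv->hw->mode */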
const struct stmmac_mode_ops chain_mode_ops = {
	.init = init_dma_chain,
	.is_jumbo_frm = is_jumbo_frm,
	.jumbo_frm = jumbo_frm,
	.refill_desc3 = refill_desc3,
	.clean_desc3 = clean_desc3,
};
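
/*
 * Usage sketch (illustrative, not part of this file): the core picks this
 * table over ring_mode_ops at init time, roughly as stmmac_hw_init() does
 * when the chain_mode module parameter is set:
 *
 *	if (chain_mode) {
 *		priv->hw->mode = &chain_mode_ops;
 *		priv->mode = STMMAC_CHAIN_MODE;
 *	} else {
 *		priv->hw->mode = &ring_mode_ops;
 *		priv->mode = STMMAC_RING_MODE;
 *	}
 */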