xref: /linux/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c (revision 8fdb05de0e2db89d8f56144c60ab784812e8c3b7)
1 // SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
2 /* Copyright 2017-2019 NXP */
3 
4 #include "enetc.h"
5 
/* Allocate a DMA-coherent control BD ring of @bd_count descriptors,
 * program its base address and length into the SI registers via @hw,
 * and enable the ring.
 *
 * Returns 0 on success, -ENOMEM if the coherent allocation fails, or
 * -EINVAL if the allocation does not meet the 128-byte alignment the
 * hardware requires.
 */
int enetc_setup_cbdr(struct device *dev, struct enetc_hw *hw, int bd_count,
		     struct enetc_cbdr *cbdr)
{
	int size = bd_count * sizeof(struct enetc_cbd);

	cbdr->bd_base = dma_alloc_coherent(dev, size, &cbdr->bd_dma_base,
					   GFP_KERNEL);
	if (!cbdr->bd_base)
		return -ENOMEM;

	/* h/w requires 128B alignment */
	if (!IS_ALIGNED(cbdr->bd_dma_base, 128)) {
		dma_free_coherent(dev, size, cbdr->bd_base,
				  cbdr->bd_dma_base);
		return -EINVAL;
	}

	cbdr->next_to_clean = 0;
	cbdr->next_to_use = 0;
	cbdr->dma_dev = dev;
	cbdr->bd_count = bd_count;

	/* cache the producer/consumer/mode register addresses so the
	 * submit and clean paths don't need the enetc_hw handle
	 */
	cbdr->pir = hw->reg + ENETC_SICBDRPIR;
	cbdr->cir = hw->reg + ENETC_SICBDRCIR;
	cbdr->mr = hw->reg + ENETC_SICBDRMR;

	/* set CBDR cache attributes */
	enetc_wr(hw, ENETC_SICAR2,
		 ENETC_SICAR_RD_COHERENT | ENETC_SICAR_WR_COHERENT);

	enetc_wr(hw, ENETC_SICBDRBAR0, lower_32_bits(cbdr->bd_dma_base));
	enetc_wr(hw, ENETC_SICBDRBAR1, upper_32_bits(cbdr->bd_dma_base));
	enetc_wr(hw, ENETC_SICBDRLENR, ENETC_RTBLENR_LEN(cbdr->bd_count));

	/* start producer and consumer indices in sync (empty ring) */
	enetc_wr_reg(cbdr->pir, cbdr->next_to_clean);
	enetc_wr_reg(cbdr->cir, cbdr->next_to_use);
	/* enable ring */
	enetc_wr_reg(cbdr->mr, BIT(31));

	return 0;
}
EXPORT_SYMBOL_GPL(enetc_setup_cbdr);
48 
/* Disable the control BD ring and release its DMA-coherent memory.
 * Counterpart of enetc_setup_cbdr().
 */
void enetc_teardown_cbdr(struct enetc_cbdr *cbdr)
{
	int size = cbdr->bd_count * sizeof(struct enetc_cbd);

	/* disable ring */
	enetc_wr_reg(cbdr->mr, 0);

	dma_free_coherent(cbdr->dma_dev, size, cbdr->bd_base,
			  cbdr->bd_dma_base);
	/* clear pointers so a torn-down ring is detectable (see the
	 * !bd_base guard in enetc_send_cmd())
	 */
	cbdr->bd_base = NULL;
	cbdr->dma_dev = NULL;
}
EXPORT_SYMBOL_GPL(enetc_teardown_cbdr);
62 
/* ENETC v4: allocate the (devm-managed) NTMP ring bookkeeping and
 * initialize the single command BD ring through the ntmp library,
 * handing it the SI CBDR register addresses.
 *
 * Returns 0 on success, -ENOMEM if the ring array allocation fails,
 * otherwise whatever ntmp_init_cbdr() returns.
 */
int enetc4_setup_cbdr(struct enetc_si *si)
{
	struct ntmp_user *user = &si->ntmp_user;
	struct device *dev = &si->pdev->dev;
	struct enetc_hw *hw = &si->hw;
	struct netc_cbdr_regs regs;

	user->cbdr_num = 1;
	user->dev = dev;
	user->ring = devm_kcalloc(dev, user->cbdr_num,
				  sizeof(struct netc_cbdr), GFP_KERNEL);
	if (!user->ring)
		return -ENOMEM;

	/* NOTE(review): regs is stack-allocated and only these six
	 * members are set; confirm ntmp_init_cbdr() reads no others.
	 */
	regs.pir = hw->reg + ENETC_SICBDRPIR;
	regs.cir = hw->reg + ENETC_SICBDRCIR;
	regs.mr = hw->reg + ENETC_SICBDRMR;
	regs.bar0 = hw->reg + ENETC_SICBDRBAR0;
	regs.bar1 = hw->reg + ENETC_SICBDRBAR1;
	regs.lenr = hw->reg + ENETC_SICBDRLENR;

	return ntmp_init_cbdr(user->ring, dev, &regs);
}
EXPORT_SYMBOL_GPL(enetc4_setup_cbdr);
87 
/* ENETC v4: free the command BD ring via the ntmp library.
 * Counterpart of enetc4_setup_cbdr(); user->ring itself is
 * devm-managed and released with the device.
 */
void enetc4_teardown_cbdr(struct enetc_si *si)
{
	struct ntmp_user *user = &si->ntmp_user;

	ntmp_free_cbdr(user->ring);
	user->dev = NULL;
}
EXPORT_SYMBOL_GPL(enetc4_teardown_cbdr);
96 
/* Reclaim completed command BDs: walk from next_to_clean up to the
 * hardware consumer index, warn about any descriptor whose status
 * field reports an error, and zero each reclaimed descriptor so it is
 * pristine for reuse.
 */
static void enetc_clean_cbdr(struct enetc_cbdr *ring)
{
	struct enetc_cbd *dest_cbd;
	int i, status;

	i = ring->next_to_clean;

	/* the hardware advances CIR past each BD it has consumed */
	while (enetc_rd_reg(ring->cir) != i) {
		dest_cbd = ENETC_CBD(*ring, i);
		status = dest_cbd->status_flags & ENETC_CBD_STATUS_MASK;
		if (status)
			dev_warn(ring->dma_dev, "CMD err %04x for cmd %04x\n",
				 status, dest_cbd->cmd);

		memset(dest_cbd, 0, sizeof(*dest_cbd));

		/* ring wrap-around */
		i = (i + 1) % ring->bd_count;
	}

	ring->next_to_clean = i;
}
118 
enetc_cbd_unused(struct enetc_cbdr * r)119 static int enetc_cbd_unused(struct enetc_cbdr *r)
120 {
121 	return (r->next_to_clean - r->next_to_use - 1 + r->bd_count) %
122 		r->bd_count;
123 }
124 
/* Submit one command BD to the hardware and busy-wait for completion.
 *
 * The command in @cbd is copied into the ring, the producer index is
 * advanced, and the consumer index is polled (for up to
 * ENETC_CBDR_TIMEOUT, in 10us steps) until the hardware has processed
 * the BD. On success the descriptor — which the hardware may have
 * written back to — is copied back into @cbd for the caller.
 *
 * Returns 0 on success, -EIO if the ring was never set up (or was
 * torn down), -EBUSY on timeout.
 */
int enetc_send_cmd(struct enetc_si *si, struct enetc_cbd *cbd)
{
	struct enetc_cbdr *ring = &si->cbd_ring;
	int timeout = ENETC_CBDR_TIMEOUT;
	struct enetc_cbd *dest_cbd;
	int i;

	if (unlikely(!ring->bd_base))
		return -EIO;

	/* NOTE(review): if the ring is still full after this clean
	 * pass, the copy below would overwrite an unconsumed BD;
	 * presumably callers are serialized and the ring deep enough —
	 * confirm.
	 */
	if (unlikely(!enetc_cbd_unused(ring)))
		enetc_clean_cbdr(ring);

	i = ring->next_to_use;
	dest_cbd = ENETC_CBD(*ring, i);

	/* copy command to the ring */
	*dest_cbd = *cbd;
	i = (i + 1) % ring->bd_count;

	ring->next_to_use = i;
	/* let H/W know BD ring has been updated */
	enetc_wr_reg(ring->pir, i);

	/* poll until the hardware consumer index catches up */
	do {
		if (enetc_rd_reg(ring->cir) == i)
			break;
		udelay(10); /* cannot sleep, rtnl_lock() */
		timeout -= 10;
	} while (timeout);

	if (!timeout)
		return -EBUSY;

	/* CBD may writeback data, feedback up level */
	*cbd = *dest_cbd;

	enetc_clean_cbdr(ring);

	return 0;
}
EXPORT_SYMBOL_GPL(enetc_send_cmd);
167 
enetc_clear_mac_flt_entry(struct enetc_si * si,int index)168 int enetc_clear_mac_flt_entry(struct enetc_si *si, int index)
169 {
170 	struct enetc_cbd cbd;
171 
172 	memset(&cbd, 0, sizeof(cbd));
173 
174 	cbd.cls = 1;
175 	cbd.status_flags = ENETC_CBD_FLAGS_SF;
176 	cbd.index = cpu_to_le16(index);
177 
178 	return enetc_send_cmd(si, &cbd);
179 }
180 EXPORT_SYMBOL_GPL(enetc_clear_mac_flt_entry);
181 
/* Program MAC filter table entry @index with @mac_addr and enable it
 * for the station interfaces selected by @si_map.
 *
 * NOTE(review): the address is loaded via raw u32/u16 casts, which
 * assumes @mac_addr is suitably aligned for those accesses — confirm
 * callers always pass an aligned buffer.
 */
int enetc_set_mac_flt_entry(struct enetc_si *si, int index,
			    char *mac_addr, int si_map)
{
	struct enetc_cbd cbd;
	u32 upper;
	u16 lower;

	memset(&cbd, 0, sizeof(cbd));

	/* fill up the "set" descriptor */
	cbd.cls = 1;
	cbd.status_flags = ENETC_CBD_FLAGS_SF;
	cbd.index = cpu_to_le16(index);
	cbd.opt[3] = cpu_to_le32(si_map);
	/* enable entry */
	cbd.opt[0] = cpu_to_le32(BIT(31));

	/* first 4 bytes of the MAC in addr[0], last 2 in addr[1] */
	upper = *(const u32 *)mac_addr;
	lower = *(const u16 *)(mac_addr + 4);
	cbd.addr[0] = cpu_to_le32(upper);
	cbd.addr[1] = cpu_to_le32(lower);

	return enetc_send_cmd(si, &cbd);
}
EXPORT_SYMBOL_GPL(enetc_set_mac_flt_entry);
207 
208 /* Set entry in RFS table */
enetc_set_fs_entry(struct enetc_si * si,struct enetc_cmd_rfse * rfse,int index)209 int enetc_set_fs_entry(struct enetc_si *si, struct enetc_cmd_rfse *rfse,
210 		       int index)
211 {
212 	struct enetc_cbdr *ring = &si->cbd_ring;
213 	struct enetc_cbd cbd = {.cmd = 0};
214 	void *tmp, *tmp_align;
215 	dma_addr_t dma;
216 	int err;
217 
218 	/* fill up the "set" descriptor */
219 	cbd.cmd = 0;
220 	cbd.cls = 4;
221 	cbd.index = cpu_to_le16(index);
222 	cbd.opt[3] = cpu_to_le32(0); /* SI */
223 
224 	tmp = enetc_cbd_alloc_data_mem(si, &cbd, sizeof(*rfse),
225 				       &dma, &tmp_align);
226 	if (!tmp)
227 		return -ENOMEM;
228 
229 	memcpy(tmp_align, rfse, sizeof(*rfse));
230 
231 	err = enetc_send_cmd(si, &cbd);
232 	if (err)
233 		dev_err(ring->dma_dev, "FS entry add failed (%d)!", err);
234 
235 	enetc_cbd_free_data_mem(si, sizeof(*rfse), tmp, &dma);
236 
237 	return err;
238 }
239 EXPORT_SYMBOL_GPL(enetc_set_fs_entry);
240 
/* Read (@read == true) or write the RSS indirection table through a
 * DMA bounce buffer: each u32 entry in @table maps to one byte in the
 * hardware table.
 *
 * Returns 0 on success, -EINVAL if @count is below the minimum table
 * size the hardware accepts, -ENOMEM if the bounce buffer allocation
 * fails, otherwise the error from enetc_send_cmd().
 */
static int enetc_cmd_rss_table(struct enetc_si *si, u32 *table, int count,
			       bool read)
{
	struct enetc_cbdr *ring = &si->cbd_ring;
	struct enetc_cbd cbd = {.cmd = 0};
	u8 *tmp, *tmp_align;
	dma_addr_t dma;
	int err, i;

	if (count < ENETC_CBD_DATA_MEM_ALIGN)
		/* HW only takes in a full 64 entry table */
		return -EINVAL;

	tmp = enetc_cbd_alloc_data_mem(si, &cbd, count,
				       &dma, (void *)&tmp_align);
	if (!tmp)
		return -ENOMEM;

	/* on write, narrow each u32 entry to the byte the HW stores */
	if (!read)
		for (i = 0; i < count; i++)
			tmp_align[i] = (u8)(table[i]);

	/* fill up the descriptor */
	cbd.cmd = read ? 2 : 1;
	cbd.cls = 3;

	err = enetc_send_cmd(si, &cbd);
	if (err)
		dev_err(ring->dma_dev, "RSS cmd failed (%d)!", err);

	/* on read, widen the returned bytes back to u32 entries */
	if (read)
		for (i = 0; i < count; i++)
			table[i] = tmp_align[i];

	enetc_cbd_free_data_mem(si, count, tmp, &dma);

	return err;
}
279 
/* Get RSS table: read @count indirection-table entries into @table.
 * Returns 0 on success or a negative errno from enetc_cmd_rss_table().
 */
int enetc_get_rss_table(struct enetc_si *si, u32 *table, int count)
{
	return enetc_cmd_rss_table(si, table, count, true);
}
EXPORT_SYMBOL_GPL(enetc_get_rss_table);
286 
/* Set RSS table: write @count indirection-table entries from @table.
 * The const cast is safe: enetc_cmd_rss_table() only reads @table on
 * the write (read == false) path.
 * Returns 0 on success or a negative errno from enetc_cmd_rss_table().
 */
int enetc_set_rss_table(struct enetc_si *si, const u32 *table, int count)
{
	return enetc_cmd_rss_table(si, (u32 *)table, count, false);
}
EXPORT_SYMBOL_GPL(enetc_set_rss_table);
293 
/* ENETC v4: read @count RSS indirection-table entries into @table via
 * the NTMP RSST query.
 */
int enetc4_get_rss_table(struct enetc_si *si, u32 *table, int count)
{
	return ntmp_rsst_query_entry(&si->ntmp_user, table, count);
}
EXPORT_SYMBOL_GPL(enetc4_get_rss_table);
299 
/* ENETC v4: write @count RSS indirection-table entries from @table via
 * the NTMP RSST update.
 */
int enetc4_set_rss_table(struct enetc_si *si, const u32 *table, int count)
{
	return ntmp_rsst_update_entry(&si->ntmp_user, table, count);
}
EXPORT_SYMBOL_GPL(enetc4_set_rss_table);
305