// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/* Copyright 2017-2019 NXP */

#include "enetc.h"

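/* Allocate a DMA-coherent buffer for the control BD ring, program its base
 * address, length and index registers, and enable the ring. The hardware
 * requires the buffer to be 128-byte aligned.
 *
 * Typical pairing (illustrative sketch; the bd_count value shown here is a
 * placeholder, not necessarily what the driver uses):
 *
 *	err = enetc_setup_cbdr(dev, hw, 64, &si->cbd_ring);
 *	...
 *	enetc_teardown_cbdr(&si->cbd_ring);
 */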
int enetc_setup_cbdr(struct device *dev, struct enetc_hw *hw, int bd_count,
		     struct enetc_cbdr *cbdr)
{
	int size = bd_count * sizeof(struct enetc_cbd);

	cbdr->bd_base = dma_alloc_coherent(dev, size, &cbdr->bd_dma_base,
					   GFP_KERNEL);
	if (!cbdr->bd_base)
		return -ENOMEM;

	/* h/w requires 128B alignment */
	if (!IS_ALIGNED(cbdr->bd_dma_base, 128)) {
		dma_free_coherent(dev, size, cbdr->bd_base,
				  cbdr->bd_dma_base);
		return -EINVAL;
	}

	cbdr->next_to_clean = 0;
	cbdr->next_to_use = 0;
	cbdr->dma_dev = dev;
	cbdr->bd_count = bd_count;

	cbdr->pir = hw->reg + ENETC_SICBDRPIR;
	cbdr->cir = hw->reg + ENETC_SICBDRCIR;
	cbdr->mr = hw->reg + ENETC_SICBDRMR;

	/* set CBDR cache attributes */
	enetc_wr(hw, ENETC_SICAR2,
		 ENETC_SICAR_RD_COHERENT | ENETC_SICAR_WR_COHERENT);

	enetc_wr(hw, ENETC_SICBDRBAR0, lower_32_bits(cbdr->bd_dma_base));
	enetc_wr(hw, ENETC_SICBDRBAR1, upper_32_bits(cbdr->bd_dma_base));
	enetc_wr(hw, ENETC_SICBDRLENR, ENETC_RTBLENR_LEN(cbdr->bd_count));

	enetc_wr_reg(cbdr->pir, cbdr->next_to_clean);
	enetc_wr_reg(cbdr->cir, cbdr->next_to_use);
	/* enable ring */
	enetc_wr_reg(cbdr->mr, BIT(31));

	return 0;
}
EXPORT_SYMBOL_GPL(enetc_setup_cbdr);

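/* Disable the control BD ring and free the DMA buffer allocated by
 * enetc_setup_cbdr().
 */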
void enetc_teardown_cbdr(struct enetc_cbdr *cbdr)
{
	int size = cbdr->bd_count * sizeof(struct enetc_cbd);

	/* disable ring */
	enetc_wr_reg(cbdr->mr, 0);

	dma_free_coherent(cbdr->dma_dev, size, cbdr->bd_base,
			  cbdr->bd_dma_base);
	cbdr->bd_base = NULL;
	cbdr->dma_dev = NULL;
}
EXPORT_SYMBOL_GPL(enetc_teardown_cbdr);

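/* Reclaim BDs already consumed by the hardware: walk from next_to_clean up
 * to the consumer index, warn about any descriptor that completed with a
 * non-zero status and zero out the processed entries.
 */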
static void enetc_clean_cbdr(struct enetc_cbdr *ring)
{
	struct enetc_cbd *dest_cbd;
	int i, status;

	i = ring->next_to_clean;

	while (enetc_rd_reg(ring->cir) != i) {
		dest_cbd = ENETC_CBD(*ring, i);
		status = dest_cbd->status_flags & ENETC_CBD_STATUS_MASK;
		if (status)
			dev_warn(ring->dma_dev, "CMD err %04x for cmd %04x\n",
				 status, dest_cbd->cmd);

		memset(dest_cbd, 0, sizeof(*dest_cbd));

		i = (i + 1) % ring->bd_count;
	}

	ring->next_to_clean = i;
}

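/* Number of free BDs in the ring; one slot is deliberately left unused so
 * that a full ring can be distinguished from an empty one.
 */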
static int enetc_cbd_unused(struct enetc_cbdr *r)
{
	return (r->next_to_clean - r->next_to_use - 1 + r->bd_count) %
		r->bd_count;
}

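/* Post a command BD to the control ring and busy-wait for the hardware to
 * consume it. On success the descriptor is copied back into @cbd so that
 * callers can read any data written back by the hardware.
 *
 * Caller sketch (field values are illustrative only; see the real users
 * below, e.g. enetc_clear_mac_flt_entry()):
 *
 *	struct enetc_cbd cbd = {};
 *
 *	cbd.cls = 1;
 *	cbd.index = cpu_to_le16(index);
 *	err = enetc_send_cmd(si, &cbd);
 */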
int enetc_send_cmd(struct enetc_si *si, struct enetc_cbd *cbd)
{
	struct enetc_cbdr *ring = &si->cbd_ring;
	int timeout = ENETC_CBDR_TIMEOUT;
	struct enetc_cbd *dest_cbd;
	int i;

	if (unlikely(!ring->bd_base))
		return -EIO;

	if (unlikely(!enetc_cbd_unused(ring)))
		enetc_clean_cbdr(ring);

	i = ring->next_to_use;
	dest_cbd = ENETC_CBD(*ring, i);

	/* copy command to the ring */
	*dest_cbd = *cbd;
	i = (i + 1) % ring->bd_count;

	ring->next_to_use = i;
	/* let H/W know BD ring has been updated */
	enetc_wr_reg(ring->pir, i);

	do {
		if (enetc_rd_reg(ring->cir) == i)
			break;
		udelay(10); /* cannot sleep, rtnl_lock() */
		timeout -= 10;
	} while (timeout);

	if (!timeout)
		return -EBUSY;

	/* CBD may writeback data, feedback up level */
	*cbd = *dest_cbd;

	enetc_clean_cbdr(ring);

	return 0;
}
EXPORT_SYMBOL_GPL(enetc_send_cmd);

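/* Clear (disable) the MAC filter table entry at @index. */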
int enetc_clear_mac_flt_entry(struct enetc_si *si, int index)
{
	struct enetc_cbd cbd;

	memset(&cbd, 0, sizeof(cbd));

	cbd.cls = 1;
	cbd.status_flags = ENETC_CBD_FLAGS_SF;
	cbd.index = cpu_to_le16(index);

	return enetc_send_cmd(si, &cbd);
}
EXPORT_SYMBOL_GPL(enetc_clear_mac_flt_entry);

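/* Program the MAC filter table entry at @index with @mac_addr and the
 * station interface bitmap in @si_map, and mark the entry as enabled.
 */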
int enetc_set_mac_flt_entry(struct enetc_si *si, int index,
			    char *mac_addr, int si_map)
{
	struct enetc_cbd cbd;
	u32 upper;
	u16 lower;

	memset(&cbd, 0, sizeof(cbd));

	/* fill up the "set" descriptor */
	cbd.cls = 1;
	cbd.status_flags = ENETC_CBD_FLAGS_SF;
	cbd.index = cpu_to_le16(index);
	cbd.opt[3] = cpu_to_le32(si_map);
	/* enable entry */
	cbd.opt[0] = cpu_to_le32(BIT(31));

	upper = *(const u32 *)mac_addr;
	lower = *(const u16 *)(mac_addr + 4);
	cbd.addr[0] = cpu_to_le32(upper);
	cbd.addr[1] = cpu_to_le32(lower);

	return enetc_send_cmd(si, &cbd);
}
EXPORT_SYMBOL_GPL(enetc_set_mac_flt_entry);

/* Set entry in RFS table */
int enetc_set_fs_entry(struct enetc_si *si, struct enetc_cmd_rfse *rfse,
		       int index)
{
	struct enetc_cbdr *ring = &si->cbd_ring;
	struct enetc_cbd cbd = {.cmd = 0};
	void *tmp, *tmp_align;
	dma_addr_t dma;
	int err;

	/* fill up the "set" descriptor */
	cbd.cmd = 0;
	cbd.cls = 4;
	cbd.index = cpu_to_le16(index);
	cbd.opt[3] = cpu_to_le32(0); /* SI */

	tmp = enetc_cbd_alloc_data_mem(si, &cbd, sizeof(*rfse),
				       &dma, &tmp_align);
	if (!tmp)
		return -ENOMEM;

	memcpy(tmp_align, rfse, sizeof(*rfse));

	err = enetc_send_cmd(si, &cbd);
	if (err)
		dev_err(ring->dma_dev, "FS entry add failed (%d)!", err);

	enetc_cbd_free_data_mem(si, sizeof(*rfse), tmp, &dma);

	return err;
}
EXPORT_SYMBOL_GPL(enetc_set_fs_entry);

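/* Read or write the RSS table through the control BD ring. The table is
 * transferred via a bounce buffer; each entry is a single byte on the
 * hardware side, so only the low 8 bits of each u32 are used.
 */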
static int enetc_cmd_rss_table(struct enetc_si *si, u32 *table, int count,
			       bool read)
{
	struct enetc_cbdr *ring = &si->cbd_ring;
	struct enetc_cbd cbd = {.cmd = 0};
	u8 *tmp, *tmp_align;
	dma_addr_t dma;
	int err, i;

	if (count < ENETC_CBD_DATA_MEM_ALIGN)
		/* HW only takes in a full 64 entry table */
		return -EINVAL;

	tmp = enetc_cbd_alloc_data_mem(si, &cbd, count,
				       &dma, (void *)&tmp_align);
	if (!tmp)
		return -ENOMEM;

	if (!read)
		for (i = 0; i < count; i++)
			tmp_align[i] = (u8)(table[i]);

	/* fill up the descriptor */
	cbd.cmd = read ? 2 : 1;
	cbd.cls = 3;

	err = enetc_send_cmd(si, &cbd);
	if (err)
		dev_err(ring->dma_dev, "RSS cmd failed (%d)!", err);

	if (read)
		for (i = 0; i < count; i++)
			table[i] = tmp_align[i];

	enetc_cbd_free_data_mem(si, count, tmp, &dma);

	return err;
}

/* Get RSS table */
int enetc_get_rss_table(struct enetc_si *si, u32 *table, int count)
{
	return enetc_cmd_rss_table(si, table, count, true);
}
EXPORT_SYMBOL_GPL(enetc_get_rss_table);

/* Set RSS table */
int enetc_set_rss_table(struct enetc_si *si, const u32 *table, int count)
{
	return enetc_cmd_rss_table(si, (u32 *)table, count, false);
}
EXPORT_SYMBOL_GPL(enetc_set_rss_table);
259