xref: /linux/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c (revision 95298d63c67673c654c08952672d016212b26054)
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/* Copyright 2017-2019 NXP */

#include "enetc.h"

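/* Reclaim completed control BDs: walk the ring from next_to_clean up to the
 * hardware consumer index, warn about any descriptor reporting a non-zero
 * status and zero out each slot so it can be reused.
 */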
static void enetc_clean_cbdr(struct enetc_si *si)
{
	struct enetc_cbdr *ring = &si->cbd_ring;
	struct enetc_cbd *dest_cbd;
	int i, status;

	i = ring->next_to_clean;

	while (enetc_rd_reg(ring->cir) != i) {
		dest_cbd = ENETC_CBD(*ring, i);
		status = dest_cbd->status_flags & ENETC_CBD_STATUS_MASK;
		if (status)
			dev_warn(&si->pdev->dev, "CMD err %04x for cmd %04x\n",
				 status, dest_cbd->cmd);

		memset(dest_cbd, 0, sizeof(*dest_cbd));

		i = (i + 1) % ring->bd_count;
	}

	ring->next_to_clean = i;
}

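/* Number of free slots in the control BD ring; one slot is kept unused to
 * tell a full ring apart from an empty one.
 */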
static int enetc_cbd_unused(struct enetc_cbdr *r)
{
	return (r->next_to_clean - r->next_to_use - 1 + r->bd_count) %
		r->bd_count;
}

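/* Issue a single command through the control BD ring: copy the caller's BD
 * into the ring, advance the producer index to notify hardware, then poll the
 * consumer index until the command is consumed. On success, copy the
 * (possibly written-back) BD contents to the caller.
 */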
int enetc_send_cmd(struct enetc_si *si, struct enetc_cbd *cbd)
{
	struct enetc_cbdr *ring = &si->cbd_ring;
	int timeout = ENETC_CBDR_TIMEOUT;
	struct enetc_cbd *dest_cbd;
	int i;

	if (unlikely(!ring->bd_base))
		return -EIO;

	if (unlikely(!enetc_cbd_unused(ring)))
		enetc_clean_cbdr(si);

	i = ring->next_to_use;
	dest_cbd = ENETC_CBD(*ring, i);

	/* copy command to the ring */
	*dest_cbd = *cbd;
	i = (i + 1) % ring->bd_count;

	ring->next_to_use = i;
	/* let H/W know BD ring has been updated */
	enetc_wr_reg(ring->pir, i);

	do {
		if (enetc_rd_reg(ring->cir) == i)
			break;
		udelay(10); /* cannot sleep, rtnl_lock() */
		timeout -= 10;
	} while (timeout);

	if (!timeout)
		return -EBUSY;

	/* CBD may writeback data, feedback up level */
	*cbd = *dest_cbd;

	enetc_clean_cbdr(si);

	return 0;
}

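/* Clear a MAC filter table entry by rewriting it with everything except the
 * index zeroed, leaving the enable bit (opt[0] bit 31) unset.
 */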
int enetc_clear_mac_flt_entry(struct enetc_si *si, int index)
{
	struct enetc_cbd cbd;

	memset(&cbd, 0, sizeof(cbd));

	cbd.cls = 1;
	cbd.status_flags = ENETC_CBD_FLAGS_SF;
	cbd.index = cpu_to_le16(index);

	return enetc_send_cmd(si, &cbd);
}

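/* Program a MAC filter table entry: set the enable bit, map the entry to the
 * station interfaces in si_map and store the filter MAC address.
 */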
int enetc_set_mac_flt_entry(struct enetc_si *si, int index,
			    char *mac_addr, int si_map)
{
	struct enetc_cbd cbd;
	u32 upper;
	u16 lower;

	memset(&cbd, 0, sizeof(cbd));

	/* fill up the "set" descriptor */
	cbd.cls = 1;
	cbd.status_flags = ENETC_CBD_FLAGS_SF;
	cbd.index = cpu_to_le16(index);
	cbd.opt[3] = cpu_to_le32(si_map);
	/* enable entry */
	cbd.opt[0] = cpu_to_le32(BIT(31));

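	/* split the 6-byte MAC address into a 32-bit word and a 16-bit
	 * half-word for the addr[0]/addr[1] descriptor fields
	 */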
	upper = *(const u32 *)mac_addr;
	lower = *(const u16 *)(mac_addr + 4);
	cbd.addr[0] = cpu_to_le32(upper);
	cbd.addr[1] = cpu_to_le32(lower);

	return enetc_send_cmd(si, &cbd);
}

#define RFSE_ALIGN	64
/* Set entry in RFS table */
int enetc_set_fs_entry(struct enetc_si *si, struct enetc_cmd_rfse *rfse,
		       int index)
{
	struct enetc_cbd cbd = {.cmd = 0};
	dma_addr_t dma, dma_align;
	void *tmp, *tmp_align;
	int err;

	/* fill up the "set" descriptor */
	cbd.cmd = 0;
	cbd.cls = 4;
	cbd.index = cpu_to_le16(index);
	cbd.length = cpu_to_le16(sizeof(*rfse));
	cbd.opt[3] = cpu_to_le32(0); /* SI */

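	/* over-allocate by RFSE_ALIGN so the RFS entry can be copied to a
	 * 64-byte aligned DMA address before being passed to hardware
	 */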
	tmp = dma_alloc_coherent(&si->pdev->dev, sizeof(*rfse) + RFSE_ALIGN,
				 &dma, GFP_KERNEL);
	if (!tmp) {
		dev_err(&si->pdev->dev, "DMA mapping of RFS entry failed!\n");
		return -ENOMEM;
	}

	dma_align = ALIGN(dma, RFSE_ALIGN);
	tmp_align = PTR_ALIGN(tmp, RFSE_ALIGN);
	memcpy(tmp_align, rfse, sizeof(*rfse));

	cbd.addr[0] = cpu_to_le32(lower_32_bits(dma_align));
	cbd.addr[1] = cpu_to_le32(upper_32_bits(dma_align));

	err = enetc_send_cmd(si, &cbd);
	if (err)
		dev_err(&si->pdev->dev, "FS entry add failed (%d)!\n", err);

	dma_free_coherent(&si->pdev->dev, sizeof(*rfse) + RFSE_ALIGN,
			  tmp, dma);

	return err;
}

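/* Read or write the RSS indirection table using a class 3 command; entries
 * are exchanged with hardware through a 64-byte aligned bounce buffer.
 */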
#define RSSE_ALIGN	64
static int enetc_cmd_rss_table(struct enetc_si *si, u32 *table, int count,
			       bool read)
{
	struct enetc_cbd cbd = {.cmd = 0};
	dma_addr_t dma, dma_align;
	u8 *tmp, *tmp_align;
	int err, i;

	if (count < RSSE_ALIGN)
		/* HW only takes in a full 64 entry table */
		return -EINVAL;

	tmp = dma_alloc_coherent(&si->pdev->dev, count + RSSE_ALIGN,
				 &dma, GFP_KERNEL);
	if (!tmp) {
		dev_err(&si->pdev->dev, "DMA mapping of RSS table failed!\n");
		return -ENOMEM;
	}
	dma_align = ALIGN(dma, RSSE_ALIGN);
	tmp_align = PTR_ALIGN(tmp, RSSE_ALIGN);

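	/* write path: hardware expects one byte per table entry, so narrow
	 * the caller's u32 entries into the bounce buffer
	 */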
	if (!read)
		for (i = 0; i < count; i++)
			tmp_align[i] = (u8)(table[i]);

	/* fill up the descriptor */
	cbd.cmd = read ? 2 : 1;
	cbd.cls = 3;
	cbd.length = cpu_to_le16(count);

	cbd.addr[0] = cpu_to_le32(lower_32_bits(dma_align));
	cbd.addr[1] = cpu_to_le32(upper_32_bits(dma_align));

	err = enetc_send_cmd(si, &cbd);
	if (err)
		dev_err(&si->pdev->dev, "RSS cmd failed (%d)!\n", err);

	if (read)
		for (i = 0; i < count; i++)
			table[i] = tmp_align[i];

	dma_free_coherent(&si->pdev->dev, count + RSSE_ALIGN, tmp, dma);

	return err;
}

/* Get RSS table */
int enetc_get_rss_table(struct enetc_si *si, u32 *table, int count)
{
	return enetc_cmd_rss_table(si, table, count, true);
}

/* Set RSS table */
int enetc_set_rss_table(struct enetc_si *si, const u32 *table, int count)
{
	return enetc_cmd_rss_table(si, (u32 *)table, count, false);
}