xref: /linux/drivers/net/ethernet/microchip/sparx5/sparx5_vlan.c (revision fcc79e1714e8c2b8e216dc3149812edd37884eef)
// SPDX-License-Identifier: GPL-2.0+
/* Microchip Sparx5 Switch driver
 *
 * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries.
 */

#include "sparx5_main_regs.h"
#include "sparx5_main.h"

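/* Write the VLAN port membership mask for @vid to the ANA_L3 VLAN mask
 * registers. The mask covers up to three 32-bit words; the upper words are
 * only written on targets where is_sparx5() is true.
 */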
static int sparx5_vlant_set_mask(struct sparx5 *sparx5, u16 vid)
{
	u32 mask[3];

	/* Divide up mask in 32 bit words */
	bitmap_to_arr32(mask, sparx5->vlan_mask[vid], SPX5_PORTS);

	/* Output mask to respective registers */
	spx5_wr(mask[0], sparx5, ANA_L3_VLAN_MASK_CFG(vid));
	if (is_sparx5(sparx5)) {
		spx5_wr(mask[1], sparx5, ANA_L3_VLAN_MASK_CFG1(vid));
		spx5_wr(mask[2], sparx5, ANA_L3_VLAN_MASK_CFG2(vid));
	}

	return 0;
}

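/* Enable VLAN processing in the analyzer and map every VID 1:1 to the
 * corresponding FID (filtering ID).
 */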
void sparx5_vlan_init(struct sparx5 *sparx5)
{
	u16 vid;

	spx5_rmw(ANA_L3_VLAN_CTRL_VLAN_ENA_SET(1),
		 ANA_L3_VLAN_CTRL_VLAN_ENA,
		 sparx5,
		 ANA_L3_VLAN_CTRL);

	/* Map VLAN = FID */
	for (vid = NULL_VID; vid < VLAN_N_VID; vid++)
		spx5_rmw(ANA_L3_VLAN_CFG_VLAN_FID_SET(vid),
			 ANA_L3_VLAN_CFG_VLAN_FID,
			 sparx5,
			 ANA_L3_VLAN_CFG(vid));
}

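/* Initial per-port VLAN setup: leave the port VLAN unaware and classify all
 * incoming frames to the port's default VID (PVID).
 */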
void sparx5_vlan_port_setup(struct sparx5 *sparx5, int portno)
{
	struct sparx5_port *port = sparx5->ports[portno];

	/* Configure PVID */
	spx5_rmw(ANA_CL_VLAN_CTRL_VLAN_AWARE_ENA_SET(0) |
		 ANA_CL_VLAN_CTRL_PORT_VID_SET(port->pvid),
		 ANA_CL_VLAN_CTRL_VLAN_AWARE_ENA |
		 ANA_CL_VLAN_CTRL_PORT_VID,
		 sparx5,
		 ANA_CL_VLAN_CTRL(port->portno));
}

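/* Add the port to VLAN @vid. An untagged VLAN becomes the port's native
 * (egress untagged) VLAN, and @pvid selects it as the default ingress
 * classification; the resulting port configuration is written to hardware.
 */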
int sparx5_vlan_vid_add(struct sparx5_port *port, u16 vid, bool pvid,
			bool untagged)
{
	struct sparx5 *sparx5 = port->sparx5;
	int ret;

	/* Untagged egress vlan classification */
	if (untagged && port->vid != vid) {
		if (port->vid) {
			netdev_err(port->ndev,
				   "Port already has a native VLAN: %d\n",
				   port->vid);
			return -EBUSY;
		}
		port->vid = vid;
	}

	/* Make the port a member of the VLAN */
	set_bit(port->portno, sparx5->vlan_mask[vid]);
	ret = sparx5_vlant_set_mask(sparx5, vid);
	if (ret)
		return ret;

	/* Default ingress vlan classification */
	if (pvid)
		port->pvid = vid;

	sparx5_vlan_port_apply(sparx5, port);

	return 0;
}

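/* Remove the port from VLAN @vid, clear the PVID and native VLAN if they
 * matched the removed VID, and apply the new port configuration.
 */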
int sparx5_vlan_vid_del(struct sparx5_port *port, u16 vid)
{
	struct sparx5 *sparx5 = port->sparx5;
	int ret;

	/* 8021q removes VID 0 on module unload for all interfaces
	 * with the VLAN filtering feature. We need to keep it to
	 * receive untagged traffic.
	 */
	if (vid == 0)
		return 0;

	/* Remove the port from the VLAN's member mask */
	clear_bit(port->portno, sparx5->vlan_mask[vid]);
	ret = sparx5_vlant_set_mask(sparx5, vid);
	if (ret)
		return ret;

	/* Ingress */
	if (port->pvid == vid)
		port->pvid = 0;

	/* Egress */
	if (port->vid == vid)
		port->vid = 0;

	sparx5_vlan_port_apply(sparx5, port);

	return 0;
}

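/* Set or clear the port's bit in the port mask of PGID entry @pgid. The mask
 * spans three 32-bit registers, so the register and bit position are derived
 * from the port number.
 */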
void sparx5_pgid_update_mask(struct sparx5_port *port, int pgid, bool enable)
{
	struct sparx5 *sparx5 = port->sparx5;
	u32 val, mask;

	/* The port mask is spread across three 32-bit registers */
	if (port->portno < 32) {
		mask = BIT(port->portno);
		val = enable ? mask : 0;
		spx5_rmw(val, mask, sparx5, ANA_AC_PGID_CFG(pgid));
	} else if (port->portno < 64) {
		mask = BIT(port->portno - 32);
		val = enable ? mask : 0;
		spx5_rmw(val, mask, sparx5, ANA_AC_PGID_CFG1(pgid));
	} else if (port->portno < SPX5_PORTS) {
		mask = BIT(port->portno - 64);
		val = enable ? mask : 0;
		spx5_rmw(val, mask, sparx5, ANA_AC_PGID_CFG2(pgid));
	} else {
		netdev_err(port->ndev, "Invalid port no: %d\n", port->portno);
	}
}

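/* Clear the complete port mask of PGID entry @pgid. */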
void sparx5_pgid_clear(struct sparx5 *spx5, int pgid)
{
	spx5_wr(0, spx5, ANA_AC_PGID_CFG(pgid));
	if (is_sparx5(spx5)) {
		spx5_wr(0, spx5, ANA_AC_PGID_CFG1(pgid));
		spx5_wr(0, spx5, ANA_AC_PGID_CFG2(pgid));
	}
}

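/* Read the port mask of PGID entry @pgid into @portmask; the upper words are
 * only read on targets where is_sparx5() is true.
 */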
void sparx5_pgid_read_mask(struct sparx5 *spx5, int pgid, u32 portmask[3])
{
	portmask[0] = spx5_rd(spx5, ANA_AC_PGID_CFG(pgid));
	if (is_sparx5(spx5)) {
		portmask[1] = spx5_rd(spx5, ANA_AC_PGID_CFG1(pgid));
		portmask[2] = spx5_rd(spx5, ANA_AC_PGID_CFG2(pgid));
	}
}

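/* Recompute the forwarding state from the bridge port masks: refresh the
 * flood PGIDs, the per-port source masks (a bridged port may forward to every
 * other bridged port but not back to itself) and the automatic learning mask.
 */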
void sparx5_update_fwd(struct sparx5 *sparx5)
{
	DECLARE_BITMAP(workmask, SPX5_PORTS);
	u32 mask[3];
	int port;

	/* Divide up the forward mask in 32 bit words */
	bitmap_to_arr32(mask, sparx5->bridge_fwd_mask, SPX5_PORTS);

	/* Update flood masks */
	for (port = sparx5_get_pgid(sparx5, PGID_UC_FLOOD);
	     port <= sparx5_get_pgid(sparx5, PGID_BCAST); port++) {
		spx5_wr(mask[0], sparx5, ANA_AC_PGID_CFG(port));
		if (is_sparx5(sparx5)) {
			spx5_wr(mask[1], sparx5, ANA_AC_PGID_CFG1(port));
			spx5_wr(mask[2], sparx5, ANA_AC_PGID_CFG2(port));
		}
	}

	/* Update SRC masks */
	for (port = 0; port < sparx5->data->consts->n_ports; port++) {
		if (test_bit(port, sparx5->bridge_fwd_mask)) {
			/* Allow sending to all bridged ports except self */
			bitmap_copy(workmask, sparx5->bridge_fwd_mask, SPX5_PORTS);
			clear_bit(port, workmask);
			bitmap_to_arr32(mask, workmask, SPX5_PORTS);
			spx5_wr(mask[0], sparx5, ANA_AC_SRC_CFG(port));
			if (is_sparx5(sparx5)) {
				spx5_wr(mask[1], sparx5, ANA_AC_SRC_CFG1(port));
				spx5_wr(mask[2], sparx5, ANA_AC_SRC_CFG2(port));
			}
		} else {
			spx5_wr(0, sparx5, ANA_AC_SRC_CFG(port));
			if (is_sparx5(sparx5)) {
				spx5_wr(0, sparx5, ANA_AC_SRC_CFG1(port));
				spx5_wr(0, sparx5, ANA_AC_SRC_CFG2(port));
			}
		}
	}

	/* Learning is enabled only for bridged ports */
	bitmap_and(workmask, sparx5->bridge_fwd_mask,
		   sparx5->bridge_lrn_mask, SPX5_PORTS);
	bitmap_to_arr32(mask, workmask, SPX5_PORTS);

	/* Apply learning mask */
	spx5_wr(mask[0], sparx5, ANA_L2_AUTO_LRN_CFG);
	if (is_sparx5(sparx5)) {
		spx5_wr(mask[1], sparx5, ANA_L2_AUTO_LRN_CFG1);
		spx5_wr(mask[2], sparx5, ANA_L2_AUTO_LRN_CFG2);
	}
}

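/* Write the port's VLAN configuration to hardware: VLAN awareness, tag
 * popping and PVID on ingress, optional filtering of untagged and
 * priority-tagged frames, and the egress tagging mode and VID.
 */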
void sparx5_vlan_port_apply(struct sparx5 *sparx5,
			    struct sparx5_port *port)
{
	u32 val;

	/* Configure PVID, vlan aware */
	val = ANA_CL_VLAN_CTRL_VLAN_AWARE_ENA_SET(port->vlan_aware) |
		ANA_CL_VLAN_CTRL_VLAN_POP_CNT_SET(port->vlan_aware) |
		ANA_CL_VLAN_CTRL_PORT_VID_SET(port->pvid);
	spx5_wr(val, sparx5, ANA_CL_VLAN_CTRL(port->portno));

	val = 0;
	if (port->vlan_aware && !port->pvid)
		/* If port is vlan-aware and tagged, drop untagged and
		 * priority tagged frames.
		 */
		val = ANA_CL_VLAN_FILTER_CTRL_TAG_REQUIRED_ENA_SET(1) |
			ANA_CL_VLAN_FILTER_CTRL_PRIO_CTAG_DIS_SET(1) |
			ANA_CL_VLAN_FILTER_CTRL_PRIO_STAG_DIS_SET(1);
	spx5_wr(val, sparx5,
		ANA_CL_VLAN_FILTER_CTRL(port->portno, 0));

	/* Egress configuration (REW_TAG_CFG): VLAN tag selected via IFH */
	val = REW_TAG_CTRL_TAG_TPID_CFG_SET(5);
	if (port->vlan_aware) {
		if (port->vid)
			/* Tag all frames except when VID == DEFAULT_VLAN */
			val |= REW_TAG_CTRL_TAG_CFG_SET(1);
		else
			val |= REW_TAG_CTRL_TAG_CFG_SET(3);
	}
	spx5_wr(val, sparx5, REW_TAG_CTRL(port->portno));

	/* Egress VID */
	spx5_rmw(REW_PORT_VLAN_CFG_PORT_VID_SET(port->vid),
		 REW_PORT_VLAN_CFG_PORT_VID,
		 sparx5,
		 REW_PORT_VLAN_CFG(port->portno));
}