// SPDX-License-Identifier: GPL-2.0+
/* Microchip Sparx5 Switch driver
 *
 * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries.
 */

#include "sparx5_main_regs.h"
#include "sparx5_main.h"

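/* Write a VLAN's port membership bitmap to the ANA_L3 VLAN mask registers.
 * On devices where is_sparx5() is false, only the first 32-bit word is used.
 */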
static int sparx5_vlant_set_mask(struct sparx5 *sparx5, u16 vid)
{
	u32 mask[3];

	/* Divide up mask in 32 bit words */
	bitmap_to_arr32(mask, sparx5->vlan_mask[vid], SPX5_PORTS);

	/* Output mask to respective registers */
	spx5_wr(mask[0], sparx5, ANA_L3_VLAN_MASK_CFG(vid));
	if (is_sparx5(sparx5)) {
		spx5_wr(mask[1], sparx5, ANA_L3_VLAN_MASK_CFG1(vid));
		spx5_wr(mask[2], sparx5, ANA_L3_VLAN_MASK_CFG2(vid));
	}

	return 0;
}

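/* Enable VLAN processing in ANA_L3 and set up the default 1:1 VLAN to FID
 * mapping for all VIDs.
 */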
void sparx5_vlan_init(struct sparx5 *sparx5)
{
	u16 vid;

	spx5_rmw(ANA_L3_VLAN_CTRL_VLAN_ENA_SET(1),
		 ANA_L3_VLAN_CTRL_VLAN_ENA,
		 sparx5,
		 ANA_L3_VLAN_CTRL);

	/* Map VLAN = FID */
	for (vid = NULL_VID; vid < VLAN_N_VID; vid++)
		spx5_rmw(ANA_L3_VLAN_CFG_VLAN_FID_SET(vid),
			 ANA_L3_VLAN_CFG_VLAN_FID,
			 sparx5,
			 ANA_L3_VLAN_CFG(vid));
}

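/* Initial per-port VLAN setup: VLAN unaware, with the port's PVID used as
 * the classified VID.
 */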
void sparx5_vlan_port_setup(struct sparx5 *sparx5, int portno)
{
	struct sparx5_port *port = sparx5->ports[portno];

	/* Configure PVID */
	spx5_rmw(ANA_CL_VLAN_CTRL_VLAN_AWARE_ENA_SET(0) |
		 ANA_CL_VLAN_CTRL_PORT_VID_SET(port->pvid),
		 ANA_CL_VLAN_CTRL_VLAN_AWARE_ENA |
		 ANA_CL_VLAN_CTRL_PORT_VID,
		 sparx5,
		 ANA_CL_VLAN_CTRL(port->portno));
}

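/* Add a port to a VLAN: update the VLAN port mask, optionally record the VID
 * as the port's PVID (ingress) or native VLAN (untagged egress), and re-apply
 * the port's VLAN configuration.
 */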
int sparx5_vlan_vid_add(struct sparx5_port *port, u16 vid, bool pvid,
			bool untagged)
{
	struct sparx5 *sparx5 = port->sparx5;
	int ret;

	/* Untagged egress vlan classification */
	if (untagged && port->vid != vid) {
		if (port->vid) {
			netdev_err(port->ndev,
				   "Port already has a native VLAN: %d\n",
				   port->vid);
			return -EBUSY;
		}
		port->vid = vid;
	}

	/* Make the port a member of the VLAN */
	set_bit(port->portno, sparx5->vlan_mask[vid]);
	ret = sparx5_vlant_set_mask(sparx5, vid);
	if (ret)
		return ret;

	/* Default ingress vlan classification */
	if (pvid)
		port->pvid = vid;

	sparx5_vlan_port_apply(sparx5, port);

	return 0;
}

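/* Remove a port from a VLAN and clear the PVID/native VLAN if they match the
 * removed VID. VID 0 is kept so untagged traffic is still received.
 */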
int sparx5_vlan_vid_del(struct sparx5_port *port, u16 vid)
{
	struct sparx5 *sparx5 = port->sparx5;
	int ret;

	/* 8021q removes VID 0 on module unload for all interfaces
	 * with VLAN filtering feature. We need to keep it to receive
	 * untagged traffic.
	 */
	if (vid == 0)
		return 0;

	/* Stop the port from being a member of the vlan */
	clear_bit(port->portno, sparx5->vlan_mask[vid]);
	ret = sparx5_vlant_set_mask(sparx5, vid);
	if (ret)
		return ret;

	/* Ingress */
	if (port->pvid == vid)
		port->pvid = 0;

	/* Egress */
	if (port->vid == vid)
		port->vid = 0;

	sparx5_vlan_port_apply(sparx5, port);

	return 0;
}

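/* Set or clear a single port bit in a PGID (port group) entry. The port mask
 * spans up to three 32-bit registers.
 */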
void sparx5_pgid_update_mask(struct sparx5_port *port, int pgid, bool enable)
{
	struct sparx5 *sparx5 = port->sparx5;
	u32 val, mask;

	/* mask is spread across 3 registers x 32 bit */
	if (port->portno < 32) {
		mask = BIT(port->portno);
		val = enable ? mask : 0;
		spx5_rmw(val, mask, sparx5, ANA_AC_PGID_CFG(pgid));
	} else if (port->portno < 64) {
		mask = BIT(port->portno - 32);
		val = enable ? mask : 0;
		spx5_rmw(val, mask, sparx5, ANA_AC_PGID_CFG1(pgid));
	} else if (port->portno < SPX5_PORTS) {
		mask = BIT(port->portno - 64);
		val = enable ? mask : 0;
		spx5_rmw(val, mask, sparx5, ANA_AC_PGID_CFG2(pgid));
	} else {
		netdev_err(port->ndev, "Invalid port no: %d\n", port->portno);
	}
}

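/* Remove all ports from a PGID entry */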
void sparx5_pgid_clear(struct sparx5 *spx5, int pgid)
{
	spx5_wr(0, spx5, ANA_AC_PGID_CFG(pgid));
	if (is_sparx5(spx5)) {
		spx5_wr(0, spx5, ANA_AC_PGID_CFG1(pgid));
		spx5_wr(0, spx5, ANA_AC_PGID_CFG2(pgid));
	}
}

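/* Read the port mask of a PGID entry into a 3-word array */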
void sparx5_pgid_read_mask(struct sparx5 *spx5, int pgid, u32 portmask[3])
{
	portmask[0] = spx5_rd(spx5, ANA_AC_PGID_CFG(pgid));
	if (is_sparx5(spx5)) {
		portmask[1] = spx5_rd(spx5, ANA_AC_PGID_CFG1(pgid));
		portmask[2] = spx5_rd(spx5, ANA_AC_PGID_CFG2(pgid));
	}
}

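/* Recompute forwarding and learning state from the bridge port masks:
 * each bridged port may forward to every other bridged port but itself,
 * and automatic learning is enabled only on bridged ports.
 */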
void sparx5_update_fwd(struct sparx5 *sparx5)
{
	DECLARE_BITMAP(workmask, SPX5_PORTS);
	u32 mask[3];
	int port;

	/* Divide up fwd mask in 32 bit words */
	bitmap_to_arr32(mask, sparx5->bridge_fwd_mask, SPX5_PORTS);

	/* Update SRC masks */
	for (port = 0; port < sparx5->data->consts->n_ports; port++) {
		if (test_bit(port, sparx5->bridge_fwd_mask)) {
			/* Allow to send to all bridged but self */
			bitmap_copy(workmask, sparx5->bridge_fwd_mask, SPX5_PORTS);
			clear_bit(port, workmask);
			bitmap_to_arr32(mask, workmask, SPX5_PORTS);
			spx5_wr(mask[0], sparx5, ANA_AC_SRC_CFG(port));
			if (is_sparx5(sparx5)) {
				spx5_wr(mask[1], sparx5, ANA_AC_SRC_CFG1(port));
				spx5_wr(mask[2], sparx5, ANA_AC_SRC_CFG2(port));
			}
		} else {
			spx5_wr(0, sparx5, ANA_AC_SRC_CFG(port));
			if (is_sparx5(sparx5)) {
				spx5_wr(0, sparx5, ANA_AC_SRC_CFG1(port));
				spx5_wr(0, sparx5, ANA_AC_SRC_CFG2(port));
			}
		}
	}

	/* Learning enabled only for bridged ports */
	bitmap_and(workmask, sparx5->bridge_fwd_mask,
		   sparx5->bridge_lrn_mask, SPX5_PORTS);
	bitmap_to_arr32(mask, workmask, SPX5_PORTS);

	/* Apply learning mask */
	spx5_wr(mask[0], sparx5, ANA_L2_AUTO_LRN_CFG);
	if (is_sparx5(sparx5)) {
		spx5_wr(mask[1], sparx5, ANA_L2_AUTO_LRN_CFG1);
		spx5_wr(mask[2], sparx5, ANA_L2_AUTO_LRN_CFG2);
	}
}

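/* Apply the port's current VLAN state (awareness, PVID, native VID) to the
 * classifier (ANA_CL) and rewriter (REW) registers.
 */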
void sparx5_vlan_port_apply(struct sparx5 *sparx5,
			    struct sparx5_port *port)
{
	u32 val;

	/* Configure PVID, vlan aware */
	val = ANA_CL_VLAN_CTRL_VLAN_AWARE_ENA_SET(port->vlan_aware) |
	      ANA_CL_VLAN_CTRL_VLAN_POP_CNT_SET(port->vlan_aware) |
	      ANA_CL_VLAN_CTRL_PORT_VID_SET(port->pvid);
	spx5_wr(val, sparx5, ANA_CL_VLAN_CTRL(port->portno));

	val = 0;
	if (port->vlan_aware && !port->pvid)
		/* If port is vlan-aware and tagged, drop untagged and
		 * priority tagged frames.
		 */
		val = ANA_CL_VLAN_FILTER_CTRL_TAG_REQUIRED_ENA_SET(1) |
		      ANA_CL_VLAN_FILTER_CTRL_PRIO_CTAG_DIS_SET(1) |
		      ANA_CL_VLAN_FILTER_CTRL_PRIO_STAG_DIS_SET(1);
	spx5_wr(val, sparx5,
		ANA_CL_VLAN_FILTER_CTRL(port->portno, 0));

	/* Egress configuration (REW_TAG_CFG): VLAN tag selected via IFH */
	val = REW_TAG_CTRL_TAG_TPID_CFG_SET(5);
	if (port->vlan_aware) {
		if (port->vid)
			/* Tag all frames except when VID == DEFAULT_VLAN */
			val |= REW_TAG_CTRL_TAG_CFG_SET(1);
		else
			val |= REW_TAG_CTRL_TAG_CFG_SET(3);
	}
	spx5_wr(val, sparx5, REW_TAG_CTRL(port->portno));

	/* Egress VID */
	spx5_rmw(REW_PORT_VLAN_CFG_PORT_VID_SET(port->vid),
		 REW_PORT_VLAN_CFG_PORT_VID,
		 sparx5,
		 REW_PORT_VLAN_CFG(port->portno));
}