/* linux/drivers/net/ethernet/mellanox/mlxsw/spectrum_matchall.c
 * (xref revision 9406b485dea5e25bed7c81cd822747d494cc8bde)
 */
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2017-2020 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <net/flow_offload.h>

#include "spectrum.h"
#include "spectrum_span.h"
#include "reg.h"
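
/* Matchall actions the driver can offload: mirroring a port's traffic
 * to another port (via a SPAN agent) and sampling packets to a psample
 * group.
 */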
enum mlxsw_sp_mall_action_type {
	MLXSW_SP_MALL_ACTION_TYPE_MIRROR,
	MLXSW_SP_MALL_ACTION_TYPE_SAMPLE,
};

struct mlxsw_sp_mall_mirror_entry {
	const struct net_device *to_dev;
	int span_id;
};
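
/* One offloaded matchall rule. Identified by the TC cookie; the union
 * holds per-action state: the SPAN agent ID for mirror rules, the
 * psample parameters for sample rules. Freed via the RCU head since
 * the sampling path may still hold a reference.
 */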
struct mlxsw_sp_mall_entry {
	struct list_head list;
	unsigned long cookie;
	enum mlxsw_sp_mall_action_type type;
	bool ingress;
	union {
		struct mlxsw_sp_mall_mirror_entry mirror;
		struct mlxsw_sp_port_sample sample;
	};
	struct rcu_head rcu;
};
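
/* Find a matchall entry on the block by its TC cookie. */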
static struct mlxsw_sp_mall_entry *
mlxsw_sp_mall_entry_find(struct mlxsw_sp_flow_block *block, unsigned long cookie)
{
	struct mlxsw_sp_mall_entry *mall_entry;

	list_for_each_entry(mall_entry, &block->mall_list, list)
		if (mall_entry->cookie == cookie)
			return mall_entry;

	return NULL;
}
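
/* Set up mirroring from a port: resolve a SPAN agent towards the
 * destination netdev, mark the port as analyzed, and bind the agent to
 * the port's ingress or egress mirroring trigger. Unwinds in reverse
 * order on failure.
 */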
static int
mlxsw_sp_mall_port_mirror_add(struct mlxsw_sp_port *mlxsw_sp_port,
			      struct mlxsw_sp_mall_entry *mall_entry)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_span_trigger_parms parms;
	enum mlxsw_sp_span_trigger trigger;
	int err;

	if (!mall_entry->mirror.to_dev) {
		netdev_err(mlxsw_sp_port->dev, "Could not find requested device\n");
		return -EINVAL;
	}

	err = mlxsw_sp_span_agent_get(mlxsw_sp, mall_entry->mirror.to_dev,
				      &mall_entry->mirror.span_id);
	if (err)
		return err;

	err = mlxsw_sp_span_analyzed_port_get(mlxsw_sp_port,
					      mall_entry->ingress);
	if (err)
		goto err_analyzed_port_get;

	trigger = mall_entry->ingress ? MLXSW_SP_SPAN_TRIGGER_INGRESS :
					MLXSW_SP_SPAN_TRIGGER_EGRESS;
	parms.span_id = mall_entry->mirror.span_id;
	err = mlxsw_sp_span_agent_bind(mlxsw_sp, trigger, mlxsw_sp_port,
				       &parms);
	if (err)
		goto err_agent_bind;

	return 0;

err_agent_bind:
	mlxsw_sp_span_analyzed_port_put(mlxsw_sp_port, mall_entry->ingress);
err_analyzed_port_get:
	mlxsw_sp_span_agent_put(mlxsw_sp, mall_entry->mirror.span_id);
	return err;
}
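
/* Tear down mirroring from a port, releasing resources in the reverse
 * order of mlxsw_sp_mall_port_mirror_add().
 */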
static void
mlxsw_sp_mall_port_mirror_del(struct mlxsw_sp_port *mlxsw_sp_port,
			      struct mlxsw_sp_mall_entry *mall_entry)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_span_trigger_parms parms;
	enum mlxsw_sp_span_trigger trigger;

	trigger = mall_entry->ingress ? MLXSW_SP_SPAN_TRIGGER_INGRESS :
					MLXSW_SP_SPAN_TRIGGER_EGRESS;
	parms.span_id = mall_entry->mirror.span_id;
	mlxsw_sp_span_agent_unbind(mlxsw_sp, trigger, mlxsw_sp_port, &parms);
	mlxsw_sp_span_analyzed_port_put(mlxsw_sp_port, mall_entry->ingress);
	mlxsw_sp_span_agent_put(mlxsw_sp, mall_entry->mirror.span_id);
}
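
/* Enable or disable packet sampling on a local port via the MPSC
 * (Monitoring Packet Sampling Configuration) register.
 */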
static int mlxsw_sp_mall_port_sample_set(struct mlxsw_sp_port *mlxsw_sp_port,
					 bool enable, u32 rate)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char mpsc_pl[MLXSW_REG_MPSC_LEN];

	mlxsw_reg_mpsc_pack(mpsc_pl, mlxsw_sp_port->local_port, enable, rate);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpsc), mpsc_pl);
}
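
/* Enable sampling on a port. Only one sample rule may be active per
 * port, so the per-port sample pointer also acts as a busy flag. It is
 * published with RCU because the sampling trap handler dereferences it
 * from the RX path.
 */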
static int
mlxsw_sp_mall_port_sample_add(struct mlxsw_sp_port *mlxsw_sp_port,
			      struct mlxsw_sp_mall_entry *mall_entry)
{
	int err;

	if (rtnl_dereference(mlxsw_sp_port->sample)) {
		netdev_err(mlxsw_sp_port->dev, "sample already active\n");
		return -EEXIST;
	}
	rcu_assign_pointer(mlxsw_sp_port->sample, &mall_entry->sample);

	err = mlxsw_sp_mall_port_sample_set(mlxsw_sp_port, true,
					    mall_entry->sample.rate);
	if (err)
		goto err_port_sample_set;
	return 0;

err_port_sample_set:
	RCU_INIT_POINTER(mlxsw_sp_port->sample, NULL);
	return err;
}
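
/* Disable sampling on a port and clear the per-port sample pointer. */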
static void
mlxsw_sp_mall_port_sample_del(struct mlxsw_sp_port *mlxsw_sp_port)
{
	if (!mlxsw_sp_port->sample)
		return;

	mlxsw_sp_mall_port_sample_set(mlxsw_sp_port, false, 1);
	RCU_INIT_POINTER(mlxsw_sp_port->sample, NULL);
}
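
/* Apply a matchall entry to one port, dispatching on the action type. */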
static int
mlxsw_sp_mall_port_rule_add(struct mlxsw_sp_port *mlxsw_sp_port,
			    struct mlxsw_sp_mall_entry *mall_entry)
{
	switch (mall_entry->type) {
	case MLXSW_SP_MALL_ACTION_TYPE_MIRROR:
		return mlxsw_sp_mall_port_mirror_add(mlxsw_sp_port, mall_entry);
	case MLXSW_SP_MALL_ACTION_TYPE_SAMPLE:
		return mlxsw_sp_mall_port_sample_add(mlxsw_sp_port, mall_entry);
	default:
		WARN_ON(1);
		return -EINVAL;
	}
}
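
/* Remove a matchall entry from one port. */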
static void
mlxsw_sp_mall_port_rule_del(struct mlxsw_sp_port *mlxsw_sp_port,
			    struct mlxsw_sp_mall_entry *mall_entry)
{
	switch (mall_entry->type) {
	case MLXSW_SP_MALL_ACTION_TYPE_MIRROR:
		mlxsw_sp_mall_port_mirror_del(mlxsw_sp_port, mall_entry);
		break;
	case MLXSW_SP_MALL_ACTION_TYPE_SAMPLE:
		mlxsw_sp_mall_port_sample_del(mlxsw_sp_port);
		break;
	default:
		WARN_ON(1);
	}
}
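
/* Offload a matchall rule to every port bound to the block. Only a
 * single mirror or sample action on chain 0 is supported, and the
 * block must not be bound to both ingress and egress. An ingress rule
 * blocks the block from later being bound to egress, and vice versa,
 * hence the blocker counters. A rough tc equivalent (device names are
 * illustrative):
 *
 *   tc filter add dev swp1 ingress pref 1 matchall skip_sw \
 *	action mirred egress mirror dev swp2
 */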
int mlxsw_sp_mall_replace(struct mlxsw_sp_flow_block *block,
			  struct tc_cls_matchall_offload *f)
{
	struct mlxsw_sp_flow_block_binding *binding;
	struct mlxsw_sp_mall_entry *mall_entry;
	__be16 protocol = f->common.protocol;
	struct flow_action_entry *act;
	int err;

	if (!flow_offload_has_one_action(&f->rule->action)) {
		NL_SET_ERR_MSG(f->common.extack, "Only singular actions are supported");
		return -EOPNOTSUPP;
	}

	if (f->common.chain_index) {
		NL_SET_ERR_MSG(f->common.extack, "Only chain 0 is supported");
		return -EOPNOTSUPP;
	}

	if (mlxsw_sp_flow_block_is_mixed_bound(block)) {
		NL_SET_ERR_MSG(f->common.extack, "Only not mixed bound blocks are supported");
		return -EOPNOTSUPP;
	}

	mall_entry = kzalloc(sizeof(*mall_entry), GFP_KERNEL);
	if (!mall_entry)
		return -ENOMEM;
	mall_entry->cookie = f->cookie;
	mall_entry->ingress = mlxsw_sp_flow_block_is_ingress_bound(block);

	act = &f->rule->action.entries[0];

	if (act->id == FLOW_ACTION_MIRRED && protocol == htons(ETH_P_ALL)) {
		mall_entry->type = MLXSW_SP_MALL_ACTION_TYPE_MIRROR;
		mall_entry->mirror.to_dev = act->dev;
	} else if (act->id == FLOW_ACTION_SAMPLE &&
		   protocol == htons(ETH_P_ALL)) {
		if (act->sample.rate > MLXSW_REG_MPSC_RATE_MAX) {
			NL_SET_ERR_MSG(f->common.extack, "Sample rate not supported");
			err = -EOPNOTSUPP;
			goto errout;
		}
		mall_entry->type = MLXSW_SP_MALL_ACTION_TYPE_SAMPLE;
		mall_entry->sample.psample_group = act->sample.psample_group;
		mall_entry->sample.truncate = act->sample.truncate;
		mall_entry->sample.trunc_size = act->sample.trunc_size;
		mall_entry->sample.rate = act->sample.rate;
	} else {
		err = -EOPNOTSUPP;
		goto errout;
	}

	list_for_each_entry(binding, &block->binding_list, list) {
		err = mlxsw_sp_mall_port_rule_add(binding->mlxsw_sp_port,
						  mall_entry);
		if (err)
			goto rollback;
	}

	block->rule_count++;
	if (mall_entry->ingress)
		block->egress_blocker_rule_count++;
	else
		block->ingress_blocker_rule_count++;
	list_add_tail(&mall_entry->list, &block->mall_list);
	return 0;

rollback:
	list_for_each_entry_continue_reverse(binding, &block->binding_list,
					     list)
		mlxsw_sp_mall_port_rule_del(binding->mlxsw_sp_port, mall_entry);
errout:
	kfree(mall_entry);
	return err;
}
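
/* Remove a matchall rule from every port bound to the block. The entry
 * is freed via kfree_rcu() since sampled RX packets may still be
 * in-flight and referencing it.
 */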
void mlxsw_sp_mall_destroy(struct mlxsw_sp_flow_block *block,
			   struct tc_cls_matchall_offload *f)
{
	struct mlxsw_sp_flow_block_binding *binding;
	struct mlxsw_sp_mall_entry *mall_entry;

	mall_entry = mlxsw_sp_mall_entry_find(block, f->cookie);
	if (!mall_entry) {
		NL_SET_ERR_MSG(f->common.extack, "Entry not found");
		return;
	}

	list_del(&mall_entry->list);
	if (mall_entry->ingress)
		block->egress_blocker_rule_count--;
	else
		block->ingress_blocker_rule_count--;
	block->rule_count--;
	list_for_each_entry(binding, &block->binding_list, list)
		mlxsw_sp_mall_port_rule_del(binding->mlxsw_sp_port, mall_entry);
	kfree_rcu(mall_entry, rcu); /* sample RX packets may be in-flight */
}
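
/* Replay all of the block's matchall rules on a port being bound to
 * the block, rolling back on failure.
 */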
int mlxsw_sp_mall_port_bind(struct mlxsw_sp_flow_block *block,
			    struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp_mall_entry *mall_entry;
	int err;

	list_for_each_entry(mall_entry, &block->mall_list, list) {
		err = mlxsw_sp_mall_port_rule_add(mlxsw_sp_port, mall_entry);
		if (err)
			goto rollback;
	}
	return 0;

rollback:
	list_for_each_entry_continue_reverse(mall_entry, &block->mall_list,
					     list)
		mlxsw_sp_mall_port_rule_del(mlxsw_sp_port, mall_entry);
	return err;
}
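
/* Remove all of the block's matchall rules from a port being unbound
 * from the block.
 */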
void mlxsw_sp_mall_port_unbind(struct mlxsw_sp_flow_block *block,
			       struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp_mall_entry *mall_entry;

	list_for_each_entry(mall_entry, &block->mall_list, list)
		mlxsw_sp_mall_port_rule_del(mlxsw_sp_port, mall_entry);
}