xref: /linux/drivers/net/ethernet/mellanox/mlxsw/spectrum_matchall.c (revision 40e79150c1686263e6a031d7702aec63aff31332)
1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2 /* Copyright (c) 2017-2020 Mellanox Technologies. All rights reserved */
3 
4 #include <linux/kernel.h>
5 #include <linux/errno.h>
6 #include <linux/netdevice.h>
7 #include <net/flow_offload.h>
8 
9 #include "spectrum.h"
10 #include "spectrum_span.h"
11 #include "reg.h"
12 
/* Hardware action a matchall rule performs on matching packets. */
enum mlxsw_sp_mall_action_type {
	MLXSW_SP_MALL_ACTION_TYPE_MIRROR,	/* SPAN port mirroring */
	MLXSW_SP_MALL_ACTION_TYPE_SAMPLE,	/* per-port packet sampling */
};
17 
/* State for a mirror (SPAN) matchall action. */
struct mlxsw_sp_mall_mirror_entry {
	const struct net_device *to_dev;	/* netdev mirrored packets are sent towards */
	int span_id;				/* SPAN agent ID returned by mlxsw_sp_span_mirror_add() */
};
22 
/* One offloaded tc-matchall rule, linked into its flow block's mall_list. */
struct mlxsw_sp_mall_entry {
	struct list_head list;		/* node in mlxsw_sp_flow_block::mall_list */
	unsigned long cookie;		/* TC cookie; unique rule key within the block */
	enum mlxsw_sp_mall_action_type type;
	bool ingress;			/* true when the owning block is ingress-bound */
	union {				/* action-specific state, selected by 'type' */
		struct mlxsw_sp_mall_mirror_entry mirror;
		struct mlxsw_sp_port_sample sample;
	};
	struct rcu_head rcu;		/* freed via kfree_rcu(); sample RX path may still reference it */
};
34 
35 static struct mlxsw_sp_mall_entry *
36 mlxsw_sp_mall_entry_find(struct mlxsw_sp_flow_block *block, unsigned long cookie)
37 {
38 	struct mlxsw_sp_mall_entry *mall_entry;
39 
40 	list_for_each_entry(mall_entry, &block->mall_list, list)
41 		if (mall_entry->cookie == cookie)
42 			return mall_entry;
43 
44 	return NULL;
45 }
46 
47 static int
48 mlxsw_sp_mall_port_mirror_add(struct mlxsw_sp_port *mlxsw_sp_port,
49 			      struct mlxsw_sp_mall_entry *mall_entry)
50 {
51 	enum mlxsw_sp_span_type span_type;
52 
53 	if (!mall_entry->mirror.to_dev) {
54 		netdev_err(mlxsw_sp_port->dev, "Could not find requested device\n");
55 		return -EINVAL;
56 	}
57 
58 	span_type = mall_entry->ingress ? MLXSW_SP_SPAN_INGRESS :
59 					  MLXSW_SP_SPAN_EGRESS;
60 	return mlxsw_sp_span_mirror_add(mlxsw_sp_port,
61 					mall_entry->mirror.to_dev,
62 					span_type, true,
63 					&mall_entry->mirror.span_id);
64 }
65 
66 static void
67 mlxsw_sp_mall_port_mirror_del(struct mlxsw_sp_port *mlxsw_sp_port,
68 			      struct mlxsw_sp_mall_entry *mall_entry)
69 {
70 	enum mlxsw_sp_span_type span_type;
71 
72 	span_type = mall_entry->ingress ? MLXSW_SP_SPAN_INGRESS :
73 					  MLXSW_SP_SPAN_EGRESS;
74 	mlxsw_sp_span_mirror_del(mlxsw_sp_port, mall_entry->mirror.span_id,
75 				 span_type, true);
76 }
77 
78 static int mlxsw_sp_mall_port_sample_set(struct mlxsw_sp_port *mlxsw_sp_port,
79 					 bool enable, u32 rate)
80 {
81 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
82 	char mpsc_pl[MLXSW_REG_MPSC_LEN];
83 
84 	mlxsw_reg_mpsc_pack(mpsc_pl, mlxsw_sp_port->local_port, enable, rate);
85 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpsc), mpsc_pl);
86 }
87 
/* Install the sample action of @mall_entry on @mlxsw_sp_port.
 *
 * Only one sampler per port is supported; returns -EEXIST if one is already
 * active. The sample state is published via RCU *before* enabling sampling
 * in hardware, so the trap/RX path never sees a sampled packet without the
 * matching state. Returns 0 or a negative errno.
 */
static int
mlxsw_sp_mall_port_sample_add(struct mlxsw_sp_port *mlxsw_sp_port,
			      struct mlxsw_sp_mall_entry *mall_entry)
{
	int err;

	/* Protected by RTNL, hence rtnl_dereference(). */
	if (rtnl_dereference(mlxsw_sp_port->sample)) {
		netdev_err(mlxsw_sp_port->dev, "sample already active\n");
		return -EEXIST;
	}
	/* Publish the sample parameters for the RX path before enabling
	 * sampling in hardware.
	 */
	rcu_assign_pointer(mlxsw_sp_port->sample, &mall_entry->sample);

	err = mlxsw_sp_mall_port_sample_set(mlxsw_sp_port, true,
					    mall_entry->sample.rate);
	if (err)
		goto err_port_sample_set;
	return 0;

err_port_sample_set:
	/* Hardware was never enabled, so a plain pointer reset suffices. */
	RCU_INIT_POINTER(mlxsw_sp_port->sample, NULL);
	return err;
}
110 
111 static void
112 mlxsw_sp_mall_port_sample_del(struct mlxsw_sp_port *mlxsw_sp_port)
113 {
114 	if (!mlxsw_sp_port->sample)
115 		return;
116 
117 	mlxsw_sp_mall_port_sample_set(mlxsw_sp_port, false, 1);
118 	RCU_INIT_POINTER(mlxsw_sp_port->sample, NULL);
119 }
120 
121 static int
122 mlxsw_sp_mall_port_rule_add(struct mlxsw_sp_port *mlxsw_sp_port,
123 			    struct mlxsw_sp_mall_entry *mall_entry)
124 {
125 	switch (mall_entry->type) {
126 	case MLXSW_SP_MALL_ACTION_TYPE_MIRROR:
127 		return mlxsw_sp_mall_port_mirror_add(mlxsw_sp_port, mall_entry);
128 	case MLXSW_SP_MALL_ACTION_TYPE_SAMPLE:
129 		return mlxsw_sp_mall_port_sample_add(mlxsw_sp_port, mall_entry);
130 	default:
131 		WARN_ON(1);
132 		return -EINVAL;
133 	}
134 }
135 
136 static void
137 mlxsw_sp_mall_port_rule_del(struct mlxsw_sp_port *mlxsw_sp_port,
138 			    struct mlxsw_sp_mall_entry *mall_entry)
139 {
140 	switch (mall_entry->type) {
141 	case MLXSW_SP_MALL_ACTION_TYPE_MIRROR:
142 		mlxsw_sp_mall_port_mirror_del(mlxsw_sp_port, mall_entry);
143 		break;
144 	case MLXSW_SP_MALL_ACTION_TYPE_SAMPLE:
145 		mlxsw_sp_mall_port_sample_del(mlxsw_sp_port);
146 		break;
147 	default:
148 		WARN_ON(1);
149 	}
150 }
151 
/* Offload a tc-matchall rule described by @f onto every port currently bound
 * to @block.
 *
 * Supported: exactly one action (mirred mirror or sample), chain 0 only,
 * protocol "all", and blocks that are not bound to both directions at once.
 * On success the rule is appended to the block's mall_list and the block's
 * rule accounting is updated. Returns 0 or a negative errno; on failure any
 * partially installed per-port rules are rolled back.
 */
int mlxsw_sp_mall_replace(struct mlxsw_sp_flow_block *block,
			  struct tc_cls_matchall_offload *f)
{
	struct mlxsw_sp_flow_block_binding *binding;
	struct mlxsw_sp_mall_entry *mall_entry;
	__be16 protocol = f->common.protocol;
	struct flow_action_entry *act;
	int err;

	if (!flow_offload_has_one_action(&f->rule->action)) {
		NL_SET_ERR_MSG(f->common.extack, "Only singular actions are supported");
		return -EOPNOTSUPP;
	}

	if (f->common.chain_index) {
		NL_SET_ERR_MSG(f->common.extack, "Only chain 0 is supported");
		return -EOPNOTSUPP;
	}

	if (mlxsw_sp_flow_block_is_mixed_bound(block)) {
		NL_SET_ERR_MSG(f->common.extack, "Only not mixed bound blocks are supported");
		return -EOPNOTSUPP;
	}

	mall_entry = kzalloc(sizeof(*mall_entry), GFP_KERNEL);
	if (!mall_entry)
		return -ENOMEM;
	mall_entry->cookie = f->cookie;
	mall_entry->ingress = mlxsw_sp_flow_block_is_ingress_bound(block);

	/* Single action already verified above. */
	act = &f->rule->action.entries[0];

	if (act->id == FLOW_ACTION_MIRRED && protocol == htons(ETH_P_ALL)) {
		mall_entry->type = MLXSW_SP_MALL_ACTION_TYPE_MIRROR;
		mall_entry->mirror.to_dev = act->dev;
	} else if (act->id == FLOW_ACTION_SAMPLE &&
		   protocol == htons(ETH_P_ALL)) {
		/* Hardware limits the 1-in-N sampling rate field. */
		if (act->sample.rate > MLXSW_REG_MPSC_RATE_MAX) {
			NL_SET_ERR_MSG(f->common.extack, "Sample rate not supported");
			err = -EOPNOTSUPP;
			goto errout;
		}
		mall_entry->type = MLXSW_SP_MALL_ACTION_TYPE_SAMPLE;
		mall_entry->sample.psample_group = act->sample.psample_group;
		mall_entry->sample.truncate = act->sample.truncate;
		mall_entry->sample.trunc_size = act->sample.trunc_size;
		mall_entry->sample.rate = act->sample.rate;
	} else {
		err = -EOPNOTSUPP;
		goto errout;
	}

	/* Install the rule on every port the block is bound to. */
	list_for_each_entry(binding, &block->binding_list, list) {
		err = mlxsw_sp_mall_port_rule_add(binding->mlxsw_sp_port,
						  mall_entry);
		if (err)
			goto rollback;
	}

	block->rule_count++;
	/* Counters are deliberately cross-direction: an ingress rule blocks
	 * the block from additionally being bound on egress, and vice versa
	 * (matchall rules cannot be mixed-bound; see the check above).
	 */
	if (mall_entry->ingress)
		block->egress_blocker_rule_count++;
	else
		block->ingress_blocker_rule_count++;
	list_add_tail(&mall_entry->list, &block->mall_list);
	return 0;

rollback:
	/* Undo the per-port installs that succeeded, in reverse order. */
	list_for_each_entry_continue_reverse(binding, &block->binding_list,
					     list)
		mlxsw_sp_mall_port_rule_del(binding->mlxsw_sp_port, mall_entry);
errout:
	kfree(mall_entry);
	return err;
}
227 
/* Remove the matchall rule identified by @f->cookie from @block: unlink it,
 * update the block's rule accounting, uninstall it from every bound port and
 * free it. Freeing is RCU-deferred because the sample RX path may still be
 * reading the entry's sample state.
 */
void mlxsw_sp_mall_destroy(struct mlxsw_sp_flow_block *block,
			   struct tc_cls_matchall_offload *f)
{
	struct mlxsw_sp_flow_block_binding *binding;
	struct mlxsw_sp_mall_entry *mall_entry;

	mall_entry = mlxsw_sp_mall_entry_find(block, f->cookie);
	if (!mall_entry) {
		NL_SET_ERR_MSG(f->common.extack, "Entry not found");
		return;
	}

	list_del(&mall_entry->list);
	/* Mirror of the cross-direction accounting done at replace time. */
	if (mall_entry->ingress)
		block->egress_blocker_rule_count--;
	else
		block->ingress_blocker_rule_count--;
	block->rule_count--;
	list_for_each_entry(binding, &block->binding_list, list)
		mlxsw_sp_mall_port_rule_del(binding->mlxsw_sp_port, mall_entry);
	kfree_rcu(mall_entry, rcu); /* sample RX packets may be in-flight */
}
250 
251 int mlxsw_sp_mall_port_bind(struct mlxsw_sp_flow_block *block,
252 			    struct mlxsw_sp_port *mlxsw_sp_port)
253 {
254 	struct mlxsw_sp_mall_entry *mall_entry;
255 	int err;
256 
257 	list_for_each_entry(mall_entry, &block->mall_list, list) {
258 		err = mlxsw_sp_mall_port_rule_add(mlxsw_sp_port, mall_entry);
259 		if (err)
260 			goto rollback;
261 	}
262 	return 0;
263 
264 rollback:
265 	list_for_each_entry_continue_reverse(mall_entry, &block->mall_list,
266 					     list)
267 		mlxsw_sp_mall_port_rule_del(mlxsw_sp_port, mall_entry);
268 	return err;
269 }
270 
271 void mlxsw_sp_mall_port_unbind(struct mlxsw_sp_flow_block *block,
272 			       struct mlxsw_sp_port *mlxsw_sp_port)
273 {
274 	struct mlxsw_sp_mall_entry *mall_entry;
275 
276 	list_for_each_entry(mall_entry, &block->mall_list, list)
277 		mlxsw_sp_mall_port_rule_del(mlxsw_sp_port, mall_entry);
278 }
279