xref: /linux/drivers/net/ethernet/netronome/nfp/flower/metadata.c (revision bd628c1bed7902ec1f24ba0fe70758949146abbe)
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */

#include <linux/hash.h>
#include <linux/hashtable.h>
#include <linux/jhash.h>
#include <linux/vmalloc.h>
#include <net/pkt_cls.h>

#include "cmsg.h"
#include "main.h"
#include "../nfp_app.h"

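/* A mask ID table entry maps the jhash of a flow's mask data to the
 * mask ID programmed into the firmware. Entries are reference counted
 * so that a hardware mask ID is only released once no offloaded flow
 * uses it.
 */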
struct nfp_mask_id_table {
	struct hlist_node link;
	u32 hash_key;
	u32 ref_cnt;
	u8 mask_id;
};

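/* Lookup key for the flow table: flows are keyed on the TC cookie plus
 * the ingress netdev, so the same cookie may be offloaded on different
 * devices without colliding.
 */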
struct nfp_fl_flow_table_cmp_arg {
	struct net_device *netdev;
	unsigned long cookie;
};

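/* Stats context IDs are recycled through a circular buffer free list.
 * Releasing an ID appends it at the head; allocation consumes from the
 * tail, so the least recently freed ID is reused first.
 */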
static int nfp_release_stats_entry(struct nfp_app *app, u32 stats_context_id)
{
	struct nfp_flower_priv *priv = app->priv;
	struct circ_buf *ring;

	ring = &priv->stats_ids.free_list;
	/* Check if buffer is full. */
	if (!CIRC_SPACE(ring->head, ring->tail,
			priv->stats_ring_size * NFP_FL_STATS_ELEM_RS -
			NFP_FL_STATS_ELEM_RS + 1))
		return -ENOBUFS;

	memcpy(&ring->buf[ring->head], &stats_context_id, NFP_FL_STATS_ELEM_RS);
	ring->head = (ring->head + NFP_FL_STATS_ELEM_RS) %
		     (priv->stats_ring_size * NFP_FL_STATS_ELEM_RS);

	return 0;
}

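/* Hand out a free stats context ID. Never-allocated IDs are dispensed
 * first, counting down from the initial pool size; once those are
 * exhausted, previously released IDs are pulled from the free list.
 * Returns -ENOENT when no ID is available.
 *
 * Hypothetical usage sketch (not an actual caller in this file):
 *
 *	u32 ctx;
 *
 *	if (!nfp_get_stats_entry(app, &ctx)) {
 *		... program the flow with ctx ...
 *		nfp_release_stats_entry(app, ctx);
 *	}
 */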
static int nfp_get_stats_entry(struct nfp_app *app, u32 *stats_context_id)
{
	struct nfp_flower_priv *priv = app->priv;
	u32 freed_stats_id, temp_stats_id;
	struct circ_buf *ring;

	ring = &priv->stats_ids.free_list;
	freed_stats_id = priv->stats_ring_size;
	/* Check for unallocated entries first. */
	if (priv->stats_ids.init_unalloc > 0) {
		*stats_context_id = priv->stats_ids.init_unalloc - 1;
		priv->stats_ids.init_unalloc--;
		return 0;
	}

	/* Check if buffer is empty. */
	if (ring->head == ring->tail) {
		*stats_context_id = freed_stats_id;
		return -ENOENT;
	}

	memcpy(&temp_stats_id, &ring->buf[ring->tail], NFP_FL_STATS_ELEM_RS);
	*stats_context_id = temp_stats_id;
	memcpy(&ring->buf[ring->tail], &freed_stats_id, NFP_FL_STATS_ELEM_RS);
	ring->tail = (ring->tail + NFP_FL_STATS_ELEM_RS) %
		     (priv->stats_ring_size * NFP_FL_STATS_ELEM_RS);

	return 0;
}

/* Must be called with either RTNL or rcu_read_lock */
struct nfp_fl_payload *
nfp_flower_search_fl_table(struct nfp_app *app, unsigned long tc_flower_cookie,
			   struct net_device *netdev)
{
	struct nfp_fl_flow_table_cmp_arg flower_cmp_arg;
	struct nfp_flower_priv *priv = app->priv;

	flower_cmp_arg.netdev = netdev;
	flower_cmp_arg.cookie = tc_flower_cookie;

	return rhashtable_lookup_fast(&priv->flow_table, &flower_cmp_arg,
				      nfp_flower_table_params);
}

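/* Handle a flow stats update control message from the firmware. The
 * message payload is an array of nfp_fl_stats_frame records, each
 * carrying a stats context ID plus packet and byte counts that are
 * accumulated into the matching host stats entry.
 */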
void nfp_flower_rx_flow_stats(struct nfp_app *app, struct sk_buff *skb)
{
	unsigned int msg_len = nfp_flower_cmsg_get_data_len(skb);
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_stats_frame *stats;
	unsigned char *msg;
	u32 ctx_id;
	int i;

	msg = nfp_flower_cmsg_get_data(skb);

	spin_lock(&priv->stats_lock);
	for (i = 0; i < msg_len / sizeof(*stats); i++) {
		stats = (struct nfp_fl_stats_frame *)msg + i;
		ctx_id = be32_to_cpu(stats->stats_con_id);
		priv->stats[ctx_id].pkts += be32_to_cpu(stats->pkt_count);
		priv->stats[ctx_id].bytes += be64_to_cpu(stats->byte_count);
		priv->stats[ctx_id].used = jiffies;
	}
	spin_unlock(&priv->stats_lock);
}

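/* Return a mask ID to the circular free list and timestamp it, so that
 * nfp_mask_alloc() can refuse to recycle it until the reuse timeout
 * has elapsed.
 */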
static int nfp_release_mask_id(struct nfp_app *app, u8 mask_id)
{
	struct nfp_flower_priv *priv = app->priv;
	struct circ_buf *ring;

	ring = &priv->mask_ids.mask_id_free_list;
	/* Check if buffer is full. */
	if (CIRC_SPACE(ring->head, ring->tail, NFP_FLOWER_MASK_ENTRY_RS) == 0)
		return -ENOBUFS;

	memcpy(&ring->buf[ring->head], &mask_id, NFP_FLOWER_MASK_ELEMENT_RS);
	ring->head = (ring->head + NFP_FLOWER_MASK_ELEMENT_RS) %
		     (NFP_FLOWER_MASK_ENTRY_RS * NFP_FLOWER_MASK_ELEMENT_RS);

	priv->mask_ids.last_used[mask_id] = ktime_get();

	return 0;
}

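/* Allocate a hardware mask ID. Never-used IDs are handed out first;
 * after that the oldest entry on the free list is considered, but it
 * is only recycled once NFP_FL_MASK_REUSE_TIME_NS has passed since it
 * was released, presumably to give the firmware time to retire the old
 * mask before its ID is reused.
 */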
static int nfp_mask_alloc(struct nfp_app *app, u8 *mask_id)
{
	struct nfp_flower_priv *priv = app->priv;
	ktime_t reuse_timeout;
	struct circ_buf *ring;
	u8 temp_id, freed_id;

	ring = &priv->mask_ids.mask_id_free_list;
	freed_id = NFP_FLOWER_MASK_ENTRY_RS - 1;
	/* Check for unallocated entries first. */
	if (priv->mask_ids.init_unallocated > 0) {
		*mask_id = priv->mask_ids.init_unallocated;
		priv->mask_ids.init_unallocated--;
		return 0;
	}

	/* Check if buffer is empty. */
	if (ring->head == ring->tail)
		goto err_not_found;

	memcpy(&temp_id, &ring->buf[ring->tail], NFP_FLOWER_MASK_ELEMENT_RS);
	*mask_id = temp_id;

	reuse_timeout = ktime_add_ns(priv->mask_ids.last_used[*mask_id],
				     NFP_FL_MASK_REUSE_TIME_NS);

	if (ktime_before(ktime_get(), reuse_timeout))
		goto err_not_found;

	memcpy(&ring->buf[ring->tail], &freed_id, NFP_FLOWER_MASK_ELEMENT_RS);
	ring->tail = (ring->tail + NFP_FLOWER_MASK_ELEMENT_RS) %
		     (NFP_FLOWER_MASK_ENTRY_RS * NFP_FLOWER_MASK_ELEMENT_RS);

	return 0;

err_not_found:
	*mask_id = freed_id;
	return -ENOENT;
}

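/* Install a new mask: allocate a hardware mask ID, hash the mask data
 * and track it with an initial reference count of one. Returns the
 * mask ID (widened from u8) on success or a negative errno.
 */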
static int
nfp_add_mask_table(struct nfp_app *app, char *mask_data, u32 mask_len)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_mask_id_table *mask_entry;
	unsigned long hash_key;
	u8 mask_id;

	if (nfp_mask_alloc(app, &mask_id))
		return -ENOENT;

	mask_entry = kmalloc(sizeof(*mask_entry), GFP_KERNEL);
	if (!mask_entry) {
		nfp_release_mask_id(app, mask_id);
		return -ENOMEM;
	}

	INIT_HLIST_NODE(&mask_entry->link);
	mask_entry->mask_id = mask_id;
	hash_key = jhash(mask_data, mask_len, priv->mask_id_seed);
	mask_entry->hash_key = hash_key;
	mask_entry->ref_cnt = 1;
	hash_add(priv->mask_table, &mask_entry->link, hash_key);

	return mask_id;
}

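/* Look up a mask entry by the jhash of its mask data. Note that only
 * the hash is compared, not the mask data itself, so distinct masks
 * that collide under jhash would alias to the same entry.
 */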
static struct nfp_mask_id_table *
nfp_search_mask_table(struct nfp_app *app, char *mask_data, u32 mask_len)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_mask_id_table *mask_entry;
	unsigned long hash_key;

	hash_key = jhash(mask_data, mask_len, priv->mask_id_seed);

	hash_for_each_possible(priv->mask_table, mask_entry, link, hash_key)
		if (mask_entry->hash_key == hash_key)
			return mask_entry;

	return NULL;
}

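/* Take a reference on an existing mask entry, if one matches. Returns
 * its mask ID or -ENOENT.
 */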
static int
nfp_find_in_mask_table(struct nfp_app *app, char *mask_data, u32 mask_len)
{
	struct nfp_mask_id_table *mask_entry;

	mask_entry = nfp_search_mask_table(app, mask_data, mask_len);
	if (!mask_entry)
		return -ENOENT;

	mask_entry->ref_cnt++;

	/* Casting u8 to int for later use. */
	return mask_entry->mask_id;
}

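/* Resolve a mask ID for a new flow, reusing an existing mask where
 * possible. When a brand new mask had to be installed,
 * NFP_FL_META_FLAG_MANAGE_MASK is set, presumably to tell the firmware
 * that the mask itself must be added alongside the flow. Returns false
 * if no ID could be obtained.
 */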
static bool
nfp_check_mask_add(struct nfp_app *app, char *mask_data, u32 mask_len,
		   u8 *meta_flags, u8 *mask_id)
{
	int id;

	id = nfp_find_in_mask_table(app, mask_data, mask_len);
	if (id < 0) {
		id = nfp_add_mask_table(app, mask_data, mask_len);
		if (id < 0)
			return false;
		*meta_flags |= NFP_FL_META_FLAG_MANAGE_MASK;
	}
	*mask_id = id;

	return true;
}

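/* Drop a reference on a flow's mask. If this was the last user, the
 * table entry is freed, the hardware mask ID is released and
 * NFP_FL_META_FLAG_MANAGE_MASK is set again, this time marking the
 * mask for removal. Returns false if the mask was not found.
 */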
static bool
nfp_check_mask_remove(struct nfp_app *app, char *mask_data, u32 mask_len,
		      u8 *meta_flags, u8 *mask_id)
{
	struct nfp_mask_id_table *mask_entry;

	mask_entry = nfp_search_mask_table(app, mask_data, mask_len);
	if (!mask_entry)
		return false;

	if (meta_flags)
		*meta_flags &= ~NFP_FL_META_FLAG_MANAGE_MASK;

	*mask_id = mask_entry->mask_id;
	mask_entry->ref_cnt--;
	if (!mask_entry->ref_cnt) {
		hash_del(&mask_entry->link);
		nfp_release_mask_id(app, *mask_id);
		kfree(mask_entry);
		if (meta_flags)
			*meta_flags |= NFP_FL_META_FLAG_MANAGE_MASK;
	}

	return true;
}

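/* Prepare the metadata that accompanies a flow add: allocate a stats
 * context, resolve the mask ID, stamp a monotonically increasing flow
 * version and reset the host stats entry. If a flow with the same
 * cookie already exists on this netdev, all resources taken here are
 * rolled back and -EEXIST is returned.
 */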
int nfp_compile_flow_metadata(struct nfp_app *app,
			      struct tc_cls_flower_offload *flow,
			      struct nfp_fl_payload *nfp_flow,
			      struct net_device *netdev)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_payload *check_entry;
	u8 new_mask_id;
	u32 stats_cxt;

	if (nfp_get_stats_entry(app, &stats_cxt))
		return -ENOENT;

	nfp_flow->meta.host_ctx_id = cpu_to_be32(stats_cxt);
	nfp_flow->meta.host_cookie = cpu_to_be64(flow->cookie);
	nfp_flow->ingress_dev = netdev;

	new_mask_id = 0;
	if (!nfp_check_mask_add(app, nfp_flow->mask_data,
				nfp_flow->meta.mask_len,
				&nfp_flow->meta.flags, &new_mask_id)) {
		if (nfp_release_stats_entry(app, stats_cxt))
			return -EINVAL;
		return -ENOENT;
	}

	nfp_flow->meta.flow_version = cpu_to_be64(priv->flower_version);
	priv->flower_version++;

	/* Update flow payload with mask ids. */
	nfp_flow->unmasked_data[NFP_FL_MASK_ID_LOCATION] = new_mask_id;
	priv->stats[stats_cxt].pkts = 0;
	priv->stats[stats_cxt].bytes = 0;
	priv->stats[stats_cxt].used = jiffies;

	check_entry = nfp_flower_search_fl_table(app, flow->cookie, netdev);
	if (check_entry) {
		if (nfp_release_stats_entry(app, stats_cxt))
			return -EINVAL;

		if (!nfp_check_mask_remove(app, nfp_flow->mask_data,
					   nfp_flow->meta.mask_len,
					   NULL, &new_mask_id))
			return -EINVAL;

		return -EEXIST;
	}

	return 0;
}

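/* Refresh the metadata of an existing flow before it is re-sent to the
 * firmware: drop the reference on its old mask, bump the flow version
 * and release its stats context ID back to the free list.
 */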
int nfp_modify_flow_metadata(struct nfp_app *app,
			     struct nfp_fl_payload *nfp_flow)
{
	struct nfp_flower_priv *priv = app->priv;
	u8 new_mask_id = 0;
	u32 temp_ctx_id;

	nfp_check_mask_remove(app, nfp_flow->mask_data,
			      nfp_flow->meta.mask_len, &nfp_flow->meta.flags,
			      &new_mask_id);

	nfp_flow->meta.flow_version = cpu_to_be64(priv->flower_version);
	priv->flower_version++;

	/* Update flow payload with mask ids. */
	nfp_flow->unmasked_data[NFP_FL_MASK_ID_LOCATION] = new_mask_id;

	/* Release the stats ctx id. */
	temp_ctx_id = be32_to_cpu(nfp_flow->meta.host_ctx_id);

	return nfp_release_stats_entry(app, temp_ctx_id);
}

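/* rhashtable callbacks: flows hash on the TC cookie alone, while the
 * compare function additionally matches the ingress netdev. Per
 * rhashtable convention the compare callback returns 0 on a match.
 */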
static int nfp_fl_obj_cmpfn(struct rhashtable_compare_arg *arg,
			    const void *obj)
{
	const struct nfp_fl_flow_table_cmp_arg *cmp_arg = arg->key;
	const struct nfp_fl_payload *flow_entry = obj;

	if (flow_entry->ingress_dev == cmp_arg->netdev)
		return flow_entry->tc_flower_cookie != cmp_arg->cookie;

	return 1;
}

static u32 nfp_fl_obj_hashfn(const void *data, u32 len, u32 seed)
{
	const struct nfp_fl_payload *flower_entry = data;

	return jhash2((u32 *)&flower_entry->tc_flower_cookie,
		      sizeof(flower_entry->tc_flower_cookie) / sizeof(u32),
		      seed);
}

static u32 nfp_fl_key_hashfn(const void *data, u32 len, u32 seed)
{
	const struct nfp_fl_flow_table_cmp_arg *cmp_arg = data;

	return jhash2((u32 *)&cmp_arg->cookie,
		      sizeof(cmp_arg->cookie) / sizeof(u32), seed);
}

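/* No key_len is set: with both hashfn and obj_hashfn supplied,
 * rhashtable relies entirely on the custom hash callbacks above.
 */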
const struct rhashtable_params nfp_flower_table_params = {
	.head_offset		= offsetof(struct nfp_fl_payload, fl_node),
	.hashfn			= nfp_fl_key_hashfn,
	.obj_cmpfn		= nfp_fl_obj_cmpfn,
	.obj_hashfn		= nfp_fl_obj_hashfn,
	.automatic_shrinking	= true,
};

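/* Allocate all metadata state: the mask and flow tables, the circular
 * free lists for mask and stats IDs, the per-mask-ID reuse timestamps
 * and the host stats array. host_ctx_count seeds the pool of
 * never-allocated stats context IDs.
 */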
int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count)
{
	struct nfp_flower_priv *priv = app->priv;
	int err;

	hash_init(priv->mask_table);

	err = rhashtable_init(&priv->flow_table, &nfp_flower_table_params);
	if (err)
		return err;

	get_random_bytes(&priv->mask_id_seed, sizeof(priv->mask_id_seed));

	/* Init ring buffer and unallocated mask_ids. */
	priv->mask_ids.mask_id_free_list.buf =
		kmalloc_array(NFP_FLOWER_MASK_ENTRY_RS,
			      NFP_FLOWER_MASK_ELEMENT_RS, GFP_KERNEL);
	if (!priv->mask_ids.mask_id_free_list.buf)
		goto err_free_flow_table;

	priv->mask_ids.init_unallocated = NFP_FLOWER_MASK_ENTRY_RS - 1;

	/* Init timestamps for mask ids. */
	priv->mask_ids.last_used =
		kmalloc_array(NFP_FLOWER_MASK_ENTRY_RS,
			      sizeof(*priv->mask_ids.last_used), GFP_KERNEL);
	if (!priv->mask_ids.last_used)
		goto err_free_mask_id;

	/* Init ring buffer and unallocated stats_ids. */
	priv->stats_ids.free_list.buf =
		vmalloc(array_size(NFP_FL_STATS_ELEM_RS,
				   priv->stats_ring_size));
	if (!priv->stats_ids.free_list.buf)
		goto err_free_last_used;

	priv->stats_ids.init_unalloc = host_ctx_count;

	priv->stats = kvmalloc_array(priv->stats_ring_size,
				     sizeof(struct nfp_fl_stats), GFP_KERNEL);
	if (!priv->stats)
		goto err_free_ring_buf;

	spin_lock_init(&priv->stats_lock);

	return 0;

err_free_ring_buf:
	vfree(priv->stats_ids.free_list.buf);
err_free_last_used:
	kfree(priv->mask_ids.last_used);
err_free_mask_id:
	kfree(priv->mask_ids.mask_id_free_list.buf);
err_free_flow_table:
	rhashtable_destroy(&priv->flow_table);
	return -ENOMEM;
}

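/* Tear down everything nfp_flower_metadata_init() set up. The flow
 * table is expected to be empty by now; nfp_check_rhashtable_empty()
 * is passed as the free callback to flag any leftover entries.
 */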
void nfp_flower_metadata_cleanup(struct nfp_app *app)
{
	struct nfp_flower_priv *priv = app->priv;

	if (!priv)
		return;

	rhashtable_free_and_destroy(&priv->flow_table,
				    nfp_check_rhashtable_empty, NULL);
	kvfree(priv->stats);
	kfree(priv->mask_ids.mask_id_free_list.buf);
	kfree(priv->mask_ids.last_used);
	vfree(priv->stats_ids.free_list.buf);
}