// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2007-2014 Nicira, Inc.
 */

#include "flow.h"
#include "datapath.h"
#include "flow_netlink.h"
#include <linux/uaccess.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <net/llc_pdu.h>
#include <linux/kernel.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/llc.h>
#include <linux/module.h>
#include <linux/in.h>
#include <linux/rcupdate.h>
#include <linux/cpumask.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/sctp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/rculist.h>
#include <linux/sort.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ndisc.h>

#define TBL_MIN_BUCKETS		1024
#define MASK_ARRAY_SIZE_MIN	16
#define REHASH_INTERVAL		(10 * 60 * HZ)

#define MC_DEFAULT_HASH_ENTRIES	256
#define MC_HASH_SHIFT		8
#define MC_HASH_SEGS		((sizeof(uint32_t) * 8) / MC_HASH_SHIFT)
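
/* With MC_HASH_SHIFT of 8 and a 32-bit skb hash, MC_HASH_SEGS is 4: each
 * mask cache lookup probes at most four slots, indexed by successive
 * 8-bit slices of the hash (see ovs_flow_tbl_lookup_stats()).
 */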

static struct kmem_cache *flow_cache;
struct kmem_cache *flow_stats_cache __read_mostly;

static u16 range_n_bytes(const struct sw_flow_key_range *range)
{
	return range->end - range->start;
}

void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
		       bool full, const struct sw_flow_mask *mask)
{
	int start = full ? 0 : mask->range.start;
	int len = full ? sizeof *dst : range_n_bytes(&mask->range);
	const long *m = (const long *)((const u8 *)&mask->key + start);
	const long *s = (const long *)((const u8 *)src + start);
	long *d = (long *)((u8 *)dst + start);
	int i;

	/* If 'full' is true then all of 'dst' is fully initialized. Otherwise,
	 * if 'full' is false the memory outside of the 'mask->range' is left
	 * uninitialized. This can be used as an optimization when further
	 * operations on 'dst' only use contents within 'mask->range'.
	 */
	for (i = 0; i < len; i += sizeof(long))
		*d++ = *s++ & *m++;
}
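
/* Illustrative usage, mirroring masked_flow_lookup():
 *
 *	struct sw_flow_key masked_key;
 *
 *	ovs_flow_mask_key(&masked_key, unmasked, false, mask);
 *	hash = flow_hash(&masked_key, &mask->range);
 *
 * Only the longs inside 'mask->range' are written, so 'masked_key' must
 * not be read outside that range afterwards.
 */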

struct sw_flow *ovs_flow_alloc(void)
{
	struct sw_flow *flow;
	struct sw_flow_stats *stats;

	flow = kmem_cache_zalloc(flow_cache, GFP_KERNEL);
	if (!flow)
		return ERR_PTR(-ENOMEM);

	flow->stats_last_writer = -1;
	flow->cpu_used_mask = (struct cpumask *)&flow->stats[nr_cpu_ids];

	/* Initialize the default stat node. */
	stats = kmem_cache_alloc_node(flow_stats_cache,
				      GFP_KERNEL | __GFP_ZERO,
				      node_online(0) ? 0 : NUMA_NO_NODE);
	if (!stats)
		goto err;

	spin_lock_init(&stats->lock);

	RCU_INIT_POINTER(flow->stats[0], stats);

	cpumask_set_cpu(0, flow->cpu_used_mask);

	return flow;
err:
	kmem_cache_free(flow_cache, flow);
	return ERR_PTR(-ENOMEM);
}

int ovs_flow_tbl_count(const struct flow_table *table)
{
	return table->count;
}

static void flow_free(struct sw_flow *flow)
{
	unsigned int cpu;

	if (ovs_identifier_is_key(&flow->id))
		kfree(flow->id.unmasked_key);
	if (flow->sf_acts)
		ovs_nla_free_flow_actions((struct sw_flow_actions __force *)
					  flow->sf_acts);

	for_each_cpu(cpu, flow->cpu_used_mask) {
		if (flow->stats[cpu])
			kmem_cache_free(flow_stats_cache,
					(struct sw_flow_stats __force *)flow->stats[cpu]);
	}

	kmem_cache_free(flow_cache, flow);
}

static void rcu_free_flow_callback(struct rcu_head *rcu)
{
	struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu);

	flow_free(flow);
}

void ovs_flow_free(struct sw_flow *flow, bool deferred)
{
	if (!flow)
		return;

	if (deferred)
		call_rcu(&flow->rcu, rcu_free_flow_callback);
	else
		flow_free(flow);
}

static void __table_instance_destroy(struct table_instance *ti)
{
	kvfree(ti->buckets);
	kfree(ti);
}

static struct table_instance *table_instance_alloc(int new_size)
{
	struct table_instance *ti = kmalloc_obj(*ti);
	int i;

	if (!ti)
		return NULL;

	ti->buckets = kvmalloc_objs(struct hlist_head, new_size);
	if (!ti->buckets) {
		kfree(ti);
		return NULL;
	}

	for (i = 0; i < new_size; i++)
		INIT_HLIST_HEAD(&ti->buckets[i]);

	ti->n_buckets = new_size;
	ti->node_ver = 0;
	get_random_bytes(&ti->hash_seed, sizeof(u32));

	return ti;
}

static void __mask_array_destroy(struct mask_array *ma)
{
	free_percpu(ma->masks_usage_stats);
	kfree(ma);
}

static void mask_array_rcu_cb(struct rcu_head *rcu)
{
	struct mask_array *ma = container_of(rcu, struct mask_array, rcu);

	__mask_array_destroy(ma);
}

static void tbl_mask_array_reset_counters(struct mask_array *ma)
{
	int i, cpu;

	/* The per-CPU counters are not atomic, so we cannot simply reset
	 * them from another CPU. To still provide an approximately
	 * zero-based counter, we record each counter's value at reset time
	 * and subtract it later when processing.
	 */
	for (i = 0; i < ma->max; i++) {
		ma->masks_usage_zero_cntr[i] = 0;

		for_each_possible_cpu(cpu) {
			struct mask_array_stats *stats;
			unsigned int start;
			u64 counter;

			stats = per_cpu_ptr(ma->masks_usage_stats, cpu);
			do {
				start = u64_stats_fetch_begin(&stats->syncp);
				counter = stats->usage_cntrs[i];
			} while (u64_stats_fetch_retry(&stats->syncp, start));

			ma->masks_usage_zero_cntr[i] += counter;
		}
	}
}

static struct mask_array *tbl_mask_array_alloc(int size)
{
	struct mask_array *new;

	size = max(MASK_ARRAY_SIZE_MIN, size);
	new = kzalloc(struct_size(new, masks, size) +
		      sizeof(u64) * size, GFP_KERNEL);
	if (!new)
		return NULL;

	new->masks_usage_zero_cntr = (u64 *)((u8 *)new +
					     struct_size(new, masks, size));

	new->masks_usage_stats = __alloc_percpu(sizeof(struct mask_array_stats) +
						sizeof(u64) * size,
						__alignof__(u64));
	if (!new->masks_usage_stats) {
		kfree(new);
		return NULL;
	}

	new->count = 0;
	new->max = size;

	return new;
}
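
/* Memory layout produced by tbl_mask_array_alloc() (sketch):
 *
 *   kzalloc:        [ struct mask_array | masks[size] | zero_cntr[size] ]
 *   __alloc_percpu: [ struct mask_array_stats | usage_cntrs[size] ] per CPU
 *
 * masks_usage_zero_cntr points just past the flexible masks[] array, so a
 * single allocation covers both the mask pointers and the reset snapshots.
 */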

static int tbl_mask_array_realloc(struct flow_table *tbl, int size)
{
	struct mask_array *old;
	struct mask_array *new;

	new = tbl_mask_array_alloc(size);
	if (!new)
		return -ENOMEM;

	old = ovsl_dereference(tbl->mask_array);
	if (old) {
		int i;

		for (i = 0; i < old->max; i++) {
			if (ovsl_dereference(old->masks[i]))
				new->masks[new->count++] = old->masks[i];
		}
		call_rcu(&old->rcu, mask_array_rcu_cb);
	}

	rcu_assign_pointer(tbl->mask_array, new);

	return 0;
}

static int tbl_mask_array_add_mask(struct flow_table *tbl,
				   struct sw_flow_mask *new)
{
	struct mask_array *ma = ovsl_dereference(tbl->mask_array);
	int err, ma_count = READ_ONCE(ma->count);

	if (ma_count >= ma->max) {
		err = tbl_mask_array_realloc(tbl, ma->max +
						  MASK_ARRAY_SIZE_MIN);
		if (err)
			return err;

		ma = ovsl_dereference(tbl->mask_array);
	} else {
		/* On every add or delete we need to reset the counters so
		 * every new mask gets a fair chance of being prioritized.
		 */
		tbl_mask_array_reset_counters(ma);
	}

	BUG_ON(ovsl_dereference(ma->masks[ma_count]));

	rcu_assign_pointer(ma->masks[ma_count], new);
	WRITE_ONCE(ma->count, ma_count + 1);

	return 0;
}
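
/* The mask array grows linearly, in MASK_ARRAY_SIZE_MIN (16) increments:
 * 16 -> 32 -> 48 -> ... Live mask pointers are compacted to the front of
 * the new array by tbl_mask_array_realloc().
 */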

static void tbl_mask_array_del_mask(struct flow_table *tbl,
				    struct sw_flow_mask *mask)
{
	struct mask_array *ma = ovsl_dereference(tbl->mask_array);
	int i, ma_count = READ_ONCE(ma->count);

	/* Remove the deleted mask pointers from the array */
	for (i = 0; i < ma_count; i++) {
		if (mask == ovsl_dereference(ma->masks[i]))
			goto found;
	}

	BUG();
	return;

found:
	WRITE_ONCE(ma->count, ma_count - 1);

	rcu_assign_pointer(ma->masks[i], ma->masks[ma_count - 1]);
	RCU_INIT_POINTER(ma->masks[ma_count - 1], NULL);

	kfree_rcu(mask, rcu);

	/* Shrink the mask array if necessary. */
	if (ma->max >= (MASK_ARRAY_SIZE_MIN * 2) &&
	    ma_count <= (ma->max / 3))
		tbl_mask_array_realloc(tbl, ma->max / 2);
	else
		tbl_mask_array_reset_counters(ma);
}
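
/* Worked example of the shrink heuristic above: with ma->max of 32, the
 * array is halved to 16 once the mask count drops to 10 or fewer
 * (10 == 32 / 3, rounded down); otherwise only the counters are reset.
 */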

/* Remove 'mask' from the mask list, if it is not needed any more. */
static void flow_mask_remove(struct flow_table *tbl, struct sw_flow_mask *mask)
{
	if (mask) {
		/* ovs-lock is required to protect mask-refcount and
		 * mask list.
		 */
		ASSERT_OVSL();
		BUG_ON(!mask->ref_count);
		mask->ref_count--;

		if (!mask->ref_count)
			tbl_mask_array_del_mask(tbl, mask);
	}
}

static void __mask_cache_destroy(struct mask_cache *mc)
{
	free_percpu(mc->mask_cache);
	kfree(mc);
}

static void mask_cache_rcu_cb(struct rcu_head *rcu)
{
	struct mask_cache *mc = container_of(rcu, struct mask_cache, rcu);

	__mask_cache_destroy(mc);
}

static struct mask_cache *tbl_mask_cache_alloc(u32 size)
{
	struct mask_cache_entry __percpu *cache = NULL;
	struct mask_cache *new;

	/* Only allow the size to be 0 or a power of 2, and do not let it
	 * exceed the per-CPU allocation size.
	 */
	if ((!is_power_of_2(size) && size != 0) ||
	    (size * sizeof(struct mask_cache_entry)) > PCPU_MIN_UNIT_SIZE)
		return NULL;

	new = kzalloc_obj(*new);
	if (!new)
		return NULL;

	new->cache_size = size;
	if (new->cache_size > 0) {
		cache = __alloc_percpu(array_size(sizeof(struct mask_cache_entry),
						  new->cache_size),
				       __alignof__(struct mask_cache_entry));
		if (!cache) {
			kfree(new);
			return NULL;
		}
	}

	new->mask_cache = cache;
	return new;
}
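
/* Sizing note: with the default MC_DEFAULT_HASH_ENTRIES (256) and an
 * 8-byte mask_cache_entry (a u32 skb_hash plus a u32 mask_index), the
 * per-CPU cache is 2 KiB, well below the PCPU_MIN_UNIT_SIZE bound
 * checked above.
 */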
int ovs_flow_tbl_masks_cache_resize(struct flow_table *table, u32 size)
{
	struct mask_cache *mc = rcu_dereference_ovsl(table->mask_cache);
	struct mask_cache *new;

	if (size == mc->cache_size)
		return 0;

	if ((!is_power_of_2(size) && size != 0) ||
	    (size * sizeof(struct mask_cache_entry)) > PCPU_MIN_UNIT_SIZE)
		return -EINVAL;

	new = tbl_mask_cache_alloc(size);
	if (!new)
		return -ENOMEM;

	rcu_assign_pointer(table->mask_cache, new);
	call_rcu(&mc->rcu, mask_cache_rcu_cb);

	return 0;
}

int ovs_flow_tbl_init(struct flow_table *table)
{
	struct table_instance *ti, *ufid_ti;
	struct mask_cache *mc;
	struct mask_array *ma;

	mc = tbl_mask_cache_alloc(MC_DEFAULT_HASH_ENTRIES);
	if (!mc)
		return -ENOMEM;

	ma = tbl_mask_array_alloc(MASK_ARRAY_SIZE_MIN);
	if (!ma)
		goto free_mask_cache;

	ti = table_instance_alloc(TBL_MIN_BUCKETS);
	if (!ti)
		goto free_mask_array;

	ufid_ti = table_instance_alloc(TBL_MIN_BUCKETS);
	if (!ufid_ti)
		goto free_ti;

	rcu_assign_pointer(table->ti, ti);
	rcu_assign_pointer(table->ufid_ti, ufid_ti);
	rcu_assign_pointer(table->mask_array, ma);
	rcu_assign_pointer(table->mask_cache, mc);
	table->last_rehash = jiffies;
	table->count = 0;
	table->ufid_count = 0;
	return 0;

free_ti:
	__table_instance_destroy(ti);
free_mask_array:
	__mask_array_destroy(ma);
free_mask_cache:
	__mask_cache_destroy(mc);
	return -ENOMEM;
}

static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu)
{
	struct table_instance *ti;

	ti = container_of(rcu, struct table_instance, rcu);
	__table_instance_destroy(ti);
}

static void table_instance_flow_free(struct flow_table *table,
				     struct table_instance *ti,
				     struct table_instance *ufid_ti,
				     struct sw_flow *flow)
{
	hlist_del_rcu(&flow->flow_table.node[ti->node_ver]);
	table->count--;

	if (ovs_identifier_is_ufid(&flow->id)) {
		hlist_del_rcu(&flow->ufid_table.node[ufid_ti->node_ver]);
		table->ufid_count--;
	}

	flow_mask_remove(table, flow->mask);
}

/* Must be called with OVS mutex held. */
void table_instance_flow_flush(struct flow_table *table,
			       struct table_instance *ti,
			       struct table_instance *ufid_ti)
{
	int i;

	for (i = 0; i < ti->n_buckets; i++) {
		struct hlist_head *head = &ti->buckets[i];
		struct hlist_node *n;
		struct sw_flow *flow;

		hlist_for_each_entry_safe(flow, n, head,
					  flow_table.node[ti->node_ver]) {
			table_instance_flow_free(table, ti, ufid_ti,
						 flow);
			ovs_flow_free(flow, true);
		}
	}

	if (WARN_ON(table->count != 0 ||
		    table->ufid_count != 0)) {
		table->count = 0;
		table->ufid_count = 0;
	}
}

static void table_instance_destroy(struct table_instance *ti,
				   struct table_instance *ufid_ti)
{
	call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
	call_rcu(&ufid_ti->rcu, flow_tbl_destroy_rcu_cb);
}
/* No need for locking; this function is called from an RCU callback or
 * the error path.
 */
void ovs_flow_tbl_destroy(struct flow_table *table)
{
	struct table_instance *ti = rcu_dereference_raw(table->ti);
	struct table_instance *ufid_ti = rcu_dereference_raw(table->ufid_ti);
	struct mask_cache *mc = rcu_dereference_raw(table->mask_cache);
	struct mask_array *ma = rcu_dereference_raw(table->mask_array);

	call_rcu(&mc->rcu, mask_cache_rcu_cb);
	call_rcu(&ma->rcu, mask_array_rcu_cb);
	table_instance_destroy(ti, ufid_ti);
}

struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *ti,
				       u32 *bucket, u32 *last)
{
	struct sw_flow *flow;
	struct hlist_head *head;
	int ver;
	int i;

	ver = ti->node_ver;
	while (*bucket < ti->n_buckets) {
		i = 0;
		head = &ti->buckets[*bucket];
		hlist_for_each_entry_rcu(flow, head, flow_table.node[ver]) {
			if (i < *last) {
				i++;
				continue;
			}
			*last = i + 1;
			return flow;
		}
		(*bucket)++;
		*last = 0;
	}

	return NULL;
}

static struct hlist_head *find_bucket(struct table_instance *ti, u32 hash)
{
	hash = jhash_1word(hash, ti->hash_seed);
	return &ti->buckets[hash & (ti->n_buckets - 1)];
}
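
/* n_buckets is always a power of two (TBL_MIN_BUCKETS, doubled on each
 * expansion), so 'hash & (n_buckets - 1)' is a cheap modulo. Re-hashing
 * with the per-instance hash_seed keeps bucket distribution independent
 * between table instances.
 */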

static void table_instance_insert(struct table_instance *ti,
				  struct sw_flow *flow)
{
	struct hlist_head *head;

	head = find_bucket(ti, flow->flow_table.hash);
	hlist_add_head_rcu(&flow->flow_table.node[ti->node_ver], head);
}

static void ufid_table_instance_insert(struct table_instance *ti,
				       struct sw_flow *flow)
{
	struct hlist_head *head;

	head = find_bucket(ti, flow->ufid_table.hash);
	hlist_add_head_rcu(&flow->ufid_table.node[ti->node_ver], head);
}

static void flow_table_copy_flows(struct table_instance *old,
				  struct table_instance *new, bool ufid)
{
	int old_ver;
	int i;

	old_ver = old->node_ver;
	new->node_ver = !old_ver;

	/* Insert in new table. */
	for (i = 0; i < old->n_buckets; i++) {
		struct sw_flow *flow;
		struct hlist_head *head = &old->buckets[i];

		if (ufid)
			hlist_for_each_entry_rcu(flow, head,
						 ufid_table.node[old_ver],
						 lockdep_ovsl_is_held())
				ufid_table_instance_insert(new, flow);
		else
			hlist_for_each_entry_rcu(flow, head,
						 flow_table.node[old_ver],
						 lockdep_ovsl_is_held())
				table_instance_insert(new, flow);
	}
}

static struct table_instance *table_instance_rehash(struct table_instance *ti,
						    int n_buckets, bool ufid)
{
	struct table_instance *new_ti;

	new_ti = table_instance_alloc(n_buckets);
	if (!new_ti)
		return NULL;

	flow_table_copy_flows(ti, new_ti, ufid);

	return new_ti;
}

int ovs_flow_tbl_flush(struct flow_table *flow_table)
{
	struct table_instance *old_ti, *new_ti;
	struct table_instance *old_ufid_ti, *new_ufid_ti;

	new_ti = table_instance_alloc(TBL_MIN_BUCKETS);
	if (!new_ti)
		return -ENOMEM;
	new_ufid_ti = table_instance_alloc(TBL_MIN_BUCKETS);
	if (!new_ufid_ti)
		goto err_free_ti;

	old_ti = ovsl_dereference(flow_table->ti);
	old_ufid_ti = ovsl_dereference(flow_table->ufid_ti);

	rcu_assign_pointer(flow_table->ti, new_ti);
	rcu_assign_pointer(flow_table->ufid_ti, new_ufid_ti);
	flow_table->last_rehash = jiffies;

	table_instance_flow_flush(flow_table, old_ti, old_ufid_ti);
	table_instance_destroy(old_ti, old_ufid_ti);
	return 0;

err_free_ti:
	__table_instance_destroy(new_ti);
	return -ENOMEM;
}

static u32 flow_hash(const struct sw_flow_key *key,
		     const struct sw_flow_key_range *range)
{
	const u32 *hash_key = (const u32 *)((const u8 *)key + range->start);

	/* Make sure the number of hash bytes is a multiple of u32. */
	int hash_u32s = range_n_bytes(range) >> 2;

	return jhash2(hash_key, hash_u32s, 0);
}

static int flow_key_start(const struct sw_flow_key *key)
{
	if (key->tun_proto)
		return 0;
	else
		return rounddown(offsetof(struct sw_flow_key, phy),
				 sizeof(long));
}
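
/* For tunneled flows the unmasked-key comparison must start at offset 0 so
 * the tunnel metadata at the front of struct sw_flow_key is included;
 * otherwise it can start at 'phy' (rounded down to a long boundary),
 * skipping the unused tunnel fields.
 */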

static bool cmp_key(const struct sw_flow_key *key1,
		    const struct sw_flow_key *key2,
		    int key_start, int key_end)
{
	const long *cp1 = (const long *)((const u8 *)key1 + key_start);
	const long *cp2 = (const long *)((const u8 *)key2 + key_start);
	int i;

	for (i = key_start; i < key_end; i += sizeof(long))
		if (*cp1++ ^ *cp2++)
			return false;

	return true;
}
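
/* Comparing one long at a time is safe because struct sw_flow_key is
 * long-aligned and long-sized; see the BUILD_BUG_ON()s in ovs_flow_init().
 */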

static bool flow_cmp_masked_key(const struct sw_flow *flow,
				const struct sw_flow_key *key,
				const struct sw_flow_key_range *range)
{
	return cmp_key(&flow->key, key, range->start, range->end);
}

static bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow,
				      const struct sw_flow_match *match)
{
	struct sw_flow_key *key = match->key;
	int key_start = flow_key_start(key);
	int key_end = match->range.end;

	BUG_ON(ovs_identifier_is_ufid(&flow->id));
	return cmp_key(flow->id.unmasked_key, key, key_start, key_end);
}

static struct sw_flow *masked_flow_lookup(struct table_instance *ti,
					  const struct sw_flow_key *unmasked,
					  const struct sw_flow_mask *mask,
					  u32 *n_mask_hit)
{
	struct sw_flow *flow;
	struct hlist_head *head;
	u32 hash;
	struct sw_flow_key masked_key;

	ovs_flow_mask_key(&masked_key, unmasked, false, mask);
	hash = flow_hash(&masked_key, &mask->range);
	head = find_bucket(ti, hash);
	(*n_mask_hit)++;

	hlist_for_each_entry_rcu(flow, head, flow_table.node[ti->node_ver],
				 lockdep_ovsl_is_held()) {
		if (flow->mask == mask && flow->flow_table.hash == hash &&
		    flow_cmp_masked_key(flow, &masked_key, &mask->range))
			return flow;
	}
	return NULL;
}
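
/* Note that *n_mask_hit counts every mask probed, hit or miss; it is a
 * measure of lookup cost that feeds the datapath's megaflow statistics.
 */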

/* Flow lookup does a full lookup of the flow table. It starts with the
 * mask at the index passed in via '*index'.
 * This function MUST be called with BH disabled due to the use of per-CPU
 * variables.
 */
static struct sw_flow *flow_lookup(struct flow_table *tbl,
				   struct table_instance *ti,
				   struct mask_array *ma,
				   const struct sw_flow_key *key,
				   u32 *n_mask_hit,
				   u32 *n_cache_hit,
				   u32 *index)
{
	struct mask_array_stats *stats = this_cpu_ptr(ma->masks_usage_stats);
	struct sw_flow *flow;
	struct sw_flow_mask *mask;
	int i;

	if (likely(*index < ma->max)) {
		mask = rcu_dereference_ovsl(ma->masks[*index]);
		if (mask) {
			flow = masked_flow_lookup(ti, key, mask, n_mask_hit);
			if (flow) {
				u64_stats_update_begin(&stats->syncp);
				stats->usage_cntrs[*index]++;
				u64_stats_update_end(&stats->syncp);
				(*n_cache_hit)++;
				return flow;
			}
		}
	}

	for (i = 0; i < ma->max; i++) {
		if (i == *index)
			continue;

		mask = rcu_dereference_ovsl(ma->masks[i]);
		if (unlikely(!mask))
			break;

		flow = masked_flow_lookup(ti, key, mask, n_mask_hit);
		if (flow) { /* Found */
			*index = i;
			u64_stats_update_begin(&stats->syncp);
			stats->usage_cntrs[*index]++;
			u64_stats_update_end(&stats->syncp);
			return flow;
		}
	}

	return NULL;
}

/* mask_cache maps a flow to its probable mask. This cache is not tightly
 * coupled to the mask list, which means updates to the mask list can
 * result in inconsistent (stale) cache entries.
 * The cache is per-CPU and is divided into MC_HASH_SEGS segments; in case
 * of a hash collision the entry is hashed into the next segment.
 */
struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
					  const struct sw_flow_key *key,
					  u32 skb_hash,
					  u32 *n_mask_hit,
					  u32 *n_cache_hit)
{
	struct mask_cache *mc = rcu_dereference(tbl->mask_cache);
	struct mask_array *ma = rcu_dereference(tbl->mask_array);
	struct table_instance *ti = rcu_dereference(tbl->ti);
	struct mask_cache_entry *entries, *ce;
	struct sw_flow *flow;
	u32 hash;
	int seg;

	*n_mask_hit = 0;
	*n_cache_hit = 0;
	if (unlikely(!skb_hash || mc->cache_size == 0)) {
		u32 mask_index = 0;
		u32 cache = 0;

		return flow_lookup(tbl, ti, ma, key, n_mask_hit, &cache,
				   &mask_index);
	}

	/* Pre- and post-recirculation flows usually have the same skb_hash
	 * value. To avoid hash collisions, rehash the 'skb_hash' with
	 * 'recirc_id'.  */
	if (key->recirc_id)
		skb_hash = jhash_1word(skb_hash, key->recirc_id);

	ce = NULL;
	hash = skb_hash;
	entries = this_cpu_ptr(mc->mask_cache);

	/* Find the cache entry 'ce' to operate on. */
	for (seg = 0; seg < MC_HASH_SEGS; seg++) {
		int index = hash & (mc->cache_size - 1);
		struct mask_cache_entry *e;

		e = &entries[index];
		if (e->skb_hash == skb_hash) {
			flow = flow_lookup(tbl, ti, ma, key, n_mask_hit,
					   n_cache_hit, &e->mask_index);
			if (!flow)
				e->skb_hash = 0;
			return flow;
		}

		if (!ce || e->skb_hash < ce->skb_hash)
			ce = e;  /* A better replacement cache candidate. */

		hash >>= MC_HASH_SHIFT;
	}

	/* Cache miss, do full lookup. */
	flow = flow_lookup(tbl, ti, ma, key, n_mask_hit, n_cache_hit,
			   &ce->mask_index);
	if (flow)
		ce->skb_hash = skb_hash;

	*n_cache_hit = 0;
	return flow;
}

struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl,
				    const struct sw_flow_key *key)
{
	struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
	struct mask_array *ma = rcu_dereference_ovsl(tbl->mask_array);
	u32 __always_unused n_mask_hit;
	u32 __always_unused n_cache_hit;
	struct sw_flow *flow;
	u32 index = 0;

	/* This function gets called through the netlink interface and is
	 * therefore preemptible. However, flow_lookup() needs to be called
	 * with BH disabled because it uses per-CPU variables.
	 */
	local_bh_disable();
	flow = flow_lookup(tbl, ti, ma, key, &n_mask_hit, &n_cache_hit, &index);
	local_bh_enable();
	return flow;
}

struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
					  const struct sw_flow_match *match)
{
	struct mask_array *ma = ovsl_dereference(tbl->mask_array);
	int i;

	/* Always called under ovs-mutex. */
	for (i = 0; i < ma->max; i++) {
		struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
		u32 __always_unused n_mask_hit;
		struct sw_flow_mask *mask;
		struct sw_flow *flow;

		mask = ovsl_dereference(ma->masks[i]);
		if (!mask)
			continue;

		flow = masked_flow_lookup(ti, match->key, mask, &n_mask_hit);
		if (flow && ovs_identifier_is_key(&flow->id) &&
		    ovs_flow_cmp_unmasked_key(flow, match)) {
			return flow;
		}
	}

	return NULL;
}

static u32 ufid_hash(const struct sw_flow_id *sfid)
{
	return jhash(sfid->ufid, sfid->ufid_len, 0);
}

static bool ovs_flow_cmp_ufid(const struct sw_flow *flow,
			      const struct sw_flow_id *sfid)
{
	if (flow->id.ufid_len != sfid->ufid_len)
		return false;

	return !memcmp(flow->id.ufid, sfid->ufid, sfid->ufid_len);
}

bool ovs_flow_cmp(const struct sw_flow *flow,
		  const struct sw_flow_match *match)
{
	if (ovs_identifier_is_ufid(&flow->id))
		return flow_cmp_masked_key(flow, match->key, &match->range);

	return ovs_flow_cmp_unmasked_key(flow, match);
}

struct sw_flow *ovs_flow_tbl_lookup_ufid(struct flow_table *tbl,
					 const struct sw_flow_id *ufid)
{
	struct table_instance *ti = rcu_dereference_ovsl(tbl->ufid_ti);
	struct sw_flow *flow;
	struct hlist_head *head;
	u32 hash;

	hash = ufid_hash(ufid);
	head = find_bucket(ti, hash);
	hlist_for_each_entry_rcu(flow, head, ufid_table.node[ti->node_ver],
				 lockdep_ovsl_is_held()) {
		if (flow->ufid_table.hash == hash &&
		    ovs_flow_cmp_ufid(flow, ufid))
			return flow;
	}
	return NULL;
}

int ovs_flow_tbl_num_masks(const struct flow_table *table)
{
	struct mask_array *ma = rcu_dereference_ovsl(table->mask_array);

	return READ_ONCE(ma->count);
}

u32 ovs_flow_tbl_masks_cache_size(const struct flow_table *table)
{
	struct mask_cache *mc = rcu_dereference_ovsl(table->mask_cache);

	return READ_ONCE(mc->cache_size);
}

static struct table_instance *table_instance_expand(struct table_instance *ti,
						    bool ufid)
{
	return table_instance_rehash(ti, ti->n_buckets * 2, ufid);
}

/* Must be called with OVS mutex held. */
void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
{
	struct table_instance *ti = ovsl_dereference(table->ti);
	struct table_instance *ufid_ti = ovsl_dereference(table->ufid_ti);

	BUG_ON(table->count == 0);
	table_instance_flow_free(table, ti, ufid_ti, flow);
}

static struct sw_flow_mask *mask_alloc(void)
{
	struct sw_flow_mask *mask;

	mask = kmalloc_obj(*mask);
	if (mask)
		mask->ref_count = 1;

	return mask;
}

static bool mask_equal(const struct sw_flow_mask *a,
		       const struct sw_flow_mask *b)
{
	const u8 *a_ = (const u8 *)&a->key + a->range.start;
	const u8 *b_ = (const u8 *)&b->key + b->range.start;

	return  (a->range.end == b->range.end)
		&& (a->range.start == b->range.start)
		&& (memcmp(a_, b_, range_n_bytes(&a->range)) == 0);
}

static struct sw_flow_mask *flow_mask_find(const struct flow_table *tbl,
					   const struct sw_flow_mask *mask)
{
	struct mask_array *ma;
	int i;

	ma = ovsl_dereference(tbl->mask_array);
	for (i = 0; i < ma->max; i++) {
		struct sw_flow_mask *t;

		t = ovsl_dereference(ma->masks[i]);
		if (t && mask_equal(mask, t))
			return t;
	}

	return NULL;
}

/* Add 'mask' into the mask list, if it is not already there. */
static int flow_mask_insert(struct flow_table *tbl, struct sw_flow *flow,
			    const struct sw_flow_mask *new)
{
	struct sw_flow_mask *mask;

	mask = flow_mask_find(tbl, new);
	if (!mask) {
		/* Allocate a new mask if none exists. */
		mask = mask_alloc();
		if (!mask)
			return -ENOMEM;
		mask->key = new->key;
		mask->range = new->range;

		/* Add mask to mask-list. */
		if (tbl_mask_array_add_mask(tbl, mask)) {
			kfree(mask);
			return -ENOMEM;
		}
	} else {
		BUG_ON(!mask->ref_count);
		mask->ref_count++;
	}

	flow->mask = mask;
	return 0;
}

/* Must be called with OVS mutex held. */
static void flow_key_insert(struct flow_table *table, struct sw_flow *flow)
{
	struct table_instance *new_ti = NULL;
	struct table_instance *ti;

	flow->flow_table.hash = flow_hash(&flow->key, &flow->mask->range);
	ti = ovsl_dereference(table->ti);
	table_instance_insert(ti, flow);
	table->count++;

	/* Expand table, if necessary, to make room. */
	if (table->count > ti->n_buckets)
		new_ti = table_instance_expand(ti, false);
	else if (time_after(jiffies, table->last_rehash + REHASH_INTERVAL))
		new_ti = table_instance_rehash(ti, ti->n_buckets, false);

	if (new_ti) {
		rcu_assign_pointer(table->ti, new_ti);
		call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
		table->last_rehash = jiffies;
	}
}

/* Must be called with OVS mutex held. */
static void flow_ufid_insert(struct flow_table *table, struct sw_flow *flow)
{
	struct table_instance *ti;

	flow->ufid_table.hash = ufid_hash(&flow->id);
	ti = ovsl_dereference(table->ufid_ti);
	ufid_table_instance_insert(ti, flow);
	table->ufid_count++;

	/* Expand table, if necessary, to make room. */
	if (table->ufid_count > ti->n_buckets) {
		struct table_instance *new_ti;

		new_ti = table_instance_expand(ti, true);
		if (new_ti) {
			rcu_assign_pointer(table->ufid_ti, new_ti);
			call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
		}
	}
}

/* Must be called with OVS mutex held. */
int ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow,
			const struct sw_flow_mask *mask)
{
	int err;

	err = flow_mask_insert(table, flow, mask);
	if (err)
		return err;
	flow_key_insert(table, flow);
	if (ovs_identifier_is_ufid(&flow->id))
		flow_ufid_insert(table, flow);

	return 0;
}
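
/* Ordering above matters: the mask is inserted first so that flow->mask
 * is valid when flow_key_insert() computes the hash over
 * flow->mask->range.
 */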

static int compare_mask_and_count(const void *a, const void *b)
{
	const struct mask_count *mc_a = a;
	const struct mask_count *mc_b = b;

	return (s64)mc_b->counter - (s64)mc_a->counter;
}
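
/* sort() comparator: descending by use count, so the most heavily used
 * masks end up at the front of the rebuilt mask array.
 */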

/* Must be called with OVS mutex held. */
void ovs_flow_masks_rebalance(struct flow_table *table)
{
	struct mask_array *ma = rcu_dereference_ovsl(table->mask_array);
	struct mask_count *masks_and_count;
	struct mask_array *new;
	int masks_entries = 0;
	int i;

	/* Build array of all current entries with use counters. */
	masks_and_count = kmalloc_objs(*masks_and_count, ma->max);
	if (!masks_and_count)
		return;

	for (i = 0; i < ma->max; i++) {
		struct sw_flow_mask *mask;
		int cpu;

		mask = rcu_dereference_ovsl(ma->masks[i]);
		if (unlikely(!mask))
			break;

		masks_and_count[i].index = i;
		masks_and_count[i].counter = 0;

		for_each_possible_cpu(cpu) {
			struct mask_array_stats *stats;
			unsigned int start;
			u64 counter;

			stats = per_cpu_ptr(ma->masks_usage_stats, cpu);
			do {
				start = u64_stats_fetch_begin(&stats->syncp);
				counter = stats->usage_cntrs[i];
			} while (u64_stats_fetch_retry(&stats->syncp, start));

			masks_and_count[i].counter += counter;
		}

		/* Subtract the zero count value. */
		masks_and_count[i].counter -= ma->masks_usage_zero_cntr[i];

		/* Rather than calling tbl_mask_array_reset_counters()
		 * below when no change is needed, do it inline here.
		 */
		ma->masks_usage_zero_cntr[i] += masks_and_count[i].counter;
	}

	if (i == 0)
		goto free_mask_entries;

	/* Sort the entries */
	masks_entries = i;
	sort(masks_and_count, masks_entries, sizeof(*masks_and_count),
	     compare_mask_and_count, NULL);

	/* If the order is the same, nothing to do... */
	for (i = 0; i < masks_entries; i++) {
		if (i != masks_and_count[i].index)
			break;
	}
	if (i == masks_entries)
		goto free_mask_entries;
	/* Rebuild the mask list in order of usage. */
	new = tbl_mask_array_alloc(ma->max);
	if (!new)
		goto free_mask_entries;

	for (i = 0; i < masks_entries; i++) {
		int index = masks_and_count[i].index;

		if (ovsl_dereference(ma->masks[index]))
			new->masks[new->count++] = ma->masks[index];
	}

	rcu_assign_pointer(table->mask_array, new);
	call_rcu(&ma->rcu, mask_array_rcu_cb);

free_mask_entries:
	kfree(masks_and_count);
}

/* Initializes the flow module.
 * Returns zero if successful or a negative error code. */
int ovs_flow_init(void)
{
	BUILD_BUG_ON(__alignof__(struct sw_flow_key) % __alignof__(long));
	BUILD_BUG_ON(sizeof(struct sw_flow_key) % sizeof(long));

	flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow)
				       + (nr_cpu_ids
					  * sizeof(struct sw_flow_stats *))
				       + cpumask_size(),
				       0, 0, NULL);
	if (flow_cache == NULL)
		return -ENOMEM;

	flow_stats_cache
		= kmem_cache_create("sw_flow_stats", sizeof(struct sw_flow_stats),
				    0, SLAB_HWCACHE_ALIGN, NULL);
	if (flow_stats_cache == NULL) {
		kmem_cache_destroy(flow_cache);
		flow_cache = NULL;
		return -ENOMEM;
	}

	return 0;
}

/* Uninitializes the flow module. */
void ovs_flow_exit(void)
{
	kmem_cache_destroy(flow_stats_cache);
	kmem_cache_destroy(flow_cache);
}