// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2007-2014 Nicira, Inc.
 */

#include "flow.h"
#include "datapath.h"
#include "flow_netlink.h"
#include <linux/uaccess.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <net/llc_pdu.h>
#include <linux/kernel.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/llc.h>
#include <linux/module.h>
#include <linux/in.h>
#include <linux/rcupdate.h>
#include <linux/cpumask.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/sctp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/rculist.h>
#include <linux/sort.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ndisc.h>

#define TBL_MIN_BUCKETS		1024
#define MASK_ARRAY_SIZE_MIN	16
#define REHASH_INTERVAL		(10 * 60 * HZ)

#define MC_DEFAULT_HASH_ENTRIES	256
#define MC_HASH_SHIFT		8
#define MC_HASH_SEGS		((sizeof(uint32_t) * 8) / MC_HASH_SHIFT)
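
/*
 * Worked example (illustrative, not part of the original source): with
 * MC_HASH_SHIFT == 8, MC_HASH_SEGS is (sizeof(uint32_t) * 8) / 8 == 4,
 * i.e. a single 32-bit skb hash yields four 8-bit cache indexes.
 */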

static struct kmem_cache *flow_cache;
struct kmem_cache *flow_stats_cache __read_mostly;

static u16 range_n_bytes(const struct sw_flow_key_range *range)
{
	return range->end - range->start;
}

void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
		       bool full, const struct sw_flow_mask *mask)
{
	int start = full ? 0 : mask->range.start;
	int len = full ? sizeof *dst : range_n_bytes(&mask->range);
	const long *m = (const long *)((const u8 *)&mask->key + start);
	const long *s = (const long *)((const u8 *)src + start);
	long *d = (long *)((u8 *)dst + start);
	int i;

	/* If 'full' is true then all of 'dst' is fully initialized. Otherwise,
	 * if 'full' is false the memory outside of the 'mask->range' is left
	 * uninitialized. This can be used as an optimization when further
	 * operations on 'dst' only use contents within 'mask->range'.
	 */
	for (i = 0; i < len; i += sizeof(long))
		*d++ = *s++ & *m++;
}
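
/*
 * Illustrative sketch (not part of the original file): how a caller
 * might prepare a masked key before a ranged lookup, mirroring what
 * masked_flow_lookup() does further below. The function name is made up
 * for illustration and the code is not compiled.
 */
#if 0
static void example_masked_key(const struct sw_flow_key *unmasked,
			       const struct sw_flow_mask *mask)
{
	struct sw_flow_key masked_key;

	/* With 'full' == false, only bytes inside mask->range are
	 * initialized; any later hashing or comparison must stay
	 * within that range.
	 */
	ovs_flow_mask_key(&masked_key, unmasked, false, mask);
}
#endif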

struct sw_flow *ovs_flow_alloc(void)
{
	struct sw_flow *flow;
	struct sw_flow_stats *stats;

	flow = kmem_cache_zalloc(flow_cache, GFP_KERNEL);
	if (!flow)
		return ERR_PTR(-ENOMEM);

	flow->stats_last_writer = -1;
	flow->cpu_used_mask = (struct cpumask *)&flow->stats[nr_cpu_ids];

	/* Initialize the default stat node. */
	stats = kmem_cache_alloc_node(flow_stats_cache,
				      GFP_KERNEL | __GFP_ZERO,
				      node_online(0) ? 0 : NUMA_NO_NODE);
	if (!stats)
		goto err;

	spin_lock_init(&stats->lock);

	RCU_INIT_POINTER(flow->stats[0], stats);

	cpumask_set_cpu(0, flow->cpu_used_mask);

	return flow;
err:
	kmem_cache_free(flow_cache, flow);
	return ERR_PTR(-ENOMEM);
}

int ovs_flow_tbl_count(const struct flow_table *table)
{
	return table->count;
}

static void flow_free(struct sw_flow *flow)
{
	unsigned int cpu;

	if (ovs_identifier_is_key(&flow->id))
		kfree(flow->id.unmasked_key);
	if (flow->sf_acts)
		ovs_nla_free_flow_actions((struct sw_flow_actions __force *)
					  flow->sf_acts);

	for_each_cpu(cpu, flow->cpu_used_mask) {
		if (flow->stats[cpu])
			kmem_cache_free(flow_stats_cache,
					(struct sw_flow_stats __force *)flow->stats[cpu]);
	}

	kmem_cache_free(flow_cache, flow);
}

static void rcu_free_flow_callback(struct rcu_head *rcu)
{
	struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu);

	flow_free(flow);
}

void ovs_flow_free(struct sw_flow *flow, bool deferred)
{
	if (!flow)
		return;

	if (deferred)
		call_rcu(&flow->rcu, rcu_free_flow_callback);
	else
		flow_free(flow);
}

static void __table_instance_destroy(struct table_instance *ti)
{
	kvfree(ti->buckets);
	kfree(ti);
}

static struct table_instance *table_instance_alloc(int new_size)
{
	struct table_instance *ti = kmalloc(sizeof(*ti), GFP_KERNEL);
	int i;

	if (!ti)
		return NULL;

	ti->buckets = kvmalloc_array(new_size, sizeof(struct hlist_head),
				     GFP_KERNEL);
	if (!ti->buckets) {
		kfree(ti);
		return NULL;
	}

	for (i = 0; i < new_size; i++)
		INIT_HLIST_HEAD(&ti->buckets[i]);

	ti->n_buckets = new_size;
	ti->node_ver = 0;
	get_random_bytes(&ti->hash_seed, sizeof(u32));

	return ti;
}

static void __mask_array_destroy(struct mask_array *ma)
{
	free_percpu(ma->masks_usage_stats);
	kfree(ma);
}

static void mask_array_rcu_cb(struct rcu_head *rcu)
{
	struct mask_array *ma = container_of(rcu, struct mask_array, rcu);

	__mask_array_destroy(ma);
}

static void tbl_mask_array_reset_counters(struct mask_array *ma)
{
	int i, cpu;

	/* As the per-CPU counters are not atomic, we cannot simply reset
	 * them from another CPU. To still provide an approximately
	 * zero-based counter, we store the value at reset time and
	 * subtract it later when processing.
	 */
	for (i = 0; i < ma->max; i++) {
		ma->masks_usage_zero_cntr[i] = 0;

		for_each_possible_cpu(cpu) {
			struct mask_array_stats *stats;
			unsigned int start;
			u64 counter;

			stats = per_cpu_ptr(ma->masks_usage_stats, cpu);
			do {
				start = u64_stats_fetch_begin(&stats->syncp);
				counter = stats->usage_cntrs[i];
			} while (u64_stats_fetch_retry(&stats->syncp, start));

			ma->masks_usage_zero_cntr[i] += counter;
		}
	}
}
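
/*
 * Illustrative sketch (not part of the original file): deriving an
 * approximately zero-based usage count for mask 'i', the same way
 * ovs_flow_masks_rebalance() does further below. The function name is
 * made up for illustration and the code is not compiled.
 */
#if 0
static u64 example_effective_usage(struct mask_array *ma, int i)
{
	u64 total = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct mask_array_stats *stats;
		unsigned int start;
		u64 counter;

		stats = per_cpu_ptr(ma->masks_usage_stats, cpu);
		do {
			start = u64_stats_fetch_begin(&stats->syncp);
			counter = stats->usage_cntrs[i];
		} while (u64_stats_fetch_retry(&stats->syncp, start));

		total += counter;
	}

	/* Subtract the snapshot taken at the last reset. */
	return total - ma->masks_usage_zero_cntr[i];
}
#endif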

static struct mask_array *tbl_mask_array_alloc(int size)
{
	struct mask_array *new;

	size = max(MASK_ARRAY_SIZE_MIN, size);
	new = kzalloc(struct_size(new, masks, size) +
		      sizeof(u64) * size, GFP_KERNEL);
	if (!new)
		return NULL;

	new->masks_usage_zero_cntr = (u64 *)((u8 *)new +
					     struct_size(new, masks, size));

	new->masks_usage_stats = __alloc_percpu(sizeof(struct mask_array_stats) +
						sizeof(u64) * size,
						__alignof__(u64));
	if (!new->masks_usage_stats) {
		kfree(new);
		return NULL;
	}

	new->count = 0;
	new->max = size;

	return new;
}

static int tbl_mask_array_realloc(struct flow_table *tbl, int size)
{
	struct mask_array *old;
	struct mask_array *new;

	new = tbl_mask_array_alloc(size);
	if (!new)
		return -ENOMEM;

	old = ovsl_dereference(tbl->mask_array);
	if (old) {
		int i;

		for (i = 0; i < old->max; i++) {
			if (ovsl_dereference(old->masks[i]))
				new->masks[new->count++] = old->masks[i];
		}
		call_rcu(&old->rcu, mask_array_rcu_cb);
	}

	rcu_assign_pointer(tbl->mask_array, new);

	return 0;
}

static int tbl_mask_array_add_mask(struct flow_table *tbl,
				   struct sw_flow_mask *new)
{
	struct mask_array *ma = ovsl_dereference(tbl->mask_array);
	int err, ma_count = READ_ONCE(ma->count);

	if (ma_count >= ma->max) {
		err = tbl_mask_array_realloc(tbl, ma->max +
						  MASK_ARRAY_SIZE_MIN);
		if (err)
			return err;

		ma = ovsl_dereference(tbl->mask_array);
	} else {
		/* On every add or delete we need to reset the counters so
		 * every new mask gets a fair chance of being prioritized.
		 */
		tbl_mask_array_reset_counters(ma);
	}

	BUG_ON(ovsl_dereference(ma->masks[ma_count]));

	rcu_assign_pointer(ma->masks[ma_count], new);
	WRITE_ONCE(ma->count, ma_count + 1);

	return 0;
}

static void tbl_mask_array_del_mask(struct flow_table *tbl,
				    struct sw_flow_mask *mask)
{
	struct mask_array *ma = ovsl_dereference(tbl->mask_array);
	int i, ma_count = READ_ONCE(ma->count);

	/* Remove the deleted mask pointers from the array */
	for (i = 0; i < ma_count; i++) {
		if (mask == ovsl_dereference(ma->masks[i]))
			goto found;
	}

	BUG();
	return;

found:
	WRITE_ONCE(ma->count, ma_count - 1);

	rcu_assign_pointer(ma->masks[i], ma->masks[ma_count - 1]);
	RCU_INIT_POINTER(ma->masks[ma_count - 1], NULL);

	kfree_rcu(mask, rcu);

	/* Shrink the mask array if necessary. */
	if (ma->max >= (MASK_ARRAY_SIZE_MIN * 2) &&
	    ma_count <= (ma->max / 3))
		tbl_mask_array_realloc(tbl, ma->max / 2);
	else
		tbl_mask_array_reset_counters(ma);
}
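
/*
 * Worked example (illustrative, not part of the original source): with
 * ma->max == 32 and MASK_ARRAY_SIZE_MIN == 16, the array shrinks to 16
 * slots once a delete leaves ma_count at or below 32 / 3 == 10.
 */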

/* Remove 'mask' from the mask list, if it is not needed any more. */
static void flow_mask_remove(struct flow_table *tbl, struct sw_flow_mask *mask)
{
	if (mask) {
		/* ovs-lock is required to protect mask-refcount and
		 * mask list.
		 */
		ASSERT_OVSL();
		BUG_ON(!mask->ref_count);
		mask->ref_count--;

		if (!mask->ref_count)
			tbl_mask_array_del_mask(tbl, mask);
	}
}

static void __mask_cache_destroy(struct mask_cache *mc)
{
	free_percpu(mc->mask_cache);
	kfree(mc);
}

static void mask_cache_rcu_cb(struct rcu_head *rcu)
{
	struct mask_cache *mc = container_of(rcu, struct mask_cache, rcu);

	__mask_cache_destroy(mc);
}

static struct mask_cache *tbl_mask_cache_alloc(u32 size)
{
	struct mask_cache_entry __percpu *cache = NULL;
	struct mask_cache *new;

	/* Only allow the size to be zero or a power of 2, and ensure it
	 * does not exceed the percpu allocation size.
	 */
	if ((!is_power_of_2(size) && size != 0) ||
	    (size * sizeof(struct mask_cache_entry)) > PCPU_MIN_UNIT_SIZE)
		return NULL;

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return NULL;

	new->cache_size = size;
	if (new->cache_size > 0) {
		cache = __alloc_percpu(array_size(sizeof(struct mask_cache_entry),
						  new->cache_size),
				       __alignof__(struct mask_cache_entry));
		if (!cache) {
			kfree(new);
			return NULL;
		}
	}

	new->mask_cache = cache;
	return new;
}

int ovs_flow_tbl_masks_cache_resize(struct flow_table *table, u32 size)
{
	struct mask_cache *mc = rcu_dereference_ovsl(table->mask_cache);
	struct mask_cache *new;

	if (size == mc->cache_size)
		return 0;

	if ((!is_power_of_2(size) && size != 0) ||
	    (size * sizeof(struct mask_cache_entry)) > PCPU_MIN_UNIT_SIZE)
		return -EINVAL;

	new = tbl_mask_cache_alloc(size);
	if (!new)
		return -ENOMEM;

	rcu_assign_pointer(table->mask_cache, new);
	call_rcu(&mc->rcu, mask_cache_rcu_cb);

	return 0;
}

int ovs_flow_tbl_init(struct flow_table *table)
{
	struct table_instance *ti, *ufid_ti;
	struct mask_cache *mc;
	struct mask_array *ma;

	mc = tbl_mask_cache_alloc(MC_DEFAULT_HASH_ENTRIES);
	if (!mc)
		return -ENOMEM;

	ma = tbl_mask_array_alloc(MASK_ARRAY_SIZE_MIN);
	if (!ma)
		goto free_mask_cache;

	ti = table_instance_alloc(TBL_MIN_BUCKETS);
	if (!ti)
		goto free_mask_array;

	ufid_ti = table_instance_alloc(TBL_MIN_BUCKETS);
	if (!ufid_ti)
		goto free_ti;

	rcu_assign_pointer(table->ti, ti);
	rcu_assign_pointer(table->ufid_ti, ufid_ti);
	rcu_assign_pointer(table->mask_array, ma);
	rcu_assign_pointer(table->mask_cache, mc);
	table->last_rehash = jiffies;
	table->count = 0;
	table->ufid_count = 0;
	return 0;

free_ti:
	__table_instance_destroy(ti);
free_mask_array:
	__mask_array_destroy(ma);
free_mask_cache:
	__mask_cache_destroy(mc);
	return -ENOMEM;
}

static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu)
{
	struct table_instance *ti;

	ti = container_of(rcu, struct table_instance, rcu);
	__table_instance_destroy(ti);
}

static void table_instance_flow_free(struct flow_table *table,
				     struct table_instance *ti,
				     struct table_instance *ufid_ti,
				     struct sw_flow *flow)
{
	hlist_del_rcu(&flow->flow_table.node[ti->node_ver]);
	table->count--;

	if (ovs_identifier_is_ufid(&flow->id)) {
		hlist_del_rcu(&flow->ufid_table.node[ufid_ti->node_ver]);
		table->ufid_count--;
	}

	flow_mask_remove(table, flow->mask);
}

/* Must be called with OVS mutex held. */
void table_instance_flow_flush(struct flow_table *table,
			       struct table_instance *ti,
			       struct table_instance *ufid_ti)
{
	int i;

	for (i = 0; i < ti->n_buckets; i++) {
		struct hlist_head *head = &ti->buckets[i];
		struct hlist_node *n;
		struct sw_flow *flow;

		hlist_for_each_entry_safe(flow, n, head,
					  flow_table.node[ti->node_ver]) {
			table_instance_flow_free(table, ti, ufid_ti, flow);
			ovs_flow_free(flow, true);
		}
	}

	if (WARN_ON(table->count != 0 ||
		    table->ufid_count != 0)) {
		table->count = 0;
		table->ufid_count = 0;
	}
}

static void table_instance_destroy(struct table_instance *ti,
				   struct table_instance *ufid_ti)
{
	call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
	call_rcu(&ufid_ti->rcu, flow_tbl_destroy_rcu_cb);
}

/* No need for locking; this function is called from an RCU callback or
 * the error path.
 */
void ovs_flow_tbl_destroy(struct flow_table *table)
{
	struct table_instance *ti = rcu_dereference_raw(table->ti);
	struct table_instance *ufid_ti = rcu_dereference_raw(table->ufid_ti);
	struct mask_cache *mc = rcu_dereference_raw(table->mask_cache);
	struct mask_array *ma = rcu_dereference_raw(table->mask_array);

	call_rcu(&mc->rcu, mask_cache_rcu_cb);
	call_rcu(&ma->rcu, mask_array_rcu_cb);
	table_instance_destroy(ti, ufid_ti);
}

struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *ti,
				       u32 *bucket, u32 *last)
{
	struct sw_flow *flow;
	struct hlist_head *head;
	int ver;
	int i;

	ver = ti->node_ver;
	while (*bucket < ti->n_buckets) {
		i = 0;
		head = &ti->buckets[*bucket];
		hlist_for_each_entry_rcu(flow, head, flow_table.node[ver]) {
			if (i < *last) {
				i++;
				continue;
			}
			*last = i + 1;
			return flow;
		}
		(*bucket)++;
		*last = 0;
	}

	return NULL;
}

static struct hlist_head *find_bucket(struct table_instance *ti, u32 hash)
{
	hash = jhash_1word(hash, ti->hash_seed);
	return &ti->buckets[hash & (ti->n_buckets - 1)];
}
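
/*
 * Worked example (illustrative, not part of the original source): with
 * n_buckets == 1024, "hash & (ti->n_buckets - 1)" keeps the low ten bits
 * of the seeded jhash. This only distributes flows uniformly because
 * n_buckets is always a power of two (TBL_MIN_BUCKETS, doubled on every
 * expansion).
 */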

static void table_instance_insert(struct table_instance *ti,
				  struct sw_flow *flow)
{
	struct hlist_head *head;

	head = find_bucket(ti, flow->flow_table.hash);
	hlist_add_head_rcu(&flow->flow_table.node[ti->node_ver], head);
}

static void ufid_table_instance_insert(struct table_instance *ti,
				       struct sw_flow *flow)
{
	struct hlist_head *head;

	head = find_bucket(ti, flow->ufid_table.hash);
	hlist_add_head_rcu(&flow->ufid_table.node[ti->node_ver], head);
}

static void flow_table_copy_flows(struct table_instance *old,
				  struct table_instance *new, bool ufid)
{
	int old_ver;
	int i;

	old_ver = old->node_ver;
	new->node_ver = !old_ver;

	/* Insert in new table. */
	for (i = 0; i < old->n_buckets; i++) {
		struct sw_flow *flow;
		struct hlist_head *head = &old->buckets[i];

		if (ufid)
			hlist_for_each_entry_rcu(flow, head,
						 ufid_table.node[old_ver],
						 lockdep_ovsl_is_held())
				ufid_table_instance_insert(new, flow);
		else
			hlist_for_each_entry_rcu(flow, head,
						 flow_table.node[old_ver],
						 lockdep_ovsl_is_held())
				table_instance_insert(new, flow);
	}
}

static struct table_instance *table_instance_rehash(struct table_instance *ti,
						    int n_buckets, bool ufid)
{
	struct table_instance *new_ti;

	new_ti = table_instance_alloc(n_buckets);
	if (!new_ti)
		return NULL;

	flow_table_copy_flows(ti, new_ti, ufid);

	return new_ti;
}

int ovs_flow_tbl_flush(struct flow_table *flow_table)
{
	struct table_instance *old_ti, *new_ti;
	struct table_instance *old_ufid_ti, *new_ufid_ti;

	new_ti = table_instance_alloc(TBL_MIN_BUCKETS);
	if (!new_ti)
		return -ENOMEM;
	new_ufid_ti = table_instance_alloc(TBL_MIN_BUCKETS);
	if (!new_ufid_ti)
		goto err_free_ti;

	old_ti = ovsl_dereference(flow_table->ti);
	old_ufid_ti = ovsl_dereference(flow_table->ufid_ti);

	rcu_assign_pointer(flow_table->ti, new_ti);
	rcu_assign_pointer(flow_table->ufid_ti, new_ufid_ti);
	flow_table->last_rehash = jiffies;

	table_instance_flow_flush(flow_table, old_ti, old_ufid_ti);
	table_instance_destroy(old_ti, old_ufid_ti);
	return 0;

err_free_ti:
	__table_instance_destroy(new_ti);
	return -ENOMEM;
}

static u32 flow_hash(const struct sw_flow_key *key,
		     const struct sw_flow_key_range *range)
{
	const u32 *hash_key = (const u32 *)((const u8 *)key + range->start);

	/* The number of hash bytes must be a multiple of sizeof(u32). */
	int hash_u32s = range_n_bytes(range) >> 2;

	return jhash2(hash_key, hash_u32s, 0);
}
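
/*
 * Worked example (illustrative, not part of the original source): for a
 * mask range covering bytes [0, 16), range_n_bytes() is 16, hash_u32s is
 * 4, and jhash2() mixes exactly four u32 words of the masked key.
 */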

static int flow_key_start(const struct sw_flow_key *key)
{
	if (key->tun_proto)
		return 0;
	else
		return rounddown(offsetof(struct sw_flow_key, phy),
				 sizeof(long));
}

static bool cmp_key(const struct sw_flow_key *key1,
		    const struct sw_flow_key *key2,
		    int key_start, int key_end)
{
	const long *cp1 = (const long *)((const u8 *)key1 + key_start);
	const long *cp2 = (const long *)((const u8 *)key2 + key_start);
	int i;

	for (i = key_start; i < key_end; i += sizeof(long))
		if (*cp1++ ^ *cp2++)
			return false;

	return true;
}

static bool flow_cmp_masked_key(const struct sw_flow *flow,
				const struct sw_flow_key *key,
				const struct sw_flow_key_range *range)
{
	return cmp_key(&flow->key, key, range->start, range->end);
}

static bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow,
				      const struct sw_flow_match *match)
{
	struct sw_flow_key *key = match->key;
	int key_start = flow_key_start(key);
	int key_end = match->range.end;

	BUG_ON(ovs_identifier_is_ufid(&flow->id));
	return cmp_key(flow->id.unmasked_key, key, key_start, key_end);
}

static struct sw_flow *masked_flow_lookup(struct table_instance *ti,
					  const struct sw_flow_key *unmasked,
					  const struct sw_flow_mask *mask,
					  u32 *n_mask_hit)
{
	struct sw_flow *flow;
	struct hlist_head *head;
	u32 hash;
	struct sw_flow_key masked_key;

	ovs_flow_mask_key(&masked_key, unmasked, false, mask);
	hash = flow_hash(&masked_key, &mask->range);
	head = find_bucket(ti, hash);
	(*n_mask_hit)++;

	hlist_for_each_entry_rcu(flow, head, flow_table.node[ti->node_ver],
				 lockdep_ovsl_is_held()) {
		if (flow->mask == mask && flow->flow_table.hash == hash &&
		    flow_cmp_masked_key(flow, &masked_key, &mask->range))
			return flow;
	}
	return NULL;
}

/* Flow lookup does a full lookup of the flow table. It starts with the
 * mask at the index passed in via '*index'.
 * This function MUST be called with BH disabled due to the use of
 * CPU-specific variables.
 */
static struct sw_flow *flow_lookup(struct flow_table *tbl,
				   struct table_instance *ti,
				   struct mask_array *ma,
				   const struct sw_flow_key *key,
				   u32 *n_mask_hit,
				   u32 *n_cache_hit,
				   u32 *index)
{
	struct mask_array_stats *stats = this_cpu_ptr(ma->masks_usage_stats);
	struct sw_flow *flow;
	struct sw_flow_mask *mask;
	int i;

	if (likely(*index < ma->max)) {
		mask = rcu_dereference_ovsl(ma->masks[*index]);
		if (mask) {
			flow = masked_flow_lookup(ti, key, mask, n_mask_hit);
			if (flow) {
				u64_stats_update_begin(&stats->syncp);
				stats->usage_cntrs[*index]++;
				u64_stats_update_end(&stats->syncp);
				(*n_cache_hit)++;
				return flow;
			}
		}
	}

	for (i = 0; i < ma->max; i++) {
		if (i == *index)
			continue;

		mask = rcu_dereference_ovsl(ma->masks[i]);
		if (unlikely(!mask))
			break;

		flow = masked_flow_lookup(ti, key, mask, n_mask_hit);
		if (flow) { /* Found */
			*index = i;
			u64_stats_update_begin(&stats->syncp);
			stats->usage_cntrs[*index]++;
			u64_stats_update_end(&stats->syncp);
			return flow;
		}
	}

	return NULL;
}

/*
 * mask_cache maps a flow to a probable mask. This cache is not tightly
 * coupled, meaning that updates to the mask list can result in
 * inconsistent cache entries in the mask cache.
 * This is a per-CPU cache and is divided into MC_HASH_SEGS segments.
 * In case of a hash collision the entry is hashed into the next segment.
 */
struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
					  const struct sw_flow_key *key,
					  u32 skb_hash,
					  u32 *n_mask_hit,
					  u32 *n_cache_hit)
{
	struct mask_cache *mc = rcu_dereference(tbl->mask_cache);
	struct mask_array *ma = rcu_dereference(tbl->mask_array);
	struct table_instance *ti = rcu_dereference(tbl->ti);
	struct mask_cache_entry *entries, *ce;
	struct sw_flow *flow;
	u32 hash;
	int seg;

	*n_mask_hit = 0;
	*n_cache_hit = 0;
	if (unlikely(!skb_hash || mc->cache_size == 0)) {
		u32 mask_index = 0;
		u32 cache = 0;

		return flow_lookup(tbl, ti, ma, key, n_mask_hit, &cache,
				   &mask_index);
	}

	/* Pre- and post-recirculation flows usually have the same skb_hash
	 * value. To avoid hash collisions, rehash the 'skb_hash' with
	 * 'recirc_id'.
	 */
	if (key->recirc_id)
		skb_hash = jhash_1word(skb_hash, key->recirc_id);

	ce = NULL;
	hash = skb_hash;
	entries = this_cpu_ptr(mc->mask_cache);

	/* Find the cache entry 'ce' to operate on. */
	for (seg = 0; seg < MC_HASH_SEGS; seg++) {
		int index = hash & (mc->cache_size - 1);
		struct mask_cache_entry *e;

		e = &entries[index];
		if (e->skb_hash == skb_hash) {
			flow = flow_lookup(tbl, ti, ma, key, n_mask_hit,
					   n_cache_hit, &e->mask_index);
			if (!flow)
				e->skb_hash = 0;
			return flow;
		}

		if (!ce || e->skb_hash < ce->skb_hash)
			ce = e;  /* A better replacement cache candidate. */

		hash >>= MC_HASH_SHIFT;
	}

	/* Cache miss, do full lookup. */
	flow = flow_lookup(tbl, ti, ma, key, n_mask_hit, n_cache_hit,
			   &ce->mask_index);
	if (flow)
		ce->skb_hash = skb_hash;

	*n_cache_hit = 0;
	return flow;
}
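
/*
 * Illustrative sketch (not part of the original file): how the
 * MC_HASH_SEGS probe above derives successive cache indexes from one
 * 32-bit hash, consuming MC_HASH_SHIFT bits per segment. The function
 * name is made up for illustration and the code is not compiled.
 */
#if 0
static void example_cache_probe(struct mask_cache_entry *entries,
				u32 hash, u32 cache_size)
{
	int seg;

	for (seg = 0; seg < MC_HASH_SEGS; seg++) {
		struct mask_cache_entry *e = &entries[hash & (cache_size - 1)];

		/* ... probe 'e' here; on collision fall through ... */
		hash >>= MC_HASH_SHIFT;
	}
}
#endif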

struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl,
				    const struct sw_flow_key *key)
{
	struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
	struct mask_array *ma = rcu_dereference_ovsl(tbl->mask_array);
	u32 __always_unused n_mask_hit;
	u32 __always_unused n_cache_hit;
	struct sw_flow *flow;
	u32 index = 0;

	/* This function gets called through the netlink interface and
	 * therefore is preemptible. However, flow_lookup() needs to be
	 * called with BH disabled due to CPU-specific variables.
	 */
	local_bh_disable();
	flow = flow_lookup(tbl, ti, ma, key, &n_mask_hit, &n_cache_hit, &index);
	local_bh_enable();
	return flow;
}

struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
					  const struct sw_flow_match *match)
{
	struct mask_array *ma = ovsl_dereference(tbl->mask_array);
	int i;

	/* Always called under ovs-mutex. */
	for (i = 0; i < ma->max; i++) {
		struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
		u32 __always_unused n_mask_hit;
		struct sw_flow_mask *mask;
		struct sw_flow *flow;

		mask = ovsl_dereference(ma->masks[i]);
		if (!mask)
			continue;

		flow = masked_flow_lookup(ti, match->key, mask, &n_mask_hit);
		if (flow && ovs_identifier_is_key(&flow->id) &&
		    ovs_flow_cmp_unmasked_key(flow, match)) {
			return flow;
		}
	}

	return NULL;
}

static u32 ufid_hash(const struct sw_flow_id *sfid)
{
	return jhash(sfid->ufid, sfid->ufid_len, 0);
}

static bool ovs_flow_cmp_ufid(const struct sw_flow *flow,
			      const struct sw_flow_id *sfid)
{
	if (flow->id.ufid_len != sfid->ufid_len)
		return false;

	return !memcmp(flow->id.ufid, sfid->ufid, sfid->ufid_len);
}

bool ovs_flow_cmp(const struct sw_flow *flow,
		  const struct sw_flow_match *match)
{
	if (ovs_identifier_is_ufid(&flow->id))
		return flow_cmp_masked_key(flow, match->key, &match->range);

	return ovs_flow_cmp_unmasked_key(flow, match);
}

struct sw_flow *ovs_flow_tbl_lookup_ufid(struct flow_table *tbl,
					 const struct sw_flow_id *ufid)
{
	struct table_instance *ti = rcu_dereference_ovsl(tbl->ufid_ti);
	struct sw_flow *flow;
	struct hlist_head *head;
	u32 hash;

	hash = ufid_hash(ufid);
	head = find_bucket(ti, hash);
	hlist_for_each_entry_rcu(flow, head, ufid_table.node[ti->node_ver],
				 lockdep_ovsl_is_held()) {
		if (flow->ufid_table.hash == hash &&
		    ovs_flow_cmp_ufid(flow, ufid))
			return flow;
	}
	return NULL;
}

int ovs_flow_tbl_num_masks(const struct flow_table *table)
{
	struct mask_array *ma = rcu_dereference_ovsl(table->mask_array);

	return READ_ONCE(ma->count);
}

u32 ovs_flow_tbl_masks_cache_size(const struct flow_table *table)
{
	struct mask_cache *mc = rcu_dereference_ovsl(table->mask_cache);

	return READ_ONCE(mc->cache_size);
}

static struct table_instance *table_instance_expand(struct table_instance *ti,
						    bool ufid)
{
	return table_instance_rehash(ti, ti->n_buckets * 2, ufid);
}

/* Must be called with OVS mutex held. */
void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
{
	struct table_instance *ti = ovsl_dereference(table->ti);
	struct table_instance *ufid_ti = ovsl_dereference(table->ufid_ti);

	BUG_ON(table->count == 0);
	table_instance_flow_free(table, ti, ufid_ti, flow);
}

static struct sw_flow_mask *mask_alloc(void)
{
	struct sw_flow_mask *mask;

	mask = kmalloc(sizeof(*mask), GFP_KERNEL);
	if (mask)
		mask->ref_count = 1;

	return mask;
}

static bool mask_equal(const struct sw_flow_mask *a,
		       const struct sw_flow_mask *b)
{
	const u8 *a_ = (const u8 *)&a->key + a->range.start;
	const u8 *b_ = (const u8 *)&b->key + b->range.start;

	return (a->range.end == b->range.end)
		&& (a->range.start == b->range.start)
		&& (memcmp(a_, b_, range_n_bytes(&a->range)) == 0);
}

static struct sw_flow_mask *flow_mask_find(const struct flow_table *tbl,
					   const struct sw_flow_mask *mask)
{
	struct mask_array *ma;
	int i;

	ma = ovsl_dereference(tbl->mask_array);
	for (i = 0; i < ma->max; i++) {
		struct sw_flow_mask *t = ovsl_dereference(ma->masks[i]);

		if (t && mask_equal(mask, t))
			return t;
	}

	return NULL;
}

/* Add 'mask' into the mask list, if it is not already there. */
static int flow_mask_insert(struct flow_table *tbl, struct sw_flow *flow,
			    const struct sw_flow_mask *new)
{
	struct sw_flow_mask *mask;

	mask = flow_mask_find(tbl, new);
	if (!mask) {
		/* Allocate a new mask if none exists. */
		mask = mask_alloc();
		if (!mask)
			return -ENOMEM;
		mask->key = new->key;
		mask->range = new->range;

		/* Add mask to mask-list. */
		if (tbl_mask_array_add_mask(tbl, mask)) {
			kfree(mask);
			return -ENOMEM;
		}
	} else {
		BUG_ON(!mask->ref_count);
		mask->ref_count++;
	}

	flow->mask = mask;
	return 0;
}
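
/*
 * Illustrative sketch (not part of the original file): two flows with
 * identical masks end up sharing a single sw_flow_mask, whose ref_count
 * rises to two; flow_mask_remove() frees it only when the count drops
 * back to zero. The function name is made up for illustration, error
 * handling is omitted, and the code is not compiled.
 */
#if 0
static void example_shared_mask(struct flow_table *tbl,
				struct sw_flow *f1, struct sw_flow *f2,
				const struct sw_flow_mask *m)
{
	flow_mask_insert(tbl, f1, m);	/* allocates mask, ref_count == 1 */
	flow_mask_insert(tbl, f2, m);	/* reuses mask,    ref_count == 2 */
}
#endif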

/* Must be called with OVS mutex held. */
static void flow_key_insert(struct flow_table *table, struct sw_flow *flow)
{
	struct table_instance *new_ti = NULL;
	struct table_instance *ti;

	flow->flow_table.hash = flow_hash(&flow->key, &flow->mask->range);
	ti = ovsl_dereference(table->ti);
	table_instance_insert(ti, flow);
	table->count++;

	/* Expand table, if necessary, to make room. */
	if (table->count > ti->n_buckets)
		new_ti = table_instance_expand(ti, false);
	else if (time_after(jiffies, table->last_rehash + REHASH_INTERVAL))
		new_ti = table_instance_rehash(ti, ti->n_buckets, false);

	if (new_ti) {
		rcu_assign_pointer(table->ti, new_ti);
		call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
		table->last_rehash = jiffies;
	}
}

/* Must be called with OVS mutex held. */
static void flow_ufid_insert(struct flow_table *table, struct sw_flow *flow)
{
	struct table_instance *ti;

	flow->ufid_table.hash = ufid_hash(&flow->id);
	ti = ovsl_dereference(table->ufid_ti);
	ufid_table_instance_insert(ti, flow);
	table->ufid_count++;

	/* Expand table, if necessary, to make room. */
	if (table->ufid_count > ti->n_buckets) {
		struct table_instance *new_ti;

		new_ti = table_instance_expand(ti, true);
		if (new_ti) {
			rcu_assign_pointer(table->ufid_ti, new_ti);
			call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
		}
	}
}

/* Must be called with OVS mutex held. */
int ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow,
			const struct sw_flow_mask *mask)
{
	int err;

	err = flow_mask_insert(table, flow, mask);
	if (err)
		return err;
	flow_key_insert(table, flow);
	if (ovs_identifier_is_ufid(&flow->id))
		flow_ufid_insert(table, flow);

	return 0;
}

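/* Sort helper for ovs_flow_masks_rebalance(): descending by counter, so
 * the most frequently used masks end up first in the rebuilt array.
 */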
static int compare_mask_and_count(const void *a, const void *b)
{
	const struct mask_count *mc_a = a;
	const struct mask_count *mc_b = b;

	return (s64)mc_b->counter - (s64)mc_a->counter;
}

/* Must be called with OVS mutex held. */
void ovs_flow_masks_rebalance(struct flow_table *table)
{
	struct mask_array *ma = rcu_dereference_ovsl(table->mask_array);
	struct mask_count *masks_and_count;
	struct mask_array *new;
	int masks_entries = 0;
	int i;

	/* Build array of all current entries with use counters. */
	masks_and_count = kmalloc_array(ma->max, sizeof(*masks_and_count),
					GFP_KERNEL);
	if (!masks_and_count)
		return;

	for (i = 0; i < ma->max; i++) {
		struct sw_flow_mask *mask;
		int cpu;

		mask = rcu_dereference_ovsl(ma->masks[i]);
		if (unlikely(!mask))
			break;

		masks_and_count[i].index = i;
		masks_and_count[i].counter = 0;

		for_each_possible_cpu(cpu) {
			struct mask_array_stats *stats;
			unsigned int start;
			u64 counter;

			stats = per_cpu_ptr(ma->masks_usage_stats, cpu);
			do {
				start = u64_stats_fetch_begin(&stats->syncp);
				counter = stats->usage_cntrs[i];
			} while (u64_stats_fetch_retry(&stats->syncp, start));

			masks_and_count[i].counter += counter;
		}

		/* Subtract the zero count value. */
		masks_and_count[i].counter -= ma->masks_usage_zero_cntr[i];

		/* Rather than calling tbl_mask_array_reset_counters()
		 * below when no change is needed, do it inline here.
		 */
		ma->masks_usage_zero_cntr[i] += masks_and_count[i].counter;
	}

	if (i == 0)
		goto free_mask_entries;

	/* Sort the entries */
	masks_entries = i;
	sort(masks_and_count, masks_entries, sizeof(*masks_and_count),
	     compare_mask_and_count, NULL);

	/* If the order is the same, nothing to do... */
	for (i = 0; i < masks_entries; i++) {
		if (i != masks_and_count[i].index)
			break;
	}
	if (i == masks_entries)
		goto free_mask_entries;

	/* Rebuild the new list in order of usage. */
	new = tbl_mask_array_alloc(ma->max);
	if (!new)
		goto free_mask_entries;

	for (i = 0; i < masks_entries; i++) {
		int index = masks_and_count[i].index;

		if (ovsl_dereference(ma->masks[index]))
			new->masks[new->count++] = ma->masks[index];
	}

	rcu_assign_pointer(table->mask_array, new);
	call_rcu(&ma->rcu, mask_array_rcu_cb);

free_mask_entries:
	kfree(masks_and_count);
}

/* Initializes the flow module.
 * Returns zero if successful or a negative error code.
 */
int ovs_flow_init(void)
{
	BUILD_BUG_ON(__alignof__(struct sw_flow_key) % __alignof__(long));
	BUILD_BUG_ON(sizeof(struct sw_flow_key) % sizeof(long));

	flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow)
				       + (nr_cpu_ids
					  * sizeof(struct sw_flow_stats *))
				       + cpumask_size(),
				       0, 0, NULL);
	if (flow_cache == NULL)
		return -ENOMEM;

	flow_stats_cache
		= kmem_cache_create("sw_flow_stats", sizeof(struct sw_flow_stats),
				    0, SLAB_HWCACHE_ALIGN, NULL);
	if (flow_stats_cache == NULL) {
		kmem_cache_destroy(flow_cache);
		flow_cache = NULL;
		return -ENOMEM;
	}

	return 0;
}

/* Uninitializes the flow module. */
void ovs_flow_exit(void)
{
	kmem_cache_destroy(flow_stats_cache);
	kmem_cache_destroy(flow_cache);
}