xref: /linux/net/openvswitch/flow_table.c (revision 96ac6d435100450f0565708d9b885ea2a7400e0a)
/*
 * Copyright (c) 2007-2014 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#include "flow.h"
#include "datapath.h"
#include "flow_netlink.h"
#include <linux/uaccess.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <net/llc_pdu.h>
#include <linux/kernel.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/llc.h>
#include <linux/module.h>
#include <linux/in.h>
#include <linux/rcupdate.h>
#include <linux/cpumask.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/sctp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/rculist.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ndisc.h>

#define TBL_MIN_BUCKETS		1024
#define REHASH_INTERVAL		(10 * 60 * HZ)

static struct kmem_cache *flow_cache;
struct kmem_cache *flow_stats_cache __read_mostly;

static u16 range_n_bytes(const struct sw_flow_key_range *range)
{
	return range->end - range->start;
}

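/* Copy 'src' into 'dst', ANDing each long-sized word with the
 * corresponding word of 'mask->key'. When 'full' is false, only the
 * bytes inside 'mask->range' are masked and copied.
 */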
void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
		       bool full, const struct sw_flow_mask *mask)
{
	int start = full ? 0 : mask->range.start;
	int len = full ? sizeof(*dst) : range_n_bytes(&mask->range);
	const long *m = (const long *)((const u8 *)&mask->key + start);
	const long *s = (const long *)((const u8 *)src + start);
	long *d = (long *)((u8 *)dst + start);
	int i;

	/* If 'full' is true then all of 'dst' is fully initialized. Otherwise,
	 * if 'full' is false the memory outside of the 'mask->range' is left
	 * uninitialized. This can be used as an optimization when further
	 * operations on 'dst' only use contents within 'mask->range'.
	 */
	for (i = 0; i < len; i += sizeof(long))
		*d++ = *s++ & *m++;
}

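/* Allocate a flow together with a zeroed stats node for CPU 0, so the
 * first packet to hit the flow always has a stats entry to update.
 */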
struct sw_flow *ovs_flow_alloc(void)
{
	struct sw_flow *flow;
	struct flow_stats *stats;

	flow = kmem_cache_zalloc(flow_cache, GFP_KERNEL);
	if (!flow)
		return ERR_PTR(-ENOMEM);

	flow->stats_last_writer = -1;

	/* Initialize the default stat node. */
	stats = kmem_cache_alloc_node(flow_stats_cache,
				      GFP_KERNEL | __GFP_ZERO,
				      node_online(0) ? 0 : NUMA_NO_NODE);
	if (!stats)
		goto err;

	spin_lock_init(&stats->lock);

	RCU_INIT_POINTER(flow->stats[0], stats);

	cpumask_set_cpu(0, &flow->cpu_used_mask);

	return flow;
err:
	kmem_cache_free(flow_cache, flow);
	return ERR_PTR(-ENOMEM);
}

int ovs_flow_tbl_count(const struct flow_table *table)
{
	return table->count;
}

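/* Free a flow and everything it owns: the unmasked key (when the flow
 * is identified by key rather than by UFID), the actions, and every
 * per-CPU stats node that was allocated for it.
 */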
static void flow_free(struct sw_flow *flow)
{
	int cpu;

	if (ovs_identifier_is_key(&flow->id))
		kfree(flow->id.unmasked_key);
	if (flow->sf_acts)
		ovs_nla_free_flow_actions((struct sw_flow_actions __force *)flow->sf_acts);
	/* Open-code the iteration so that CPU 0 is always considered. */
	for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, &flow->cpu_used_mask))
		if (flow->stats[cpu])
			kmem_cache_free(flow_stats_cache,
					(struct flow_stats __force *)flow->stats[cpu]);
	kmem_cache_free(flow_cache, flow);
}

static void rcu_free_flow_callback(struct rcu_head *rcu)
{
	struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu);

	flow_free(flow);
}

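/* Free 'flow' immediately, or after an RCU grace period when 'deferred'
 * is set and readers may still be traversing the table.
 */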
void ovs_flow_free(struct sw_flow *flow, bool deferred)
{
	if (!flow)
		return;

	if (deferred)
		call_rcu(&flow->rcu, rcu_free_flow_callback);
	else
		flow_free(flow);
}

static void __table_instance_destroy(struct table_instance *ti)
{
	kvfree(ti->buckets);
	kfree(ti);
}

static struct table_instance *table_instance_alloc(int new_size)
{
	struct table_instance *ti = kmalloc(sizeof(*ti), GFP_KERNEL);
	int i;

	if (!ti)
		return NULL;

	ti->buckets = kvmalloc_array(new_size, sizeof(struct hlist_head),
				     GFP_KERNEL);
	if (!ti->buckets) {
		kfree(ti);
		return NULL;
	}

	for (i = 0; i < new_size; i++)
		INIT_HLIST_HEAD(&ti->buckets[i]);

	ti->n_buckets = new_size;
	ti->node_ver = 0;
	ti->keep_flows = false;
	get_random_bytes(&ti->hash_seed, sizeof(u32));

	return ti;
}

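/* Set up a flow table with two table instances: one hashed by masked
 * flow key, the other hashed by unique flow identifier (UFID).
 */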
int ovs_flow_tbl_init(struct flow_table *table)
{
	struct table_instance *ti, *ufid_ti;

	ti = table_instance_alloc(TBL_MIN_BUCKETS);
	if (!ti)
		return -ENOMEM;

	ufid_ti = table_instance_alloc(TBL_MIN_BUCKETS);
	if (!ufid_ti)
		goto free_ti;

	rcu_assign_pointer(table->ti, ti);
	rcu_assign_pointer(table->ufid_ti, ufid_ti);
	INIT_LIST_HEAD(&table->mask_list);
	table->last_rehash = jiffies;
	table->count = 0;
	table->ufid_count = 0;
	return 0;

free_ti:
	__table_instance_destroy(ti);
	return -ENOMEM;
}

static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu)
{
	struct table_instance *ti = container_of(rcu, struct table_instance, rcu);

	__table_instance_destroy(ti);
}

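/* Destroy both table instances, freeing the flows they contain unless
 * 'ti->keep_flows' indicates they were migrated to a newer instance.
 */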
static void table_instance_destroy(struct table_instance *ti,
				   struct table_instance *ufid_ti,
				   bool deferred)
{
	int i;

	if (!ti)
		return;

	BUG_ON(!ufid_ti);
	if (ti->keep_flows)
		goto skip_flows;

	for (i = 0; i < ti->n_buckets; i++) {
		struct sw_flow *flow;
		struct hlist_head *head = &ti->buckets[i];
		struct hlist_node *n;
		int ver = ti->node_ver;
		int ufid_ver = ufid_ti->node_ver;

		hlist_for_each_entry_safe(flow, n, head, flow_table.node[ver]) {
			hlist_del_rcu(&flow->flow_table.node[ver]);
			if (ovs_identifier_is_ufid(&flow->id))
				hlist_del_rcu(&flow->ufid_table.node[ufid_ver]);
			ovs_flow_free(flow, deferred);
		}
	}

skip_flows:
	if (deferred) {
		call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
		call_rcu(&ufid_ti->rcu, flow_tbl_destroy_rcu_cb);
	} else {
		__table_instance_destroy(ti);
		__table_instance_destroy(ufid_ti);
	}
}

/* No need for locking: this function is called from an RCU callback or
 * from an error path.
 */
void ovs_flow_tbl_destroy(struct flow_table *table)
{
	struct table_instance *ti = rcu_dereference_raw(table->ti);
	struct table_instance *ufid_ti = rcu_dereference_raw(table->ufid_ti);

	table_instance_destroy(ti, ufid_ti, false);
}

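/* Walk the flows in 'ti'. '*bucket' and '*last' form a cursor recording
 * how far the previous call got, so a flow dump can be resumed across
 * multiple calls.
 */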
struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *ti,
				       u32 *bucket, u32 *last)
{
	struct sw_flow *flow;
	struct hlist_head *head;
	int ver;
	int i;

	ver = ti->node_ver;
	while (*bucket < ti->n_buckets) {
		i = 0;
		head = &ti->buckets[*bucket];
		hlist_for_each_entry_rcu(flow, head, flow_table.node[ver]) {
			if (i < *last) {
				i++;
				continue;
			}
			*last = i + 1;
			return flow;
		}
		(*bucket)++;
		*last = 0;
	}

	return NULL;
}

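/* Map 'hash' to a bucket. The bucket count is always a power of two
 * (TBL_MIN_BUCKETS, doubled on each expansion), so the bitwise AND is
 * equivalent to a modulo; 'hash_seed' perturbs the distribution per
 * table instance.
 */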
static struct hlist_head *find_bucket(struct table_instance *ti, u32 hash)
{
	hash = jhash_1word(hash, ti->hash_seed);
	return &ti->buckets[hash & (ti->n_buckets - 1)];
}

static void table_instance_insert(struct table_instance *ti,
				  struct sw_flow *flow)
{
	struct hlist_head *head;

	head = find_bucket(ti, flow->flow_table.hash);
	hlist_add_head_rcu(&flow->flow_table.node[ti->node_ver], head);
}

static void ufid_table_instance_insert(struct table_instance *ti,
				       struct sw_flow *flow)
{
	struct hlist_head *head;

	head = find_bucket(ti, flow->ufid_table.hash);
	hlist_add_head_rcu(&flow->ufid_table.node[ti->node_ver], head);
}

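/* Relink every flow from 'old' into 'new'. Each flow embeds two hlist
 * nodes per table ('node[0]' and 'node[1]'); 'new' uses the version that
 * 'old' is not using, so the old links stay valid for concurrent RCU
 * readers while the flows are reinserted.
 */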
static void flow_table_copy_flows(struct table_instance *old,
				  struct table_instance *new, bool ufid)
{
	int old_ver;
	int i;

	old_ver = old->node_ver;
	new->node_ver = !old_ver;

	/* Insert in new table. */
	for (i = 0; i < old->n_buckets; i++) {
		struct sw_flow *flow;
		struct hlist_head *head = &old->buckets[i];

		if (ufid)
			hlist_for_each_entry(flow, head,
					     ufid_table.node[old_ver])
				ufid_table_instance_insert(new, flow);
		else
			hlist_for_each_entry(flow, head,
					     flow_table.node[old_ver])
				table_instance_insert(new, flow);
	}

	old->keep_flows = true;
}

static struct table_instance *table_instance_rehash(struct table_instance *ti,
						    int n_buckets, bool ufid)
{
	struct table_instance *new_ti;

	new_ti = table_instance_alloc(n_buckets);
	if (!new_ti)
		return NULL;

	flow_table_copy_flows(ti, new_ti, ufid);

	return new_ti;
}

int ovs_flow_tbl_flush(struct flow_table *flow_table)
{
	struct table_instance *old_ti, *new_ti;
	struct table_instance *old_ufid_ti, *new_ufid_ti;

	new_ti = table_instance_alloc(TBL_MIN_BUCKETS);
	if (!new_ti)
		return -ENOMEM;
	new_ufid_ti = table_instance_alloc(TBL_MIN_BUCKETS);
	if (!new_ufid_ti)
		goto err_free_ti;

	old_ti = ovsl_dereference(flow_table->ti);
	old_ufid_ti = ovsl_dereference(flow_table->ufid_ti);

	rcu_assign_pointer(flow_table->ti, new_ti);
	rcu_assign_pointer(flow_table->ufid_ti, new_ufid_ti);
	flow_table->last_rehash = jiffies;
	flow_table->count = 0;
	flow_table->ufid_count = 0;

	table_instance_destroy(old_ti, old_ufid_ti, true);
	return 0;

err_free_ti:
	__table_instance_destroy(new_ti);
	return -ENOMEM;
}

static u32 flow_hash(const struct sw_flow_key *key,
		     const struct sw_flow_key_range *range)
{
	int key_start = range->start;
	int key_end = range->end;
	const u32 *hash_key = (const u32 *)((const u8 *)key + key_start);
	int hash_u32s = (key_end - key_start) >> 2;

	/* Make sure the number of hash bytes is a multiple of sizeof(u32). */
	BUILD_BUG_ON(sizeof(long) % sizeof(u32));

	return jhash2(hash_key, hash_u32s, 0);
}

static int flow_key_start(const struct sw_flow_key *key)
{
	if (key->tun_proto)
		return 0;
	else
		return rounddown(offsetof(struct sw_flow_key, phy),
				 sizeof(long));
}

static bool cmp_key(const struct sw_flow_key *key1,
		    const struct sw_flow_key *key2,
		    int key_start, int key_end)
{
	const long *cp1 = (const long *)((const u8 *)key1 + key_start);
	const long *cp2 = (const long *)((const u8 *)key2 + key_start);
	long diffs = 0;
	int i;

	for (i = key_start; i < key_end; i += sizeof(long))
		diffs |= *cp1++ ^ *cp2++;

	return diffs == 0;
}

static bool flow_cmp_masked_key(const struct sw_flow *flow,
				const struct sw_flow_key *key,
				const struct sw_flow_key_range *range)
{
	return cmp_key(&flow->key, key, range->start, range->end);
}

static bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow,
				      const struct sw_flow_match *match)
{
	struct sw_flow_key *key = match->key;
	int key_start = flow_key_start(key);
	int key_end = match->range.end;

	BUG_ON(ovs_identifier_is_ufid(&flow->id));
	return cmp_key(flow->id.unmasked_key, key, key_start, key_end);
}

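/* Look up 'unmasked' under one specific 'mask': apply the mask, hash
 * only the bytes inside 'mask->range', and compare candidates in the
 * bucket. A match must also reference the same mask, since different
 * masks can yield identical masked keys.
 */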
static struct sw_flow *masked_flow_lookup(struct table_instance *ti,
					  const struct sw_flow_key *unmasked,
					  const struct sw_flow_mask *mask)
{
	struct sw_flow *flow;
	struct hlist_head *head;
	u32 hash;
	struct sw_flow_key masked_key;

	ovs_flow_mask_key(&masked_key, unmasked, false, mask);
	hash = flow_hash(&masked_key, &mask->range);
	head = find_bucket(ti, hash);
	hlist_for_each_entry_rcu(flow, head, flow_table.node[ti->node_ver]) {
		if (flow->mask == mask && flow->flow_table.hash == hash &&
		    flow_cmp_masked_key(flow, &masked_key, &mask->range))
			return flow;
	}
	return NULL;
}

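/* Packet-path lookup: try each mask on the list until one yields a
 * match. '*n_mask_hit' reports how many masks were tried, which the
 * caller can use for mask-hit statistics.
 */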
struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
					  const struct sw_flow_key *key,
					  u32 *n_mask_hit)
{
	struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
	struct sw_flow_mask *mask;
	struct sw_flow *flow;

	*n_mask_hit = 0;
	list_for_each_entry_rcu(mask, &tbl->mask_list, list) {
		(*n_mask_hit)++;
		flow = masked_flow_lookup(ti, key, mask);
		if (flow)  /* Found */
			return flow;
	}
	return NULL;
}

struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl,
				    const struct sw_flow_key *key)
{
	u32 __always_unused n_mask_hit;

	return ovs_flow_tbl_lookup_stats(tbl, key, &n_mask_hit);
}

struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
					  const struct sw_flow_match *match)
{
	struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
	struct sw_flow_mask *mask;
	struct sw_flow *flow;

	/* Always called under ovs-mutex. */
	list_for_each_entry(mask, &tbl->mask_list, list) {
		flow = masked_flow_lookup(ti, match->key, mask);
		if (flow && ovs_identifier_is_key(&flow->id) &&
		    ovs_flow_cmp_unmasked_key(flow, match))
			return flow;
	}
	return NULL;
}

static u32 ufid_hash(const struct sw_flow_id *sfid)
{
	return jhash(sfid->ufid, sfid->ufid_len, 0);
}

static bool ovs_flow_cmp_ufid(const struct sw_flow *flow,
			      const struct sw_flow_id *sfid)
{
	if (flow->id.ufid_len != sfid->ufid_len)
		return false;

	return !memcmp(flow->id.ufid, sfid->ufid, sfid->ufid_len);
}

bool ovs_flow_cmp(const struct sw_flow *flow, const struct sw_flow_match *match)
{
	if (ovs_identifier_is_ufid(&flow->id))
		return flow_cmp_masked_key(flow, match->key, &match->range);

	return ovs_flow_cmp_unmasked_key(flow, match);
}

struct sw_flow *ovs_flow_tbl_lookup_ufid(struct flow_table *tbl,
					 const struct sw_flow_id *ufid)
{
	struct table_instance *ti = rcu_dereference_ovsl(tbl->ufid_ti);
	struct sw_flow *flow;
	struct hlist_head *head;
	u32 hash;

	hash = ufid_hash(ufid);
	head = find_bucket(ti, hash);
	hlist_for_each_entry_rcu(flow, head, ufid_table.node[ti->node_ver]) {
		if (flow->ufid_table.hash == hash &&
		    ovs_flow_cmp_ufid(flow, ufid))
			return flow;
	}
	return NULL;
}

int ovs_flow_tbl_num_masks(const struct flow_table *table)
{
	struct sw_flow_mask *mask;
	int num = 0;

	list_for_each_entry(mask, &table->mask_list, list)
		num++;

	return num;
}

static struct table_instance *table_instance_expand(struct table_instance *ti,
						    bool ufid)
{
	return table_instance_rehash(ti, ti->n_buckets * 2, ufid);
}

/* Remove 'mask' from the mask list, if it is not needed any more. */
static void flow_mask_remove(struct flow_table *tbl, struct sw_flow_mask *mask)
{
	if (mask) {
		/* ovs-lock is required to protect mask-refcount and
		 * mask list.
		 */
		ASSERT_OVSL();
		BUG_ON(!mask->ref_count);
		mask->ref_count--;

		if (!mask->ref_count) {
			list_del_rcu(&mask->list);
			kfree_rcu(mask, rcu);
		}
	}
}

/* Must be called with OVS mutex held. */
void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
{
	struct table_instance *ti = ovsl_dereference(table->ti);
	struct table_instance *ufid_ti = ovsl_dereference(table->ufid_ti);

	BUG_ON(table->count == 0);
	hlist_del_rcu(&flow->flow_table.node[ti->node_ver]);
	table->count--;
	if (ovs_identifier_is_ufid(&flow->id)) {
		hlist_del_rcu(&flow->ufid_table.node[ufid_ti->node_ver]);
		table->ufid_count--;
	}

	/* Drop the mask reference via RCU. 'flow->mask' is not set to NULL,
	 * since the mask must remain accessible for as long as an RCU reader
	 * holds the read lock.
	 */
	flow_mask_remove(table, flow->mask);
}

static struct sw_flow_mask *mask_alloc(void)
{
	struct sw_flow_mask *mask;

	mask = kmalloc(sizeof(*mask), GFP_KERNEL);
	if (mask)
		mask->ref_count = 1;

	return mask;
}

static bool mask_equal(const struct sw_flow_mask *a,
		       const struct sw_flow_mask *b)
{
	const u8 *a_ = (const u8 *)&a->key + a->range.start;
	const u8 *b_ = (const u8 *)&b->key + b->range.start;

	return (a->range.end == b->range.end)
		&& (a->range.start == b->range.start)
		&& (memcmp(a_, b_, range_n_bytes(&a->range)) == 0);
}

static struct sw_flow_mask *flow_mask_find(const struct flow_table *tbl,
					   const struct sw_flow_mask *mask)
{
	struct list_head *ml;

	list_for_each(ml, &tbl->mask_list) {
		struct sw_flow_mask *m;

		m = container_of(ml, struct sw_flow_mask, list);
		if (mask_equal(mask, m))
			return m;
	}

	return NULL;
}

/* Add 'mask' into the mask list, if it is not already there. */
static int flow_mask_insert(struct flow_table *tbl, struct sw_flow *flow,
			    const struct sw_flow_mask *new)
{
	struct sw_flow_mask *mask;

	mask = flow_mask_find(tbl, new);
	if (!mask) {
		/* Allocate a new mask if none exists. */
		mask = mask_alloc();
		if (!mask)
			return -ENOMEM;
		mask->key = new->key;
		mask->range = new->range;
		list_add_rcu(&mask->list, &tbl->mask_list);
	} else {
		BUG_ON(!mask->ref_count);
		mask->ref_count++;
	}

	flow->mask = mask;
	return 0;
}

/* Must be called with OVS mutex held. */
static void flow_key_insert(struct flow_table *table, struct sw_flow *flow)
{
	struct table_instance *new_ti = NULL;
	struct table_instance *ti;

	flow->flow_table.hash = flow_hash(&flow->key, &flow->mask->range);
	ti = ovsl_dereference(table->ti);
	table_instance_insert(ti, flow);
	table->count++;

	/* Expand table, if necessary, to make room. */
	if (table->count > ti->n_buckets)
		new_ti = table_instance_expand(ti, false);
	else if (time_after(jiffies, table->last_rehash + REHASH_INTERVAL))
		new_ti = table_instance_rehash(ti, ti->n_buckets, false);

	if (new_ti) {
		rcu_assign_pointer(table->ti, new_ti);
		call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
		table->last_rehash = jiffies;
	}
}

/* Must be called with OVS mutex held. */
static void flow_ufid_insert(struct flow_table *table, struct sw_flow *flow)
{
	struct table_instance *ti;

	flow->ufid_table.hash = ufid_hash(&flow->id);
	ti = ovsl_dereference(table->ufid_ti);
	ufid_table_instance_insert(ti, flow);
	table->ufid_count++;

	/* Expand table, if necessary, to make room. */
	if (table->ufid_count > ti->n_buckets) {
		struct table_instance *new_ti;

		new_ti = table_instance_expand(ti, true);
		if (new_ti) {
			rcu_assign_pointer(table->ufid_ti, new_ti);
			call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
		}
	}
}

/* Must be called with OVS mutex held. */
int ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow,
			const struct sw_flow_mask *mask)
{
	int err;

	err = flow_mask_insert(table, flow, mask);
	if (err)
		return err;
	flow_key_insert(table, flow);
	if (ovs_identifier_is_ufid(&flow->id))
		flow_ufid_insert(table, flow);

	return 0;
}

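/* Typical life cycle of a flow as driven from the datapath (a sketch
 * only: locking and error handling are elided, and 'dp' stands in for a
 * hypothetical caller's datapath):
 *
 *	struct sw_flow *flow = ovs_flow_alloc();
 *
 *	... fill in flow->key, flow->id and build 'mask' ...
 *	err = ovs_flow_tbl_insert(&dp->table, flow, &mask);
 *	...
 *	ovs_flow_tbl_remove(&dp->table, flow);
 *	ovs_flow_free(flow, true);
 *
 * Passing 'deferred == true' to ovs_flow_free() defers the actual free
 * until after an RCU grace period, since readers may still hold the flow.
 */
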
/* Initializes the flow module.
 * Returns zero if successful or a negative error code.
 */
int ovs_flow_init(void)
{
	BUILD_BUG_ON(__alignof__(struct sw_flow_key) % __alignof__(long));
	BUILD_BUG_ON(sizeof(struct sw_flow_key) % sizeof(long));

	flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow)
				       + (nr_cpu_ids
					  * sizeof(struct flow_stats *)),
				       0, 0, NULL);
	if (flow_cache == NULL)
		return -ENOMEM;

	flow_stats_cache
		= kmem_cache_create("sw_flow_stats", sizeof(struct flow_stats),
				    0, SLAB_HWCACHE_ALIGN, NULL);
	if (flow_stats_cache == NULL) {
		kmem_cache_destroy(flow_cache);
		flow_cache = NULL;
		return -ENOMEM;
	}

	return 0;
}

/* Uninitializes the flow module. */
void ovs_flow_exit(void)
{
	kmem_cache_destroy(flow_stats_cache);
	kmem_cache_destroy(flow_cache);
}