Lines Matching +full:in +full:- +full:masks
1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (c) 2007-2014 Nicira, Inc.
20 #include <linux/in.h>
50 return range->end - range->start; in range_n_bytes()
56 int start = full ? 0 : mask->range.start; in ovs_flow_mask_key()
57 int len = full ? sizeof *dst : range_n_bytes(&mask->range); in ovs_flow_mask_key()
58 const long *m = (const long *)((const u8 *)&mask->key + start); in ovs_flow_mask_key()
64 * if 'full' is false the memory outside of the 'mask->range' is left in ovs_flow_mask_key()
66 * operations on 'dst' only use contents within 'mask->range'. in ovs_flow_mask_key()
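
The fragments above come from ovs_flow_mask_key(), which copies a flow key while ANDing it with a mask, touching only the bytes covered by 'start'/'len' unless 'full' is set. A minimal sketch of the copy loop, assuming 'src' is the unmasked key and 'dst' the destination ('m', 'start' and 'len' are the variables shown in the fragments):

        /* Word-at-a-time masked copy: only bytes in [start, start + len)
         * are written, so callers must stay within 'mask->range' when
         * 'full' is false.
         */
        long *d = (long *)((u8 *)dst + start);
        const long *s = (const long *)((const u8 *)src + start);
        int i;

        for (i = 0; i < len; i += sizeof(long))
                *d++ = *s++ & *m++;
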
79 return ERR_PTR(-ENOMEM); in ovs_flow_alloc()
81 flow->stats_last_writer = -1; in ovs_flow_alloc()
82 flow->cpu_used_mask = (struct cpumask *)&flow->stats[nr_cpu_ids]; in ovs_flow_alloc()
91 spin_lock_init(&stats->lock); in ovs_flow_alloc()
93 RCU_INIT_POINTER(flow->stats[0], stats); in ovs_flow_alloc()
95 cpumask_set_cpu(0, flow->cpu_used_mask); in ovs_flow_alloc()
100 return ERR_PTR(-ENOMEM); in ovs_flow_alloc()
105 return table->count; in ovs_flow_tbl_count()
112 if (ovs_identifier_is_key(&flow->id)) in flow_free()
113 kfree(flow->id.unmasked_key); in flow_free()
114 if (flow->sf_acts) in flow_free()
116 flow->sf_acts); in flow_free()
119 cpu = cpumask_next(cpu, flow->cpu_used_mask)) { in flow_free()
120 if (flow->stats[cpu]) in flow_free()
122 (struct sw_flow_stats __force *)flow->stats[cpu]); in flow_free()
141 call_rcu(&flow->rcu, rcu_free_flow_callback); in ovs_flow_free()
148 kvfree(ti->buckets); in __table_instance_destroy()
160 ti->buckets = kvmalloc_array(new_size, sizeof(struct hlist_head), in table_instance_alloc()
162 if (!ti->buckets) { in table_instance_alloc()
168 INIT_HLIST_HEAD(&ti->buckets[i]); in table_instance_alloc()
170 ti->n_buckets = new_size; in table_instance_alloc()
171 ti->node_ver = 0; in table_instance_alloc()
172 get_random_bytes(&ti->hash_seed, sizeof(u32)); in table_instance_alloc()
179 free_percpu(ma->masks_usage_stats); in __mask_array_destroy()
199 for (i = 0; i < ma->max; i++) { in tbl_mask_array_reset_counters()
200 ma->masks_usage_zero_cntr[i] = 0; in tbl_mask_array_reset_counters()
207 stats = per_cpu_ptr(ma->masks_usage_stats, cpu); in tbl_mask_array_reset_counters()
209 start = u64_stats_fetch_begin(&stats->syncp); in tbl_mask_array_reset_counters()
210 counter = stats->usage_cntrs[i]; in tbl_mask_array_reset_counters()
211 } while (u64_stats_fetch_retry(&stats->syncp, start)); in tbl_mask_array_reset_counters()
213 ma->masks_usage_zero_cntr[i] += counter; in tbl_mask_array_reset_counters()
223 new = kzalloc(struct_size(new, masks, size) + in tbl_mask_array_alloc()
228 new->masks_usage_zero_cntr = (u64 *)((u8 *)new + in tbl_mask_array_alloc()
229 struct_size(new, masks, size)); in tbl_mask_array_alloc()
231 new->masks_usage_stats = __alloc_percpu(sizeof(struct mask_array_stats) + in tbl_mask_array_alloc()
234 if (!new->masks_usage_stats) { in tbl_mask_array_alloc()
239 new->count = 0; in tbl_mask_array_alloc()
240 new->max = size; in tbl_mask_array_alloc()
252 return -ENOMEM; in tbl_mask_array_realloc()
254 old = ovsl_dereference(tbl->mask_array); in tbl_mask_array_realloc()
258 for (i = 0; i < old->max; i++) { in tbl_mask_array_realloc()
259 if (ovsl_dereference(old->masks[i])) in tbl_mask_array_realloc()
260 new->masks[new->count++] = old->masks[i]; in tbl_mask_array_realloc()
262 call_rcu(&old->rcu, mask_array_rcu_cb); in tbl_mask_array_realloc()
265 rcu_assign_pointer(tbl->mask_array, new); in tbl_mask_array_realloc()
273 struct mask_array *ma = ovsl_dereference(tbl->mask_array); in tbl_mask_array_add_mask()
274 int err, ma_count = READ_ONCE(ma->count); in tbl_mask_array_add_mask()
276 if (ma_count >= ma->max) { in tbl_mask_array_add_mask()
277 err = tbl_mask_array_realloc(tbl, ma->max + in tbl_mask_array_add_mask()
282 ma = ovsl_dereference(tbl->mask_array); in tbl_mask_array_add_mask()
290 BUG_ON(ovsl_dereference(ma->masks[ma_count])); in tbl_mask_array_add_mask()
292 rcu_assign_pointer(ma->masks[ma_count], new); in tbl_mask_array_add_mask()
293 WRITE_ONCE(ma->count, ma_count + 1); in tbl_mask_array_add_mask()
301 struct mask_array *ma = ovsl_dereference(tbl->mask_array); in tbl_mask_array_del_mask()
302 int i, ma_count = READ_ONCE(ma->count); in tbl_mask_array_del_mask()
306 if (mask == ovsl_dereference(ma->masks[i])) in tbl_mask_array_del_mask()
314 WRITE_ONCE(ma->count, ma_count - 1); in tbl_mask_array_del_mask()
316 rcu_assign_pointer(ma->masks[i], ma->masks[ma_count - 1]); in tbl_mask_array_del_mask()
317 RCU_INIT_POINTER(ma->masks[ma_count - 1], NULL); in tbl_mask_array_del_mask()
322 if (ma->max >= (MASK_ARRAY_SIZE_MIN * 2) && in tbl_mask_array_del_mask()
323 ma_count <= (ma->max / 3)) in tbl_mask_array_del_mask()
324 tbl_mask_array_realloc(tbl, ma->max / 2); in tbl_mask_array_del_mask()
334 /* ovs-lock is required to protect mask-refcount and in flow_mask_remove()
338 BUG_ON(!mask->ref_count); in flow_mask_remove()
339 mask->ref_count--; in flow_mask_remove()
341 if (!mask->ref_count) in flow_mask_remove()
348 free_percpu(mc->mask_cache); in __mask_cache_destroy()
375 new->cache_size = size; in tbl_mask_cache_alloc()
376 if (new->cache_size > 0) { in tbl_mask_cache_alloc()
378 new->cache_size), in tbl_mask_cache_alloc()
386 new->mask_cache = cache; in tbl_mask_cache_alloc()
391 struct mask_cache *mc = rcu_dereference_ovsl(table->mask_cache); in ovs_flow_tbl_masks_cache_resize()
394 if (size == mc->cache_size) in ovs_flow_tbl_masks_cache_resize()
399 return -EINVAL; in ovs_flow_tbl_masks_cache_resize()
403 return -ENOMEM; in ovs_flow_tbl_masks_cache_resize()
405 rcu_assign_pointer(table->mask_cache, new); in ovs_flow_tbl_masks_cache_resize()
406 call_rcu(&mc->rcu, mask_cache_rcu_cb); in ovs_flow_tbl_masks_cache_resize()
419 return -ENOMEM; in ovs_flow_tbl_init()
433 rcu_assign_pointer(table->ti, ti); in ovs_flow_tbl_init()
434 rcu_assign_pointer(table->ufid_ti, ufid_ti); in ovs_flow_tbl_init()
435 rcu_assign_pointer(table->mask_array, ma); in ovs_flow_tbl_init()
436 rcu_assign_pointer(table->mask_cache, mc); in ovs_flow_tbl_init()
437 table->last_rehash = jiffies; in ovs_flow_tbl_init()
438 table->count = 0; in ovs_flow_tbl_init()
439 table->ufid_count = 0; in ovs_flow_tbl_init()
448 return -ENOMEM; in ovs_flow_tbl_init()
464 hlist_del_rcu(&flow->flow_table.node[ti->node_ver]); in table_instance_flow_free()
465 table->count--; in table_instance_flow_free()
467 if (ovs_identifier_is_ufid(&flow->id)) { in table_instance_flow_free()
468 hlist_del_rcu(&flow->ufid_table.node[ufid_ti->node_ver]); in table_instance_flow_free()
469 table->ufid_count--; in table_instance_flow_free()
472 flow_mask_remove(table, flow->mask); in table_instance_flow_free()
482 for (i = 0; i < ti->n_buckets; i++) { in table_instance_flow_flush()
483 struct hlist_head *head = &ti->buckets[i]; in table_instance_flow_flush()
488 flow_table.node[ti->node_ver]) { in table_instance_flow_flush()
496 if (WARN_ON(table->count != 0 || in table_instance_flow_flush()
497 table->ufid_count != 0)) { in table_instance_flow_flush()
498 table->count = 0; in table_instance_flow_flush()
499 table->ufid_count = 0; in table_instance_flow_flush()
506 call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb); in table_instance_destroy()
507 call_rcu(&ufid_ti->rcu, flow_tbl_destroy_rcu_cb); in table_instance_destroy()
515 struct table_instance *ti = rcu_dereference_raw(table->ti); in ovs_flow_tbl_destroy()
516 struct table_instance *ufid_ti = rcu_dereference_raw(table->ufid_ti); in ovs_flow_tbl_destroy()
517 struct mask_cache *mc = rcu_dereference_raw(table->mask_cache); in ovs_flow_tbl_destroy()
518 struct mask_array *ma = rcu_dereference_raw(table->mask_array); in ovs_flow_tbl_destroy()
520 call_rcu(&mc->rcu, mask_cache_rcu_cb); in ovs_flow_tbl_destroy()
521 call_rcu(&ma->rcu, mask_array_rcu_cb); in ovs_flow_tbl_destroy()
533 ver = ti->node_ver; in ovs_flow_tbl_dump_next()
534 while (*bucket < ti->n_buckets) { in ovs_flow_tbl_dump_next()
536 head = &ti->buckets[*bucket]; in ovs_flow_tbl_dump_next()
554 hash = jhash_1word(hash, ti->hash_seed); in find_bucket()
555 return &ti->buckets[hash & (ti->n_buckets - 1)]; in find_bucket()
563 head = find_bucket(ti, flow->flow_table.hash); in table_instance_insert()
564 hlist_add_head_rcu(&flow->flow_table.node[ti->node_ver], head); in table_instance_insert()
572 head = find_bucket(ti, flow->ufid_table.hash); in ufid_table_instance_insert()
573 hlist_add_head_rcu(&flow->ufid_table.node[ti->node_ver], head); in ufid_table_instance_insert()
582 old_ver = old->node_ver; in flow_table_copy_flows()
583 new->node_ver = !old_ver; in flow_table_copy_flows()
585 /* Insert in new table. */ in flow_table_copy_flows()
586 for (i = 0; i < old->n_buckets; i++) { in flow_table_copy_flows()
588 struct hlist_head *head = &old->buckets[i]; in flow_table_copy_flows()
624 return -ENOMEM; in ovs_flow_tbl_flush()
629 old_ti = ovsl_dereference(flow_table->ti); in ovs_flow_tbl_flush()
630 old_ufid_ti = ovsl_dereference(flow_table->ufid_ti); in ovs_flow_tbl_flush()
632 rcu_assign_pointer(flow_table->ti, new_ti); in ovs_flow_tbl_flush()
633 rcu_assign_pointer(flow_table->ufid_ti, new_ufid_ti); in ovs_flow_tbl_flush()
634 flow_table->last_rehash = jiffies; in ovs_flow_tbl_flush()
642 return -ENOMEM; in ovs_flow_tbl_flush()
648 const u32 *hash_key = (const u32 *)((const u8 *)key + range->start); in flow_hash()
658 if (key->tun_proto) in flow_key_start()
684 return cmp_key(&flow->key, key, range->start, range->end); in flow_cmp_masked_key()
690 struct sw_flow_key *key = match->key; in ovs_flow_cmp_unmasked_key()
692 int key_end = match->range.end; in ovs_flow_cmp_unmasked_key()
694 BUG_ON(ovs_identifier_is_ufid(&flow->id)); in ovs_flow_cmp_unmasked_key()
695 return cmp_key(flow->id.unmasked_key, key, key_start, key_end); in ovs_flow_cmp_unmasked_key()
709 hash = flow_hash(&masked_key, &mask->range); in masked_flow_lookup()
713 hlist_for_each_entry_rcu(flow, head, flow_table.node[ti->node_ver], in masked_flow_lookup()
715 if (flow->mask == mask && flow->flow_table.hash == hash && in masked_flow_lookup()
716 flow_cmp_masked_key(flow, &masked_key, &mask->range)) in masked_flow_lookup()
723 * mask from index passed in *index.
735 struct mask_array_stats *stats = this_cpu_ptr(ma->masks_usage_stats); in flow_lookup()
740 if (likely(*index < ma->max)) { in flow_lookup()
741 mask = rcu_dereference_ovsl(ma->masks[*index]); in flow_lookup()
745 u64_stats_update_begin(&stats->syncp); in flow_lookup()
746 stats->usage_cntrs[*index]++; in flow_lookup()
747 u64_stats_update_end(&stats->syncp); in flow_lookup()
754 for (i = 0; i < ma->max; i++) { in flow_lookup()
759 mask = rcu_dereference_ovsl(ma->masks[i]); in flow_lookup()
766 u64_stats_update_begin(&stats->syncp); in flow_lookup()
767 stats->usage_cntrs[*index]++; in flow_lookup()
768 u64_stats_update_end(&stats->syncp); in flow_lookup()
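
Read together, the flow_lookup() fragments above implement a two-stage search: the mask whose index was passed in via *index is tried first, and only on a miss is the rest of the mask array scanned, updating *index so the caller can cache the winning mask. A condensed sketch of that control flow (the per-mask usage counters shown in the fragments are bumped on every hit; surrounding declarations are assumed):

        /* Fast path: try the mask the caller's cached index points at. */
        if (likely(*index < ma->max)) {
                mask = rcu_dereference_ovsl(ma->masks[*index]);
                if (mask) {
                        flow = masked_flow_lookup(ti, key, mask, n_mask_hit);
                        if (flow)
                                return flow;    /* usage counter for *index bumped */
                }
        }

        /* Slow path: scan the whole array, skipping the mask already tried. */
        for (i = 0; i < ma->max; i++) {
                if (i == *index)
                        continue;
                mask = rcu_dereference_ovsl(ma->masks[i]);
                if (!mask)
                        continue;               /* empty slot */
                flow = masked_flow_lookup(ti, key, mask, n_mask_hit);
                if (flow) {
                        *index = i;             /* remember the winning mask */
                        return flow;
                }
        }
        return NULL;
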
 778  * coupled cache; updates to the mask list can result in inconsistent
 779  * entries in the mask cache.
 780  * This is a per-CPU cache divided into MC_HASH_SEGS segments.
 781  * On a hash collision the entry is hashed into the next segment.
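
A sketch of the segment probing this comment describes, stitched together from the ovs_flow_tbl_lookup_stats() fragments below: the skb hash is consumed a few bits per segment, a matching entry short-circuits the lookup to its cached mask index, and on a miss the entry with the smallest skb_hash seen is reused for the new result. Treat it as an illustration of the scheme, not the verbatim source:

        ce = NULL;                              /* replacement candidate */
        hash = skb_hash;
        entries = this_cpu_ptr(mc->mask_cache);
        for (seg = 0; seg < MC_HASH_SEGS; seg++) {
                int index = hash & (mc->cache_size - 1);
                struct mask_cache_entry *e = &entries[index];

                if (e->skb_hash == skb_hash) {
                        /* Cache hit: start at the cached mask index. */
                        flow = flow_lookup(tbl, ti, ma, key, n_mask_hit,
                                           n_cache_hit, &e->mask_index);
                        if (!flow)
                                e->skb_hash = 0;  /* stale entry, invalidate */
                        return flow;
                }

                if (!ce || e->skb_hash < ce->skb_hash)
                        ce = e;                 /* cheapest entry to evict so far */

                hash >>= MC_HASH_SHIFT;         /* move on to the next segment */
        }

        /* Cache miss: full lookup, then cache the result in 'ce'. */
        flow = flow_lookup(tbl, ti, ma, key, n_mask_hit, n_cache_hit,
                           &ce->mask_index);
        if (flow)
                ce->skb_hash = skb_hash;
        return flow;
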
789 struct mask_cache *mc = rcu_dereference(tbl->mask_cache); in ovs_flow_tbl_lookup_stats()
790 struct mask_array *ma = rcu_dereference(tbl->mask_array); in ovs_flow_tbl_lookup_stats()
791 struct table_instance *ti = rcu_dereference(tbl->ti); in ovs_flow_tbl_lookup_stats()
799 if (unlikely(!skb_hash || mc->cache_size == 0)) { in ovs_flow_tbl_lookup_stats()
810 if (key->recirc_id) in ovs_flow_tbl_lookup_stats()
811 skb_hash = jhash_1word(skb_hash, key->recirc_id); in ovs_flow_tbl_lookup_stats()
815 entries = this_cpu_ptr(mc->mask_cache); in ovs_flow_tbl_lookup_stats()
819 int index = hash & (mc->cache_size - 1); in ovs_flow_tbl_lookup_stats()
823 if (e->skb_hash == skb_hash) { in ovs_flow_tbl_lookup_stats()
825 n_cache_hit, &e->mask_index); in ovs_flow_tbl_lookup_stats()
827 e->skb_hash = 0; in ovs_flow_tbl_lookup_stats()
831 if (!ce || e->skb_hash < ce->skb_hash) in ovs_flow_tbl_lookup_stats()
839 &ce->mask_index); in ovs_flow_tbl_lookup_stats()
841 ce->skb_hash = skb_hash; in ovs_flow_tbl_lookup_stats()
850 struct table_instance *ti = rcu_dereference_ovsl(tbl->ti); in ovs_flow_tbl_lookup()
851 struct mask_array *ma = rcu_dereference_ovsl(tbl->mask_array); in ovs_flow_tbl_lookup()
870 struct mask_array *ma = ovsl_dereference(tbl->mask_array); in ovs_flow_tbl_lookup_exact()
873 /* Always called under ovs-mutex. */ in ovs_flow_tbl_lookup_exact()
874 for (i = 0; i < ma->max; i++) { in ovs_flow_tbl_lookup_exact()
875 struct table_instance *ti = rcu_dereference_ovsl(tbl->ti); in ovs_flow_tbl_lookup_exact()
880 mask = ovsl_dereference(ma->masks[i]); in ovs_flow_tbl_lookup_exact()
884 flow = masked_flow_lookup(ti, match->key, mask, &n_mask_hit); in ovs_flow_tbl_lookup_exact()
885 if (flow && ovs_identifier_is_key(&flow->id) && in ovs_flow_tbl_lookup_exact()
896 return jhash(sfid->ufid, sfid->ufid_len, 0); in ufid_hash()
902 if (flow->id.ufid_len != sfid->ufid_len) in ovs_flow_cmp_ufid()
905 return !memcmp(flow->id.ufid, sfid->ufid, sfid->ufid_len); in ovs_flow_cmp_ufid()
911 if (ovs_identifier_is_ufid(&flow->id)) in ovs_flow_cmp()
912 return flow_cmp_masked_key(flow, match->key, &match->range); in ovs_flow_cmp()
920 struct table_instance *ti = rcu_dereference_ovsl(tbl->ufid_ti); in ovs_flow_tbl_lookup_ufid()
927 hlist_for_each_entry_rcu(flow, head, ufid_table.node[ti->node_ver], in ovs_flow_tbl_lookup_ufid()
929 if (flow->ufid_table.hash == hash && in ovs_flow_tbl_lookup_ufid()
938 struct mask_array *ma = rcu_dereference_ovsl(table->mask_array); in ovs_flow_tbl_num_masks()
939 return READ_ONCE(ma->count); in ovs_flow_tbl_num_masks()
944 struct mask_cache *mc = rcu_dereference_ovsl(table->mask_cache); in ovs_flow_tbl_masks_cache_size()
946 return READ_ONCE(mc->cache_size); in ovs_flow_tbl_masks_cache_size()
952 return table_instance_rehash(ti, ti->n_buckets * 2, ufid); in table_instance_expand()
958 struct table_instance *ti = ovsl_dereference(table->ti); in ovs_flow_tbl_remove()
959 struct table_instance *ufid_ti = ovsl_dereference(table->ufid_ti); in ovs_flow_tbl_remove()
961 BUG_ON(table->count == 0); in ovs_flow_tbl_remove()
971 mask->ref_count = 1; in mask_alloc()
979 const u8 *a_ = (const u8 *)&a->key + a->range.start; in mask_equal()
980 const u8 *b_ = (const u8 *)&b->key + b->range.start; in mask_equal()
982 return (a->range.end == b->range.end) in mask_equal()
983 && (a->range.start == b->range.start) in mask_equal()
984 && (memcmp(a_, b_, range_n_bytes(&a->range)) == 0); in mask_equal()
993 ma = ovsl_dereference(tbl->mask_array); in flow_mask_find()
994 for (i = 0; i < ma->max; i++) { in flow_mask_find()
996 t = ovsl_dereference(ma->masks[i]); in flow_mask_find()
1016 return -ENOMEM; in flow_mask_insert()
1017 mask->key = new->key; in flow_mask_insert()
1018 mask->range = new->range; in flow_mask_insert()
1020 /* Add mask to mask-list. */ in flow_mask_insert()
1023 return -ENOMEM; in flow_mask_insert()
1026 BUG_ON(!mask->ref_count); in flow_mask_insert()
1027 mask->ref_count++; in flow_mask_insert()
1030 flow->mask = mask; in flow_mask_insert()
1040 flow->flow_table.hash = flow_hash(&flow->key, &flow->mask->range); in flow_key_insert()
1041 ti = ovsl_dereference(table->ti); in flow_key_insert()
1043 table->count++; in flow_key_insert()
1046 if (table->count > ti->n_buckets) in flow_key_insert()
1048 else if (time_after(jiffies, table->last_rehash + REHASH_INTERVAL)) in flow_key_insert()
1049 new_ti = table_instance_rehash(ti, ti->n_buckets, false); in flow_key_insert()
1052 rcu_assign_pointer(table->ti, new_ti); in flow_key_insert()
1053 call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb); in flow_key_insert()
1054 table->last_rehash = jiffies; in flow_key_insert()
1063 flow->ufid_table.hash = ufid_hash(&flow->id); in flow_ufid_insert()
1064 ti = ovsl_dereference(table->ufid_ti); in flow_ufid_insert()
1066 table->ufid_count++; in flow_ufid_insert()
1069 if (table->ufid_count > ti->n_buckets) { in flow_ufid_insert()
1074 rcu_assign_pointer(table->ufid_ti, new_ti); in flow_ufid_insert()
1075 call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb); in flow_ufid_insert()
1090 if (ovs_identifier_is_ufid(&flow->id)) in ovs_flow_tbl_insert()
1101 return (s64)mc_b->counter - (s64)mc_a->counter; in compare_mask_and_count()
1107 struct mask_array *ma = rcu_dereference_ovsl(table->mask_array); in ovs_flow_masks_rebalance()
1114 masks_and_count = kmalloc_array(ma->max, sizeof(*masks_and_count), in ovs_flow_masks_rebalance()
1119 for (i = 0; i < ma->max; i++) { in ovs_flow_masks_rebalance()
1123 mask = rcu_dereference_ovsl(ma->masks[i]); in ovs_flow_masks_rebalance()
1135 stats = per_cpu_ptr(ma->masks_usage_stats, cpu); in ovs_flow_masks_rebalance()
1137 start = u64_stats_fetch_begin(&stats->syncp); in ovs_flow_masks_rebalance()
1138 counter = stats->usage_cntrs[i]; in ovs_flow_masks_rebalance()
1139 } while (u64_stats_fetch_retry(&stats->syncp, start)); in ovs_flow_masks_rebalance()
1145 masks_and_count[i].counter -= ma->masks_usage_zero_cntr[i]; in ovs_flow_masks_rebalance()
1150 ma->masks_usage_zero_cntr[i] += masks_and_count[i].counter; in ovs_flow_masks_rebalance()
 1169   /* Rebuild the new list in order of usage. */ in ovs_flow_masks_rebalance()
1170 new = tbl_mask_array_alloc(ma->max); in ovs_flow_masks_rebalance()
1177 if (ovsl_dereference(ma->masks[index])) in ovs_flow_masks_rebalance()
1178 new->masks[new->count++] = ma->masks[index]; in ovs_flow_masks_rebalance()
1181 rcu_assign_pointer(table->mask_array, new); in ovs_flow_masks_rebalance()
1182 call_rcu(&ma->rcu, mask_array_rcu_cb); in ovs_flow_masks_rebalance()
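
The rebalance fragments above read the per-CPU usage counters accumulated since the last pass and, when the ordering has changed, rebuild the mask array so the most-used masks sit first and are probed earliest by flow_lookup(). A sketch of the sort-and-rebuild step, assuming the masks_and_count[] array filled earlier in the function (the cleanup label name is assumed):

        /* Sort descending by hit count; compare_mask_and_count() returns
         * mc_b->counter - mc_a->counter, so heavier masks move to the front.
         */
        sort(masks_and_count, ma->max, sizeof(*masks_and_count),
             compare_mask_and_count, NULL);

        /* Rebuild the mask array in sorted order and publish it via RCU. */
        new = tbl_mask_array_alloc(ma->max);
        if (!new)
                goto free_mask_entries;         /* label name assumed */

        for (i = 0; i < ma->max; i++) {
                int index = masks_and_count[i].index;

                if (ovsl_dereference(ma->masks[index]))
                        new->masks[new->count++] = ma->masks[index];
        }

        rcu_assign_pointer(table->mask_array, new);
        call_rcu(&ma->rcu, mask_array_rcu_cb);
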
1201 return -ENOMEM; in ovs_flow_init()
1209 return -ENOMEM; in ovs_flow_init()