/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

/* Macro to enable/disable run-time checks. */
#if defined(RTE_LIBRTE_LPM_DEBUG)
#define VERIFY_DEPTH(depth) do { \
	if ((depth == 0) || (depth > RTE_LPM_MAX_DEPTH)) \
		rte_panic("LPM: Invalid depth (%u) at line %d", \
				(unsigned)(depth), __LINE__); \
} while (0)
#else
#define VERIFY_DEPTH(depth)
#endif

/* Converts a given depth value to its corresponding mask value.
 * depth (IN) : range = 1 - 32 */
static uint32_t
depth_to_mask(uint8_t depth)
{
	VERIFY_DEPTH(depth);
	/* The arithmetic right shift sign-extends: the top 'depth' bits are 1's. */
	return (int)0x80000000 >> (depth - 1);
}

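/*
 * Illustrative values (editorial note, not in the original file):
 *   depth_to_mask(1)  == 0x80000000
 *   depth_to_mask(24) == 0xffffff00
 *   depth_to_mask(32) == 0xffffffff
 */
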
/*
 * Converts given depth value to its corresponding range value.
 */
static uint32_t
depth_to_range(uint8_t depth)
{
	VERIFY_DEPTH(depth);

	/* Calculate tbl24 range. (Note: 2^depth = 1 << depth) */
	if (depth <= MAX_DEPTH_TBL24)
		return 1 << (MAX_DEPTH_TBL24 - depth);

	/* Else, depth is greater than 24: calculate the tbl8 range. */
	return 1 << (RTE_LPM_MAX_DEPTH - depth);
}

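/*
 * Illustrative values (editorial note, not in the original file): a /16
 * covers 1 << (24 - 16) = 256 tbl24 entries, while a /28 covers
 * 1 << (32 - 28) = 16 entries inside a single tbl8 group.
 */
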
/* In rte_lpm_find_existing(): walk the tailq and match on name. */
	l = te->data;
	if (strncmp(name, l->name, RTE_LPM_NAMESIZE) == 0)
		break;

struct rte_lpm *
rte_lpm_create(const char *name, int socket_id,
		const struct rte_lpm_config *config)
{
	/* ... */
	/* Check user arguments. */
	if ((name == NULL) || (socket_id < -1)
			|| config->number_tbl8s > RTE_LPM_MAX_TBL8_NUM_GROUPS) {
		rte_errno = EINVAL;
		return NULL;
	}

	/* Determine the amount of memory to allocate. */
	rules_size = sizeof(struct rte_lpm_rule) * config->max_rules;
	tbl8s_size = (uint32_t)(sizeof(struct rte_lpm_tbl_entry) *
			RTE_LPM_TBL8_GROUP_NUM_ENTRIES * config->number_tbl8s);

	/* Guarantee there's no existing entry with the same name. */
	TAILQ_FOREACH(te, lpm_list, next) {
		lpm = te->data;
		if (strncmp(name, lpm->name, RTE_LPM_NAMESIZE) == 0)
			break;
	}
	/* ... */
	lpm->rules_tbl = rte_zmalloc_socket(NULL,
			(size_t)rules_size, RTE_CACHE_LINE_SIZE, socket_id);
	if (lpm->rules_tbl == NULL) {
		/* ... */
	}

	lpm->tbl8 = rte_zmalloc_socket(NULL,
			(size_t)tbl8s_size, RTE_CACHE_LINE_SIZE, socket_id);
	if (lpm->tbl8 == NULL) {
		rte_free(lpm->rules_tbl);
		/* ... */
	}

	/* Save user arguments. */
	lpm->max_rules = config->max_rules;
	lpm->number_tbl8s = config->number_tbl8s;
	strlcpy(lpm->name, name, sizeof(lpm->name));

	//te->data = lpm;
	/* ... */
}

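/*
 * Illustrative call (hypothetical values, not from this file):
 *
 *	struct rte_lpm_config cfg = {
 *		.max_rules = 1024,	// sizes rules_tbl
 *		.number_tbl8s = 256,	// sizes tbl8
 *	};
 *	struct rte_lpm *l = rte_lpm_create("lpm0", SOCKET_ID_ANY, &cfg);
 */
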
/* In rte_lpm_free(): locate the matching tailq entry, then release memory. */
	if (te->data == (void *) lpm)
		break;
	/* ... */
	rte_free(lpm->tbl8);
	rte_free(lpm->rules_tbl);

/*
 * Adds a rule to the rule table.
 *
 * NOTE: The rule table is split into 32 groups. Each group contains rules that
 * apply to a specific prefix depth (i.e. group 1 contains rules that apply to
 * prefixes with a depth of 1 etc.). In the following code (depth - 1) is used
 * to refer to depth 1 because even though the depth range is 1 - 32, depths
 * are stored in the rule table from 0 - 31. (A layout sketch follows the
 * function.)
 * NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
 */
static int32_t
rule_add(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
	uint32_t next_hop)
{
	uint32_t rule_gindex, rule_index, last_rule;
	int i;

	VERIFY_DEPTH(depth);

	/* Scan through rule group to see if rule already exists. */
	if (lpm->rule_info[depth - 1].used_rules > 0) {

		/* rule_gindex stands for rule group index. */
		rule_gindex = lpm->rule_info[depth - 1].first_rule;
		rule_index = rule_gindex;
		/* Last rule = last used rule in this rule group. */
		last_rule = rule_gindex + lpm->rule_info[depth - 1].used_rules;

		for (; rule_index < last_rule; rule_index++) {
			/* If rule already exists update next hop and return. */
			if (lpm->rules_tbl[rule_index].ip == ip_masked) {
				if (lpm->rules_tbl[rule_index].next_hop
						== next_hop)
					return -EEXIST;
				lpm->rules_tbl[rule_index].next_hop = next_hop;
				return rule_index;
			}
		}

		if (rule_index == lpm->max_rules)
			return -ENOSPC;
	} else {
		/* Calculate the position in which the rule will be stored. */
		rule_index = 0;
		for (i = depth - 1; i > 0; i--) {
			if (lpm->rule_info[i - 1].used_rules > 0) {
				rule_index = lpm->rule_info[i - 1].first_rule
						+ lpm->rule_info[i - 1].used_rules;
				break;
			}
		}
		if (rule_index == lpm->max_rules)
			return -ENOSPC;

		lpm->rule_info[depth - 1].first_rule = rule_index;
	}

	/* Make room for the new rule: shift every deeper group up a slot. */
	for (i = RTE_LPM_MAX_DEPTH; i > depth; i--) {
		if (lpm->rule_info[i - 1].first_rule
				+ lpm->rule_info[i - 1].used_rules == lpm->max_rules)
			return -ENOSPC;

		if (lpm->rule_info[i - 1].used_rules > 0) {
			lpm->rules_tbl[lpm->rule_info[i - 1].first_rule
				+ lpm->rule_info[i - 1].used_rules]
					= lpm->rules_tbl[lpm->rule_info[i - 1].first_rule];
			lpm->rule_info[i - 1].first_rule++;
		}
	}

	/* Add the new rule. */
	lpm->rules_tbl[rule_index].ip = ip_masked;
	lpm->rules_tbl[rule_index].next_hop = next_hop;

	/* Increment the used rules counter for this rule group. */
	lpm->rule_info[depth - 1].used_rules++;

	return rule_index;
}

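/*
 * Layout sketch (editorial example, not in the original file): with two
 * /8 rules and one /16 rule, rule_info[7] = {.first_rule = 0,
 * .used_rules = 2} and rule_info[15] = {.first_rule = 2, .used_rules = 1}.
 * Adding a third /8 first moves the /16 rule from slot 2 to slot 3,
 * freeing slot 2 for the new rule.
 */
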
/*
 * Delete a rule from the rule table.
 * NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
 */
static void
rule_delete(struct rte_lpm *lpm, int32_t rule_index, uint8_t depth)
{
	int i;

	VERIFY_DEPTH(depth);

	/* Overwrite the deleted rule with the last rule of its group. */
	lpm->rules_tbl[rule_index] =
		lpm->rules_tbl[lpm->rule_info[depth - 1].first_rule
			+ lpm->rule_info[depth - 1].used_rules - 1];

	/* Close the gap by shifting each deeper group down one slot. */
	for (i = depth; i < RTE_LPM_MAX_DEPTH; i++) {
		if (lpm->rule_info[i].used_rules > 0) {
			lpm->rules_tbl[lpm->rule_info[i].first_rule - 1] =
				lpm->rules_tbl[lpm->rule_info[i].first_rule
					+ lpm->rule_info[i].used_rules - 1];
			lpm->rule_info[i].first_rule--;
		}
	}

	lpm->rule_info[depth - 1].used_rules--;
}

/* Finds a rule in the rule table.
 * NOTE: Valid range for depth parameter is 1 .. 32 inclusive. */
static int32_t
rule_find(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth)
{
	uint32_t rule_gindex, last_rule, rule_index;

	VERIFY_DEPTH(depth);

	rule_gindex = lpm->rule_info[depth - 1].first_rule;
	last_rule = rule_gindex + lpm->rule_info[depth - 1].used_rules;

	/* Scan used rules at given depth to find rule. */
	for (rule_index = rule_gindex; rule_index < last_rule; rule_index++) {
		if (lpm->rules_tbl[rule_index].ip == ip_masked)
			return rule_index;
	}

	/* If rule is not found return -EINVAL. */
	return -EINVAL;
}

/* In tbl8_alloc(): scan tbl8 for a free (INVALID) group. */
	if (!tbl8_entry->valid_group) {
		/* Claim the group: clear it and store a head entry with
		 * .valid = INVALID, .depth = 0, .valid_group = VALID. */
		/* ... */
	}
	/* If there are no tbl8 groups free then return error. */
	return -ENOSPC;

static int32_t
add_depth_small(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
		uint32_t next_hop)
{
	/* ... */
	tbl24_range = depth_to_range(depth);
	/* ... for each tbl24 slot i covered by the prefix: */

	/* For invalid OR valid and non-extended tbl24 entries, set the
	 * whole entry in one go to avoid races. */
	if (!lpm->tbl24[i].valid || (lpm->tbl24[i].valid_group == 0 &&
			lpm->tbl24[i].depth <= depth)) {
		struct rte_lpm_tbl_entry new_tbl24_entry = {
			/* ... */
			.depth = depth,
		};
		__atomic_store(&lpm->tbl24[i], &new_tbl24_entry,
				__ATOMIC_RELEASE);
	}

	/* If the entry is valid and extended, update each entry j of its
	 * tbl8 group that is invalid or not deeper than the new rule. */
	if (lpm->tbl24[i].valid_group == 1) {
		tbl8_index = lpm->tbl24[i].group_idx *
				RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
		/* ... */
		if (!lpm->tbl8[j].valid ||
				lpm->tbl8[j].depth <= depth) {
			struct rte_lpm_tbl_entry new_tbl8_entry = {
				/* ... */
				.depth = depth,
			};
			__atomic_store(&lpm->tbl8[j],
					&new_tbl8_entry,
					__ATOMIC_RELAXED);
		}
	}
	/* ... */
}

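/*
 * Editorial example (not in the original file): adding 10.20.0.0/16
 * touches depth_to_range(16) = 256 consecutive tbl24 slots starting at
 * index ip >> 8, leaving alone any slot already owned by a deeper prefix.
 */
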
static int32_t
add_depth_big(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
		uint32_t next_hop)
{
	/* ... */
	tbl8_range = depth_to_range(depth);

	if (!lpm->tbl24[tbl24_index].valid) {
		/* Search for a free tbl8 group. */
		tbl8_group_index = tbl8_alloc(lpm->tbl8, lpm->number_tbl8s);
		/* ... */
		/* Set each tbl8 entry i covered by the new prefix: */
		struct rte_lpm_tbl_entry new_tbl8_entry = {
			/* ... */
			.depth = depth,
			.valid_group = lpm->tbl8[i].valid_group,
		};
		__atomic_store(&lpm->tbl8[i], &new_tbl8_entry,
				__ATOMIC_RELAXED);
		/* ... */
		/* Point the tbl24 entry at the new group. The whole entry is
		 * assigned in one go, only after the tbl8 writes: */
		struct rte_lpm_tbl_entry new_tbl24_entry = {
			/* ... */
			.depth = 0,
		};
		__atomic_store(&lpm->tbl24[tbl24_index], &new_tbl24_entry,
				__ATOMIC_RELEASE);
	}
	/* If the entry is valid but not extended, expand it into a tbl8. */
	else if (lpm->tbl24[tbl24_index].valid_group == 0) {
		/* Search for a free tbl8 group. */
		tbl8_group_index = tbl8_alloc(lpm->tbl8, lpm->number_tbl8s);
		/* ... */
		/* Populate the new tbl8 group with the old tbl24 value: */
		struct rte_lpm_tbl_entry new_tbl8_entry = {
			/* ... */
			.depth = lpm->tbl24[tbl24_index].depth,
			.valid_group = lpm->tbl8[i].valid_group,
			.next_hop = lpm->tbl24[tbl24_index].next_hop,
		};
		__atomic_store(&lpm->tbl8[i], &new_tbl8_entry,
				__ATOMIC_RELAXED);
		/* ... then overwrite the slice covered by the new prefix: */
		struct rte_lpm_tbl_entry new_tbl8_entry = {
			/* ... */
			.depth = depth,
			.valid_group = lpm->tbl8[i].valid_group,
		};
		__atomic_store(&lpm->tbl8[i], &new_tbl8_entry,
				__ATOMIC_RELAXED);
		/* ... finally switch the tbl24 entry to extended, in one go,
		 * after the tbl8 writes: */
		struct rte_lpm_tbl_entry new_tbl24_entry = {
			/* ... */
			.depth = 0,
		};
		__atomic_store(&lpm->tbl24[tbl24_index], &new_tbl24_entry,
				__ATOMIC_RELEASE);
	} else {
		/* Valid and extended: update the existing tbl8 group. */
		tbl8_group_index = lpm->tbl24[tbl24_index].group_idx;
		/* ... for each covered entry i: */
		if (!lpm->tbl8[i].valid ||
				lpm->tbl8[i].depth <= depth) {
			struct rte_lpm_tbl_entry new_tbl8_entry = {
				/* ... */
				.depth = depth,
				.valid_group = lpm->tbl8[i].valid_group,
			};
			/* Set the tbl8 entry in one go to avoid races. */
			__atomic_store(&lpm->tbl8[i], &new_tbl8_entry,
					__ATOMIC_RELAXED);
		}
	}
	/* ... */
}

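/*
 * Index math used above (editorial note): the top 24 bits of the address
 * select the tbl24 slot and the low 8 bits select the entry inside the
 * 256-entry tbl8 group:
 *
 *	tbl24_index = ip_masked >> 8;
 *	tbl8_index = tbl8_group_index * RTE_LPM_TBL8_GROUP_NUM_ENTRIES
 *			+ (ip_masked & 0xFF);
 */
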
int
rte_lpm_add(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
		uint32_t next_hop)
{
	/* ... */
	/* Check user arguments. */
	if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
		return -EINVAL;

	ip_masked = ip & depth_to_mask(depth);

	/* Add the rule to the rule table. */
	rule_index = rule_add(lpm, ip_masked, depth, next_hop);

	/* Skip the table-entry update if the rule is identical to the one
	 * already in the rules table. */
	if (rule_index == -EEXIST)
		return 0;
	/* ... */
	if (depth <= MAX_DEPTH_TBL24) {
		status = add_depth_small(lpm, ip_masked, depth, next_hop);
	} else { /* If depth > MAX_DEPTH_TBL24 */
		status = add_depth_big(lpm, ip_masked, depth, next_hop);
		/*
		 * If add fails due to exhaustion of tbl8 extensions delete
		 * the rule that was added to the rule table.
		 */
		//rule_delete(lpm, rule_index, depth);
	}
	/* ... */
}

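/*
 * Usage sketch (hypothetical values; RTE_IPV4() is from rte_ip.h,
 * rte_lpm_lookup() from rte_lpm.h, and handle() is a stand-in):
 *
 *	// 10.0.0.0/8 -> next hop 5
 *	if (rte_lpm_add(lpm, RTE_IPV4(10, 0, 0, 0), 8, 5) == 0) {
 *		uint32_t nh;
 *		if (rte_lpm_lookup(lpm, RTE_IPV4(10, 1, 2, 3), &nh) == 0)
 *			handle(nh);	// nh == 5: longest match is the /8
 *	}
 */
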
/*
 * Look for a rule in the high-level rules table.
 */
int
rte_lpm_is_rule_present(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
		uint32_t *next_hop)
{
	/* ... */
	/* Check user arguments. */
	if ((lpm == NULL) || (next_hop == NULL) ||
			(depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
		return -EINVAL;

	/* Look for the rule using rule_find. */
	ip_masked = ip & depth_to_mask(depth);
	rule_index = rule_find(lpm, ip_masked, depth);

	if (rule_index >= 0) {
		*next_hop = lpm->rules_tbl[rule_index].next_hop;
		return 1;
	}

	/* If the rule is not found return 0. */
	return 0;
}

static int32_t
find_previous_rule(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
		uint8_t *sub_rule_depth)
{
	/* Search shallower depths for a covering rule; -1 if none exists. */
	for (prev_depth = (uint8_t)(depth - 1); prev_depth > 0; prev_depth--) {
		/* ... */
	}
	return -1;
}

static int32_t
delete_depth_small(struct rte_lpm *lpm, uint32_t ip_masked,
	uint8_t depth, uint32_t sub_rule_nhop, uint8_t sub_rule_depth)
{
	/* ... */
	tbl24_range = depth_to_range(depth);

	/*
	 * Firstly check for a replacement rule. With no replacement, the
	 * entries covered by the deleted rule are invalidated; otherwise
	 * they are rewritten to point at the replacement rule.
	 */
	if (/* ... no replacement rule (condition elided) ... */) {
		/* For each covered tbl24 slot i: */
		if (lpm->tbl24[i].valid_group == 0 &&
				lpm->tbl24[i].depth <= depth) {
			__atomic_store(&lpm->tbl24[i],
				&zero_tbl24_entry, __ATOMIC_RELEASE);
		} else if (lpm->tbl24[i].valid_group == 1) {
			/*
			 * If the tbl24 entry is extended, there has
			 * to be a rule with depth >= 25 in the
			 * associated tbl8 group, so only invalidate
			 * the entries not deeper than this rule.
			 */
			tbl8_group_index = lpm->tbl24[i].group_idx;
			/* ... for each entry j of that group: */
			if (lpm->tbl8[j].depth <= depth)
				lpm->tbl8[j].valid = INVALID;
		}
	} else {
		/* A replacement rule exists: rewrite covered entries. */
		struct rte_lpm_tbl_entry new_tbl24_entry = {
			/* ... */
			.depth = sub_rule_depth,
		};
		struct rte_lpm_tbl_entry new_tbl8_entry = {
			/* ... */
			.depth = sub_rule_depth,
		};

		/* For each covered tbl24 slot i: */
		if (lpm->tbl24[i].valid_group == 0 &&
				lpm->tbl24[i].depth <= depth) {
			__atomic_store(&lpm->tbl24[i], &new_tbl24_entry,
					__ATOMIC_RELEASE);
		} else if (lpm->tbl24[i].valid_group == 1) {
			/*
			 * Extended entry: again there has to be a rule
			 * with depth >= 25 in the tbl8 group, so only
			 * rewrite the entries not deeper than this rule.
			 */
			tbl8_group_index = lpm->tbl24[i].group_idx;
			/* ... for each entry j of that group: */
			if (lpm->tbl8[j].depth <= depth)
				__atomic_store(&lpm->tbl8[j],
					&new_tbl8_entry,
					__ATOMIC_RELAXED);
		}
	}
	/* ... */
}

/*
 * Checks if a tbl8 group can be recycled.
 *
 * Return of -EEXIST means tbl8 is in use and thus can not be recycled.
 * Return of -EINVAL means tbl8 is empty and thus can be recycled.
 * Return of value > -1 means tbl8 is in use but has all the same values
 * and thus can be recycled.
 */
static int32_t
tbl8_recycle_check(struct rte_lpm_tbl_entry *tbl8, uint32_t tbl8_group_start)
{
	/* ... */
	/*
	 * Check the first entry of the given tbl8. If it is invalid we know
	 * this tbl8 does not contain any rule with a depth < RTE_LPM_MAX_DEPTH
	 * and thus the table can not be recycled.
	 */
	if (tbl8[tbl8_group_start].valid) {
		/*
		 * If the first entry is valid, check whether its depth is at
		 * most 24 and, if so, that all other entries share this depth.
		 */
		if (tbl8[tbl8_group_start].depth <= MAX_DEPTH_TBL24) {
			for (i = tbl8_group_start + 1; i < tbl8_group_end; i++) {
				if (tbl8[i].depth !=
						tbl8[tbl8_group_start].depth)
					return -EEXIST;
			}
			/* All entries are the same: return the tbl8 index. */
			return tbl8_group_start;
		}
		return -EEXIST;
	}

	/* First entry invalid: recyclable only if all others are too. */
	for (i = tbl8_group_start + 1; i < tbl8_group_end; i++) {
		if (tbl8[i].valid)
			return -EEXIST;
	}
	/* If no valid entries are found then return -EINVAL. */
	return -EINVAL;
}

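/*
 * Editorial example: after a delete, a group whose 256 entries are all
 * valid with the same depth <= 24 collapses back into one tbl24 entry
 * (return value > -1), while a fully invalid group is simply freed
 * (-EINVAL).
 */
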
static int32_t
delete_depth_big(struct rte_lpm *lpm, uint32_t ip_masked,
	uint8_t depth, uint32_t sub_rule_nhop, uint8_t sub_rule_depth)
{
	/* ... */
	/* Calculate the index into tbl8 and the range to modify. */
	tbl8_group_index = lpm->tbl24[tbl24_index].group_idx;
	tbl8_group_start = tbl8_group_index * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
	tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
	tbl8_range = depth_to_range(depth);

	/* With no replacement rule, invalidate each covered entry i that
	 * is not deeper than the deleted rule: */
	if (lpm->tbl8[i].depth <= depth)
		lpm->tbl8[i].valid = INVALID;
	/* ... */
	/* With a replacement rule, rewrite those entries instead: */
	struct rte_lpm_tbl_entry new_tbl8_entry = {
		/* ... */
		.depth = sub_rule_depth,
		.valid_group = lpm->tbl8[tbl8_group_start].valid_group,
		.next_hop = sub_rule_nhop,
	};
	/* ... */
	if (lpm->tbl8[i].depth <= depth)
		__atomic_store(&lpm->tbl8[i], &new_tbl8_entry,
				__ATOMIC_RELAXED);
	/* ... */
	/* Check whether the tbl8 group can now be freed or recycled. */
	tbl8_recycle_index = tbl8_recycle_check(lpm->tbl8, tbl8_group_start);

	if (tbl8_recycle_index == -EINVAL) {
		/* Set tbl24 before freeing the tbl8 to avoid races. */
		lpm->tbl24[tbl24_index].valid = 0;
		/* ... */
		tbl8_free(lpm->tbl8, tbl8_group_start);
	} else if (tbl8_recycle_index > -1) {
		/* All tbl8 entries are identical: fold the group back
		 * into a single tbl24 entry, then free the group. */
		struct rte_lpm_tbl_entry new_tbl24_entry = {
			.next_hop = lpm->tbl8[tbl8_recycle_index].next_hop,
			/* ... */
			.depth = lpm->tbl8[tbl8_recycle_index].depth,
		};
		__atomic_store(&lpm->tbl24[tbl24_index], &new_tbl24_entry,
				__ATOMIC_RELAXED);
		/* ... */
		tbl8_free(lpm->tbl8, tbl8_group_start);
	}
	/* ... */
}

int
rte_lpm_delete(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
	uint32_t sub_rule_nhop, uint8_t sub_rule_depth)
{
	/* ... */
	/*
	 * Check input arguments. Note: IP must be a positive integer of 32
	 * bits.
	 */
	if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH)) {
		return -EINVAL;
	}

	ip_masked = ip & depth_to_mask(depth);

	/*
	 * Find the index of the input rule, that needs to be deleted, in the
	 * rule table.
	 */
	rule_to_delete_index = rule_find(lpm, ip_masked, depth);

	/*
	 * Check if rule_to_delete_index was found. If no rule was found the
	 * function rule_find returns -EINVAL.
	 */
	if (rule_to_delete_index < 0)
		return -EINVAL;

	/* Delete the rule from the rule table. */
	rule_delete(lpm, rule_to_delete_index, depth);

	/*
	 * Find rule to replace the rule_to_delete. If there is no rule to
	 * replace the rule_to_delete we return -1 and invalidate the table
	 * entries associated with this rule.
	 */
	//sub_rule_index = find_previous_rule(lpm, ip, depth, &sub_rule_depth);

	/*
	 * If the input depth value is less than 25 use function
	 * delete_depth_small, otherwise use delete_depth_big.
	 */
	if (depth <= MAX_DEPTH_TBL24) {
		return delete_depth_small(lpm, ip_masked, depth,
				sub_rule_nhop, sub_rule_depth);
	} else { /* If depth > MAX_DEPTH_TBL24 */
		return delete_depth_big(lpm, ip_masked, depth, sub_rule_nhop,
				sub_rule_depth);
	}
}

/* rte_lpm_delete_all(): zero the rule info and all tables. */
void
rte_lpm_delete_all(struct rte_lpm *lpm)
{
	memset(lpm->rule_info, 0, sizeof(lpm->rule_info));
	memset(lpm->tbl24, 0, sizeof(lpm->tbl24));
	memset(lpm->tbl8, 0, sizeof(lpm->tbl8[0])
			* RTE_LPM_TBL8_GROUP_NUM_ENTRIES * lpm->number_tbl8s);
	memset(lpm->rules_tbl, 0, sizeof(lpm->rules_tbl[0]) * lpm->max_rules);
}