/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <sys/param.h>
#include <sys/ctype.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/rwlock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/kernel.h>

//#include <netinet6/rte_tailq.h>
int errno = 0, rte_errno = 0;

#include "rte_shim.h"
#include "rte_lpm6.h"

#define RTE_LPM6_TBL24_NUM_ENTRIES        (1 << 24)
#define RTE_LPM6_TBL8_GROUP_NUM_ENTRIES         256
#define RTE_LPM6_TBL8_MAX_NUM_GROUPS      (1 << 21)

#define RTE_LPM6_VALID_EXT_ENTRY_BITMASK 0xA0000000
#define RTE_LPM6_LOOKUP_SUCCESS          0x20000000
#define RTE_LPM6_TBL8_BITMASK            0x001FFFFF

#define ADD_FIRST_BYTE                            3
#define LOOKUP_FIRST_BYTE                         4
#define BYTE_SIZE                                 8
#define BYTES2_SIZE                              16

#define RULE_HASH_TABLE_EXTRA_SPACE              64
#define TBL24_IND                        UINT32_MAX

#define lpm6_tbl8_gindex next_hop

/** Flags for setting an entry as valid/invalid. */
enum valid_flag {
	INVALID = 0,
	VALID
};

#if 0
TAILQ_HEAD(rte_lpm6_list, rte_tailq_entry);

static struct rte_tailq_elem rte_lpm6_tailq = {
	.name = "RTE_LPM6",
};
EAL_REGISTER_TAILQ(rte_lpm6_tailq)
#endif

/** Tbl entry structure. It is the same for both tbl24 and tbl8 */
struct rte_lpm6_tbl_entry {
	uint32_t next_hop:	21;  /**< Next hop / next table to be checked. */
	uint32_t depth	:8;      /**< Rule depth. */

	/* Flags. */
	uint32_t valid     :1;   /**< Validation flag. */
	uint32_t valid_group :1; /**< Group validation flag. */
	uint32_t ext_entry :1;   /**< External entry. */
};
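
/*
 * Viewed as a raw uint32_t (as lookup_step() does below), and assuming the
 * compiler packs these bit-fields LSB-first (which the lookup code relies
 * on), the layout lines up with the masks defined above: bits 0-20 hold
 * next_hop (RTE_LPM6_TBL8_BITMASK), bit 29 is valid (RTE_LPM6_LOOKUP_SUCCESS)
 * and bit 31 is ext_entry, so valid and ext_entry together form
 * RTE_LPM6_VALID_EXT_ENTRY_BITMASK (0xA0000000).
 */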

/** Rules tbl entry structure. */
struct rte_lpm6_rule {
	uint8_t ip[RTE_LPM6_IPV6_ADDR_SIZE]; /**< Rule IP address. */
	uint32_t next_hop; /**< Rule next hop. */
	uint8_t depth; /**< Rule depth. */
};

/** Rules tbl entry key. */
struct rte_lpm6_rule_key {
	uint8_t ip[RTE_LPM6_IPV6_ADDR_SIZE]; /**< Rule IP address. */
	uint8_t depth; /**< Rule depth. */
};

/* Header of tbl8 */
struct rte_lpm_tbl8_hdr {
	uint32_t owner_tbl_ind; /**< owner table: TBL24_IND if owner is tbl24,
				 *  otherwise index of tbl8
				 */
	uint32_t owner_entry_ind; /**< index of the owner table entry where
				   *  pointer to the tbl8 is stored
				   */
	uint32_t ref_cnt; /**< table reference counter */
};

/** LPM6 structure. */
struct rte_lpm6 {
	struct rte_lpm6_external ext; /* Storage used by the algo wrapper */
	/* LPM metadata. */
	char name[RTE_LPM6_NAMESIZE]; /**< Name of the lpm. */
	uint32_t max_rules; /**< Max number of rules. */
	uint32_t used_rules; /**< Used rules so far. */
	uint32_t number_tbl8s; /**< Number of tbl8s to allocate. */

	/* LPM Tables. */
	//struct rte_hash *rules_tbl; /**< LPM rules. */
	struct rte_lpm6_tbl_entry tbl24[RTE_LPM6_TBL24_NUM_ENTRIES]
			__rte_cache_aligned; /**< LPM tbl24 table. */

	uint32_t *tbl8_pool; /**< pool of indexes of free tbl8s */
	uint32_t tbl8_pool_pos; /**< current position in the tbl8 pool */

	struct rte_lpm_tbl8_hdr *tbl8_hdrs; /* array of tbl8 headers */

	struct rte_lpm6_tbl_entry tbl8[0]
			__rte_cache_aligned; /**< LPM tbl8 table. */
};
/*
 * Takes an array of uint8_t (IPv6 address) and masks it using the depth:
 * the first 'depth' bits are left untouched and the rest are set to 0.
 */
static inline void
ip6_mask_addr(uint8_t *ip, uint8_t depth)
{
	int16_t part_depth, mask;
	int i;

	part_depth = depth;

	for (i = 0; i < RTE_LPM6_IPV6_ADDR_SIZE; i++) {
		if (part_depth < BYTE_SIZE && part_depth >= 0) {
			mask = (uint16_t)(~(UINT8_MAX >> part_depth));
			ip[i] = (uint8_t)(ip[i] & mask);
		} else if (part_depth < 0)
			ip[i] = 0;

		part_depth -= BYTE_SIZE;
	}
}
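
/*
 * Example: for 2001:db8::1 with depth 32, part_depth stays >= BYTE_SIZE
 * through bytes 0-3 (2001:0db8), which are left intact, and reaches 0 and
 * below for bytes 4-15, which are zeroed, leaving the prefix 2001:db8::.
 */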

/* copy ipv6 address */
static inline void
ip6_copy_addr(uint8_t *dst, const uint8_t *src)
{
	rte_memcpy(dst, src, RTE_LPM6_IPV6_ADDR_SIZE);
}

#if 0
/*
 * LPM6 rule hash function
 *
 * It's used as a hash function for the rte_hash
 * containing rules
 */
static inline uint32_t
rule_hash(const void *data, __rte_unused uint32_t data_len,
		uint32_t init_val)
{
	return rte_jhash(data, sizeof(struct rte_lpm6_rule_key), init_val);
}
#endif

/*
 * Init pool of free tbl8 indexes
 */
static void
tbl8_pool_init(struct rte_lpm6 *lpm)
{
	uint32_t i;

	/* put entire range of indexes to the tbl8 pool */
	for (i = 0; i < lpm->number_tbl8s; i++)
		lpm->tbl8_pool[i] = i;

	lpm->tbl8_pool_pos = 0;
}

/*
 * Get an index of a free tbl8 from the pool
 */
static inline int
tbl8_get(struct rte_lpm6 *lpm, uint32_t *tbl8_ind)
{
	if (lpm->tbl8_pool_pos == lpm->number_tbl8s)
		/* no more free tbl8 */
		return -ENOSPC;

	/* next index */
	*tbl8_ind = lpm->tbl8_pool[lpm->tbl8_pool_pos++];
	return 0;
}

/*
 * Put an index of a free tbl8 back to the pool
 */
static inline int
tbl8_put(struct rte_lpm6 *lpm, uint32_t tbl8_ind)
{
	if (lpm->tbl8_pool_pos == 0)
		/* pool is full */
		return -ENOSPC;

	lpm->tbl8_pool[--lpm->tbl8_pool_pos] = tbl8_ind;
	return 0;
}

/*
 * Returns number of tbl8s available in the pool
 */
static inline uint32_t
tbl8_available(struct rte_lpm6 *lpm)
{
	return lpm->number_tbl8s - lpm->tbl8_pool_pos;
}
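
/*
 * The pool behaves as a stack over the tbl8_pool array: tbl8_get() hands
 * out the index at tbl8_pool_pos and advances the position, tbl8_put()
 * steps it back and stores the returned index there, so a freed tbl8 is
 * the first one handed out again.
 */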

#if 0
/*
 * Init a rule key.
 * note that ip must be already masked
 */
static inline void
rule_key_init(struct rte_lpm6_rule_key *key, uint8_t *ip, uint8_t depth)
{
	ip6_copy_addr(key->ip, ip);
	key->depth = depth;
}

/*
 * Rebuild the entire LPM tree by reinserting all rules
 */
static void
rebuild_lpm(struct rte_lpm6 *lpm)
{
	uint64_t next_hop;
	struct rte_lpm6_rule_key *rule_key;
	uint32_t iter = 0;

	while (rte_hash_iterate(lpm->rules_tbl, (void *) &rule_key,
			(void **) &next_hop, &iter) >= 0)
		rte_lpm6_add(lpm, rule_key->ip, rule_key->depth,
			(uint32_t) next_hop);
}
#endif

/*
 * Allocates memory for LPM object
 */
struct rte_lpm6 *
rte_lpm6_create(const char *name, int socket_id,
		const struct rte_lpm6_config *config)
{
	char mem_name[RTE_LPM6_NAMESIZE];
	struct rte_lpm6 *lpm = NULL;
	//struct rte_tailq_entry *te;
	uint64_t mem_size;
	//struct rte_lpm6_list *lpm_list;
	//struct rte_hash *rules_tbl = NULL;
	uint32_t *tbl8_pool = NULL;
	struct rte_lpm_tbl8_hdr *tbl8_hdrs = NULL;

	//lpm_list = RTE_TAILQ_CAST(rte_lpm6_tailq.head, rte_lpm6_list);

	RTE_BUILD_BUG_ON(sizeof(struct rte_lpm6_tbl_entry) != sizeof(uint32_t));

	/* Check user arguments. */
	if ((name == NULL) || (socket_id < -1) || (config == NULL) ||
			config->number_tbl8s > RTE_LPM6_TBL8_MAX_NUM_GROUPS) {
		rte_errno = EINVAL;
		return NULL;
	}

#if 0
	/* create rules hash table */
	snprintf(mem_name, sizeof(mem_name), "LRH_%s", name);
	struct rte_hash_parameters rule_hash_tbl_params = {
		.entries = config->max_rules * 1.2 +
			RULE_HASH_TABLE_EXTRA_SPACE,
		.key_len = sizeof(struct rte_lpm6_rule_key),
		.hash_func = rule_hash,
		.hash_func_init_val = 0,
		.name = mem_name,
		.reserved = 0,
		.socket_id = socket_id,
		.extra_flag = 0
	};

	rules_tbl = rte_hash_create(&rule_hash_tbl_params);
	if (rules_tbl == NULL) {
		RTE_LOG(ERR, LPM, "LPM rules hash table allocation failed: %s (%d)",
				rte_strerror(rte_errno), rte_errno);
		goto fail_wo_unlock;
	}
#endif

	/* allocate tbl8 indexes pool */
	tbl8_pool = rte_malloc(NULL,
			sizeof(uint32_t) * config->number_tbl8s,
			RTE_CACHE_LINE_SIZE);
	if (tbl8_pool == NULL) {
		RTE_LOG(ERR, LPM, "LPM tbl8 pool allocation failed: %s (%d)",
				rte_strerror(rte_errno), rte_errno);
		rte_errno = ENOMEM;
		goto fail_wo_unlock;
	}

	/* allocate tbl8 headers */
	tbl8_hdrs = rte_malloc(NULL,
			sizeof(struct rte_lpm_tbl8_hdr) * config->number_tbl8s,
			RTE_CACHE_LINE_SIZE);
	if (tbl8_hdrs == NULL) {
		RTE_LOG(ERR, LPM, "LPM tbl8 headers allocation failed: %s (%d)",
				rte_strerror(rte_errno), rte_errno);
		rte_errno = ENOMEM;
		goto fail_wo_unlock;
	}

	snprintf(mem_name, sizeof(mem_name), "LPM_%s", name);

	/* Determine the amount of memory to allocate. */
	mem_size = sizeof(*lpm) + (sizeof(lpm->tbl8[0]) *
			RTE_LPM6_TBL8_GROUP_NUM_ENTRIES * config->number_tbl8s);

#if 0
	rte_mcfg_tailq_write_lock();

	/* Guarantee there's no existing */
	TAILQ_FOREACH(te, lpm_list, next) {
		lpm = (struct rte_lpm6 *) te->data;
		if (strncmp(name, lpm->name, RTE_LPM6_NAMESIZE) == 0)
			break;
	}
	lpm = NULL;
	if (te != NULL) {
		rte_errno = EEXIST;
		goto fail;
	}

	/* allocate tailq entry */
	te = rte_zmalloc("LPM6_TAILQ_ENTRY", sizeof(*te), 0);
	if (te == NULL) {
		RTE_LOG(ERR, LPM, "Failed to allocate tailq entry!\n");
		rte_errno = ENOMEM;
		goto fail;
	}
#endif

	/* Allocate memory to store the LPM data structures. */
	lpm = rte_zmalloc_socket(mem_name, (size_t)mem_size,
			RTE_CACHE_LINE_SIZE, socket_id);

	if (lpm == NULL) {
		RTE_LOG(ERR, LPM, "LPM memory allocation failed\n");
		//rte_free(te);
		rte_errno = ENOMEM;
		goto fail;
	}

	/* Save user arguments. */
	//lpm->max_rules = config->max_rules;
	lpm->number_tbl8s = config->number_tbl8s;
	strlcpy(lpm->name, name, sizeof(lpm->name));
	//lpm->rules_tbl = rules_tbl;
	lpm->tbl8_pool = tbl8_pool;
	lpm->tbl8_hdrs = tbl8_hdrs;

	/* init the stack */
	tbl8_pool_init(lpm);

	//te->data = (void *) lpm;

	//TAILQ_INSERT_TAIL(lpm_list, te, next);
	/* the matching write_lock above is disabled, keep unlock disabled too */
	//rte_mcfg_tailq_write_unlock();
	return lpm;

fail:
	//rte_mcfg_tailq_write_unlock();

fail_wo_unlock:
	rte_free(tbl8_hdrs);
	rte_free(tbl8_pool);
	//rte_hash_free(rules_tbl);

	return NULL;
}

#if 0
/*
 * Find an existing lpm table and return a pointer to it.
 */
struct rte_lpm6 *
rte_lpm6_find_existing(const char *name)
{
	struct rte_lpm6 *l = NULL;
	struct rte_tailq_entry *te;
	struct rte_lpm6_list *lpm_list;

	lpm_list = RTE_TAILQ_CAST(rte_lpm6_tailq.head, rte_lpm6_list);

	rte_mcfg_tailq_read_lock();
	TAILQ_FOREACH(te, lpm_list, next) {
		l = (struct rte_lpm6 *) te->data;
		if (strncmp(name, l->name, RTE_LPM6_NAMESIZE) == 0)
			break;
	}
	rte_mcfg_tailq_read_unlock();

	if (te == NULL) {
		rte_errno = ENOENT;
		return NULL;
	}

	return l;
}
#endif

/*
 * Deallocates memory for given LPM table.
 */
void
rte_lpm6_free(struct rte_lpm6 *lpm)
{
	/* Check user arguments. */
	if (lpm == NULL)
		return;

#if 0
	struct rte_lpm6_list *lpm_list;
	struct rte_tailq_entry *te;

	lpm_list = RTE_TAILQ_CAST(rte_lpm6_tailq.head, rte_lpm6_list);

	rte_mcfg_tailq_write_lock();

	/* find our tailq entry */
	TAILQ_FOREACH(te, lpm_list, next) {
		if (te->data == (void *) lpm)
			break;
	}

	if (te != NULL)
		TAILQ_REMOVE(lpm_list, te, next);

	rte_mcfg_tailq_write_unlock();
#endif

	rte_free(lpm->tbl8_hdrs);
	rte_free(lpm->tbl8_pool);
	//rte_hash_free(lpm->rules_tbl);
	rte_free(lpm);
	//rte_free(te);
}

#if 0
/* Find a rule */
static inline int
rule_find_with_key(struct rte_lpm6 *lpm,
		const struct rte_lpm6_rule_key *rule_key,
		uint32_t *next_hop)
{
	uint64_t hash_val;
	int ret;

	/* lookup for a rule */
	ret = rte_hash_lookup_data(lpm->rules_tbl, (const void *) rule_key,
		(void **) &hash_val);
	if (ret >= 0) {
		*next_hop = (uint32_t) hash_val;
		return 1;
	}

	return 0;
}

/* Find a rule */
static int
rule_find(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth,
		uint32_t *next_hop)
{
	struct rte_lpm6_rule_key rule_key;

	/* init a rule key */
	rule_key_init(&rule_key, ip, depth);

	return rule_find_with_key(lpm, &rule_key, next_hop);
}

/*
 * Checks if a rule already exists in the rules table and updates
 * the nexthop if so. Otherwise it adds a new rule if enough space is available.
 *
 * Returns:
 *    0 - next hop of existing rule is updated
 *    1 - new rule successfully added
 *   <0 - error
 */
static inline int
rule_add(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth, uint32_t next_hop)
{
	int ret, rule_exist;
	struct rte_lpm6_rule_key rule_key;
	uint32_t unused;

	/* init a rule key */
	rule_key_init(&rule_key, ip, depth);

	/* Scan through rule list to see if rule already exists. */
	rule_exist = rule_find_with_key(lpm, &rule_key, &unused);

	/*
	 * If rule does not exist check if there is space to add a new rule to
	 * this rule group. If there is no space return error.
	 */
	if (!rule_exist && lpm->used_rules == lpm->max_rules)
		return -ENOSPC;

	/* add the rule or update rules next hop */
	ret = rte_hash_add_key_data(lpm->rules_tbl, &rule_key,
		(void *)(uintptr_t) next_hop);
	if (ret < 0)
		return ret;

	/* Increment the used rules counter for this rule group. */
	if (!rule_exist) {
		lpm->used_rules++;
		return 1;
	}

	return 0;
}
#endif

/*
 * Function that expands a rule across the data structure when a less-generic
 * one has been added before. It assures that every possible combination of bits
 * in the IP address returns a match.
 */
static void
expand_rule(struct rte_lpm6 *lpm, uint32_t tbl8_gindex, uint8_t old_depth,
		uint8_t new_depth, uint32_t next_hop, uint8_t valid)
{
	uint32_t tbl8_group_end, tbl8_gindex_next, j;

	tbl8_group_end = tbl8_gindex + RTE_LPM6_TBL8_GROUP_NUM_ENTRIES;

	struct rte_lpm6_tbl_entry new_tbl8_entry = {
		.valid = valid,
		.valid_group = valid,
		.depth = new_depth,
		.next_hop = next_hop,
		.ext_entry = 0,
	};

	for (j = tbl8_gindex; j < tbl8_group_end; j++) {
		if (!lpm->tbl8[j].valid || (lpm->tbl8[j].ext_entry == 0
				&& lpm->tbl8[j].depth <= old_depth)) {

			lpm->tbl8[j] = new_tbl8_entry;

		} else if (lpm->tbl8[j].ext_entry == 1) {

			tbl8_gindex_next = lpm->tbl8[j].lpm6_tbl8_gindex
					* RTE_LPM6_TBL8_GROUP_NUM_ENTRIES;
			expand_rule(lpm, tbl8_gindex_next, old_depth, new_depth,
					next_hop, valid);
		}
	}
}

/*
 * Init a tbl8 header
 */
static inline void
init_tbl8_header(struct rte_lpm6 *lpm, uint32_t tbl_ind,
		uint32_t owner_tbl_ind, uint32_t owner_entry_ind)
{
	struct rte_lpm_tbl8_hdr *tbl_hdr = &lpm->tbl8_hdrs[tbl_ind];
	tbl_hdr->owner_tbl_ind = owner_tbl_ind;
	tbl_hdr->owner_entry_ind = owner_entry_ind;
	tbl_hdr->ref_cnt = 0;
}

/*
 * Calculate index to the table based on the number and position
 * of the bytes being inspected in this step.
 */
static uint32_t
get_bitshift(const uint8_t *ip, uint8_t first_byte, uint8_t bytes)
{
	uint32_t entry_ind, i;
	int8_t bitshift;

	entry_ind = 0;
	for (i = first_byte; i < (uint32_t)(first_byte + bytes); i++) {
		bitshift = (int8_t)((bytes - i)*BYTE_SIZE);

		if (bitshift < 0)
			bitshift = 0;
		entry_ind = entry_ind | ip[i-1] << bitshift;
	}

	return entry_ind;
}
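
/*
 * Example: with first_byte = 1 and bytes = ADD_FIRST_BYTE (3) this computes
 * ip[0] << 16 | ip[1] << 8 | ip[2], i.e. the same 24-bit tbl24 index that
 * rte_lpm6_lookup() builds explicitly; with bytes = 1 it degenerates to the
 * single byte ip[first_byte - 1].
 */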

/*
 * Simulate adding a new route to the LPM, counting the number
 * of new tables that will be needed.
 *
 * It returns 0 on success, or 1 if
 * the process needs to be continued by calling the function again.
 */
static inline int
simulate_add_step(struct rte_lpm6 *lpm, struct rte_lpm6_tbl_entry *tbl,
		struct rte_lpm6_tbl_entry **next_tbl, const uint8_t *ip,
		uint8_t bytes, uint8_t first_byte, uint8_t depth,
		uint32_t *need_tbl_nb)
{
	uint32_t entry_ind;
	uint8_t bits_covered;
	uint32_t next_tbl_ind;

	/*
	 * Calculate index to the table based on the number and position
	 * of the bytes being inspected in this step.
	 */
	entry_ind = get_bitshift(ip, first_byte, bytes);

	/* Number of bits covered in this step */
	bits_covered = (uint8_t)((bytes+first_byte-1)*BYTE_SIZE);

	if (depth <= bits_covered) {
		*need_tbl_nb = 0;
		return 0;
	}

	if (tbl[entry_ind].valid == 0 || tbl[entry_ind].ext_entry == 0) {
		/* from this point on a new table is needed on each level
		 * that is not covered yet
		 */
		depth -= bits_covered;
		uint32_t cnt = depth >> 3; /* depth / BYTE_SIZE */
		if (depth & 7) /* 0b00000111 */
			/* if depth % 8 > 0 then one more table is needed
			 * for those last bits
			 */
			cnt++;

		*need_tbl_nb = cnt;
		return 0;
	}

	next_tbl_ind = tbl[entry_ind].lpm6_tbl8_gindex;
	*next_tbl = &(lpm->tbl8[next_tbl_ind *
			RTE_LPM6_TBL8_GROUP_NUM_ENTRIES]);
	*need_tbl_nb = 0;
	return 1;
}

/*
 * Partially adds a new route to the data structure (tbl24+tbl8s).
 * It returns 0 on success, a negative number on failure, or 1 if
 * the process needs to be continued by calling the function again.
 */
static inline int
add_step(struct rte_lpm6 *lpm, struct rte_lpm6_tbl_entry *tbl,
		uint32_t tbl_ind, struct rte_lpm6_tbl_entry **next_tbl,
		uint32_t *next_tbl_ind, uint8_t *ip, uint8_t bytes,
		uint8_t first_byte, uint8_t depth, uint32_t next_hop,
		uint8_t is_new_rule)
{
	uint32_t entry_ind, tbl_range, tbl8_group_start, tbl8_group_end, i;
	uint32_t tbl8_gindex;
	uint8_t bits_covered;
	int ret;

	/*
	 * Calculate index to the table based on the number and position
	 * of the bytes being inspected in this step.
	 */
	entry_ind = get_bitshift(ip, first_byte, bytes);

	/* Number of bits covered in this step */
	bits_covered = (uint8_t)((bytes+first_byte-1)*BYTE_SIZE);

	/*
	 * If depth is smaller than or equal to this number (ie this is the
	 * last step) expand the rule across the relevant positions in the table.
	 */
	if (depth <= bits_covered) {
		tbl_range = 1 << (bits_covered - depth);

		for (i = entry_ind; i < (entry_ind + tbl_range); i++) {
			if (!tbl[i].valid || (tbl[i].ext_entry == 0 &&
					tbl[i].depth <= depth)) {

				struct rte_lpm6_tbl_entry new_tbl_entry = {
					.next_hop = next_hop,
					.depth = depth,
					.valid = VALID,
					.valid_group = VALID,
					.ext_entry = 0,
				};

				tbl[i] = new_tbl_entry;

			} else if (tbl[i].ext_entry == 1) {

				/*
				 * If tbl entry is valid and extended calculate the index
				 * into next tbl8 and expand the rule across the data structure.
				 */
				tbl8_gindex = tbl[i].lpm6_tbl8_gindex *
						RTE_LPM6_TBL8_GROUP_NUM_ENTRIES;
				expand_rule(lpm, tbl8_gindex, depth, depth,
						next_hop, VALID);
			}
		}

		/* update tbl8 rule reference counter */
		if (tbl_ind != TBL24_IND && is_new_rule)
			lpm->tbl8_hdrs[tbl_ind].ref_cnt++;

		return 0;
	}
	/*
	 * If this is not the last step just fill one position
	 * and calculate the index to the next table.
	 */
	else {
		/* If it's invalid a new tbl8 is needed */
		if (!tbl[entry_ind].valid) {
			/* get a new table */
			ret = tbl8_get(lpm, &tbl8_gindex);
			if (ret != 0)
				return -ENOSPC;

			/* invalidate all new tbl8 entries */
			tbl8_group_start = tbl8_gindex *
					RTE_LPM6_TBL8_GROUP_NUM_ENTRIES;
			memset(&lpm->tbl8[tbl8_group_start], 0,
					RTE_LPM6_TBL8_GROUP_NUM_ENTRIES *
					sizeof(struct rte_lpm6_tbl_entry));

			/* init the new table's header:
			 *   save the reference to the owner table
			 */
			init_tbl8_header(lpm, tbl8_gindex, tbl_ind, entry_ind);

			/* reference to a new tbl8 */
			struct rte_lpm6_tbl_entry new_tbl_entry = {
				.lpm6_tbl8_gindex = tbl8_gindex,
				.depth = 0,
				.valid = VALID,
				.valid_group = VALID,
				.ext_entry = 1,
			};

			tbl[entry_ind] = new_tbl_entry;

			/* update the current table's reference counter */
			if (tbl_ind != TBL24_IND)
				lpm->tbl8_hdrs[tbl_ind].ref_cnt++;
		}
		/*
		 * If it's valid but not extended the rule that was stored
		 * here needs to be moved to the next table.
		 */
		else if (tbl[entry_ind].ext_entry == 0) {
			/* get a new tbl8 index */
			ret = tbl8_get(lpm, &tbl8_gindex);
			if (ret != 0)
				return -ENOSPC;

			tbl8_group_start = tbl8_gindex *
					RTE_LPM6_TBL8_GROUP_NUM_ENTRIES;
			tbl8_group_end = tbl8_group_start +
					RTE_LPM6_TBL8_GROUP_NUM_ENTRIES;

			struct rte_lpm6_tbl_entry tbl_entry = {
				.next_hop = tbl[entry_ind].next_hop,
				.depth = tbl[entry_ind].depth,
				.valid = VALID,
				.valid_group = VALID,
				.ext_entry = 0
			};

			/* Populate new tbl8 with tbl value. */
			for (i = tbl8_group_start; i < tbl8_group_end; i++)
				lpm->tbl8[i] = tbl_entry;

			/* init the new table's header:
			 *   save the reference to the owner table
			 */
			init_tbl8_header(lpm, tbl8_gindex, tbl_ind, entry_ind);

			/*
			 * Update tbl entry to point to new tbl8 entry. Note: The
			 * ext_flag and tbl8_index need to be updated simultaneously,
			 * so assign whole structure in one go.
			 */
			struct rte_lpm6_tbl_entry new_tbl_entry = {
				.lpm6_tbl8_gindex = tbl8_gindex,
				.depth = 0,
				.valid = VALID,
				.valid_group = VALID,
				.ext_entry = 1,
			};

			tbl[entry_ind] = new_tbl_entry;

			/* update the current table's reference counter */
			if (tbl_ind != TBL24_IND)
				lpm->tbl8_hdrs[tbl_ind].ref_cnt++;
		}

		*next_tbl_ind = tbl[entry_ind].lpm6_tbl8_gindex;
		*next_tbl = &(lpm->tbl8[*next_tbl_ind *
				RTE_LPM6_TBL8_GROUP_NUM_ENTRIES]);
	}

	return 1;
}
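
/*
 * Worked example for the "last step" branch of add_step() above: a /18
 * is resolved entirely in tbl24 (bits_covered = 24), so tbl_range =
 * 1 << (24 - 18) = 64 and the rule is written into the 64 consecutive
 * tbl24 entries that share its first 18 bits.
 */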

/*
 * Simulate adding a route to LPM
 *
 * Returns:
 *    0 on success
 *    -ENOSPC not enough tbl8 left
 */
static int
simulate_add(struct rte_lpm6 *lpm, const uint8_t *masked_ip, uint8_t depth)
{
	struct rte_lpm6_tbl_entry *tbl;
	struct rte_lpm6_tbl_entry *tbl_next = NULL;
	int ret, i;

	/* number of new tables needed for a step */
	uint32_t need_tbl_nb;
	/* total number of new tables needed */
	uint32_t total_need_tbl_nb;

	/* Inspect the first three bytes through tbl24 on the first step. */
	ret = simulate_add_step(lpm, lpm->tbl24, &tbl_next, masked_ip,
			ADD_FIRST_BYTE, 1, depth, &need_tbl_nb);
	total_need_tbl_nb = need_tbl_nb;
	/*
	 * Inspect one by one the rest of the bytes until
	 * the process is completed.
	 */
	for (i = ADD_FIRST_BYTE; i < RTE_LPM6_IPV6_ADDR_SIZE && ret == 1; i++) {
		tbl = tbl_next;
		ret = simulate_add_step(lpm, tbl, &tbl_next, masked_ip, 1,
				(uint8_t)(i + 1), depth, &need_tbl_nb);
		total_need_tbl_nb += need_tbl_nb;
	}

	if (tbl8_available(lpm) < total_need_tbl_nb)
		/* not enough tbl8 to add a rule */
		return -ENOSPC;

	return 0;
}
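
/*
 * rte_lpm6_add() below runs this dry pass first so that once add_step()
 * starts modifying tables it cannot run out of tbl8s halfway through and
 * leave a partially inserted route behind.
 */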

/*
 * Add a route
 */
int
rte_lpm6_add(struct rte_lpm6 *lpm, const uint8_t *ip, uint8_t depth,
		uint32_t next_hop, int is_new_rule)
{
	struct rte_lpm6_tbl_entry *tbl;
	struct rte_lpm6_tbl_entry *tbl_next = NULL;
	/* init to avoid compiler warning */
	uint32_t tbl_next_num = 123456;
	int status;
	uint8_t masked_ip[RTE_LPM6_IPV6_ADDR_SIZE];
	int i;

	/* Check user arguments. */
	if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM6_MAX_DEPTH))
		return -EINVAL;

	/* Copy the IP and mask it to avoid modifying user's input data. */
	ip6_copy_addr(masked_ip, ip);
	ip6_mask_addr(masked_ip, depth);

	/* Simulate adding a new route */
	int ret = simulate_add(lpm, masked_ip, depth);
	if (ret < 0)
		return ret;

#if 0
	/* Add the rule to the rule table. */
	int is_new_rule = rule_add(lpm, masked_ip, depth, next_hop);
	/* If there is no space available for new rule return error. */
	if (is_new_rule < 0)
		return is_new_rule;
#endif

	/* Inspect the first three bytes through tbl24 on the first step. */
	tbl = lpm->tbl24;
	status = add_step(lpm, tbl, TBL24_IND, &tbl_next, &tbl_next_num,
			masked_ip, ADD_FIRST_BYTE, 1, depth, next_hop,
			is_new_rule);
	assert(status >= 0);

	/*
	 * Inspect one by one the rest of the bytes until
	 * the process is completed.
	 */
	for (i = ADD_FIRST_BYTE; i < RTE_LPM6_IPV6_ADDR_SIZE && status == 1; i++) {
		tbl = tbl_next;
		status = add_step(lpm, tbl, tbl_next_num, &tbl_next,
				&tbl_next_num, masked_ip, 1, (uint8_t)(i + 1),
				depth, next_hop, is_new_rule);
		assert(status >= 0);
	}

	return status;
}

/*
 * Takes a pointer to a table entry and inspects one level.
 * The function returns 0 on lookup success, -ENOENT if no match was found
 * or 1 if the process needs to be continued by calling the function again.
 */
static inline int
lookup_step(const struct rte_lpm6 *lpm, const struct rte_lpm6_tbl_entry *tbl,
		const struct rte_lpm6_tbl_entry **tbl_next, const uint8_t *ip,
		uint8_t first_byte, uint32_t *next_hop)
{
	uint32_t tbl8_index, tbl_entry;

	/* Take the integer value from the pointer. */
	tbl_entry = *(const uint32_t *)tbl;

	/* If it is valid and extended we calculate the new pointer to return. */
	if ((tbl_entry & RTE_LPM6_VALID_EXT_ENTRY_BITMASK) ==
			RTE_LPM6_VALID_EXT_ENTRY_BITMASK) {

		tbl8_index = ip[first_byte-1] +
				((tbl_entry & RTE_LPM6_TBL8_BITMASK) *
				RTE_LPM6_TBL8_GROUP_NUM_ENTRIES);

		*tbl_next = &lpm->tbl8[tbl8_index];

		return 1;
	} else {
		/* If not extended then we can have a match. */
		*next_hop = ((uint32_t)tbl_entry & RTE_LPM6_TBL8_BITMASK);
		return (tbl_entry & RTE_LPM6_LOOKUP_SUCCESS) ? 0 : -ENOENT;
	}
}

/*
 * Looks up an IP
 */
int
rte_lpm6_lookup(const struct rte_lpm6 *lpm, const uint8_t *ip,
		uint32_t *next_hop)
{
	const struct rte_lpm6_tbl_entry *tbl;
	const struct rte_lpm6_tbl_entry *tbl_next = NULL;
	int status;
	uint8_t first_byte;
	uint32_t tbl24_index;

	/* DEBUG: Check user input arguments. */
	if ((lpm == NULL) || (ip == NULL) || (next_hop == NULL))
		return -EINVAL;

	first_byte = LOOKUP_FIRST_BYTE;
	tbl24_index = (ip[0] << BYTES2_SIZE) | (ip[1] << BYTE_SIZE) | ip[2];

	/* Calculate pointer to the first entry to be inspected */
	tbl = &lpm->tbl24[tbl24_index];

	do {
		/* Continue inspecting following levels until success or failure */
		status = lookup_step(lpm, tbl, &tbl_next, ip, first_byte++, next_hop);
		tbl = tbl_next;
	} while (status == 1);

	return status;
}
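
/*
 * Minimal usage sketch (deliberately left un-compiled, like the other
 * disabled blocks in this file). It assumes the rte_lpm6_config layout
 * from the DPDK header (max_rules, number_tbl8s, flags) and uses the
 * extra is_new_rule argument this port adds to rte_lpm6_add():
 */
#if 0
static void
lpm6_usage_example(void)
{
	struct rte_lpm6_config cfg = {
		.max_rules = 1024,
		.number_tbl8s = 1 << 16,
		.flags = 0,
	};
	/* 2001:db8::/32 and a host address inside it */
	uint8_t prefix[RTE_LPM6_IPV6_ADDR_SIZE] = { 0x20, 0x01, 0x0d, 0xb8 };
	uint8_t addr[RTE_LPM6_IPV6_ADDR_SIZE] = { 0x20, 0x01, 0x0d, 0xb8,
	    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1 };
	uint32_t next_hop;
	struct rte_lpm6 *lpm;

	lpm = rte_lpm6_create("example", -1, &cfg);
	if (lpm == NULL)
		return;

	/* last argument: 1 == this prefix was not in the table before */
	if (rte_lpm6_add(lpm, prefix, 32, 5, 1) >= 0 &&
	    rte_lpm6_lookup(lpm, addr, &next_hop) == 0) {
		/* next_hop is now 5 */
	}

	rte_lpm6_free(lpm);
}
#endif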

/*
 * Looks up a group of IP addresses
 */
int
rte_lpm6_lookup_bulk_func(const struct rte_lpm6 *lpm,
		uint8_t ips[][RTE_LPM6_IPV6_ADDR_SIZE],
		int32_t *next_hops, unsigned int n)
{
	unsigned int i;
	const struct rte_lpm6_tbl_entry *tbl;
	const struct rte_lpm6_tbl_entry *tbl_next = NULL;
	uint32_t tbl24_index, next_hop;
	uint8_t first_byte;
	int status;

	/* DEBUG: Check user input arguments. */
	if ((lpm == NULL) || (ips == NULL) || (next_hops == NULL))
		return -EINVAL;

	for (i = 0; i < n; i++) {
		first_byte = LOOKUP_FIRST_BYTE;
		tbl24_index = (ips[i][0] << BYTES2_SIZE) |
				(ips[i][1] << BYTE_SIZE) | ips[i][2];

		/* Calculate pointer to the first entry to be inspected */
		tbl = &lpm->tbl24[tbl24_index];

		do {
			/* Continue inspecting following levels
			 * until success or failure
			 */
			status = lookup_step(lpm, tbl, &tbl_next, ips[i],
					first_byte++, &next_hop);
			tbl = tbl_next;
		} while (status == 1);

		if (status < 0)
			next_hops[i] = -1;
		else
			next_hops[i] = (int32_t)next_hop;
	}

	return 0;
}

struct rte_lpm6_rule *
fill_rule6(char *buffer, const uint8_t *ip, uint8_t depth, uint32_t next_hop)
{
	struct rte_lpm6_rule *rule = (struct rte_lpm6_rule *)buffer;

	ip6_copy_addr((uint8_t *)&rule->ip, ip);
	rule->depth = depth;
	rule->next_hop = next_hop;

	return (rule);
}

#if 0
/*
 * Look for a rule in the high-level rules table
 */
int
rte_lpm6_is_rule_present(struct rte_lpm6 *lpm, const uint8_t *ip, uint8_t depth,
		uint32_t *next_hop)
{
	uint8_t masked_ip[RTE_LPM6_IPV6_ADDR_SIZE];

	/* Check user arguments. */
	if ((lpm == NULL) || next_hop == NULL || ip == NULL ||
			(depth < 1) || (depth > RTE_LPM6_MAX_DEPTH))
		return -EINVAL;

	/* Copy the IP and mask it to avoid modifying user's input data. */
	ip6_copy_addr(masked_ip, ip);
	ip6_mask_addr(masked_ip, depth);

	return rule_find(lpm, masked_ip, depth, next_hop);
}

/*
 * Delete a rule from the rule table.
 * NOTE: Valid range for depth parameter is 1 .. 128 inclusive.
 * return
 *    0 on success
 *   <0 on failure
 */
static inline int
rule_delete(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth)
{
	int ret;
	struct rte_lpm6_rule_key rule_key;

	/* init rule key */
	rule_key_init(&rule_key, ip, depth);

	/* delete the rule */
	ret = rte_hash_del_key(lpm->rules_tbl, (void *) &rule_key);
	if (ret >= 0)
		lpm->used_rules--;

	return ret;
}

/*
 * Deletes a group of rules
 *
 * Note that the function rebuilds the lpm table,
 * rather than doing incremental updates like
 * the regular delete function
 */
int
rte_lpm6_delete_bulk_func(struct rte_lpm6 *lpm,
		uint8_t ips[][RTE_LPM6_IPV6_ADDR_SIZE], uint8_t *depths,
		unsigned n)
{
	uint8_t masked_ip[RTE_LPM6_IPV6_ADDR_SIZE];
	unsigned i;

	/* Check input arguments. */
	if ((lpm == NULL) || (ips == NULL) || (depths == NULL))
		return -EINVAL;

	for (i = 0; i < n; i++) {
		ip6_copy_addr(masked_ip, ips[i]);
		ip6_mask_addr(masked_ip, depths[i]);
		rule_delete(lpm, masked_ip, depths[i]);
	}

	/*
	 * Set all the table entries to 0 (ie. delete every rule
	 * from the data structure).
	 */
	memset(lpm->tbl24, 0, sizeof(lpm->tbl24));
	memset(lpm->tbl8, 0, sizeof(lpm->tbl8[0])
			* RTE_LPM6_TBL8_GROUP_NUM_ENTRIES * lpm->number_tbl8s);
	tbl8_pool_init(lpm);

	/*
	 * Add every rule again (except for the ones that were removed from
	 * the rules table).
	 */
	rebuild_lpm(lpm);

	return 0;
}

/*
 * Delete all rules from the LPM table.
 */
void
rte_lpm6_delete_all(struct rte_lpm6 *lpm)
{
	/* Zero used rules counter. */
	lpm->used_rules = 0;

	/* Zero tbl24. */
	memset(lpm->tbl24, 0, sizeof(lpm->tbl24));

	/* Zero tbl8. */
	memset(lpm->tbl8, 0, sizeof(lpm->tbl8[0]) *
			RTE_LPM6_TBL8_GROUP_NUM_ENTRIES * lpm->number_tbl8s);

	/* init pool of free tbl8 indexes */
	tbl8_pool_init(lpm);

	/* Delete all rules from the rules table. */
	rte_hash_reset(lpm->rules_tbl);
}
#endif

/*
 * Convert a depth to a one byte long mask
 * Example: 4 will be converted to 0xF0
 */
static uint8_t __attribute__((pure))
depth_to_mask_1b(uint8_t depth)
{
	/* To calculate a mask start with a 1 on the left hand side and right
	 * shift while populating the left hand side with 1's
	 */
	return (signed char)0x80 >> (depth - 1);
}

#if 0
/*
 * Find a less specific rule
 */
static int
rule_find_less_specific(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth,
		struct rte_lpm6_rule *rule)
{
	int ret;
	uint32_t next_hop;
	uint8_t mask;
	struct rte_lpm6_rule_key rule_key;

	if (depth == 1)
		return 0;

	rule_key_init(&rule_key, ip, depth);

	while (depth > 1) {
		depth--;

		/* each iteration zero one more bit of the key */
		mask = depth & 7; /* depth % BYTE_SIZE */
		if (mask > 0)
			mask = depth_to_mask_1b(mask);

		rule_key.depth = depth;
		rule_key.ip[depth >> 3] &= mask;

		ret = rule_find_with_key(lpm, &rule_key, &next_hop);
		if (ret) {
			rule->depth = depth;
			ip6_copy_addr(rule->ip, rule_key.ip);
			rule->next_hop = next_hop;
			return 1;
		}
	}

	return 0;
}
#endif

/*
 * Find range of tbl8 cells occupied by a rule
 */
static void
rule_find_range(struct rte_lpm6 *lpm, const uint8_t *ip, uint8_t depth,
		struct rte_lpm6_tbl_entry **from,
		struct rte_lpm6_tbl_entry **to,
		uint32_t *out_tbl_ind)
{
	uint32_t ind;
	uint32_t first_3bytes = (uint32_t)ip[0] << 16 | ip[1] << 8 | ip[2];

	if (depth <= 24) {
		/* rule is within the top level */
		ind = first_3bytes;
		*from = &lpm->tbl24[ind];
		ind += (1 << (24 - depth)) - 1;
		*to = &lpm->tbl24[ind];
		*out_tbl_ind = TBL24_IND;
	} else {
		/* top level entry */
		struct rte_lpm6_tbl_entry *tbl = &lpm->tbl24[first_3bytes];
		assert(tbl->ext_entry == 1);
		/* first tbl8 */
		uint32_t tbl_ind = tbl->lpm6_tbl8_gindex;
		tbl = &lpm->tbl8[tbl_ind *
				RTE_LPM6_TBL8_GROUP_NUM_ENTRIES];
		/* current ip byte, the top level is already behind */
		uint8_t byte = 3;
		/* minus top level */
		depth -= 24;

		/* iterate through levels (tbl8s)
		 * until we reach the last one
		 */
		while (depth > 8) {
			tbl += ip[byte];
			assert(tbl->ext_entry == 1);
			/* go to the next level/tbl8 */
			tbl_ind = tbl->lpm6_tbl8_gindex;
			tbl = &lpm->tbl8[tbl_ind *
					RTE_LPM6_TBL8_GROUP_NUM_ENTRIES];
			byte += 1;
			depth -= 8;
		}

		/* last level/tbl8 */
		ind = ip[byte] & depth_to_mask_1b(depth);
		*from = &tbl[ind];
		ind += (1 << (8 - depth)) - 1;
		*to = &tbl[ind];
		*out_tbl_ind = tbl_ind;
	}
}
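
/*
 * Example: for a /33 the tbl24 entry selected by the first 3 bytes must be
 * extended; depth becomes 33 - 24 = 9, the loop follows one more tbl8
 * (consuming ip[3] and leaving depth 1), and the resulting range is the 128
 * entries of the final tbl8 whose top bit matches ip[4] & 0x80.
 */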

/*
 * Remove a table from the LPM tree
 */
static void
remove_tbl(struct rte_lpm6 *lpm, struct rte_lpm_tbl8_hdr *tbl_hdr,
		uint32_t tbl_ind, struct rte_lpm6_rule *lsp_rule)
{
	struct rte_lpm6_tbl_entry *owner_entry;

	if (tbl_hdr->owner_tbl_ind == TBL24_IND)
		owner_entry = &lpm->tbl24[tbl_hdr->owner_entry_ind];
	else {
		uint32_t owner_tbl_ind = tbl_hdr->owner_tbl_ind;
		owner_entry = &lpm->tbl8[
			owner_tbl_ind * RTE_LPM6_TBL8_GROUP_NUM_ENTRIES +
			tbl_hdr->owner_entry_ind];

		struct rte_lpm_tbl8_hdr *owner_tbl_hdr =
			&lpm->tbl8_hdrs[owner_tbl_ind];
		if (--owner_tbl_hdr->ref_cnt == 0)
			remove_tbl(lpm, owner_tbl_hdr, owner_tbl_ind, lsp_rule);
	}

	assert(owner_entry->ext_entry == 1);

	/* unlink the table */
	if (lsp_rule != NULL) {
		struct rte_lpm6_tbl_entry new_tbl_entry = {
			.next_hop = lsp_rule->next_hop,
			.depth = lsp_rule->depth,
			.valid = VALID,
			.valid_group = VALID,
			.ext_entry = 0
		};

		*owner_entry = new_tbl_entry;
	} else {
		struct rte_lpm6_tbl_entry new_tbl_entry = {
			.next_hop = 0,
			.depth = 0,
			.valid = INVALID,
			.valid_group = INVALID,
			.ext_entry = 0
		};

		*owner_entry = new_tbl_entry;
	}

	/* return the table to the pool */
	tbl8_put(lpm, tbl_ind);
}
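
/*
 * Note that removal cascades upward: dropping this table decrements the
 * owner tbl8's ref_cnt and, if that also reaches zero, the owner is removed
 * recursively, so a chain of tbl8s that existed only to reach this one is
 * returned to the pool within the same call.
 */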

/*
 * Deletes a rule
 */
int
rte_lpm6_delete(struct rte_lpm6 *lpm, const uint8_t *ip, uint8_t depth,
		struct rte_lpm6_rule *lsp_rule)
{
	uint8_t masked_ip[RTE_LPM6_IPV6_ADDR_SIZE];
	//struct rte_lpm6_rule lsp_rule_obj;
	//struct rte_lpm6_rule *lsp_rule;
	//int ret;
	uint32_t tbl_ind;
	struct rte_lpm6_tbl_entry *from, *to;

	/* Check input arguments. */
	if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM6_MAX_DEPTH))
		return -EINVAL;

	/* Copy the IP and mask it to avoid modifying user's input data. */
	ip6_copy_addr(masked_ip, ip);
	ip6_mask_addr(masked_ip, depth);

#if 0
	/* Delete the rule from the rule table. */
	ret = rule_delete(lpm, masked_ip, depth);
	if (ret < 0)
		return -ENOENT;
#endif

	/* find rule cells */
	rule_find_range(lpm, masked_ip, depth, &from, &to, &tbl_ind);

#if 0
	/* find a less specific rule (a rule with smaller depth)
	 * note: masked_ip will be modified, don't use it anymore
	 */
	ret = rule_find_less_specific(lpm, masked_ip, depth,
			&lsp_rule_obj);
	lsp_rule = ret ? &lsp_rule_obj : NULL;
#endif
	/* decrement the table rule counter,
	 * note that tbl24 doesn't have a header
	 */
	if (tbl_ind != TBL24_IND) {
		struct rte_lpm_tbl8_hdr *tbl_hdr = &lpm->tbl8_hdrs[tbl_ind];
		if (--tbl_hdr->ref_cnt == 0) {
			/* remove the table */
			remove_tbl(lpm, tbl_hdr, tbl_ind, lsp_rule);
			return 0;
		}
	}

	/* iterate rule cells */
	for (; from <= to; from++)
		if (from->ext_entry == 1) {
			/* reference to a more specific space
			 * of the prefix/rule. Entries in a more
			 * specific space that are not used by
			 * a more specific prefix must be occupied
			 * by the prefix
			 */
			if (lsp_rule != NULL)
				expand_rule(lpm,
					from->lpm6_tbl8_gindex *
					RTE_LPM6_TBL8_GROUP_NUM_ENTRIES,
					depth, lsp_rule->depth,
					lsp_rule->next_hop, VALID);
			else
				/* since the prefix has no less specific prefix,
				 * its more specific space must be invalidated
				 */
				expand_rule(lpm,
					from->lpm6_tbl8_gindex *
					RTE_LPM6_TBL8_GROUP_NUM_ENTRIES,
					depth, 0, 0, INVALID);
		} else if (from->depth == depth) {
			/* entry is not a reference and belongs to the prefix */
			if (lsp_rule != NULL) {
				struct rte_lpm6_tbl_entry new_tbl_entry = {
					.next_hop = lsp_rule->next_hop,
					.depth = lsp_rule->depth,
					.valid = VALID,
					.valid_group = VALID,
					.ext_entry = 0
				};

				*from = new_tbl_entry;
			} else {
				struct rte_lpm6_tbl_entry new_tbl_entry = {
					.next_hop = 0,
					.depth = 0,
					.valid = INVALID,
					.valid_group = INVALID,
					.ext_entry = 0
				};

				*from = new_tbl_entry;
			}
		}

	return 0;
}