/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <net/act_api.h>
#include <net/flow_offload.h>
#include <linux/rtnetlink.h>
#include <linux/mutex.h>
#include <linux/rhashtable.h>

struct flow_rule *flow_rule_alloc(unsigned int num_actions)
{
	struct flow_rule *rule;
	int i;

	rule = kzalloc(struct_size(rule, action.entries, num_actions),
		       GFP_KERNEL);
	if (!rule)
		return NULL;

	rule->action.num_entries = num_actions;
	/* Pre-fill each action hw_stats with DONT_CARE.
	 * Caller can override this if it wants stats for a given action.
	 */
	for (i = 0; i < num_actions; i++)
		rule->action.entries[i].hw_stats = FLOW_ACTION_HW_STATS_DONT_CARE;

	return rule;
}
EXPORT_SYMBOL(flow_rule_alloc);
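
/*
 * Illustrative sketch (not part of the original file): a caller that
 * wants hardware statistics for a given action overrides the DONT_CARE
 * default after allocation, e.g.:
 *
 *	struct flow_rule *rule;
 *
 *	rule = flow_rule_alloc(2);
 *	if (!rule)
 *		return -ENOMEM;
 *	rule->action.entries[0].hw_stats = FLOW_ACTION_HW_STATS_DELAYED;
 */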

struct flow_offload_action *offload_action_alloc(unsigned int num_actions)
{
	struct flow_offload_action *fl_action;
	int i;

	fl_action = kzalloc(struct_size(fl_action, action.entries, num_actions),
			    GFP_KERNEL);
	if (!fl_action)
		return NULL;

	fl_action->action.num_entries = num_actions;
	/* Pre-fill each action hw_stats with DONT_CARE.
	 * Caller can override this if it wants stats for a given action.
	 */
	for (i = 0; i < num_actions; i++)
		fl_action->action.entries[i].hw_stats = FLOW_ACTION_HW_STATS_DONT_CARE;

	return fl_action;
}

#define FLOW_DISSECTOR_MATCH(__rule, __type, __out)				\
	const struct flow_match *__m = &(__rule)->match;			\
	struct flow_dissector *__d = (__m)->dissector;				\
										\
	(__out)->key = skb_flow_dissector_target(__d, __type, (__m)->key);	\
	(__out)->mask = skb_flow_dissector_target(__d, __type, (__m)->mask);	\

void flow_rule_match_meta(const struct flow_rule *rule,
			  struct flow_match_meta *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_META, out);
}
EXPORT_SYMBOL(flow_rule_match_meta);

void flow_rule_match_basic(const struct flow_rule *rule,
			   struct flow_match_basic *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_BASIC, out);
}
EXPORT_SYMBOL(flow_rule_match_basic);
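
/*
 * Illustrative sketch: drivers consume the helpers generated from the
 * macro above by copying out a key/mask pair and honouring only the
 * masked bits. A hypothetical driver would guard each extraction with
 * flow_rule_match_key(), e.g.:
 *
 *	struct flow_match_basic match;
 *	bool is_ipv4 = false;
 *
 *	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
 *		flow_rule_match_basic(rule, &match);
 *		is_ipv4 = match.mask->n_proto &&
 *			  match.key->n_proto == htons(ETH_P_IP);
 *	}
 */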

void flow_rule_match_control(const struct flow_rule *rule,
			     struct flow_match_control *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_CONTROL, out);
}
EXPORT_SYMBOL(flow_rule_match_control);

void flow_rule_match_eth_addrs(const struct flow_rule *rule,
			       struct flow_match_eth_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_eth_addrs);

void flow_rule_match_vlan(const struct flow_rule *rule,
			  struct flow_match_vlan *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_VLAN, out);
}
EXPORT_SYMBOL(flow_rule_match_vlan);

void flow_rule_match_cvlan(const struct flow_rule *rule,
			   struct flow_match_vlan *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_CVLAN, out);
}
EXPORT_SYMBOL(flow_rule_match_cvlan);

void flow_rule_match_ipv4_addrs(const struct flow_rule *rule,
				struct flow_match_ipv4_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_ipv4_addrs);

void flow_rule_match_ipv6_addrs(const struct flow_rule *rule,
				struct flow_match_ipv6_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_ipv6_addrs);

void flow_rule_match_ip(const struct flow_rule *rule,
			struct flow_match_ip *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IP, out);
}
EXPORT_SYMBOL(flow_rule_match_ip);

void flow_rule_match_ports(const struct flow_rule *rule,
			   struct flow_match_ports *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_PORTS, out);
}
EXPORT_SYMBOL(flow_rule_match_ports);

void flow_rule_match_ports_range(const struct flow_rule *rule,
				 struct flow_match_ports_range *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_PORTS_RANGE, out);
}
EXPORT_SYMBOL(flow_rule_match_ports_range);

void flow_rule_match_tcp(const struct flow_rule *rule,
			 struct flow_match_tcp *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_TCP, out);
}
EXPORT_SYMBOL(flow_rule_match_tcp);

void flow_rule_match_icmp(const struct flow_rule *rule,
			  struct flow_match_icmp *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ICMP, out);
}
EXPORT_SYMBOL(flow_rule_match_icmp);

void flow_rule_match_mpls(const struct flow_rule *rule,
			  struct flow_match_mpls *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_MPLS, out);
}
EXPORT_SYMBOL(flow_rule_match_mpls);

void flow_rule_match_enc_control(const struct flow_rule *rule,
				 struct flow_match_control *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_control);

void flow_rule_match_enc_ipv4_addrs(const struct flow_rule *rule,
				    struct flow_match_ipv4_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_ipv4_addrs);

void flow_rule_match_enc_ipv6_addrs(const struct flow_rule *rule,
				    struct flow_match_ipv6_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_ipv6_addrs);

void flow_rule_match_enc_ip(const struct flow_rule *rule,
			    struct flow_match_ip *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IP, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_ip);

void flow_rule_match_enc_ports(const struct flow_rule *rule,
			       struct flow_match_ports *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_PORTS, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_ports);

void flow_rule_match_enc_keyid(const struct flow_rule *rule,
			       struct flow_match_enc_keyid *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_KEYID, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_keyid);

void flow_rule_match_enc_opts(const struct flow_rule *rule,
			      struct flow_match_enc_opts *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_OPTS, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_opts);
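
/*
 * Illustrative sketch: the ENC_* helpers above describe the outer
 * (tunnel) headers. A hypothetical VXLAN-offloading driver could
 * recover the VNI and the outer IPv4 destination like this:
 *
 *	struct flow_match_enc_keyid keyid;
 *	struct flow_match_ipv4_addrs addrs;
 *	u32 vni = 0;
 *	__be32 outer_dst = 0;
 *
 *	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
 *		flow_rule_match_enc_keyid(rule, &keyid);
 *		vni = be32_to_cpu(keyid.key->keyid);
 *	}
 *	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
 *		flow_rule_match_enc_ipv4_addrs(rule, &addrs);
 *		outer_dst = addrs.key->dst;
 *	}
 */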

struct flow_action_cookie *flow_action_cookie_create(void *data,
						     unsigned int len,
						     gfp_t gfp)
{
	struct flow_action_cookie *cookie;

	cookie = kmalloc(sizeof(*cookie) + len, gfp);
	if (!cookie)
		return NULL;
	cookie->cookie_len = len;
	memcpy(cookie->cookie, data, len);
	return cookie;
}
EXPORT_SYMBOL(flow_action_cookie_create);

void flow_action_cookie_destroy(struct flow_action_cookie *cookie)
{
	kfree(cookie);
}
EXPORT_SYMBOL(flow_action_cookie_destroy);
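
/*
 * Illustrative sketch: this pair is how a private copy of the opaque,
 * user-provided action cookie is kept and released. Assuming a buffer
 * "data" of "len" bytes (hypothetical names):
 *
 *	struct flow_action_cookie *cookie;
 *
 *	cookie = flow_action_cookie_create(data, len, GFP_KERNEL);
 *	if (!cookie)
 *		return -ENOMEM;
 *	...
 *	flow_action_cookie_destroy(cookie);
 */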

void flow_rule_match_ct(const struct flow_rule *rule,
			struct flow_match_ct *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_CT, out);
}
EXPORT_SYMBOL(flow_rule_match_ct);

void flow_rule_match_pppoe(const struct flow_rule *rule,
			   struct flow_match_pppoe *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_PPPOE, out);
}
EXPORT_SYMBOL(flow_rule_match_pppoe);

struct flow_block_cb *flow_block_cb_alloc(flow_setup_cb_t *cb,
					  void *cb_ident, void *cb_priv,
					  void (*release)(void *cb_priv))
{
	struct flow_block_cb *block_cb;

	block_cb = kzalloc(sizeof(*block_cb), GFP_KERNEL);
	if (!block_cb)
		return ERR_PTR(-ENOMEM);

	block_cb->cb = cb;
	block_cb->cb_ident = cb_ident;
	block_cb->cb_priv = cb_priv;
	block_cb->release = release;

	return block_cb;
}
EXPORT_SYMBOL(flow_block_cb_alloc);

void flow_block_cb_free(struct flow_block_cb *block_cb)
{
	if (block_cb->release)
		block_cb->release(block_cb->cb_priv);

	kfree(block_cb);
}
EXPORT_SYMBOL(flow_block_cb_free);

struct flow_block_cb *flow_block_cb_lookup(struct flow_block *block,
					   flow_setup_cb_t *cb, void *cb_ident)
{
	struct flow_block_cb *block_cb;

	list_for_each_entry(block_cb, &block->cb_list, list) {
		if (block_cb->cb == cb &&
		    block_cb->cb_ident == cb_ident)
			return block_cb;
	}

	return NULL;
}
EXPORT_SYMBOL(flow_block_cb_lookup);

void *flow_block_cb_priv(struct flow_block_cb *block_cb)
{
	return block_cb->cb_priv;
}
EXPORT_SYMBOL(flow_block_cb_priv);

void flow_block_cb_incref(struct flow_block_cb *block_cb)
{
	block_cb->refcnt++;
}
EXPORT_SYMBOL(flow_block_cb_incref);

unsigned int flow_block_cb_decref(struct flow_block_cb *block_cb)
{
	return --block_cb->refcnt;
}
EXPORT_SYMBOL(flow_block_cb_decref);
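
/*
 * Illustrative sketch: the incref/decref pair lets a driver share one
 * flow_block_cb across several block bindings and tear it down only on
 * the last unbind. A hypothetical bind path:
 *
 *	block_cb = flow_block_cb_lookup(f->block, foo_block_cb, foo);
 *	if (block_cb) {
 *		flow_block_cb_incref(block_cb);
 *		return 0;
 *	}
 *
 * and the matching unbind only removes the callback once
 * flow_block_cb_decref() drops the count to zero:
 *
 *	if (!flow_block_cb_decref(block_cb))
 *		flow_block_cb_remove(block_cb, f);
 */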

bool flow_block_cb_is_busy(flow_setup_cb_t *cb, void *cb_ident,
			   struct list_head *driver_block_list)
{
	struct flow_block_cb *block_cb;

	list_for_each_entry(block_cb, driver_block_list, driver_list) {
		if (block_cb->cb == cb &&
		    block_cb->cb_ident == cb_ident)
			return true;
	}

	return false;
}
EXPORT_SYMBOL(flow_block_cb_is_busy);

int flow_block_cb_setup_simple(struct flow_block_offload *f,
			       struct list_head *driver_block_list,
			       flow_setup_cb_t *cb,
			       void *cb_ident, void *cb_priv,
			       bool ingress_only)
{
	struct flow_block_cb *block_cb;

	if (ingress_only &&
	    f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	f->driver_block_list = driver_block_list;

	switch (f->command) {
	case FLOW_BLOCK_BIND:
		if (flow_block_cb_is_busy(cb, cb_ident, driver_block_list))
			return -EBUSY;

		block_cb = flow_block_cb_alloc(cb, cb_ident, cb_priv, NULL);
		if (IS_ERR(block_cb))
			return PTR_ERR(block_cb);

		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, driver_block_list);
		return 0;
	case FLOW_BLOCK_UNBIND:
		block_cb = flow_block_cb_lookup(f->block, cb, cb_ident);
		if (!block_cb)
			return -ENOENT;

		flow_block_cb_remove(block_cb, f);
		list_del(&block_cb->driver_list);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
EXPORT_SYMBOL(flow_block_cb_setup_simple);
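
/*
 * Illustrative sketch, loosely following how drivers wire this helper
 * into their ndo_setup_tc() (all "foo" names are hypothetical):
 *
 *	static LIST_HEAD(foo_block_cb_list);
 *
 *	static int foo_setup_tc(struct net_device *dev, enum tc_setup_type type,
 *				void *type_data)
 *	{
 *		struct foo_priv *priv = netdev_priv(dev);
 *
 *		switch (type) {
 *		case TC_SETUP_BLOCK:
 *			return flow_block_cb_setup_simple(type_data,
 *							  &foo_block_cb_list,
 *							  foo_setup_tc_block_cb,
 *							  priv, priv, true);
 *		default:
 *			return -EOPNOTSUPP;
 *		}
 *	}
 */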

static DEFINE_MUTEX(flow_indr_block_lock);
static LIST_HEAD(flow_block_indr_list);
static LIST_HEAD(flow_block_indr_dev_list);
static LIST_HEAD(flow_indir_dev_list);

struct flow_indr_dev {
	struct list_head		list;
	flow_indr_block_bind_cb_t	*cb;
	void				*cb_priv;
	refcount_t			refcnt;
};

static struct flow_indr_dev *flow_indr_dev_alloc(flow_indr_block_bind_cb_t *cb,
						 void *cb_priv)
{
	struct flow_indr_dev *indr_dev;

	indr_dev = kmalloc(sizeof(*indr_dev), GFP_KERNEL);
	if (!indr_dev)
		return NULL;

	indr_dev->cb		= cb;
	indr_dev->cb_priv	= cb_priv;
	refcount_set(&indr_dev->refcnt, 1);

	return indr_dev;
}

struct flow_indir_dev_info {
	void *data;
	struct net_device *dev;
	struct Qdisc *sch;
	enum tc_setup_type type;
	void (*cleanup)(struct flow_block_cb *block_cb);
	struct list_head list;
	enum flow_block_command command;
	enum flow_block_binder_type binder_type;
	struct list_head *cb_list;
};

static void existing_qdiscs_register(flow_indr_block_bind_cb_t *cb, void *cb_priv)
{
	struct flow_block_offload bo;
	struct flow_indir_dev_info *cur;

	list_for_each_entry(cur, &flow_indir_dev_list, list) {
		memset(&bo, 0, sizeof(bo));
		bo.command = cur->command;
		bo.binder_type = cur->binder_type;
		INIT_LIST_HEAD(&bo.cb_list);
		cb(cur->dev, cur->sch, cb_priv, cur->type, &bo, cur->data, cur->cleanup);
		list_splice(&bo.cb_list, cur->cb_list);
	}
}

int flow_indr_dev_register(flow_indr_block_bind_cb_t *cb, void *cb_priv)
{
	struct flow_indr_dev *indr_dev;

	mutex_lock(&flow_indr_block_lock);
	list_for_each_entry(indr_dev, &flow_block_indr_dev_list, list) {
		if (indr_dev->cb == cb &&
		    indr_dev->cb_priv == cb_priv) {
			refcount_inc(&indr_dev->refcnt);
			mutex_unlock(&flow_indr_block_lock);
			return 0;
		}
	}

	indr_dev = flow_indr_dev_alloc(cb, cb_priv);
	if (!indr_dev) {
		mutex_unlock(&flow_indr_block_lock);
		return -ENOMEM;
	}

	list_add(&indr_dev->list, &flow_block_indr_dev_list);
	existing_qdiscs_register(cb, cb_priv);
	mutex_unlock(&flow_indr_block_lock);

	tcf_action_reoffload_cb(cb, cb_priv, true);

	return 0;
}
EXPORT_SYMBOL(flow_indr_dev_register);
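
/*
 * Illustrative sketch: a driver that can offload rules installed on
 * devices it does not own (e.g. tunnel netdevs) registers one indirect
 * callback at init time and unregisters symmetrically on teardown
 * (hypothetical names):
 *
 *	err = flow_indr_dev_register(foo_indr_setup_tc_cb, foo);
 *	if (err)
 *		return err;
 *	...
 *	flow_indr_dev_unregister(foo_indr_setup_tc_cb, foo,
 *				 foo_indr_block_unbind);
 *
 * Registration also replays bindings recorded in flow_indir_dev_list
 * via existing_qdiscs_register() above, so a driver loaded after the
 * blocks were set up still gets to see them.
 */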

static void __flow_block_indr_cleanup(void (*release)(void *cb_priv),
				      void *cb_priv,
				      struct list_head *cleanup_list)
{
	struct flow_block_cb *this, *next;

	list_for_each_entry_safe(this, next, &flow_block_indr_list, indr.list) {
		if (this->release == release &&
		    this->indr.cb_priv == cb_priv)
			list_move(&this->indr.list, cleanup_list);
	}
}

static void flow_block_indr_notify(struct list_head *cleanup_list)
{
	struct flow_block_cb *this, *next;

	list_for_each_entry_safe(this, next, cleanup_list, indr.list) {
		list_del(&this->indr.list);
		this->indr.cleanup(this);
	}
}

void flow_indr_dev_unregister(flow_indr_block_bind_cb_t *cb, void *cb_priv,
			      void (*release)(void *cb_priv))
{
	struct flow_indr_dev *this, *next, *indr_dev = NULL;
	LIST_HEAD(cleanup_list);

	mutex_lock(&flow_indr_block_lock);
	list_for_each_entry_safe(this, next, &flow_block_indr_dev_list, list) {
		if (this->cb == cb &&
		    this->cb_priv == cb_priv &&
		    refcount_dec_and_test(&this->refcnt)) {
			indr_dev = this;
			list_del(&indr_dev->list);
			break;
		}
	}

	if (!indr_dev) {
		mutex_unlock(&flow_indr_block_lock);
		return;
	}

	__flow_block_indr_cleanup(release, cb_priv, &cleanup_list);
	mutex_unlock(&flow_indr_block_lock);

	tcf_action_reoffload_cb(cb, cb_priv, false);
	flow_block_indr_notify(&cleanup_list);
	kfree(indr_dev);
}
EXPORT_SYMBOL(flow_indr_dev_unregister);

static void flow_block_indr_init(struct flow_block_cb *flow_block,
				 struct flow_block_offload *bo,
				 struct net_device *dev, struct Qdisc *sch, void *data,
				 void *cb_priv,
				 void (*cleanup)(struct flow_block_cb *block_cb))
{
	flow_block->indr.binder_type = bo->binder_type;
	flow_block->indr.data = data;
	flow_block->indr.cb_priv = cb_priv;
	flow_block->indr.dev = dev;
	flow_block->indr.sch = sch;
	flow_block->indr.cleanup = cleanup;
}

struct flow_block_cb *flow_indr_block_cb_alloc(flow_setup_cb_t *cb,
					       void *cb_ident, void *cb_priv,
					       void (*release)(void *cb_priv),
					       struct flow_block_offload *bo,
					       struct net_device *dev,
					       struct Qdisc *sch, void *data,
					       void *indr_cb_priv,
					       void (*cleanup)(struct flow_block_cb *block_cb))
{
	struct flow_block_cb *block_cb;

	block_cb = flow_block_cb_alloc(cb, cb_ident, cb_priv, release);
	if (IS_ERR(block_cb))
		goto out;

	flow_block_indr_init(block_cb, bo, dev, sch, data, indr_cb_priv, cleanup);
	list_add(&block_cb->indr.list, &flow_block_indr_list);

out:
	return block_cb;
}
EXPORT_SYMBOL(flow_indr_block_cb_alloc);
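
/*
 * Illustrative sketch of the BIND half of a driver's
 * flow_indr_block_bind_cb_t, using the allocator above (hypothetical
 * names, error unwinding trimmed):
 *
 *	block_cb = flow_indr_block_cb_alloc(foo_setup_tc_block_cb, netdev,
 *					    foo, foo_block_release, bo,
 *					    netdev, sch, data, foo,
 *					    foo_indr_block_unbind);
 *	if (IS_ERR(block_cb))
 *		return PTR_ERR(block_cb);
 *
 *	flow_block_cb_add(block_cb, bo);
 *	list_add_tail(&block_cb->driver_list, &foo_block_cb_list);
 */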

static struct flow_indir_dev_info *find_indir_dev(void *data)
{
	struct flow_indir_dev_info *cur;

	list_for_each_entry(cur, &flow_indir_dev_list, list) {
		if (cur->data == data)
			return cur;
	}
	return NULL;
}

static int indir_dev_add(void *data, struct net_device *dev, struct Qdisc *sch,
			 enum tc_setup_type type, void (*cleanup)(struct flow_block_cb *block_cb),
			 struct flow_block_offload *bo)
{
	struct flow_indir_dev_info *info;

	info = find_indir_dev(data);
	if (info)
		return -EEXIST;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	info->data = data;
	info->dev = dev;
	info->sch = sch;
	info->type = type;
	info->cleanup = cleanup;
	info->command = bo->command;
	info->binder_type = bo->binder_type;
	info->cb_list = bo->cb_list_head;

	list_add(&info->list, &flow_indir_dev_list);
	return 0;
}

static int indir_dev_remove(void *data)
{
	struct flow_indir_dev_info *info;

	info = find_indir_dev(data);
	if (!info)
		return -ENOENT;

	list_del(&info->list);

	kfree(info);
	return 0;
}

int flow_indr_dev_setup_offload(struct net_device *dev, struct Qdisc *sch,
				enum tc_setup_type type, void *data,
				struct flow_block_offload *bo,
				void (*cleanup)(struct flow_block_cb *block_cb))
{
	struct flow_indr_dev *this;
	u32 count = 0;
	int err;

	mutex_lock(&flow_indr_block_lock);
	if (bo) {
		if (bo->command == FLOW_BLOCK_BIND)
			indir_dev_add(data, dev, sch, type, cleanup, bo);
		else if (bo->command == FLOW_BLOCK_UNBIND)
			indir_dev_remove(data);
	}

	list_for_each_entry(this, &flow_block_indr_dev_list, list) {
		err = this->cb(dev, sch, this->cb_priv, type, bo, data, cleanup);
		if (!err)
			count++;
	}

	mutex_unlock(&flow_indr_block_lock);

	return (bo && list_empty(&bo->cb_list)) ? -EOPNOTSUPP : count;
}
EXPORT_SYMBOL(flow_indr_dev_setup_offload);
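
/*
 * Illustrative sketch: offload front ends invoke this when a block is
 * (un)bound on a device no driver claimed directly. A return value >= 0
 * is the number of registered callbacks that accepted the block;
 * -EOPNOTSUPP means none of them populated bo->cb_list:
 *
 *	err = flow_indr_dev_setup_offload(dev, sch, TC_SETUP_BLOCK, data,
 *					  bo, cleanup);
 *	if (err < 0)
 *		return err;
 */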

bool flow_indr_dev_exists(void)
{
	return !list_empty(&flow_block_indr_dev_list);
}
EXPORT_SYMBOL(flow_indr_dev_exists);
618