/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <net/act_api.h>
#include <net/flow_offload.h>
#include <linux/rtnetlink.h>
#include <linux/mutex.h>
#include <linux/rhashtable.h>

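/* Allocate a flow_rule with room for @num_actions action entries. Each
 * entry's hw_stats field is pre-set to FLOW_ACTION_HW_STATS_DONT_CARE,
 * so callers only need to override the actions they want stats for.
 * Returns NULL on allocation failure.
 */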
struct flow_rule *flow_rule_alloc(unsigned int num_actions)
{
	struct flow_rule *rule;
	int i;

	rule = kzalloc(struct_size(rule, action.entries, num_actions),
		       GFP_KERNEL);
	if (!rule)
		return NULL;

	rule->action.num_entries = num_actions;
	/* Pre-fill each action hw_stats with DONT_CARE.
	 * Caller can override this if it wants stats for a given action.
	 */
	for (i = 0; i < num_actions; i++)
		rule->action.entries[i].hw_stats = FLOW_ACTION_HW_STATS_DONT_CARE;

	return rule;
}
EXPORT_SYMBOL(flow_rule_alloc);

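/* Counterpart of flow_rule_alloc() for a flow_offload_action, used on
 * the TC action offload path. Not exported, so it is only reachable
 * from built-in callers.
 */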
struct flow_offload_action *offload_action_alloc(unsigned int num_actions)
{
	struct flow_offload_action *fl_action;
	int i;

	fl_action = kzalloc(struct_size(fl_action, action.entries, num_actions),
			    GFP_KERNEL);
	if (!fl_action)
		return NULL;

	fl_action->action.num_entries = num_actions;
	/* Pre-fill each action hw_stats with DONT_CARE.
	 * Caller can override this if it wants stats for a given action.
	 */
	for (i = 0; i < num_actions; i++)
		fl_action->action.entries[i].hw_stats = FLOW_ACTION_HW_STATS_DONT_CARE;

	return fl_action;
}

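/* FLOW_DISSECTOR_MATCH() expands to local variable declarations plus
 * two skb_flow_dissector_target() lookups that fill @__out's key/mask
 * pointers, so it must be the sole statement in each helper body below.
 */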
#define FLOW_DISSECTOR_MATCH(__rule, __type, __out)				\
	const struct flow_match *__m = &(__rule)->match;			\
	struct flow_dissector *__d = (__m)->dissector;				\
										\
	(__out)->key = skb_flow_dissector_target(__d, __type, (__m)->key);	\
	(__out)->mask = skb_flow_dissector_target(__d, __type, (__m)->mask);	\

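/* The flow_rule_match_*() helpers copy the key/mask pointer pair for a
 * single dissector key into @out. Callers should first check that the
 * rule actually carries the key, e.g. (a minimal sketch, not lifted
 * from any particular driver):
 *
 *	struct flow_match_basic match;
 *
 *	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
 *		flow_rule_match_basic(rule, &match);
 *		if (match.mask->n_proto)
 *			n_proto = match.key->n_proto;
 *	}
 */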
void flow_rule_match_meta(const struct flow_rule *rule,
			  struct flow_match_meta *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_META, out);
}
EXPORT_SYMBOL(flow_rule_match_meta);

void flow_rule_match_basic(const struct flow_rule *rule,
			   struct flow_match_basic *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_BASIC, out);
}
EXPORT_SYMBOL(flow_rule_match_basic);

void flow_rule_match_control(const struct flow_rule *rule,
			     struct flow_match_control *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_CONTROL, out);
}
EXPORT_SYMBOL(flow_rule_match_control);

void flow_rule_match_eth_addrs(const struct flow_rule *rule,
			       struct flow_match_eth_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_eth_addrs);

void flow_rule_match_vlan(const struct flow_rule *rule,
			  struct flow_match_vlan *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_VLAN, out);
}
EXPORT_SYMBOL(flow_rule_match_vlan);

void flow_rule_match_cvlan(const struct flow_rule *rule,
			   struct flow_match_vlan *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_CVLAN, out);
}
EXPORT_SYMBOL(flow_rule_match_cvlan);

void flow_rule_match_arp(const struct flow_rule *rule,
			 struct flow_match_arp *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ARP, out);
}
EXPORT_SYMBOL(flow_rule_match_arp);

void flow_rule_match_ipv4_addrs(const struct flow_rule *rule,
				struct flow_match_ipv4_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_ipv4_addrs);

void flow_rule_match_ipv6_addrs(const struct flow_rule *rule,
				struct flow_match_ipv6_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_ipv6_addrs);

void flow_rule_match_ip(const struct flow_rule *rule,
			struct flow_match_ip *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IP, out);
}
EXPORT_SYMBOL(flow_rule_match_ip);

void flow_rule_match_ports(const struct flow_rule *rule,
			   struct flow_match_ports *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_PORTS, out);
}
EXPORT_SYMBOL(flow_rule_match_ports);

void flow_rule_match_ports_range(const struct flow_rule *rule,
				 struct flow_match_ports_range *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_PORTS_RANGE, out);
}
EXPORT_SYMBOL(flow_rule_match_ports_range);

void flow_rule_match_tcp(const struct flow_rule *rule,
			 struct flow_match_tcp *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_TCP, out);
}
EXPORT_SYMBOL(flow_rule_match_tcp);

void flow_rule_match_ipsec(const struct flow_rule *rule,
			   struct flow_match_ipsec *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IPSEC, out);
}
EXPORT_SYMBOL(flow_rule_match_ipsec);

void flow_rule_match_icmp(const struct flow_rule *rule,
			  struct flow_match_icmp *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ICMP, out);
}
EXPORT_SYMBOL(flow_rule_match_icmp);

void flow_rule_match_mpls(const struct flow_rule *rule,
			  struct flow_match_mpls *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_MPLS, out);
}
EXPORT_SYMBOL(flow_rule_match_mpls);

void flow_rule_match_enc_control(const struct flow_rule *rule,
				 struct flow_match_control *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_control);

void flow_rule_match_enc_ipv4_addrs(const struct flow_rule *rule,
				    struct flow_match_ipv4_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_ipv4_addrs);

void flow_rule_match_enc_ipv6_addrs(const struct flow_rule *rule,
				    struct flow_match_ipv6_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_ipv6_addrs);

void flow_rule_match_enc_ip(const struct flow_rule *rule,
			    struct flow_match_ip *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IP, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_ip);

void flow_rule_match_enc_ports(const struct flow_rule *rule,
			       struct flow_match_ports *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_PORTS, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_ports);

void flow_rule_match_enc_keyid(const struct flow_rule *rule,
			       struct flow_match_enc_keyid *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_KEYID, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_keyid);

void flow_rule_match_enc_opts(const struct flow_rule *rule,
			      struct flow_match_enc_opts *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_OPTS, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_opts);

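/* Copy @len bytes of @data into a freshly allocated flow_action_cookie.
 * Pair with flow_action_cookie_destroy() to release it.
 */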
struct flow_action_cookie *flow_action_cookie_create(void *data,
						     unsigned int len,
						     gfp_t gfp)
{
	struct flow_action_cookie *cookie;

	cookie = kmalloc(sizeof(*cookie) + len, gfp);
	if (!cookie)
		return NULL;
	cookie->cookie_len = len;
	memcpy(cookie->cookie, data, len);
	return cookie;
}
EXPORT_SYMBOL(flow_action_cookie_create);

void flow_action_cookie_destroy(struct flow_action_cookie *cookie)
{
	kfree(cookie);
}
EXPORT_SYMBOL(flow_action_cookie_destroy);

void flow_rule_match_ct(const struct flow_rule *rule,
			struct flow_match_ct *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_CT, out);
}
EXPORT_SYMBOL(flow_rule_match_ct);

void flow_rule_match_pppoe(const struct flow_rule *rule,
			   struct flow_match_pppoe *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_PPPOE, out);
}
EXPORT_SYMBOL(flow_rule_match_pppoe);

void flow_rule_match_l2tpv3(const struct flow_rule *rule,
			    struct flow_match_l2tpv3 *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_L2TPV3, out);
}
EXPORT_SYMBOL(flow_rule_match_l2tpv3);

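/* Allocate a flow_block_cb that runs @cb for rules added to a block.
 * @cb_ident identifies the binding and @release, if non-NULL, is called
 * on @cb_priv when the callback is freed. Note the ERR_PTR() return:
 * callers must check with IS_ERR(), not against NULL.
 */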
struct flow_block_cb *flow_block_cb_alloc(flow_setup_cb_t *cb,
					  void *cb_ident, void *cb_priv,
					  void (*release)(void *cb_priv))
{
	struct flow_block_cb *block_cb;

	block_cb = kzalloc(sizeof(*block_cb), GFP_KERNEL);
	if (!block_cb)
		return ERR_PTR(-ENOMEM);

	block_cb->cb = cb;
	block_cb->cb_ident = cb_ident;
	block_cb->cb_priv = cb_priv;
	block_cb->release = release;

	return block_cb;
}
EXPORT_SYMBOL(flow_block_cb_alloc);

void flow_block_cb_free(struct flow_block_cb *block_cb)
{
	if (block_cb->release)
		block_cb->release(block_cb->cb_priv);

	kfree(block_cb);
}
EXPORT_SYMBOL(flow_block_cb_free);

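/* Find the callback on @block's cb_list matching the @cb/@cb_ident pair
 * passed to flow_block_cb_alloc(), or NULL if there is none.
 */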
struct flow_block_cb *flow_block_cb_lookup(struct flow_block *block,
					   flow_setup_cb_t *cb, void *cb_ident)
{
	struct flow_block_cb *block_cb;

	list_for_each_entry(block_cb, &block->cb_list, list) {
		if (block_cb->cb == cb &&
		    block_cb->cb_ident == cb_ident)
			return block_cb;
	}

	return NULL;
}
EXPORT_SYMBOL(flow_block_cb_lookup);

void *flow_block_cb_priv(struct flow_block_cb *block_cb)
{
	return block_cb->cb_priv;
}
EXPORT_SYMBOL(flow_block_cb_priv);

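/* The refcnt manipulated below is a plain integer: callers are expected
 * to serialize incref/decref under the locking that already protects
 * block bind/unbind (typically RTNL), so no atomics are used.
 */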
void flow_block_cb_incref(struct flow_block_cb *block_cb)
{
	block_cb->refcnt++;
}
EXPORT_SYMBOL(flow_block_cb_incref);

unsigned int flow_block_cb_decref(struct flow_block_cb *block_cb)
{
	return --block_cb->refcnt;
}
EXPORT_SYMBOL(flow_block_cb_decref);

bool flow_block_cb_is_busy(flow_setup_cb_t *cb, void *cb_ident,
			   struct list_head *driver_block_list)
{
	struct flow_block_cb *block_cb;

	list_for_each_entry(block_cb, driver_block_list, driver_list) {
		if (block_cb->cb == cb &&
		    block_cb->cb_ident == cb_ident)
			return true;
	}

	return false;
}
EXPORT_SYMBOL(flow_block_cb_is_busy);

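/* Boilerplate for drivers that need one callback per block: BIND
 * allocates and registers the callback (refusing duplicates with
 * -EBUSY), UNBIND tears it down, and @ingress_only rejects any binder
 * type other than clsact ingress. Typical use from a driver's
 * ndo_setup_tc() looks like this (a minimal sketch; the "foo" names
 * are hypothetical):
 *
 *	static LIST_HEAD(foo_block_cb_list);
 *
 *	static int foo_setup_tc(struct net_device *dev,
 *				enum tc_setup_type type, void *type_data)
 *	{
 *		switch (type) {
 *		case TC_SETUP_BLOCK:
 *			return flow_block_cb_setup_simple(type_data,
 *							  &foo_block_cb_list,
 *							  foo_setup_tc_block_cb,
 *							  dev, dev, true);
 *		default:
 *			return -EOPNOTSUPP;
 *		}
 *	}
 */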
int flow_block_cb_setup_simple(struct flow_block_offload *f,
			       struct list_head *driver_block_list,
			       flow_setup_cb_t *cb,
			       void *cb_ident, void *cb_priv,
			       bool ingress_only)
{
	struct flow_block_cb *block_cb;

	if (ingress_only &&
	    f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	f->driver_block_list = driver_block_list;

	switch (f->command) {
	case FLOW_BLOCK_BIND:
		if (flow_block_cb_is_busy(cb, cb_ident, driver_block_list))
			return -EBUSY;

		block_cb = flow_block_cb_alloc(cb, cb_ident, cb_priv, NULL);
		if (IS_ERR(block_cb))
			return PTR_ERR(block_cb);

		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, driver_block_list);
		return 0;
	case FLOW_BLOCK_UNBIND:
		block_cb = flow_block_cb_lookup(f->block, cb, cb_ident);
		if (!block_cb)
			return -ENOENT;

		flow_block_cb_remove(block_cb, f);
		list_del(&block_cb->driver_list);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
EXPORT_SYMBOL(flow_block_cb_setup_simple);

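/* Indirect block offload allows a driver to offload TC blocks bound to
 * a device it does not register callbacks on directly, e.g. a tunnel
 * netdevice whose traffic its hardware can nonetheless process. Drivers
 * register a flow_indr_block_bind_cb_t and are then notified of block
 * bind/unbind events on such devices.
 */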
static DEFINE_MUTEX(flow_indr_block_lock);
static LIST_HEAD(flow_block_indr_list);
static LIST_HEAD(flow_block_indr_dev_list);
static LIST_HEAD(flow_indir_dev_list);

struct flow_indr_dev {
	struct list_head		list;
	flow_indr_block_bind_cb_t	*cb;
	void				*cb_priv;
	refcount_t			refcnt;
};

static struct flow_indr_dev *flow_indr_dev_alloc(flow_indr_block_bind_cb_t *cb,
						 void *cb_priv)
{
	struct flow_indr_dev *indr_dev;

	indr_dev = kmalloc(sizeof(*indr_dev), GFP_KERNEL);
	if (!indr_dev)
		return NULL;

	indr_dev->cb		= cb;
	indr_dev->cb_priv	= cb_priv;
	refcount_set(&indr_dev->refcnt, 1);

	return indr_dev;
}

struct flow_indir_dev_info {
	void *data;
	struct net_device *dev;
	struct Qdisc *sch;
	enum tc_setup_type type;
	void (*cleanup)(struct flow_block_cb *block_cb);
	struct list_head list;
	enum flow_block_command command;
	enum flow_block_binder_type binder_type;
	struct list_head *cb_list;
};

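/* Replay the bind requests recorded on flow_indir_dev_list to a newly
 * registered callback, so a driver that loads after the blocks were set
 * up still learns about them. Runs under flow_indr_block_lock.
 */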
static void existing_qdiscs_register(flow_indr_block_bind_cb_t *cb, void *cb_priv)
{
	struct flow_block_offload bo;
	struct flow_indir_dev_info *cur;

	list_for_each_entry(cur, &flow_indir_dev_list, list) {
		memset(&bo, 0, sizeof(bo));
		bo.command = cur->command;
		bo.binder_type = cur->binder_type;
		INIT_LIST_HEAD(&bo.cb_list);
		cb(cur->dev, cur->sch, cb_priv, cur->type, &bo, cur->data, cur->cleanup);
		list_splice(&bo.cb_list, cur->cb_list);
	}
}

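/* Register an indirect block callback. If the @cb/@cb_priv pair is
 * already registered, only its refcount is bumped; otherwise existing
 * qdisc bindings and offloadable TC actions are replayed to the new
 * callback. Returns 0 on success or -ENOMEM.
 */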
int flow_indr_dev_register(flow_indr_block_bind_cb_t *cb, void *cb_priv)
{
	struct flow_indr_dev *indr_dev;

	mutex_lock(&flow_indr_block_lock);
	list_for_each_entry(indr_dev, &flow_block_indr_dev_list, list) {
		if (indr_dev->cb == cb &&
		    indr_dev->cb_priv == cb_priv) {
			refcount_inc(&indr_dev->refcnt);
			mutex_unlock(&flow_indr_block_lock);
			return 0;
		}
	}

	indr_dev = flow_indr_dev_alloc(cb, cb_priv);
	if (!indr_dev) {
		mutex_unlock(&flow_indr_block_lock);
		return -ENOMEM;
	}

	list_add(&indr_dev->list, &flow_block_indr_dev_list);
	existing_qdiscs_register(cb, cb_priv);
	mutex_unlock(&flow_indr_block_lock);

	tcf_action_reoffload_cb(cb, cb_priv, true);

	return 0;
}
EXPORT_SYMBOL(flow_indr_dev_register);

static void __flow_block_indr_cleanup(void (*release)(void *cb_priv),
				      void *cb_priv,
				      struct list_head *cleanup_list)
{
	struct flow_block_cb *this, *next;

	list_for_each_entry_safe(this, next, &flow_block_indr_list, indr.list) {
		if (this->release == release &&
		    this->indr.cb_priv == cb_priv)
			list_move(&this->indr.list, cleanup_list);
	}
}

static void flow_block_indr_notify(struct list_head *cleanup_list)
{
	struct flow_block_cb *this, *next;

	list_for_each_entry_safe(this, next, cleanup_list, indr.list) {
		list_del(&this->indr.list);
		this->indr.cleanup(this);
	}
}

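/* Drop one reference on the @cb/@cb_priv registration. On the final
 * reference the entry is removed, matching callbacks are moved off
 * flow_block_indr_list under the lock, and their cleanup hooks then run
 * via flow_block_indr_notify() outside of it.
 */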
void flow_indr_dev_unregister(flow_indr_block_bind_cb_t *cb, void *cb_priv,
			      void (*release)(void *cb_priv))
{
	struct flow_indr_dev *this, *next, *indr_dev = NULL;
	LIST_HEAD(cleanup_list);

	mutex_lock(&flow_indr_block_lock);
	list_for_each_entry_safe(this, next, &flow_block_indr_dev_list, list) {
		if (this->cb == cb &&
		    this->cb_priv == cb_priv &&
		    refcount_dec_and_test(&this->refcnt)) {
			indr_dev = this;
			list_del(&indr_dev->list);
			break;
		}
	}

	if (!indr_dev) {
		mutex_unlock(&flow_indr_block_lock);
		return;
	}

	__flow_block_indr_cleanup(release, cb_priv, &cleanup_list);
	mutex_unlock(&flow_indr_block_lock);

	tcf_action_reoffload_cb(cb, cb_priv, false);
	flow_block_indr_notify(&cleanup_list);
	kfree(indr_dev);
}
EXPORT_SYMBOL(flow_indr_dev_unregister);

static void flow_block_indr_init(struct flow_block_cb *flow_block,
				 struct flow_block_offload *bo,
				 struct net_device *dev, struct Qdisc *sch, void *data,
				 void *cb_priv,
				 void (*cleanup)(struct flow_block_cb *block_cb))
{
	flow_block->indr.binder_type = bo->binder_type;
	flow_block->indr.data = data;
	flow_block->indr.cb_priv = cb_priv;
	flow_block->indr.dev = dev;
	flow_block->indr.sch = sch;
	flow_block->indr.cleanup = cleanup;
}

struct flow_block_cb *flow_indr_block_cb_alloc(flow_setup_cb_t *cb,
					       void *cb_ident, void *cb_priv,
					       void (*release)(void *cb_priv),
					       struct flow_block_offload *bo,
					       struct net_device *dev,
					       struct Qdisc *sch, void *data,
					       void *indr_cb_priv,
					       void (*cleanup)(struct flow_block_cb *block_cb))
{
	struct flow_block_cb *block_cb;

	block_cb = flow_block_cb_alloc(cb, cb_ident, cb_priv, release);
	if (IS_ERR(block_cb))
		goto out;

	flow_block_indr_init(block_cb, bo, dev, sch, data, indr_cb_priv, cleanup);
	list_add(&block_cb->indr.list, &flow_block_indr_list);

out:
	return block_cb;
}
EXPORT_SYMBOL(flow_indr_block_cb_alloc);

static struct flow_indir_dev_info *find_indir_dev(void *data)
{
	struct flow_indir_dev_info *cur;

	list_for_each_entry(cur, &flow_indir_dev_list, list) {
		if (cur->data == data)
			return cur;
	}
	return NULL;
}

static int indir_dev_add(void *data, struct net_device *dev, struct Qdisc *sch,
			 enum tc_setup_type type, void (*cleanup)(struct flow_block_cb *block_cb),
			 struct flow_block_offload *bo)
{
	struct flow_indir_dev_info *info;

	info = find_indir_dev(data);
	if (info)
		return -EEXIST;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	info->data = data;
	info->dev = dev;
	info->sch = sch;
	info->type = type;
	info->cleanup = cleanup;
	info->command = bo->command;
	info->binder_type = bo->binder_type;
	info->cb_list = bo->cb_list_head;

	list_add(&info->list, &flow_indir_dev_list);
	return 0;
}

static int indir_dev_remove(void *data)
{
	struct flow_indir_dev_info *info;

	info = find_indir_dev(data);
	if (!info)
		return -ENOENT;

	list_del(&info->list);

	kfree(info);
	return 0;
}

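/* Offer a block (un)bind to every registered indirect callback, also
 * recording BIND requests (and forgetting them on UNBIND) so they can
 * be replayed by existing_qdiscs_register(). Returns the number of
 * callbacks that handled the request, or -EOPNOTSUPP when @bo is
 * non-NULL and no callback was added to it.
 */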
int flow_indr_dev_setup_offload(struct net_device *dev, struct Qdisc *sch,
				enum tc_setup_type type, void *data,
				struct flow_block_offload *bo,
				void (*cleanup)(struct flow_block_cb *block_cb))
{
	struct flow_indr_dev *this;
	u32 count = 0;
	int err;

	mutex_lock(&flow_indr_block_lock);
	if (bo) {
		if (bo->command == FLOW_BLOCK_BIND)
			indir_dev_add(data, dev, sch, type, cleanup, bo);
		else if (bo->command == FLOW_BLOCK_UNBIND)
			indir_dev_remove(data);
	}

	list_for_each_entry(this, &flow_block_indr_dev_list, list) {
		err = this->cb(dev, sch, this->cb_priv, type, bo, data, cleanup);
		if (!err)
			count++;
	}

	mutex_unlock(&flow_indr_block_lock);

	return (bo && list_empty(&bo->cb_list)) ? -EOPNOTSUPP : count;
}
EXPORT_SYMBOL(flow_indr_dev_setup_offload);

bool flow_indr_dev_exists(void)
{
	return !list_empty(&flow_block_indr_dev_list);
}
EXPORT_SYMBOL(flow_indr_dev_exists);
639