xref: /linux/net/core/flow_offload.c (revision 69050f8d6d075dc01af7a5f2f550a8067510366f)
// SPDX-License-Identifier: GPL-2.0
2 #include <linux/kernel.h>
3 #include <linux/slab.h>
4 #include <net/act_api.h>
5 #include <net/flow_offload.h>
6 #include <linux/rtnetlink.h>
7 #include <linux/mutex.h>
8 #include <linux/rhashtable.h>
9 
10 struct flow_rule *flow_rule_alloc(unsigned int num_actions)
11 {
12 	struct flow_rule *rule;
13 	int i;
14 
15 	rule = kzalloc_flex(*rule, action.entries, num_actions, GFP_KERNEL);
16 	if (!rule)
17 		return NULL;
18 
19 	rule->action.num_entries = num_actions;
20 	/* Pre-fill each action hw_stats with DONT_CARE.
21 	 * Caller can override this if it wants stats for a given action.
22 	 */
23 	for (i = 0; i < num_actions; i++)
24 		rule->action.entries[i].hw_stats = FLOW_ACTION_HW_STATS_DONT_CARE;
25 
26 	return rule;
27 }
28 EXPORT_SYMBOL(flow_rule_alloc);
29 
30 struct flow_offload_action *offload_action_alloc(unsigned int num_actions)
31 {
32 	struct flow_offload_action *fl_action;
33 	int i;
34 
35 	fl_action = kzalloc_flex(*fl_action, action.entries, num_actions,
36 				 GFP_KERNEL);
37 	if (!fl_action)
38 		return NULL;
39 
40 	fl_action->action.num_entries = num_actions;
41 	/* Pre-fill each action hw_stats with DONT_CARE.
42 	 * Caller can override this if it wants stats for a given action.
43 	 */
44 	for (i = 0; i < num_actions; i++)
45 		fl_action->action.entries[i].hw_stats = FLOW_ACTION_HW_STATS_DONT_CARE;
46 
47 	return fl_action;
48 }
49 
/* Expands to the body of a flow_rule_match_*() helper: looks up the rule's
 * dissector and points __out->key/__out->mask at the @__type-specific
 * offsets inside the dissected key and mask blobs.  It declares locals, so
 * it must appear at the top of the calling function.
 */
#define FLOW_DISSECTOR_MATCH(__rule, __type, __out)				\
	const struct flow_match *__m = &(__rule)->match;			\
	struct flow_dissector *__d = (__m)->dissector;				\
										\
	(__out)->key = skb_flow_dissector_target(__d, __type, (__m)->key);	\
	(__out)->mask = skb_flow_dissector_target(__d, __type, (__m)->mask);	\

/* flow_rule_match_*(): one extractor per dissector key.  Each fills
 * @out->key and @out->mask with pointers into @rule's dissected match
 * data for the corresponding FLOW_DISSECTOR_KEY_* type.
 */
void flow_rule_match_meta(const struct flow_rule *rule,
			  struct flow_match_meta *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_META, out);
}
EXPORT_SYMBOL(flow_rule_match_meta);

void flow_rule_match_basic(const struct flow_rule *rule,
			   struct flow_match_basic *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_BASIC, out);
}
EXPORT_SYMBOL(flow_rule_match_basic);

void flow_rule_match_control(const struct flow_rule *rule,
			     struct flow_match_control *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_CONTROL, out);
}
EXPORT_SYMBOL(flow_rule_match_control);

void flow_rule_match_eth_addrs(const struct flow_rule *rule,
			       struct flow_match_eth_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_eth_addrs);

void flow_rule_match_vlan(const struct flow_rule *rule,
			  struct flow_match_vlan *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_VLAN, out);
}
EXPORT_SYMBOL(flow_rule_match_vlan);

void flow_rule_match_cvlan(const struct flow_rule *rule,
			   struct flow_match_vlan *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_CVLAN, out);
}
EXPORT_SYMBOL(flow_rule_match_cvlan);

void flow_rule_match_arp(const struct flow_rule *rule,
			 struct flow_match_arp *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ARP, out);
}
EXPORT_SYMBOL(flow_rule_match_arp);

void flow_rule_match_ipv4_addrs(const struct flow_rule *rule,
				struct flow_match_ipv4_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_ipv4_addrs);

void flow_rule_match_ipv6_addrs(const struct flow_rule *rule,
				struct flow_match_ipv6_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_ipv6_addrs);

void flow_rule_match_ip(const struct flow_rule *rule,
			struct flow_match_ip *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IP, out);
}
EXPORT_SYMBOL(flow_rule_match_ip);

void flow_rule_match_ports(const struct flow_rule *rule,
			   struct flow_match_ports *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_PORTS, out);
}
EXPORT_SYMBOL(flow_rule_match_ports);

void flow_rule_match_ports_range(const struct flow_rule *rule,
				 struct flow_match_ports_range *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_PORTS_RANGE, out);
}
EXPORT_SYMBOL(flow_rule_match_ports_range);

void flow_rule_match_tcp(const struct flow_rule *rule,
			 struct flow_match_tcp *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_TCP, out);
}
EXPORT_SYMBOL(flow_rule_match_tcp);

void flow_rule_match_ipsec(const struct flow_rule *rule,
			   struct flow_match_ipsec *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IPSEC, out);
}
EXPORT_SYMBOL(flow_rule_match_ipsec);

void flow_rule_match_icmp(const struct flow_rule *rule,
			  struct flow_match_icmp *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ICMP, out);
}
EXPORT_SYMBOL(flow_rule_match_icmp);

void flow_rule_match_mpls(const struct flow_rule *rule,
			  struct flow_match_mpls *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_MPLS, out);
}
EXPORT_SYMBOL(flow_rule_match_mpls);

void flow_rule_match_enc_control(const struct flow_rule *rule,
				 struct flow_match_control *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_control);

void flow_rule_match_enc_ipv4_addrs(const struct flow_rule *rule,
				    struct flow_match_ipv4_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_ipv4_addrs);

void flow_rule_match_enc_ipv6_addrs(const struct flow_rule *rule,
				    struct flow_match_ipv6_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_ipv6_addrs);

void flow_rule_match_enc_ip(const struct flow_rule *rule,
			    struct flow_match_ip *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IP, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_ip);

void flow_rule_match_enc_ports(const struct flow_rule *rule,
			       struct flow_match_ports *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_PORTS, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_ports);

void flow_rule_match_enc_keyid(const struct flow_rule *rule,
			       struct flow_match_enc_keyid *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_KEYID, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_keyid);

void flow_rule_match_enc_opts(const struct flow_rule *rule,
			      struct flow_match_enc_opts *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_OPTS, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_opts);
217 
218 struct flow_action_cookie *flow_action_cookie_create(void *data,
219 						     unsigned int len,
220 						     gfp_t gfp)
221 {
222 	struct flow_action_cookie *cookie;
223 
224 	cookie = kmalloc(sizeof(*cookie) + len, gfp);
225 	if (!cookie)
226 		return NULL;
227 	cookie->cookie_len = len;
228 	memcpy(cookie->cookie, data, len);
229 	return cookie;
230 }
231 EXPORT_SYMBOL(flow_action_cookie_create);
232 
/* Free a cookie allocated by flow_action_cookie_create(); NULL is a no-op. */
void flow_action_cookie_destroy(struct flow_action_cookie *cookie)
{
	kfree(cookie);
}
EXPORT_SYMBOL(flow_action_cookie_destroy);
238 
/* Extractor helpers: fill @out->key/@out->mask with pointers into @rule's
 * dissected match data for the given FLOW_DISSECTOR_KEY_* type.
 */
void flow_rule_match_ct(const struct flow_rule *rule,
			struct flow_match_ct *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_CT, out);
}
EXPORT_SYMBOL(flow_rule_match_ct);

void flow_rule_match_pppoe(const struct flow_rule *rule,
			   struct flow_match_pppoe *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_PPPOE, out);
}
EXPORT_SYMBOL(flow_rule_match_pppoe);

void flow_rule_match_l2tpv3(const struct flow_rule *rule,
			    struct flow_match_l2tpv3 *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_L2TPV3, out);
}
EXPORT_SYMBOL(flow_rule_match_l2tpv3);
259 
260 struct flow_block_cb *flow_block_cb_alloc(flow_setup_cb_t *cb,
261 					  void *cb_ident, void *cb_priv,
262 					  void (*release)(void *cb_priv))
263 {
264 	struct flow_block_cb *block_cb;
265 
266 	block_cb = kzalloc_obj(*block_cb, GFP_KERNEL);
267 	if (!block_cb)
268 		return ERR_PTR(-ENOMEM);
269 
270 	block_cb->cb = cb;
271 	block_cb->cb_ident = cb_ident;
272 	block_cb->cb_priv = cb_priv;
273 	block_cb->release = release;
274 
275 	return block_cb;
276 }
277 EXPORT_SYMBOL(flow_block_cb_alloc);
278 
279 void flow_block_cb_free(struct flow_block_cb *block_cb)
280 {
281 	if (block_cb->release)
282 		block_cb->release(block_cb->cb_priv);
283 
284 	kfree(block_cb);
285 }
286 EXPORT_SYMBOL(flow_block_cb_free);
287 
288 struct flow_block_cb *flow_block_cb_lookup(struct flow_block *block,
289 					   flow_setup_cb_t *cb, void *cb_ident)
290 {
291 	struct flow_block_cb *block_cb;
292 
293 	list_for_each_entry(block_cb, &block->cb_list, list) {
294 		if (block_cb->cb == cb &&
295 		    block_cb->cb_ident == cb_ident)
296 			return block_cb;
297 	}
298 
299 	return NULL;
300 }
301 EXPORT_SYMBOL(flow_block_cb_lookup);
302 
/* Accessor for the driver-private pointer stored at alloc time. */
void *flow_block_cb_priv(struct flow_block_cb *block_cb)
{
	return block_cb->cb_priv;
}
EXPORT_SYMBOL(flow_block_cb_priv);
308 
/* Take a reference on the descriptor.  Plain (non-atomic) increment;
 * callers are expected to serialize access themselves.
 */
void flow_block_cb_incref(struct flow_block_cb *block_cb)
{
	block_cb->refcnt++;
}
EXPORT_SYMBOL(flow_block_cb_incref);
314 
/* Drop a reference and return the remaining count.  Plain (non-atomic)
 * decrement; callers are expected to serialize access themselves.
 */
unsigned int flow_block_cb_decref(struct flow_block_cb *block_cb)
{
	return --block_cb->refcnt;
}
EXPORT_SYMBOL(flow_block_cb_decref);
320 
321 bool flow_block_cb_is_busy(flow_setup_cb_t *cb, void *cb_ident,
322 			   struct list_head *driver_block_list)
323 {
324 	struct flow_block_cb *block_cb;
325 
326 	list_for_each_entry(block_cb, driver_block_list, driver_list) {
327 		if (block_cb->cb == cb &&
328 		    block_cb->cb_ident == cb_ident)
329 			return true;
330 	}
331 
332 	return false;
333 }
334 EXPORT_SYMBOL(flow_block_cb_is_busy);
335 
336 int flow_block_cb_setup_simple(struct flow_block_offload *f,
337 			       struct list_head *driver_block_list,
338 			       flow_setup_cb_t *cb,
339 			       void *cb_ident, void *cb_priv,
340 			       bool ingress_only)
341 {
342 	struct flow_block_cb *block_cb;
343 
344 	if (ingress_only &&
345 	    f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
346 		return -EOPNOTSUPP;
347 
348 	f->driver_block_list = driver_block_list;
349 
350 	switch (f->command) {
351 	case FLOW_BLOCK_BIND:
352 		if (flow_block_cb_is_busy(cb, cb_ident, driver_block_list))
353 			return -EBUSY;
354 
355 		block_cb = flow_block_cb_alloc(cb, cb_ident, cb_priv, NULL);
356 		if (IS_ERR(block_cb))
357 			return PTR_ERR(block_cb);
358 
359 		flow_block_cb_add(block_cb, f);
360 		list_add_tail(&block_cb->driver_list, driver_block_list);
361 		return 0;
362 	case FLOW_BLOCK_UNBIND:
363 		block_cb = flow_block_cb_lookup(f->block, cb, cb_ident);
364 		if (!block_cb)
365 			return -ENOENT;
366 
367 		flow_block_cb_remove(block_cb, f);
368 		list_del(&block_cb->driver_list);
369 		return 0;
370 	default:
371 		return -EOPNOTSUPP;
372 	}
373 }
374 EXPORT_SYMBOL(flow_block_cb_setup_simple);
375 
376 static DEFINE_MUTEX(flow_indr_block_lock);
377 static LIST_HEAD(flow_block_indr_list);
378 static LIST_HEAD(flow_block_indr_dev_list);
379 static LIST_HEAD(flow_indir_dev_list);
380 
/* One registered indirect-block driver callback.  refcnt counts how many
 * times the same (cb, cb_priv) pair has been registered.
 */
struct flow_indr_dev {
	struct list_head		list;	/* on flow_block_indr_dev_list */
	flow_indr_block_bind_cb_t	*cb;
	void				*cb_priv;
	refcount_t			refcnt;
};
387 
388 static struct flow_indr_dev *flow_indr_dev_alloc(flow_indr_block_bind_cb_t *cb,
389 						 void *cb_priv)
390 {
391 	struct flow_indr_dev *indr_dev;
392 
393 	indr_dev = kmalloc_obj(*indr_dev, GFP_KERNEL);
394 	if (!indr_dev)
395 		return NULL;
396 
397 	indr_dev->cb		= cb;
398 	indr_dev->cb_priv	= cb_priv;
399 	refcount_set(&indr_dev->refcnt, 1);
400 
401 	return indr_dev;
402 }
403 
/* Recorded indirect block request, kept so that drivers which register
 * later can replay it (see existing_qdiscs_register()).
 */
struct flow_indir_dev_info {
	void *data;				/* lookup key, see find_indir_dev() */
	struct net_device *dev;
	struct Qdisc *sch;
	enum tc_setup_type type;
	void (*cleanup)(struct flow_block_cb *block_cb);
	struct list_head list;			/* on flow_indir_dev_list */
	enum flow_block_command command;
	enum flow_block_binder_type binder_type;
	struct list_head *cb_list;		/* requester's cb list head */
};
415 
/* Replay every recorded indirect block request against a newly registered
 * driver callback, so the driver learns about blocks that were set up
 * before it appeared.  Called with flow_indr_block_lock held (the list is
 * walked and modified under that lock elsewhere in this file).
 */
static void existing_qdiscs_register(flow_indr_block_bind_cb_t *cb, void *cb_priv)
{
	struct flow_block_offload bo;
	struct flow_indir_dev_info *cur;

	list_for_each_entry(cur, &flow_indir_dev_list, list) {
		/* Rebuild a flow_block_offload from the recorded request. */
		memset(&bo, 0, sizeof(bo));
		bo.command = cur->command;
		bo.binder_type = cur->binder_type;
		INIT_LIST_HEAD(&bo.cb_list);
		cb(cur->dev, cur->sch, cb_priv, cur->type, &bo, cur->data, cur->cleanup);
		/* Hand any callbacks the driver installed over to the
		 * original requester's callback list.
		 */
		list_splice(&bo.cb_list, cur->cb_list);
	}
}
430 
/* Register an indirect-block driver callback.  If the same (cb, cb_priv)
 * pair is already registered, only its refcount is bumped.  Otherwise a
 * new entry is added, previously recorded block requests are replayed to
 * the new callback, and act_api is asked to re-offload actions.
 * Returns 0 on success or -ENOMEM.
 */
int flow_indr_dev_register(flow_indr_block_bind_cb_t *cb, void *cb_priv)
{
	struct flow_indr_dev *indr_dev;

	mutex_lock(&flow_indr_block_lock);
	list_for_each_entry(indr_dev, &flow_block_indr_dev_list, list) {
		if (indr_dev->cb == cb &&
		    indr_dev->cb_priv == cb_priv) {
			refcount_inc(&indr_dev->refcnt);
			mutex_unlock(&flow_indr_block_lock);
			return 0;
		}
	}

	indr_dev = flow_indr_dev_alloc(cb, cb_priv);
	if (!indr_dev) {
		mutex_unlock(&flow_indr_block_lock);
		return -ENOMEM;
	}

	list_add(&indr_dev->list, &flow_block_indr_dev_list);
	existing_qdiscs_register(cb, cb_priv);
	mutex_unlock(&flow_indr_block_lock);

	/* Action re-offload happens outside the lock. */
	tcf_action_reoffload_cb(cb, cb_priv, true);

	return 0;
}
EXPORT_SYMBOL(flow_indr_dev_register);
460 
461 static void __flow_block_indr_cleanup(void (*release)(void *cb_priv),
462 				      void *cb_priv,
463 				      struct list_head *cleanup_list)
464 {
465 	struct flow_block_cb *this, *next;
466 
467 	list_for_each_entry_safe(this, next, &flow_block_indr_list, indr.list) {
468 		if (this->release == release &&
469 		    this->indr.cb_priv == cb_priv)
470 			list_move(&this->indr.list, cleanup_list);
471 	}
472 }
473 
/* Unlink each collected flow_block_cb and run its indirect cleanup hook.
 * Called without flow_indr_block_lock held; @cleanup_list is private to
 * the caller by this point.
 */
static void flow_block_indr_notify(struct list_head *cleanup_list)
{
	struct flow_block_cb *this, *next;

	list_for_each_entry_safe(this, next, cleanup_list, indr.list) {
		list_del(&this->indr.list);
		this->indr.cleanup(this);
	}
}
483 
/* Drop one reference on the (cb, cb_priv) registration.  Only when the
 * last reference is gone is the entry unlinked; its indirect
 * flow_block_cbs are collected under the lock, and their cleanup hooks
 * are invoked after the lock is dropped.
 */
void flow_indr_dev_unregister(flow_indr_block_bind_cb_t *cb, void *cb_priv,
			      void (*release)(void *cb_priv))
{
	struct flow_indr_dev *this, *next, *indr_dev = NULL;
	LIST_HEAD(cleanup_list);

	mutex_lock(&flow_indr_block_lock);
	list_for_each_entry_safe(this, next, &flow_block_indr_dev_list, list) {
		/* refcount_dec_and_test() only fires on the last put. */
		if (this->cb == cb &&
		    this->cb_priv == cb_priv &&
		    refcount_dec_and_test(&this->refcnt)) {
			indr_dev = this;
			list_del(&indr_dev->list);
			break;
		}
	}

	if (!indr_dev) {
		mutex_unlock(&flow_indr_block_lock);
		return;
	}

	__flow_block_indr_cleanup(release, cb_priv, &cleanup_list);
	mutex_unlock(&flow_indr_block_lock);

	/* Notify and free outside the lock. */
	tcf_action_reoffload_cb(cb, cb_priv, false);
	flow_block_indr_notify(&cleanup_list);
	kfree(indr_dev);
}
EXPORT_SYMBOL(flow_indr_dev_unregister);
514 
515 static void flow_block_indr_init(struct flow_block_cb *flow_block,
516 				 struct flow_block_offload *bo,
517 				 struct net_device *dev, struct Qdisc *sch, void *data,
518 				 void *cb_priv,
519 				 void (*cleanup)(struct flow_block_cb *block_cb))
520 {
521 	flow_block->indr.binder_type = bo->binder_type;
522 	flow_block->indr.data = data;
523 	flow_block->indr.cb_priv = cb_priv;
524 	flow_block->indr.dev = dev;
525 	flow_block->indr.sch = sch;
526 	flow_block->indr.cleanup = cleanup;
527 }
528 
529 struct flow_block_cb *flow_indr_block_cb_alloc(flow_setup_cb_t *cb,
530 					       void *cb_ident, void *cb_priv,
531 					       void (*release)(void *cb_priv),
532 					       struct flow_block_offload *bo,
533 					       struct net_device *dev,
534 					       struct Qdisc *sch, void *data,
535 					       void *indr_cb_priv,
536 					       void (*cleanup)(struct flow_block_cb *block_cb))
537 {
538 	struct flow_block_cb *block_cb;
539 
540 	block_cb = flow_block_cb_alloc(cb, cb_ident, cb_priv, release);
541 	if (IS_ERR(block_cb))
542 		goto out;
543 
544 	flow_block_indr_init(block_cb, bo, dev, sch, data, indr_cb_priv, cleanup);
545 	list_add(&block_cb->indr.list, &flow_block_indr_list);
546 
547 out:
548 	return block_cb;
549 }
550 EXPORT_SYMBOL(flow_indr_block_cb_alloc);
551 
552 static struct flow_indir_dev_info *find_indir_dev(void *data)
553 {
554 	struct flow_indir_dev_info *cur;
555 
556 	list_for_each_entry(cur, &flow_indir_dev_list, list) {
557 		if (cur->data == data)
558 			return cur;
559 	}
560 	return NULL;
561 }
562 
563 static int indir_dev_add(void *data, struct net_device *dev, struct Qdisc *sch,
564 			 enum tc_setup_type type, void (*cleanup)(struct flow_block_cb *block_cb),
565 			 struct flow_block_offload *bo)
566 {
567 	struct flow_indir_dev_info *info;
568 
569 	info = find_indir_dev(data);
570 	if (info)
571 		return -EEXIST;
572 
573 	info = kzalloc_obj(*info, GFP_KERNEL);
574 	if (!info)
575 		return -ENOMEM;
576 
577 	info->data = data;
578 	info->dev = dev;
579 	info->sch = sch;
580 	info->type = type;
581 	info->cleanup = cleanup;
582 	info->command = bo->command;
583 	info->binder_type = bo->binder_type;
584 	info->cb_list = bo->cb_list_head;
585 
586 	list_add(&info->list, &flow_indir_dev_list);
587 	return 0;
588 }
589 
/* Forget the recorded indirect block request for @data.
 * Returns 0 on success or -ENOENT if nothing was recorded.
 */
static int indir_dev_remove(void *data)
{
	struct flow_indir_dev_info *info;

	info = find_indir_dev(data);
	if (!info)
		return -ENOENT;

	list_del(&info->list);

	kfree(info);
	return 0;
}
603 
/* Offer an indirect block bind/unbind request to every registered driver
 * callback.  The request is also recorded (BIND) or forgotten (UNBIND) so
 * that late-registering drivers can replay it.  Returns the number of
 * callbacks that accepted the request, or -EOPNOTSUPP when @bo was
 * supplied but no driver added an entry to bo->cb_list.
 */
int flow_indr_dev_setup_offload(struct net_device *dev, struct Qdisc *sch,
				enum tc_setup_type type, void *data,
				struct flow_block_offload *bo,
				void (*cleanup)(struct flow_block_cb *block_cb))
{
	struct flow_indr_dev *this;
	u32 count = 0;
	int err;

	mutex_lock(&flow_indr_block_lock);
	if (bo) {
		/* Errors from add/remove are deliberately ignored: the
		 * drivers are still offered the request below.
		 */
		if (bo->command == FLOW_BLOCK_BIND)
			indir_dev_add(data, dev, sch, type, cleanup, bo);
		else if (bo->command == FLOW_BLOCK_UNBIND)
			indir_dev_remove(data);
	}

	list_for_each_entry(this, &flow_block_indr_dev_list, list) {
		err = this->cb(dev, sch, this->cb_priv, type, bo, data, cleanup);
		if (!err)
			count++;
	}

	mutex_unlock(&flow_indr_block_lock);

	return (bo && list_empty(&bo->cb_list)) ? -EOPNOTSUPP : count;
}
EXPORT_SYMBOL(flow_indr_dev_setup_offload);
632 
/* True if at least one indirect-block driver callback is registered. */
bool flow_indr_dev_exists(void)
{
	return !list_empty(&flow_block_indr_dev_list);
}
EXPORT_SYMBOL(flow_indr_dev_exists);
638