// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/l3mdev/l3mdev.c - L3 master device implementation
 * Copyright (c) 2015 Cumulus Networks
 * Copyright (c) 2015 David Ahern <dsa@cumulusnetworks.com>
 */

#include <linux/netdevice.h>
#include <net/fib_rules.h>
#include <net/l3mdev.h>

static DEFINE_SPINLOCK(l3mdev_lock);

struct l3mdev_handler {
	lookup_by_table_id_t dev_lookup;
};
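
/* Table of per-type lookup handlers. An l3mdev driver registers a
 * callback that maps a FIB table id to the ifindex of the device owning
 * that table; registration, unregistration and use of the callback are
 * all serialized by l3mdev_lock.
 */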
static struct l3mdev_handler l3mdev_handlers[L3MDEV_TYPE_MAX + 1];

static int l3mdev_check_type(enum l3mdev_type l3type)
{
	if (l3type <= L3MDEV_TYPE_UNSPEC || l3type > L3MDEV_TYPE_MAX)
		return -EINVAL;

	return 0;
}

int l3mdev_table_lookup_register(enum l3mdev_type l3type,
				 lookup_by_table_id_t fn)
{
	struct l3mdev_handler *hdlr;
	int res;

	res = l3mdev_check_type(l3type);
	if (res)
		return res;

	hdlr = &l3mdev_handlers[l3type];

	spin_lock(&l3mdev_lock);

	if (hdlr->dev_lookup) {
		res = -EBUSY;
		goto unlock;
	}

	hdlr->dev_lookup = fn;
	res = 0;

unlock:
	spin_unlock(&l3mdev_lock);

	return res;
}
EXPORT_SYMBOL_GPL(l3mdev_table_lookup_register);

void l3mdev_table_lookup_unregister(enum l3mdev_type l3type,
				    lookup_by_table_id_t fn)
{
	struct l3mdev_handler *hdlr;

	if (l3mdev_check_type(l3type))
		return;

	hdlr = &l3mdev_handlers[l3type];

	spin_lock(&l3mdev_lock);

	if (hdlr->dev_lookup == fn)
		hdlr->dev_lookup = NULL;

	spin_unlock(&l3mdev_lock);
}
EXPORT_SYMBOL_GPL(l3mdev_table_lookup_unregister);
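
/* Usage sketch (illustration only, not part of this file): an l3mdev
 * driver such as VRF supplies the table-id -> ifindex mapping for its
 * own devices. The callback and helper below are hypothetical; the
 * callback signature is lookup_by_table_id_t from <net/l3mdev.h> and
 * returns the ifindex or a negative errno.
 *
 *	static int example_ifindex_by_table_id(struct net *net, u32 table_id)
 *	{
 *		struct net_device *dev;
 *
 *		dev = example_find_dev_by_table(net, table_id);
 *		return dev ? dev->ifindex : -ENODEV;
 *	}
 *
 *	err = l3mdev_table_lookup_register(L3MDEV_TYPE_VRF,
 *					   example_ifindex_by_table_id);
 *	...
 *	l3mdev_table_lookup_unregister(L3MDEV_TYPE_VRF,
 *				       example_ifindex_by_table_id);
 */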

int l3mdev_ifindex_lookup_by_table_id(enum l3mdev_type l3type,
				      struct net *net, u32 table_id)
{
	lookup_by_table_id_t lookup;
	struct l3mdev_handler *hdlr;
	int ifindex = -EINVAL;
	int res;

	res = l3mdev_check_type(l3type);
	if (res)
		return res;

	hdlr = &l3mdev_handlers[l3type];

	spin_lock(&l3mdev_lock);

	lookup = hdlr->dev_lookup;
	if (!lookup)
		goto unlock;

	ifindex = lookup(net, table_id);

unlock:
	spin_unlock(&l3mdev_lock);

	return ifindex;
}
EXPORT_SYMBOL_GPL(l3mdev_ifindex_lookup_by_table_id);
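
/* Example (hypothetical caller): resolve the device that owns a FIB
 * table, e.g. when a request identifies an L3 domain only by table id.
 * A negative return means no handler is registered or no device owns
 * the table.
 *
 *	ifindex = l3mdev_ifindex_lookup_by_table_id(L3MDEV_TYPE_VRF, net,
 *						     tb_id);
 *	if (ifindex < 0)
 *		// fall back or report the error
 */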

/**
 * l3mdev_master_ifindex_rcu - get index of L3 master device
 * @dev: targeted interface
 */
int l3mdev_master_ifindex_rcu(const struct net_device *dev)
{
	int ifindex = 0;

	if (!dev)
		return 0;

	if (netif_is_l3_master(dev)) {
		ifindex = dev->ifindex;
	} else if (netif_is_l3_slave(dev)) {
		struct net_device *master;
		struct net_device *_dev = (struct net_device *)dev;

		/* netdev_master_upper_dev_get_rcu calls
		 * list_first_or_null_rcu to walk the upper dev list.
		 * list_first_or_null_rcu does not handle a const arg. We
		 * aren't making changes, just want the master device from
		 * that list, so cast away the const.
		 */
		master = netdev_master_upper_dev_get_rcu(_dev);
		if (master)
			ifindex = master->ifindex;
	}

	return ifindex;
}
EXPORT_SYMBOL_GPL(l3mdev_master_ifindex_rcu);
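
/* Example (hypothetical caller): both the master and slave cases above
 * rely on RCU-protected upper-device pointers, so callers must be in an
 * RCU read-side critical section.
 *
 *	rcu_read_lock();
 *	master_ifindex = l3mdev_master_ifindex_rcu(skb->dev);
 *	rcu_read_unlock();
 */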

/**
 * l3mdev_master_upper_ifindex_by_index_rcu - get index of upper l3 master
 *					       device
 * @net: network namespace for device index lookup
 * @ifindex: targeted interface
 */
int l3mdev_master_upper_ifindex_by_index_rcu(struct net *net, int ifindex)
{
	struct net_device *dev;

	dev = dev_get_by_index_rcu(net, ifindex);
	while (dev && !netif_is_l3_master(dev))
		dev = netdev_master_upper_dev_get_rcu(dev);

	return dev ? dev->ifindex : 0;
}
EXPORT_SYMBOL_GPL(l3mdev_master_upper_ifindex_by_index_rcu);

/**
 * l3mdev_fib_table_rcu - get FIB table id associated with an L3
 *			  master interface
 * @dev: targeted interface
 */
u32 l3mdev_fib_table_rcu(const struct net_device *dev)
{
	u32 tb_id = 0;

	if (!dev)
		return 0;

	if (netif_is_l3_master(dev)) {
		if (dev->l3mdev_ops->l3mdev_fib_table)
			tb_id = dev->l3mdev_ops->l3mdev_fib_table(dev);
	} else if (netif_is_l3_slave(dev)) {
		/* Users of netdev_master_upper_dev_get_rcu need non-const,
		 * but current inet_*type functions take a const
		 */
		struct net_device *_dev = (struct net_device *)dev;
		const struct net_device *master;

		master = netdev_master_upper_dev_get_rcu(_dev);
		if (master &&
		    master->l3mdev_ops->l3mdev_fib_table)
			tb_id = master->l3mdev_ops->l3mdev_fib_table(master);
	}

	return tb_id;
}
EXPORT_SYMBOL_GPL(l3mdev_fib_table_rcu);
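
/* Example (hypothetical caller): a return of 0 means the device is not
 * tied to an L3 master device, so callers typically fall back to the
 * main table.
 *
 *	rcu_read_lock();
 *	tb_id = l3mdev_fib_table_rcu(dev);
 *	rcu_read_unlock();
 *	if (!tb_id)
 *		tb_id = RT_TABLE_MAIN;
 */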

u32 l3mdev_fib_table_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;
	u32 tb_id = 0;

	if (!ifindex)
		return 0;

	rcu_read_lock();

	dev = dev_get_by_index_rcu(net, ifindex);
	if (dev)
		tb_id = l3mdev_fib_table_rcu(dev);

	rcu_read_unlock();

	return tb_id;
}
EXPORT_SYMBOL_GPL(l3mdev_fib_table_by_index);

/**
 * l3mdev_link_scope_lookup - IPv6 route lookup based on flow for link
 *			      local and multicast addresses
 * @net: network namespace for device index lookup
 * @fl6: IPv6 flow struct for lookup
 *
 * This function does not hold refcnt on the returned dst.
 * Caller must hold rcu_read_lock().
 */
struct dst_entry *l3mdev_link_scope_lookup(struct net *net,
					   struct flowi6 *fl6)
{
	struct dst_entry *dst = NULL;
	struct net_device *dev;

	WARN_ON_ONCE(!rcu_read_lock_held());
	if (fl6->flowi6_oif) {
		dev = dev_get_by_index_rcu(net, fl6->flowi6_oif);
		if (dev && netif_is_l3_slave(dev))
			dev = netdev_master_upper_dev_get_rcu(dev);

		if (dev && netif_is_l3_master(dev) &&
		    dev->l3mdev_ops->l3mdev_link_scope_lookup)
			dst = dev->l3mdev_ops->l3mdev_link_scope_lookup(dev, fl6);
	}

	return dst;
}
EXPORT_SYMBOL_GPL(l3mdev_link_scope_lookup);
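
/* Example (hypothetical caller): steer a link-local or multicast IPv6
 * lookup to the L3 master device's table. Since no reference is taken
 * on the returned dst, it must only be used inside the same RCU
 * read-side section.
 *
 *	rcu_read_lock();
 *	dst = l3mdev_link_scope_lookup(net, &fl6);
 *	if (dst)
 *		// use dst while still under rcu_read_lock()
 *	rcu_read_unlock();
 */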

/**
 * l3mdev_fib_rule_match - Determine if flowi references an
 *			   L3 master device
 * @net: network namespace for device index lookup
 * @fl: flow struct
 * @arg: store the table the rule matched with here
 */
int l3mdev_fib_rule_match(struct net *net, struct flowi *fl,
			  struct fib_lookup_arg *arg)
{
	struct net_device *dev;
	int rc = 0;

	/* update flow ensures flowi_l3mdev is set when relevant */
	if (!fl->flowi_l3mdev)
		return 0;

	rcu_read_lock();

	dev = dev_get_by_index_rcu(net, fl->flowi_l3mdev);
	if (dev && netif_is_l3_master(dev) &&
	    dev->l3mdev_ops->l3mdev_fib_table) {
		arg->table = dev->l3mdev_ops->l3mdev_fib_table(dev);
		rc = 1;
	}

	rcu_read_unlock();

	return rc;
}
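
/* Usage note (sketch, caller-side names hypothetical): the FIB rules
 * core consults this for rules created with the l3mdev option
 * ("ip rule add l3mdev"). When the flow references an L3 master device,
 * the rule's lookup is redirected to that device's FIB table:
 *
 *	if (rule_is_l3mdev && l3mdev_fib_rule_match(net, flp, arg))
 *		// arg->table now holds the l3mdev's FIB table id
 */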

void l3mdev_update_flow(struct net *net, struct flowi *fl)
{
	struct net_device *dev;

	rcu_read_lock();

	if (fl->flowi_oif) {
		dev = dev_get_by_index_rcu(net, fl->flowi_oif);
		if (dev) {
			if (!fl->flowi_l3mdev)
				fl->flowi_l3mdev = l3mdev_master_ifindex_rcu(dev);

			/* oif set to L3mdev directs lookup to its table;
			 * reset to avoid oif match in fib_lookup
			 */
			if (netif_is_l3_master(dev))
				fl->flowi_oif = 0;
			goto out;
		}
	}

	if (fl->flowi_iif > LOOPBACK_IFINDEX && !fl->flowi_l3mdev) {
		dev = dev_get_by_index_rcu(net, fl->flowi_iif);
		if (dev)
			fl->flowi_l3mdev = l3mdev_master_ifindex_rcu(dev);
	}

out:
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(l3mdev_update_flow);