// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/spinlock.h>
#include <linux/netfilter/nf_conntrack_common.h>
#include <linux/netfilter/nf_tables.h>
#include <net/ip.h>
#include <net/inet_dscp.h>
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_core.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_extend.h>
#include <net/netfilter/nf_flow_table.h>

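/* Pick the transmit path for a dst: packets with an xfrm policy attached
 * go through the xfrm stack, everything else is handed to the neighbour
 * layer.
 */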
static enum flow_offload_xmit_type nft_xmit_type(struct dst_entry *dst)
{
	if (dst_xfrm(dst))
		return FLOW_OFFLOAD_XMIT_XFRM;

	return FLOW_OFFLOAD_XMIT_NEIGH;
}

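/* Fill in the default route for one direction: record the cached dst and
 * its xmit type, and use the dst's device as the input interface of the
 * reply direction.
 */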
static void nft_default_forward_path(struct nf_flow_route *route,
				     struct dst_entry *dst_cache,
				     enum ip_conntrack_dir dir)
{
	route->tuple[!dir].in.ifindex = dst_cache->dev->ifindex;
	route->tuple[dir].dst = dst_cache;
	route->tuple[dir].xmit_type = nft_xmit_type(dst_cache);
}

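/* Reject loopback and non-Ethernet devices, and devices without a valid
 * unicast address; only plain Ethernet devices are eligible for direct
 * transmission and forward path resolution.
 */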
static bool nft_is_valid_ether_device(const struct net_device *dev)
{
	if (!dev || (dev->flags & IFF_LOOPBACK) || dev->type != ARPHRD_ETHER ||
	    dev->addr_len != ETH_ALEN || !is_valid_ether_addr(dev->dev_addr))
		return false;

	return true;
}

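/* Resolve the next hop over @dst_cache and ask the device to describe the
 * forwarding path (VLAN, PPPoE, bridge, DSA, ...) towards it. The resolved
 * hardware address is returned in @ha; non-Ethernet devices skip the
 * neighbour lookup.
 */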
static int nft_dev_fill_forward_path(const struct nf_flow_route *route,
				     const struct dst_entry *dst_cache,
				     const struct nf_conn *ct,
				     enum ip_conntrack_dir dir, u8 *ha,
				     struct net_device_path_stack *stack)
{
	const void *daddr = &ct->tuplehash[!dir].tuple.src.u3;
	struct net_device *dev = dst_cache->dev;
	struct neighbour *n;
	u8 nud_state;

	if (!nft_is_valid_ether_device(dev))
		goto out;

	n = dst_neigh_lookup(dst_cache, daddr);
	if (!n)
		return -1;

	read_lock_bh(&n->lock);
	nud_state = n->nud_state;
	ether_addr_copy(ha, n->ha);
	read_unlock_bh(&n->lock);
	neigh_release(n);

	if (!(nud_state & NUD_VALID))
		return -1;

out:
	return dev_fill_forward_path(dev, ha, stack);
}

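/* Forwarding path information collected from the device path stack: the
 * real input/output devices, VLAN/PPPoE encapsulation and tunnel headers,
 * plus the Ethernet addresses used for direct transmission.
 */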
struct nft_forward_info {
	const struct net_device *indev;
	const struct net_device *outdev;
	struct id {
		__u16	id;
		__be16	proto;
	} encap[NF_FLOW_TABLE_ENCAP_MAX];
	u8 num_encaps;
	struct flow_offload_tunnel tun;
	u8 num_tuns;
	u8 ingress_vlans;
	u8 h_source[ETH_ALEN];
	u8 h_dest[ETH_ALEN];
	enum flow_offload_xmit_type xmit_type;
};

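/* Walk the device path stack and translate it into nft_forward_info: track
 * the innermost real device, collect VLAN/PPPoE encapsulation and tunnel
 * parameters, and decide whether the flow can be transmitted directly.
 */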
static void nft_dev_path_info(const struct net_device_path_stack *stack,
			      struct nft_forward_info *info,
			      unsigned char *ha, struct nf_flowtable *flowtable)
{
	const struct net_device_path *path;
	int i;

	memcpy(info->h_dest, ha, ETH_ALEN);

	for (i = 0; i < stack->num_paths; i++) {
		path = &stack->path[i];
		switch (path->type) {
		case DEV_PATH_ETHERNET:
		case DEV_PATH_DSA:
		case DEV_PATH_VLAN:
		case DEV_PATH_PPPOE:
		case DEV_PATH_TUN:
			info->indev = path->dev;
			if (is_zero_ether_addr(info->h_source))
				memcpy(info->h_source, path->dev->dev_addr, ETH_ALEN);

			if (path->type == DEV_PATH_ETHERNET)
				break;
			if (path->type == DEV_PATH_DSA) {
				i = stack->num_paths;
				break;
			}

			/* DEV_PATH_VLAN, DEV_PATH_PPPOE and DEV_PATH_TUN */
			if (path->type == DEV_PATH_TUN) {
				if (info->num_tuns) {
					info->indev = NULL;
					break;
				}
				info->tun.src_v6 = path->tun.src_v6;
				info->tun.dst_v6 = path->tun.dst_v6;
				info->tun.l3_proto = path->tun.l3_proto;
				info->num_tuns++;
			} else {
				if (info->num_encaps >= NF_FLOW_TABLE_ENCAP_MAX) {
					info->indev = NULL;
					break;
				}
				info->encap[info->num_encaps].id =
					path->encap.id;
				info->encap[info->num_encaps].proto =
					path->encap.proto;
				info->num_encaps++;
			}
			if (path->type == DEV_PATH_PPPOE)
				memcpy(info->h_dest, path->encap.h_dest, ETH_ALEN);
			break;
		case DEV_PATH_BRIDGE:
			if (is_zero_ether_addr(info->h_source))
				memcpy(info->h_source, path->dev->dev_addr, ETH_ALEN);

			switch (path->bridge.vlan_mode) {
			case DEV_PATH_BR_VLAN_UNTAG_HW:
				info->ingress_vlans |= BIT(info->num_encaps - 1);
				break;
			case DEV_PATH_BR_VLAN_TAG:
				if (info->num_encaps >= NF_FLOW_TABLE_ENCAP_MAX) {
					info->indev = NULL;
					break;
				}
				info->encap[info->num_encaps].id = path->bridge.vlan_id;
				info->encap[info->num_encaps].proto = path->bridge.vlan_proto;
				info->num_encaps++;
				break;
			case DEV_PATH_BR_VLAN_UNTAG:
				if (WARN_ON_ONCE(info->num_encaps-- == 0)) {
					info->indev = NULL;
					break;
				}
				break;
			case DEV_PATH_BR_VLAN_KEEP:
				break;
			}
			info->xmit_type = FLOW_OFFLOAD_XMIT_DIRECT;
			break;
		default:
			info->indev = NULL;
			break;
		}
	}
	info->outdev = info->indev;

	if (nf_flowtable_hw_offload(flowtable) &&
	    nft_is_valid_ether_device(info->indev))
		info->xmit_type = FLOW_OFFLOAD_XMIT_DIRECT;
}

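/* Return true if @dev is bound to one of the flowtable's hooks. */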
static bool nft_flowtable_find_dev(const struct net_device *dev,
				   struct nft_flowtable *ft)
{
	struct nft_hook *hook;
	bool found = false;

	list_for_each_entry_rcu(hook, &ft->hook_list, list) {
		if (!nft_hook_find_ops_rcu(hook, dev))
			continue;

		found = true;
		break;
	}

	return found;
}

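/* Replace the cached route of this direction with a route looked up on the
 * outer tunnel addresses, so forwarding follows the outer header.
 */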
static int nft_flow_tunnel_update_route(const struct nft_pktinfo *pkt,
					struct flow_offload_tunnel *tun,
					struct nf_flow_route *route,
					enum ip_conntrack_dir dir)
{
	struct dst_entry *cur_dst = route->tuple[dir].dst;
	struct dst_entry *tun_dst = NULL;
	struct flowi fl = {};

	switch (nft_pf(pkt)) {
	case NFPROTO_IPV4:
		fl.u.ip4.daddr = tun->dst_v4.s_addr;
		fl.u.ip4.saddr = tun->src_v4.s_addr;
		fl.u.ip4.flowi4_iif = nft_in(pkt)->ifindex;
		fl.u.ip4.flowi4_dscp = ip4h_dscp(ip_hdr(pkt->skb));
		fl.u.ip4.flowi4_mark = pkt->skb->mark;
		fl.u.ip4.flowi4_flags = FLOWI_FLAG_ANYSRC;
		break;
	case NFPROTO_IPV6:
		fl.u.ip6.daddr = tun->dst_v6;
		fl.u.ip6.saddr = tun->src_v6;
		fl.u.ip6.flowi6_iif = nft_in(pkt)->ifindex;
		fl.u.ip6.flowlabel = ip6_flowinfo(ipv6_hdr(pkt->skb));
		fl.u.ip6.flowi6_mark = pkt->skb->mark;
		fl.u.ip6.flowi6_flags = FLOWI_FLAG_ANYSRC;
		break;
	}

	nf_route(nft_net(pkt), &tun_dst, &fl, false, nft_pf(pkt));
	if (!tun_dst)
		return -ENOENT;

	route->tuple[dir].dst = tun_dst;
	dst_release(cur_dst);

	return 0;
}

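/* Resolve the full forwarding path for one direction and fill in the flow
 * route accordingly: input/output interfaces, encapsulation, tunnel
 * endpoints and, for the direct xmit type, the Ethernet header.
 */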
static void nft_dev_forward_path(const struct nft_pktinfo *pkt,
				 struct nf_flow_route *route,
				 const struct nf_conn *ct,
				 enum ip_conntrack_dir dir,
				 struct nft_flowtable *ft)
{
	const struct dst_entry *dst = route->tuple[dir].dst;
	struct net_device_path_stack stack;
	struct nft_forward_info info = {};
	unsigned char ha[ETH_ALEN];
	int i;

	if (nft_dev_fill_forward_path(route, dst, ct, dir, ha, &stack) >= 0)
		nft_dev_path_info(&stack, &info, ha, &ft->data);

	if (info.outdev)
		route->tuple[dir].out.ifindex = info.outdev->ifindex;

	if (!info.indev || !nft_flowtable_find_dev(info.indev, ft))
		return;

	route->tuple[!dir].in.ifindex = info.indev->ifindex;
	for (i = 0; i < info.num_encaps; i++) {
		route->tuple[!dir].in.encap[i].id = info.encap[i].id;
		route->tuple[!dir].in.encap[i].proto = info.encap[i].proto;
	}

	if (info.num_tuns &&
	    !nft_flow_tunnel_update_route(pkt, &info.tun, route, dir)) {
		route->tuple[!dir].in.tun.src_v6 = info.tun.dst_v6;
		route->tuple[!dir].in.tun.dst_v6 = info.tun.src_v6;
		route->tuple[!dir].in.tun.l3_proto = info.tun.l3_proto;
		route->tuple[!dir].in.num_tuns = info.num_tuns;
	}

	route->tuple[!dir].in.num_encaps = info.num_encaps;
	route->tuple[!dir].in.ingress_vlans = info.ingress_vlans;

	if (info.xmit_type == FLOW_OFFLOAD_XMIT_DIRECT) {
		memcpy(route->tuple[dir].out.h_source, info.h_source, ETH_ALEN);
		memcpy(route->tuple[dir].out.h_dest, info.h_dest, ETH_ALEN);
		route->tuple[dir].xmit_type = info.xmit_type;
	}
}

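/* Build the nf_flow_route for both directions of the connection: take the
 * dst cached in the skb for the packet's direction, perform a route lookup
 * for the other direction, and refine both with the device forwarding path
 * when packets would be handed to the neighbour layer.
 */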
int nft_flow_route(const struct nft_pktinfo *pkt, const struct nf_conn *ct,
		   struct nf_flow_route *route, enum ip_conntrack_dir dir,
		   struct nft_flowtable *ft)
{
	struct dst_entry *this_dst = skb_dst(pkt->skb);
	struct dst_entry *other_dst = NULL;
	struct flowi fl;

	memset(&fl, 0, sizeof(fl));
	switch (nft_pf(pkt)) {
	case NFPROTO_IPV4:
		fl.u.ip4.daddr = ct->tuplehash[dir].tuple.src.u3.ip;
		fl.u.ip4.saddr = ct->tuplehash[!dir].tuple.src.u3.ip;
		fl.u.ip4.flowi4_oif = nft_in(pkt)->ifindex;
		fl.u.ip4.flowi4_iif = this_dst->dev->ifindex;
		fl.u.ip4.flowi4_dscp = ip4h_dscp(ip_hdr(pkt->skb));
		fl.u.ip4.flowi4_mark = pkt->skb->mark;
		fl.u.ip4.flowi4_flags = FLOWI_FLAG_ANYSRC;
		break;
	case NFPROTO_IPV6:
		fl.u.ip6.daddr = ct->tuplehash[dir].tuple.src.u3.in6;
		fl.u.ip6.saddr = ct->tuplehash[!dir].tuple.src.u3.in6;
		fl.u.ip6.flowi6_oif = nft_in(pkt)->ifindex;
		fl.u.ip6.flowi6_iif = this_dst->dev->ifindex;
		fl.u.ip6.flowlabel = ip6_flowinfo(ipv6_hdr(pkt->skb));
		fl.u.ip6.flowi6_mark = pkt->skb->mark;
		fl.u.ip6.flowi6_flags = FLOWI_FLAG_ANYSRC;
		break;
	}

	if (!dst_hold_safe(this_dst))
		return -ENOENT;

	nf_route(nft_net(pkt), &other_dst, &fl, false, nft_pf(pkt));
	if (!other_dst) {
		dst_release(this_dst);
		return -ENOENT;
	}

	nft_default_forward_path(route, this_dst, dir);
	nft_default_forward_path(route, other_dst, !dir);

	if (route->tuple[dir].xmit_type == FLOW_OFFLOAD_XMIT_NEIGH)
		nft_dev_forward_path(pkt, route, ct, dir, ft);
	if (route->tuple[!dir].xmit_type == FLOW_OFFLOAD_XMIT_NEIGH)
		nft_dev_forward_path(pkt, route, ct, !dir, ft);

	return 0;
}
EXPORT_SYMBOL_GPL(nft_flow_route);