xref: /linux/net/sched/sch_frag.c (revision 91a4855d6c03e770e42f17c798a36a3c46e63de2)
1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 #include <linux/if_vlan.h>
3 #include <net/netlink.h>
4 #include <net/sch_generic.h>
5 #include <net/pkt_sched.h>
6 #include <net/dst.h>
7 #include <net/ip.h>
8 #include <net/ip6_fib.h>
9 #include <net/ip6_route.h>
10 
/* Per-CPU scratch area used to carry the original packet's metadata
 * (dst, qdisc cb, VLAN tag, L2 header bytes) across the IP-layer
 * fragmentation call, so sch_frag_xmit() can rebuild a complete L2
 * frame around each fragment.
 */
struct sch_frag_data {
	unsigned long dst;		/* saved skb->_skb_refdst of the original skb */
	struct qdisc_skb_cb cb;		/* saved qdisc control block */
	__be16 inner_protocol;
	u16 vlan_tci;			/* CFI bit set == "VLAN tag was present" */
	__be16 vlan_proto;
	unsigned int l2_len;		/* length of the saved L2 header */
	u8 l2_data[VLAN_ETH_HLEN];	/* copy of the original L2 header */
	int (*xmit)(struct sk_buff *skb); /* per-fragment output callback */
	local_lock_t bh_lock;		/* serializes use of this per-CPU state */
};

static DEFINE_PER_CPU(struct sch_frag_data, sch_frag_data_storage) = {
	.bh_lock = INIT_LOCAL_LOCK(bh_lock),
};
26 
/* Output callback handed to ip_do_fragment()/ip6_fragment(): restore the
 * metadata and L2 header stashed by sch_frag_prepare_frag() onto each
 * fragment, then hand it to the stored ->xmit callback.
 *
 * Consumes @skb.  Returns -ENOMEM if headroom for the L2 header cannot
 * be made (skb is freed), otherwise the return value of data->xmit().
 */
static int sch_frag_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct sch_frag_data *data = this_cpu_ptr(&sch_frag_data_storage);

	/* The caller (sch_fragment) holds the per-CPU bh_lock around the
	 * whole fragmentation run, so @data is stable here.
	 */
	lockdep_assert_held(&data->bh_lock);
	if (skb_cow_head(skb, data->l2_len) < 0) {
		kfree_skb(skb);
		return -ENOMEM;
	}

	__skb_dst_copy(skb, data->dst);
	*qdisc_skb_cb(skb) = data->cb;
	skb->inner_protocol = data->inner_protocol;
	/* CFI in the saved tci marks "tag present"; mask it back off. */
	if (data->vlan_tci & VLAN_CFI_MASK)
		__vlan_hwaccel_put_tag(skb, data->vlan_proto,
				       data->vlan_tci & ~VLAN_CFI_MASK);
	else
		__vlan_hwaccel_clear_tag(skb);

	/* Reconstruct the MAC header.  */
	skb_push(skb, data->l2_len);
	memcpy(skb->data, &data->l2_data, data->l2_len);
	skb_postpush_rcsum(skb, skb->data, data->l2_len);
	skb_reset_mac_header(skb);

	return data->xmit(skb);
}
54 
/* Stash @skb's metadata and L2 header into this CPU's sch_frag_data so
 * sch_frag_xmit() can rebuild each fragment, then strip the L2 header
 * so the IP fragmentation code sees a plain L3 packet.
 *
 * Callers must hold sch_frag_data_storage.bh_lock and must already have
 * checked that the L2 header fits in l2_data (<= VLAN_ETH_HLEN).
 */
static void sch_frag_prepare_frag(struct sk_buff *skb,
				  int (*xmit)(struct sk_buff *skb))
{
	unsigned int hlen = skb_network_offset(skb);
	struct sch_frag_data *data;

	data = this_cpu_ptr(&sch_frag_data_storage);
	data->dst = skb->_skb_refdst;
	data->cb = *qdisc_skb_cb(skb);
	data->xmit = xmit;
	data->inner_protocol = skb->inner_protocol;
	/* Use the CFI bit as a "tag present" flag; it is masked off again
	 * when the tag is restored in sch_frag_xmit().
	 */
	if (skb_vlan_tag_present(skb))
		data->vlan_tci = skb_vlan_tag_get(skb) | VLAN_CFI_MASK;
	else
		data->vlan_tci = 0;
	data->vlan_proto = skb->vlan_proto;
	data->l2_len = hlen;
	memcpy(&data->l2_data, skb->data, hlen);

	memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
	skb_pull(skb, hlen);
}
77 
78 static unsigned int
79 sch_frag_dst_get_mtu(const struct dst_entry *dst)
80 {
81 	return dst->dev->mtu;
82 }
83 
/* Minimal dst_ops for the fake on-stack dst installed during
 * fragmentation; ->mtu resolves to the underlying device's MTU.
 */
static struct dst_ops sch_frag_dst_ops = {
	.family = AF_UNSPEC,
	.mtu = sch_frag_dst_get_mtu,
};
88 
/* Fragment @skb (IPv4 or IPv6) down to the MRU @mru and send each
 * fragment through @xmit, via the stock ip_do_fragment()/ip6_fragment()
 * paths.  A fake, refcount-less (DST_NOCOUNT) dst is installed on the
 * stack so those paths resolve the MTU from skb->dev.
 *
 * Consumes @skb.  Returns the fragmentation result, or -1 (with the
 * packet freed) if the L2 header is too long to save or the protocol is
 * neither IPv4 nor IPv6.
 */
static int sch_fragment(struct net *net, struct sk_buff *skb,
			u16 mru, int (*xmit)(struct sk_buff *skb))
{
	int ret = -1;

	/* The saved L2 header must fit in sch_frag_data.l2_data. */
	if (skb_network_offset(skb) > VLAN_ETH_HLEN) {
		net_warn_ratelimited("L2 header too long to fragment\n");
		goto err;
	}

	if (skb_protocol(skb, true) == htons(ETH_P_IP)) {
		struct rtable sch_frag_rt = { 0 };
		unsigned long orig_dst;

		/* Serialize use of the per-CPU scratch state for the whole
		 * fragmentation run (sch_frag_xmit reads it per fragment).
		 */
		local_lock_nested_bh(&sch_frag_data_storage.bh_lock);
		sch_frag_prepare_frag(skb, xmit);
		dst_init(&sch_frag_rt.dst, &sch_frag_dst_ops, NULL,
			 DST_OBSOLETE_NONE, DST_NOCOUNT);
		sch_frag_rt.dst.dev = skb->dev;

		/* Save the original refdst before installing the fake dst;
		 * fragments re-acquire it via __skb_dst_copy() and the
		 * original reference is dropped below.
		 */
		orig_dst = skb->_skb_refdst;
		skb_dst_set_noref(skb, &sch_frag_rt.dst);
		IPCB(skb)->frag_max_size = mru;

		ret = ip_do_fragment(net, skb->sk, skb, sch_frag_xmit);
		local_unlock_nested_bh(&sch_frag_data_storage.bh_lock);
		refdst_drop(orig_dst);
	} else if (skb_protocol(skb, true) == htons(ETH_P_IPV6)) {
		unsigned long orig_dst;
		struct rt6_info sch_frag_rt;

		local_lock_nested_bh(&sch_frag_data_storage.bh_lock);
		sch_frag_prepare_frag(skb, xmit);
		memset(&sch_frag_rt, 0, sizeof(sch_frag_rt));
		dst_init(&sch_frag_rt.dst, &sch_frag_dst_ops, NULL,
			 DST_OBSOLETE_NONE, DST_NOCOUNT);
		sch_frag_rt.dst.dev = skb->dev;

		orig_dst = skb->_skb_refdst;
		skb_dst_set_noref(skb, &sch_frag_rt.dst);
		IP6CB(skb)->frag_max_size = mru;

		ret = ip6_fragment(net, skb->sk, skb, sch_frag_xmit);
		local_unlock_nested_bh(&sch_frag_data_storage.bh_lock);
		refdst_drop(orig_dst);
	} else {
		net_warn_ratelimited("Fail frag %s: eth=%x, MRU=%d, MTU=%d\n",
				     netdev_name(skb->dev),
				     ntohs(skb_protocol(skb, true)), mru,
				     skb->dev->mtu);
		goto err;
	}

	return ret;
err:
	kfree_skb(skb);
	return ret;
}
147 
148 int sch_frag_xmit_hook(struct sk_buff *skb, int (*xmit)(struct sk_buff *skb))
149 {
150 	u16 mru = tc_skb_cb(skb)->mru;
151 	int err;
152 
153 	if (mru && skb->len > mru + skb->dev->hard_header_len)
154 		err = sch_fragment(dev_net(skb->dev), skb, mru, xmit);
155 	else
156 		err = xmit(skb);
157 
158 	return err;
159 }
160 EXPORT_SYMBOL_GPL(sch_frag_xmit_hook);
161