// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
#include <linux/if_vlan.h>
#include <net/netlink.h>
#include <net/sch_generic.h>
#include <net/pkt_sched.h>
#include <net/dst.h>
#include <net/ip.h>
#include <net/ip6_fib.h>

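/* Per-CPU scratch space that carries the original packet's L2 header and
 * qdisc metadata across the IP-layer fragmentation call, so that
 * sch_frag_xmit() can reconstruct the header on each fragment.  bh_lock
 * guards the scratch area against concurrent use.
 */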
struct sch_frag_data {
	unsigned long dst;
	struct qdisc_skb_cb cb;
	__be16 inner_protocol;
	u16 vlan_tci;
	__be16 vlan_proto;
	unsigned int l2_len;
	u8 l2_data[VLAN_ETH_HLEN];
	int (*xmit)(struct sk_buff *skb);
	local_lock_t bh_lock;
};

static DEFINE_PER_CPU(struct sch_frag_data, sch_frag_data_storage) = {
	.bh_lock = INIT_LOCAL_LOCK(bh_lock),
};

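/* Output callback handed to ip_do_fragment()/ipv6_fragment(): restore the
 * dst, qdisc cb, inner protocol, VLAN tag and MAC header saved by
 * sch_frag_prepare_frag(), then pass the fragment to the original xmit
 * function.
 */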
static int sch_frag_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct sch_frag_data *data = this_cpu_ptr(&sch_frag_data_storage);

	lockdep_assert_held(&data->bh_lock);
	if (skb_cow_head(skb, data->l2_len) < 0) {
		kfree_skb(skb);
		return -ENOMEM;
	}

	__skb_dst_copy(skb, data->dst);
	*qdisc_skb_cb(skb) = data->cb;
	skb->inner_protocol = data->inner_protocol;
	if (data->vlan_tci & VLAN_CFI_MASK)
		__vlan_hwaccel_put_tag(skb, data->vlan_proto,
				       data->vlan_tci & ~VLAN_CFI_MASK);
	else
		__vlan_hwaccel_clear_tag(skb);

	/* Reconstruct the MAC header. */
	skb_push(skb, data->l2_len);
	memcpy(skb->data, &data->l2_data, data->l2_len);
	skb_postpush_rcsum(skb, skb->data, data->l2_len);
	skb_reset_mac_header(skb);

	return data->xmit(skb);
}

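/* Stash the skb's dst, qdisc cb, VLAN state and L2 header in the per-CPU
 * scratch area, then pull the L2 header so the IP fragmentation code sees
 * a plain L3 packet.  Must be called with bh_lock held.
 */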
static void sch_frag_prepare_frag(struct sk_buff *skb,
				  int (*xmit)(struct sk_buff *skb))
{
	unsigned int hlen = skb_network_offset(skb);
	struct sch_frag_data *data;

	data = this_cpu_ptr(&sch_frag_data_storage);
	data->dst = skb->_skb_refdst;
	data->cb = *qdisc_skb_cb(skb);
	data->xmit = xmit;
	data->inner_protocol = skb->inner_protocol;
	if (skb_vlan_tag_present(skb))
		data->vlan_tci = skb_vlan_tag_get(skb) | VLAN_CFI_MASK;
	else
		data->vlan_tci = 0;
	data->vlan_proto = skb->vlan_proto;
	data->l2_len = hlen;
	memcpy(&data->l2_data, skb->data, hlen);

	memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
	skb_pull(skb, hlen);
}

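/* A minimal dst is attached while fragmenting so that ip_do_fragment()
 * and ipv6_fragment() size the fragments from the output device's MTU.
 */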
static unsigned int
sch_frag_dst_get_mtu(const struct dst_entry *dst)
{
	return dst->dev->mtu;
}

static struct dst_ops sch_frag_dst_ops = {
	.family = AF_UNSPEC,
	.mtu = sch_frag_dst_get_mtu,
};

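/* Fragment an IPv4 or IPv6 packet down to the given MRU.  An on-stack,
 * refcount-less dst backed by sch_frag_dst_ops supplies the MTU; the
 * original dst reference is saved beforehand and dropped afterwards,
 * since skb_dst_set_noref() overwrites it without releasing it.  Each
 * resulting fragment is emitted through sch_frag_xmit().
 */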
static int sch_fragment(struct net *net, struct sk_buff *skb,
			u16 mru, int (*xmit)(struct sk_buff *skb))
{
	int ret = -1;

	if (skb_network_offset(skb) > VLAN_ETH_HLEN) {
		net_warn_ratelimited("L2 header too long to fragment\n");
		goto err;
	}

	if (skb_protocol(skb, true) == htons(ETH_P_IP)) {
		struct rtable sch_frag_rt = { 0 };
		unsigned long orig_dst;

		local_lock_nested_bh(&sch_frag_data_storage.bh_lock);
		sch_frag_prepare_frag(skb, xmit);
		dst_init(&sch_frag_rt.dst, &sch_frag_dst_ops, NULL,
			 DST_OBSOLETE_NONE, DST_NOCOUNT);
		sch_frag_rt.dst.dev = skb->dev;

		orig_dst = skb->_skb_refdst;
		skb_dst_set_noref(skb, &sch_frag_rt.dst);
		IPCB(skb)->frag_max_size = mru;

		ret = ip_do_fragment(net, skb->sk, skb, sch_frag_xmit);
		local_unlock_nested_bh(&sch_frag_data_storage.bh_lock);
		refdst_drop(orig_dst);
	} else if (skb_protocol(skb, true) == htons(ETH_P_IPV6)) {
		unsigned long orig_dst;
		struct rt6_info sch_frag_rt;

		local_lock_nested_bh(&sch_frag_data_storage.bh_lock);
		sch_frag_prepare_frag(skb, xmit);
		memset(&sch_frag_rt, 0, sizeof(sch_frag_rt));
		dst_init(&sch_frag_rt.dst, &sch_frag_dst_ops, NULL,
			 DST_OBSOLETE_NONE, DST_NOCOUNT);
		sch_frag_rt.dst.dev = skb->dev;

		orig_dst = skb->_skb_refdst;
		skb_dst_set_noref(skb, &sch_frag_rt.dst);
		IP6CB(skb)->frag_max_size = mru;

		ret = ipv6_stub->ipv6_fragment(net, skb->sk, skb,
					       sch_frag_xmit);
		local_unlock_nested_bh(&sch_frag_data_storage.bh_lock);
		refdst_drop(orig_dst);
	} else {
		net_warn_ratelimited("Fail frag %s: eth=%x, MRU=%d, MTU=%d\n",
				     netdev_name(skb->dev),
				     ntohs(skb_protocol(skb, true)), mru,
				     skb->dev->mtu);
		goto err;
	}

	return ret;
err:
	kfree_skb(skb);
	return ret;
}

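/* Transmit hook for tc: if an MRU was recorded in the tc skb cb (e.g. by
 * act_ct after defragmentation) and the packet exceeds it, fragment the
 * packet before transmission; otherwise transmit it unchanged.
 */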
int sch_frag_xmit_hook(struct sk_buff *skb, int (*xmit)(struct sk_buff *skb))
{
	u16 mru = tc_skb_cb(skb)->mru;
	int err;

	if (mru && skb->len > mru + skb->dev->hard_header_len)
		err = sch_fragment(dev_net(skb->dev), skb, mru, xmit);
	else
		err = xmit(skb);

	return err;
}
EXPORT_SYMBOL_GPL(sch_frag_xmit_hook);
161