// SPDX-License-Identifier: GPL-2.0-only
/* (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
 */

/* IPv4 packet rejection core: builds and transmits TCP RST and ICMP
 * destination-unreachable replies for packets rejected by netfilter
 * (e.g. the REJECT target).
 */

#include <linux/module.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/route.h>
#include <net/dst.h>
#include <net/netfilter/ipv4/nf_reject.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_bridge.h>

/* Forward declarations: helpers used by the skb builders above their
 * definitions.
 */
static struct iphdr *nf_reject_iphdr_put(struct sk_buff *nskb,
					 const struct sk_buff *oldskb,
					 __u8 protocol, int ttl);
static void nf_reject_ip_tcphdr_put(struct sk_buff *nskb, const struct sk_buff *oldskb,
				    const struct tcphdr *oth);
static const struct tcphdr *
nf_reject_ip_tcphdr_get(struct sk_buff *oldskb,
			struct tcphdr *_oth, int hook);

/* Sanity-check the IPv4 header of @skb before using it to build a reply.
 *
 * Verifies that the fixed header is pullable, that the version is 4 and
 * the IHL is at least 5 words, that the advertised total length neither
 * exceeds the actual skb length nor undercuts the header length, and
 * that the full (options-included) header is pullable.
 *
 * Returns 1 if the header is usable, 0 otherwise.
 */
static int nf_reject_iphdr_validate(struct sk_buff *skb)
{
	struct iphdr *iph;
	u32 len;

	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
		return 0;

	iph = ip_hdr(skb);
	if (iph->ihl < 5 || iph->version != 4)
		return 0;

	len = ntohs(iph->tot_len);
	if (skb->len < len)
		return 0;
	else if (len < (iph->ihl*4))
		return 0;

	if (!pskb_may_pull(skb, iph->ihl*4))
		return 0;

	return 1;
}

/* Build (but do not send) a TCP RST reply to @oldskb.
 *
 * Validates the original IPv4 and TCP headers, then allocates a new skb
 * and fills in mirrored IP and RST TCP headers. The caller is
 * responsible for transmitting or freeing the returned skb.
 *
 * Returns the new skb, or NULL if @oldskb is not a suitable TCP packet
 * (fragment, bad checksum, already a RST) or allocation fails.
 */
struct sk_buff *nf_reject_skb_v4_tcp_reset(struct net *net,
					   struct sk_buff *oldskb,
					   const struct net_device *dev,
					   int hook)
{
	const struct tcphdr *oth;
	struct sk_buff *nskb;
	struct iphdr *niph;
	struct tcphdr _oth;

	if (!nf_reject_iphdr_validate(oldskb))
		return NULL;

	oth = nf_reject_ip_tcphdr_get(oldskb, &_oth, hook);
	if (!oth)
		return NULL;

	nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct tcphdr) +
			 LL_MAX_HEADER, GFP_ATOMIC);
	if (!nskb)
		return NULL;

	nskb->dev = (struct net_device *)dev;

	skb_reserve(nskb, LL_MAX_HEADER);
	niph = nf_reject_iphdr_put(nskb, oldskb, IPPROTO_TCP,
				   READ_ONCE(net->ipv4.sysctl_ip_default_ttl));
	nf_reject_ip_tcphdr_put(nskb, oldskb, oth);
	/* nf_reject_iphdr_put() leaves tot_len/check for us to finalize
	 * once the full payload length is known.
	 */
	niph->tot_len = htons(nskb->len);
	ip_send_check(niph);

	return nskb;
}
EXPORT_SYMBOL_GPL(nf_reject_skb_v4_tcp_reset);

/* Return true if @skb is an ICMP destination-unreachable message.
 * Used to avoid replying to an unreachable with another unreachable.
 * The ICMP type byte is fetched via skb_header_pointer() so non-linear
 * skbs are handled.
 */
static bool nf_skb_is_icmp_unreach(const struct sk_buff *skb)
{
	const struct iphdr *iph = ip_hdr(skb);
	u8 *tp, _type;
	int thoff;

	if (iph->protocol != IPPROTO_ICMP)
		return false;

	thoff = skb_network_offset(skb) + sizeof(*iph);

	tp = skb_header_pointer(skb,
				thoff + offsetof(struct icmphdr, type),
				sizeof(_type), &_type);

	if (!tp)
		return false;

	return *tp == ICMP_DEST_UNREACH;
}

/* Build (but do not send) an ICMP destination-unreachable reply to
 * @oldskb with the given ICMP @code.
 *
 * Refuses to reply to fragments, to ICMP unreachable messages (to
 * prevent unreachable-for-unreachable loops), and to packets whose
 * transport checksum fails verification. The reply embeds up to the
 * first 536 bytes of the original packet and carries a freshly
 * computed ICMP checksum.
 *
 * Returns the new skb, or NULL on any validation or allocation failure.
 */
struct sk_buff *nf_reject_skb_v4_unreach(struct net *net,
					 struct sk_buff *oldskb,
					 const struct net_device *dev,
					 int hook, u8 code)
{
	struct sk_buff *nskb;
	struct iphdr *niph;
	struct icmphdr *icmph;
	unsigned int len;
	int dataoff;
	__wsum csum;
	u8 proto;

	if (!nf_reject_iphdr_validate(oldskb))
		return NULL;

	/* IP header checks: fragment. */
	if (ip_hdr(oldskb)->frag_off & htons(IP_OFFSET))
		return NULL;

	/* don't reply to ICMP_DEST_UNREACH with ICMP_DEST_UNREACH. */
	if (nf_skb_is_icmp_unreach(oldskb))
		return NULL;

	/* RFC says return as much as we can without exceeding 576 bytes. */
	len = min_t(unsigned int, 536, oldskb->len);

	if (!pskb_may_pull(oldskb, len))
		return NULL;

	/* Drop any trailing padding beyond the IP total length before
	 * checksumming/copying.
	 */
	if (pskb_trim_rcsum(oldskb, ntohs(ip_hdr(oldskb)->tot_len)))
		return NULL;

	dataoff = ip_hdrlen(oldskb);
	proto = ip_hdr(oldskb)->protocol;

	/* Only verify the transport checksum when the hardware has not
	 * already done so and the protocol is one we know how to check.
	 */
	if (!skb_csum_unnecessary(oldskb) &&
	    nf_reject_verify_csum(oldskb, dataoff, proto) &&
	    nf_ip_checksum(oldskb, hook, ip_hdrlen(oldskb), proto))
		return NULL;

	nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct icmphdr) +
			 LL_MAX_HEADER + len, GFP_ATOMIC);
	if (!nskb)
		return NULL;

	nskb->dev = (struct net_device *)dev;

	skb_reserve(nskb, LL_MAX_HEADER);
	niph = nf_reject_iphdr_put(nskb, oldskb, IPPROTO_ICMP,
				   READ_ONCE(net->ipv4.sysctl_ip_default_ttl));

	skb_reset_transport_header(nskb);
	icmph = skb_put_zero(nskb, sizeof(struct icmphdr));
	icmph->type     = ICMP_DEST_UNREACH;
	icmph->code	= code;

	/* Embed the start of the offending packet, per ICMP semantics. */
	skb_put_data(nskb, skb_network_header(oldskb), len);

	/* ICMP checksum covers the ICMP header plus embedded payload;
	 * checksum field was zeroed by skb_put_zero() above.
	 */
	csum = csum_partial((void *)icmph, len + sizeof(struct icmphdr), 0);
	icmph->checksum = csum_fold(csum);

	niph->tot_len	= htons(nskb->len);
	ip_send_check(niph);

	return nskb;
}
EXPORT_SYMBOL_GPL(nf_reject_skb_v4_unreach);

/* Validate @oldskb as a RST-able TCP packet and return a pointer to its
 * TCP header (copied into @_oth if the header is non-linear).
 *
 * Returns NULL for fragments, non-TCP packets, packets whose TCP header
 * cannot be read, packets that are themselves RSTs (no RST-for-RST),
 * and packets with a bad TCP checksum.
 */
static const struct tcphdr *
nf_reject_ip_tcphdr_get(struct sk_buff *oldskb,
			struct tcphdr *_oth, int hook)
{
	const struct tcphdr *oth;

	/* IP header checks: fragment. */
	if (ip_hdr(oldskb)->frag_off & htons(IP_OFFSET))
		return NULL;

	if (ip_hdr(oldskb)->protocol != IPPROTO_TCP)
		return NULL;

	oth = skb_header_pointer(oldskb, ip_hdrlen(oldskb),
				 sizeof(struct tcphdr), _oth);
	if (oth == NULL)
		return NULL;

	/* No RST for RST. */
	if (oth->rst)
		return NULL;

	/* Check checksum */
	if (nf_ip_checksum(oldskb, hook, ip_hdrlen(oldskb), IPPROTO_TCP))
		return NULL;

	return oth;
}

/* Append a fresh option-less IPv4 header to @nskb, mirroring @oldskb:
 * source and destination addresses are swapped so the reply goes back
 * to the original sender.
 *
 * Note: tot_len and check are deliberately left for the caller to
 * finalize (via htons(nskb->len) / ip_send_check()) once the payload
 * has been appended.
 */
static struct iphdr *nf_reject_iphdr_put(struct sk_buff *nskb,
					 const struct sk_buff *oldskb,
					 __u8 protocol, int ttl)
{
	struct iphdr *niph, *oiph = ip_hdr(oldskb);

	skb_reset_network_header(nskb);
	niph = skb_put(nskb, sizeof(struct iphdr));
	niph->version	= 4;
	niph->ihl	= sizeof(struct iphdr) / 4;
	niph->tos	= 0;
	niph->id	= 0;
	niph->frag_off	= htons(IP_DF);
	niph->protocol	= protocol;
	niph->check	= 0;
	niph->saddr	= oiph->daddr;
	niph->daddr	= oiph->saddr;
	niph->ttl	= ttl;

	nskb->protocol = htons(ETH_P_IP);

	return niph;
}

/* Append a TCP RST header to @nskb, replying to the original TCP header
 * @oth from @oldskb.
 *
 * Per RFC 793 reset generation: if the original segment carried an ACK,
 * the RST's sequence number is taken from that ACK; otherwise the RST
 * acknowledges everything the original segment consumed (SYN/FIN flags
 * plus payload length). The TCP checksum is left partial, with
 * csum_start/csum_offset set for completion at transmit time
 * (CHECKSUM_PARTIAL).
 */
static void nf_reject_ip_tcphdr_put(struct sk_buff *nskb, const struct sk_buff *oldskb,
				    const struct tcphdr *oth)
{
	struct iphdr *niph = ip_hdr(nskb);
	struct tcphdr *tcph;

	skb_reset_transport_header(nskb);
	tcph = skb_put_zero(nskb, sizeof(struct tcphdr));
	tcph->source	= oth->dest;
	tcph->dest	= oth->source;
	tcph->doff	= sizeof(struct tcphdr) / 4;

	if (oth->ack) {
		tcph->seq = oth->ack_seq;
	} else {
		tcph->ack_seq = htonl(ntohl(oth->seq) + oth->syn + oth->fin +
				      oldskb->len - ip_hdrlen(oldskb) -
				      (oth->doff << 2));
		tcph->ack = 1;
	}

	tcph->rst	= 1;
	tcph->check = ~tcp_v4_check(sizeof(struct tcphdr), niph->saddr,
				    niph->daddr, 0);
	nskb->ip_summed = CHECKSUM_PARTIAL;
	nskb->csum_start = (unsigned char *)tcph - nskb->head;
	nskb->csum_offset = offsetof(struct tcphdr, check);
}

/* Perform a route lookup back to the sender of @skb_in and attach the
 * resulting dst to it. Used when the packet arrives without a dst set
 * (e.g. from the ingress path). Returns 0 on success, -1 on lookup
 * failure.
 */
static int nf_reject_fill_skb_dst(struct sk_buff *skb_in)
{
	struct dst_entry *dst = NULL;
	struct flowi fl;

	memset(&fl, 0, sizeof(struct flowi));
	fl.u.ip4.daddr = ip_hdr(skb_in)->saddr;
	nf_ip_route(dev_net(skb_in->dev), &dst, &fl, false);
	if (!dst)
		return -1;

	skb_dst_set(skb_in, dst);
	return 0;
}

/* Send RST reply */
void nf_send_reset(struct net *net, struct sock *sk, struct sk_buff *oldskb,
		   int hook)
{
	const struct tcphdr *oth;
	struct sk_buff *nskb;
	struct tcphdr _oth;

	oth = nf_reject_ip_tcphdr_get(oldskb, &_oth, hook);
	if (!oth)
		return;

	if (!skb_dst(oldskb) && nf_reject_fill_skb_dst(oldskb) < 0)
		return;

	/* Never RST broadcast/multicast traffic. */
	if (skb_rtable(oldskb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
		return;

	nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct tcphdr) +
			 LL_MAX_HEADER, GFP_ATOMIC);
	if (!nskb)
		return;

	/* ip_route_me_harder expects skb->dst to be set */
	skb_dst_set_noref(nskb, skb_dst(oldskb));

	nskb->mark = IP4_REPLY_MARK(net, oldskb->mark);

	skb_reserve(nskb, LL_MAX_HEADER);
	nf_reject_iphdr_put(nskb, oldskb, IPPROTO_TCP,
			    ip4_dst_hoplimit(skb_dst(nskb)));
	nf_reject_ip_tcphdr_put(nskb, oldskb, oth);
	if (ip_route_me_harder(net, sk, nskb, RTN_UNSPEC))
		goto free_nskb;

	/* "Never happens" */
	if (nskb->len > dst_mtu(skb_dst(nskb)))
		goto free_nskb;

	/* Associate the RST with the original conntrack entry and mark
	 * the connection as closing.
	 */
	nf_ct_attach(nskb, oldskb);
	nf_ct_set_closing(skb_nfct(oldskb));

#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	/* If we use ip_local_out for bridged traffic, the MAC source on
	 * the RST will be ours, instead of the destination's. This confuses
	 * some routers/firewalls, and they drop the packet. So we need to
	 * build the eth header using the original destination's MAC as the
	 * source, and send the RST packet directly.
	 */
	if (nf_bridge_info_exists(oldskb)) {
		struct ethhdr *oeth = eth_hdr(oldskb);
		struct iphdr *niph = ip_hdr(nskb);
		struct net_device *br_indev;

		br_indev = nf_bridge_get_physindev(oldskb, net);
		if (!br_indev)
			goto free_nskb;

		nskb->dev = br_indev;
		niph->tot_len = htons(nskb->len);
		ip_send_check(niph);
		if (dev_hard_header(nskb, nskb->dev, ntohs(nskb->protocol),
				    oeth->h_source, oeth->h_dest, nskb->len) < 0)
			goto free_nskb;
		dev_queue_xmit(nskb);
	} else
#endif
		ip_local_out(net, nskb->sk, nskb);

	return;

 free_nskb:
	kfree_skb(nskb);
}
EXPORT_SYMBOL_GPL(nf_send_reset);

/* Send an ICMP destination-unreachable (with the given @code) in reply
 * to @skb_in via icmp_send().
 *
 * Fragments are never answered. The reply is only sent when the
 * packet's transport checksum is known-good: either hardware already
 * verified it, the protocol is one we cannot verify, or software
 * verification succeeds.
 */
void nf_send_unreach(struct sk_buff *skb_in, int code, int hook)
{
	struct iphdr *iph = ip_hdr(skb_in);
	int dataoff = ip_hdrlen(skb_in);
	u8 proto = iph->protocol;

	if (iph->frag_off & htons(IP_OFFSET))
		return;

	if (!skb_dst(skb_in) && nf_reject_fill_skb_dst(skb_in) < 0)
		return;

	if (skb_csum_unnecessary(skb_in) ||
	    !nf_reject_verify_csum(skb_in, dataoff, proto)) {
		icmp_send(skb_in, ICMP_DEST_UNREACH, code, 0);
		return;
	}

	if (nf_ip_checksum(skb_in, hook, dataoff, proto) == 0)
		icmp_send(skb_in, ICMP_DEST_UNREACH, code, 0);
}
EXPORT_SYMBOL_GPL(nf_send_unreach);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("IPv4 packet rejection core");