/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_NETFILTER_H
#define __LINUX_NETFILTER_H

#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/net.h>
#include <linux/if.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/static_key.h>
#include <linux/module.h>
#include <linux/netfilter_defs.h>
#include <linux/netdevice.h>
#include <linux/sockptr.h>
#include <net/net_namespace.h>

static inline int NF_DROP_GETERR(int verdict)
{
	return -(verdict >> NF_VERDICT_QBITS);
}

static __always_inline int
NF_DROP_REASON(struct sk_buff *skb, enum skb_drop_reason reason, u32 err)
{
	BUILD_BUG_ON(err > 0xffff);

	kfree_skb_reason(skb, reason);

	return ((err << 16) | NF_STOLEN);
}

static inline int nf_inet_addr_cmp(const union nf_inet_addr *a1,
				   const union nf_inet_addr *a2)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
	const unsigned long *ul1 = (const unsigned long *)a1;
	const unsigned long *ul2 = (const unsigned long *)a2;

	return ((ul1[0] ^ ul2[0]) | (ul1[1] ^ ul2[1])) == 0UL;
#else
	return a1->all[0] == a2->all[0] &&
	       a1->all[1] == a2->all[1] &&
	       a1->all[2] == a2->all[2] &&
	       a1->all[3] == a2->all[3];
#endif
}

static inline void nf_inet_addr_mask(const union nf_inet_addr *a1,
				     union nf_inet_addr *result,
				     const union nf_inet_addr *mask)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
	const unsigned long *ua = (const unsigned long *)a1;
	unsigned long *ur = (unsigned long *)result;
	const unsigned long *um = (const unsigned long *)mask;

	ur[0] = ua[0] & um[0];
	ur[1] = ua[1] & um[1];
#else
	result->all[0] = a1->all[0] & mask->all[0];
	result->all[1] = a1->all[1] & mask->all[1];
	result->all[2] = a1->all[2] & mask->all[2];
	result->all[3] = a1->all[3] & mask->all[3];
#endif
}

int netfilter_init(void);

struct sk_buff;

struct nf_hook_ops;

struct sock;

struct nf_hook_state {
	u8 hook;
	u8 pf;
	struct net_device *in;
	struct net_device *out;
	struct sock *sk;
	struct net *net;
	int (*okfn)(struct net *, struct sock *, struct sk_buff *);
};

typedef unsigned int nf_hookfn(void *priv,
			       struct sk_buff *skb,
			       const struct nf_hook_state *state);

enum nf_hook_ops_type {
	NF_HOOK_OP_UNDEFINED,
	NF_HOOK_OP_NF_TABLES,
	NF_HOOK_OP_BPF,
};

struct nf_hook_ops {
	struct list_head list;
	struct rcu_head rcu;

	/* User fills in from here down. */
	nf_hookfn *hook;
	struct net_device *dev;
	void *priv;
	u8 pf;
	enum nf_hook_ops_type hook_ops_type:8;
	unsigned int hooknum;
	/* Hooks are ordered in ascending priority. */
	int priority;
};
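
/*
 * Example: a minimal nf_hookfn and a matching nf_hook_ops, roughly as a hook
 * owner might fill them in.  This is only an illustrative sketch: the names
 * my_hook_fn/my_hook_ops, the chosen hook point and the priority value are
 * placeholders.  Verdicts such as NF_ACCEPT and hook numbers such as
 * NF_INET_LOCAL_IN come from the netfilter UAPI headers.
 *
 *	static unsigned int my_hook_fn(void *priv, struct sk_buff *skb,
 *				       const struct nf_hook_state *state)
 *	{
 *		// inspect skb/state here; NF_ACCEPT lets the packet continue
 *		return NF_ACCEPT;
 *	}
 *
 *	static const struct nf_hook_ops my_hook_ops = {
 *		.hook		= my_hook_fn,
 *		.pf		= NFPROTO_IPV4,
 *		.hooknum	= NF_INET_LOCAL_IN,
 *		.priority	= 0,
 *	};
 */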

struct nf_hook_entry {
	nf_hookfn *hook;
	void *priv;
};

struct nf_hook_entries_rcu_head {
	struct rcu_head head;
	void *allocation;
};

struct nf_hook_entries {
	u16 num_hook_entries;
	/* padding */
	struct nf_hook_entry hooks[];

	/* trailer: pointers to original orig_ops of each hook,
	 * followed by rcu_head and scratch space used for freeing
	 * the structure via call_rcu.
	 *
	 * This is not part of struct nf_hook_entry since it's only
	 * needed in the slow path (hook register/unregister):
	 * const struct nf_hook_ops *orig_ops[]
	 *
	 * For the same reason, we store this at the end -- it's
	 * only needed when a hook is deleted, not during
	 * packet path processing:
	 * struct nf_hook_entries_rcu_head head
	 */
};

#ifdef CONFIG_NETFILTER
static inline struct nf_hook_ops **nf_hook_entries_get_hook_ops(const struct nf_hook_entries *e)
{
	unsigned int n = e->num_hook_entries;
	const void *hook_end;

	hook_end = &e->hooks[n]; /* this is *past* ->hooks[]! */

	return (struct nf_hook_ops **)hook_end;
}

static inline int
nf_hook_entry_hookfn(const struct nf_hook_entry *entry, struct sk_buff *skb,
		     struct nf_hook_state *state)
{
	return entry->hook(entry->priv, skb, state);
}

static inline void nf_hook_state_init(struct nf_hook_state *p,
				      unsigned int hook,
				      u_int8_t pf,
				      struct net_device *indev,
				      struct net_device *outdev,
				      struct sock *sk,
				      struct net *net,
				      int (*okfn)(struct net *, struct sock *, struct sk_buff *))
{
	p->hook = hook;
	p->pf = pf;
	p->in = indev;
	p->out = outdev;
	p->sk = sk;
	p->net = net;
	p->okfn = okfn;
}

struct nf_sockopt_ops {
	struct list_head list;

	u_int8_t pf;

	/* Non-inclusive ranges: use 0/0/NULL to never get called. */
	int set_optmin;
	int set_optmax;
	int (*set)(struct sock *sk, int optval, sockptr_t arg,
		   unsigned int len);
	int get_optmin;
	int get_optmax;
	int (*get)(struct sock *sk, int optval, void __user *user, int *len);
	/* Use the module struct to lock set/get code in place */
	struct module *owner;
};

/* Functions to register/unregister hook points. */
int nf_register_net_hook(struct net *net, const struct nf_hook_ops *ops);
void nf_unregister_net_hook(struct net *net, const struct nf_hook_ops *ops);
int nf_register_net_hooks(struct net *net, const struct nf_hook_ops *reg,
			  unsigned int n);
void nf_unregister_net_hooks(struct net *net, const struct nf_hook_ops *reg,
			     unsigned int n);

/* Functions to register get/setsockopt ranges (non-inclusive).  You
   need to check permissions yourself! */
int nf_register_sockopt(struct nf_sockopt_ops *reg);
void nf_unregister_sockopt(struct nf_sockopt_ops *reg);

#ifdef CONFIG_JUMP_LABEL
extern struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
#endif

int nf_hook_slow(struct sk_buff *skb, struct nf_hook_state *state,
		 const struct nf_hook_entries *e, unsigned int i);

void nf_hook_slow_list(struct list_head *head, struct nf_hook_state *state,
		       const struct nf_hook_entries *e);
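
/*
 * Example: registering and unregistering a hook from module init/exit.
 * Illustrative sketch only: my_hook_ops is the placeholder from the example
 * above, registration is done against init_net for brevity, and a real user
 * would typically register per network namespace via pernet operations.
 *
 *	static int __init my_module_init(void)
 *	{
 *		return nf_register_net_hook(&init_net, &my_hook_ops);
 *	}
 *
 *	static void __exit my_module_exit(void)
 *	{
 *		nf_unregister_net_hook(&init_net, &my_hook_ops);
 *	}
 */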

/**
 * nf_hook - call a netfilter hook
 *
 * Returns 1 if the hook has allowed the packet to pass.  The function
 * okfn must be invoked by the caller in this case.  Any other return
 * value indicates the packet has been consumed by the hook.
 */
static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net,
			  struct sock *sk, struct sk_buff *skb,
			  struct net_device *indev, struct net_device *outdev,
			  int (*okfn)(struct net *, struct sock *, struct sk_buff *))
{
	struct nf_hook_entries *hook_head = NULL;
	int ret = 1;

#ifdef CONFIG_JUMP_LABEL
	if (__builtin_constant_p(pf) &&
	    __builtin_constant_p(hook) &&
	    !static_key_false(&nf_hooks_needed[pf][hook]))
		return 1;
#endif

	rcu_read_lock();
	switch (pf) {
	case NFPROTO_IPV4:
		hook_head = rcu_dereference(net->nf.hooks_ipv4[hook]);
		break;
	case NFPROTO_IPV6:
		hook_head = rcu_dereference(net->nf.hooks_ipv6[hook]);
		break;
	case NFPROTO_ARP:
#ifdef CONFIG_NETFILTER_FAMILY_ARP
		if (WARN_ON_ONCE(hook >= ARRAY_SIZE(net->nf.hooks_arp)))
			break;
		hook_head = rcu_dereference(net->nf.hooks_arp[hook]);
#endif
		break;
	case NFPROTO_BRIDGE:
#ifdef CONFIG_NETFILTER_FAMILY_BRIDGE
		hook_head = rcu_dereference(net->nf.hooks_bridge[hook]);
#endif
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}

	if (hook_head) {
		struct nf_hook_state state;

		nf_hook_state_init(&state, hook, pf, indev, outdev,
				   sk, net, okfn);

		ret = nf_hook_slow(skb, &state, hook_head, 0);
	}
	rcu_read_unlock();

	return ret;
}
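
/*
 * Example: honoring the nf_hook() return contract when calling it directly.
 * Illustrative sketch only; my_deliver() stands in for whatever continuation
 * function the caller passes as okfn.  The NF_HOOK() wrappers below
 * encapsulate exactly this pattern.
 *
 *	int ret = nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, net, sk, skb,
 *			  NULL, skb->dev, my_deliver);
 *	if (ret == 1)
 *		ret = my_deliver(net, sk, skb);
 *	// any other return value: the hooks consumed the skb
 */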

/* Activate hook; either okfn or kfree_skb is called, unless a hook
   returns NF_STOLEN (in which case, it's up to the hook to deal with
   the consequences).

   Returns -ERRNO if the packet was dropped.  Zero means queued, stolen or
   accepted.
*/

/* RR:
   > I don't want nf_hook to return anything because people might forget
   > about async and trust the return value to mean "packet was ok".

   AK:
   Just document it clearly, then you can expect some sense from kernel
   coders :)
*/

static inline int
NF_HOOK_COND(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk,
	     struct sk_buff *skb, struct net_device *in, struct net_device *out,
	     int (*okfn)(struct net *, struct sock *, struct sk_buff *),
	     bool cond)
{
	int ret;

	if (!cond ||
	    ((ret = nf_hook(pf, hook, net, sk, skb, in, out, okfn)) == 1))
		ret = okfn(net, sk, skb);
	return ret;
}

static inline int
NF_HOOK(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk, struct sk_buff *skb,
	struct net_device *in, struct net_device *out,
	int (*okfn)(struct net *, struct sock *, struct sk_buff *))
{
	int ret = nf_hook(pf, hook, net, sk, skb, in, out, okfn);
	if (ret == 1)
		ret = okfn(net, sk, skb);
	return ret;
}

static inline void
NF_HOOK_LIST(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk,
	     struct list_head *head, struct net_device *in, struct net_device *out,
	     int (*okfn)(struct net *, struct sock *, struct sk_buff *))
{
	struct nf_hook_entries *hook_head = NULL;

#ifdef CONFIG_JUMP_LABEL
	if (__builtin_constant_p(pf) &&
	    __builtin_constant_p(hook) &&
	    !static_key_false(&nf_hooks_needed[pf][hook]))
		return;
#endif

	rcu_read_lock();
	switch (pf) {
	case NFPROTO_IPV4:
		hook_head = rcu_dereference(net->nf.hooks_ipv4[hook]);
		break;
	case NFPROTO_IPV6:
		hook_head = rcu_dereference(net->nf.hooks_ipv6[hook]);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}

	if (hook_head) {
		struct nf_hook_state state;

		nf_hook_state_init(&state, hook, pf, in, out, sk, net, okfn);

		nf_hook_slow_list(head, &state, hook_head);
	}
	rcu_read_unlock();
}
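
/*
 * Example: typical use of NF_HOOK() from a protocol receive path.  The hook
 * point is traversed first, and the continuation function only runs if every
 * registered hook accepted the packet.  Illustrative sketch only;
 * my_rcv_finish() is a placeholder for the caller's own continuation.
 *
 *	static int my_rcv_finish(struct net *net, struct sock *sk,
 *				 struct sk_buff *skb)
 *	{
 *		// continue normal input processing here
 *		return 0;
 *	}
 *
 *	return NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING, net, NULL, skb,
 *		       skb->dev, NULL, my_rcv_finish);
 */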

/* Call setsockopt() */
int nf_setsockopt(struct sock *sk, u_int8_t pf, int optval, sockptr_t opt,
		  unsigned int len);
int nf_getsockopt(struct sock *sk, u_int8_t pf, int optval, char __user *opt,
		  int *len);

struct flowi;
struct nf_queue_entry;

__sum16 nf_checksum(struct sk_buff *skb, unsigned int hook,
		    unsigned int dataoff, u_int8_t protocol,
		    unsigned short family);

__sum16 nf_checksum_partial(struct sk_buff *skb, unsigned int hook,
			    unsigned int dataoff, unsigned int len,
			    u_int8_t protocol, unsigned short family);
int nf_route(struct net *net, struct dst_entry **dst, struct flowi *fl,
	     bool strict, unsigned short family);

#include <net/flow.h>

struct nf_conn;
enum nf_nat_manip_type;
struct nlattr;

struct nf_nat_hook {
	int (*parse_nat_setup)(struct nf_conn *ct, enum nf_nat_manip_type manip,
			       const struct nlattr *attr);
	void (*decode_session)(struct sk_buff *skb, struct flowi *fl);
	void (*remove_nat_bysrc)(struct nf_conn *ct);
};

extern const struct nf_nat_hook __rcu *nf_nat_hook;

static inline void
nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family)
{
#if IS_ENABLED(CONFIG_NF_NAT)
	const struct nf_nat_hook *nat_hook;

	rcu_read_lock();
	nat_hook = rcu_dereference(nf_nat_hook);
	if (nat_hook && nat_hook->decode_session)
		nat_hook->decode_session(skb, fl);
	rcu_read_unlock();
#endif
}

#else /* !CONFIG_NETFILTER */
static inline int
NF_HOOK_COND(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk,
	     struct sk_buff *skb, struct net_device *in, struct net_device *out,
	     int (*okfn)(struct net *, struct sock *, struct sk_buff *),
	     bool cond)
{
	return okfn(net, sk, skb);
}

static inline int
NF_HOOK(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk,
	struct sk_buff *skb, struct net_device *in, struct net_device *out,
	int (*okfn)(struct net *, struct sock *, struct sk_buff *))
{
	return okfn(net, sk, skb);
}

static inline void
NF_HOOK_LIST(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk,
	     struct list_head *head, struct net_device *in, struct net_device *out,
	     int (*okfn)(struct net *, struct sock *, struct sk_buff *))
{
	/* nothing to do */
}

static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net,
			  struct sock *sk, struct sk_buff *skb,
			  struct net_device *indev, struct net_device *outdev,
			  int (*okfn)(struct net *, struct sock *, struct sk_buff *))
{
	return 1;
}

struct flowi;
static inline void
nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family)
{
}
#endif /* CONFIG_NETFILTER */

#if IS_ENABLED(CONFIG_NF_CONNTRACK)
#include <linux/netfilter/nf_conntrack_zones_common.h>

void nf_ct_attach(struct sk_buff *, const struct sk_buff *);
void nf_ct_set_closing(struct nf_conntrack *nfct);
struct nf_conntrack_tuple;
bool nf_ct_get_tuple_skb(struct nf_conntrack_tuple *dst_tuple,
			 const struct sk_buff *skb);
#else
static inline void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb) {}
static inline void nf_ct_set_closing(struct nf_conntrack *nfct) {}
struct nf_conntrack_tuple;
static inline bool nf_ct_get_tuple_skb(struct nf_conntrack_tuple *dst_tuple,
				       const struct sk_buff *skb)
{
	return false;
}
#endif

struct nf_conn;
enum ip_conntrack_info;

struct nf_ct_hook {
	int (*update)(struct net *net, struct sk_buff *skb);
	void (*destroy)(struct nf_conntrack *);
	bool (*get_tuple_skb)(struct nf_conntrack_tuple *,
			      const struct sk_buff *);
	void (*attach)(struct sk_buff *nskb, const struct sk_buff *skb);
	void (*set_closing)(struct nf_conntrack *nfct);
	int (*confirm)(struct sk_buff *skb);
	u32 (*get_id)(const struct nf_conntrack *nfct);
};
extern const struct nf_ct_hook __rcu *nf_ct_hook;

struct nlattr;

struct nfnl_ct_hook {
	size_t (*build_size)(const struct nf_conn *ct);
	int (*build)(struct sk_buff *skb, struct nf_conn *ct,
		     enum ip_conntrack_info ctinfo,
		     u_int16_t ct_attr, u_int16_t ct_info_attr);
	int (*parse)(const struct nlattr *attr, struct nf_conn *ct);
	int (*attach_expect)(const struct nlattr *attr, struct nf_conn *ct,
			     u32 portid, u32 report);
	void (*seq_adjust)(struct sk_buff *skb, struct nf_conn *ct,
			   enum ip_conntrack_info ctinfo, s32 off);
};
extern const struct nfnl_ct_hook __rcu *nfnl_ct_hook;

struct nf_defrag_hook {
	struct module *owner;
	int (*enable)(struct net *net);
	void (*disable)(struct net *net);
};

extern const struct nf_defrag_hook __rcu *nf_defrag_v4_hook;
extern const struct nf_defrag_hook __rcu *nf_defrag_v6_hook;

/*
 * Contains bitmask of ctnetlink event subscribers, if any.
 * Can't be pernet due to NETLINK_LISTEN_ALL_NSID setsockopt flag.
 */
extern u8 nf_ctnetlink_has_listener;
#endif /* __LINUX_NETFILTER_H */