xref: /linux/net/netfilter/nfnetlink_queue.c (revision 2c7e63d702f6c4209c5af833308e7fcbc7d4ab17)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * This is a module which is used for queueing packets and communicating with
4  * userspace via nfnetlink.
5  *
6  * (C) 2005 by Harald Welte <laforge@netfilter.org>
7  * (C) 2007 by Patrick McHardy <kaber@trash.net>
8  *
9  * Based on the old ipv4-only ip_queue.c:
10  * (C) 2000-2002 James Morris <jmorris@intercode.com.au>
11  * (C) 2003-2005 Netfilter Core Team <coreteam@netfilter.org>
12  */
13 
14 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15 
16 #include <linux/module.h>
17 #include <linux/skbuff.h>
18 #include <linux/init.h>
19 #include <linux/spinlock.h>
20 #include <linux/slab.h>
21 #include <linux/notifier.h>
22 #include <linux/netdevice.h>
23 #include <linux/netfilter.h>
24 #include <linux/proc_fs.h>
25 #include <linux/netfilter_ipv4.h>
26 #include <linux/netfilter_ipv6.h>
27 #include <linux/netfilter_bridge.h>
28 #include <linux/netfilter/nfnetlink.h>
29 #include <linux/netfilter/nfnetlink_queue.h>
30 #include <linux/netfilter/nf_conntrack_common.h>
31 #include <linux/list.h>
32 #include <linux/cgroup-defs.h>
33 #include <linux/rhashtable.h>
34 #include <linux/jhash.h>
35 #include <net/gso.h>
36 #include <net/sock.h>
37 #include <net/tcp_states.h>
38 #include <net/netfilter/nf_queue.h>
39 #include <net/netns/generic.h>
40 
41 #include <linux/atomic.h>
42 
43 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
44 #include "../bridge/br_private.h"
45 #endif
46 
47 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
48 #include <net/netfilter/nf_conntrack.h>
49 #endif
50 
51 #define NFQNL_QMAX_DEFAULT 1024
52 #define NFQNL_HASH_MIN     1024
53 #define NFQNL_HASH_MAX     1048576
54 
55 /* We're using struct nlattr which has 16bit nla_len. Note that nla_len
56  * includes the header length. Thus, the maximum packet length that we
57  * support is 65531 bytes. We send truncated packets if the specified length
58  * is larger than that.  Userspace can check for presence of NFQA_CAP_LEN
59  * attribute to detect truncation.
60  */
61 #define NFQNL_MAX_COPY_RANGE (0xffff - NLA_HDRLEN)
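/* Worked example: nla_len is a u16, so header + payload together are capped
 * at 0xffff = 65535 bytes; with NLA_HDRLEN = 4 this leaves the 65531 byte
 * payload limit quoted above.
 */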
62 
63 /* Composite key for packet lookup: (net, queue_num, packet_id) */
64 struct nfqnl_packet_key {
65 	possible_net_t net;
66 	u32 packet_id;
67 	u16 queue_num;
68 } __aligned(sizeof(u32));  /* jhash2 requires 32-bit alignment */
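/* Size sketch (assumes a typical 64-bit build): with CONFIG_NET_NS the key
 * is a pointer + u32 + u16, padded to 16 bytes; without CONFIG_NET_NS
 * possible_net_t is empty and the key pads to 8 bytes.  Either way the size
 * stays a multiple of sizeof(u32), as jhash2() requires.
 */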
69 
70 /* Global rhashtable - one for the entire system, shared across all netns */
71 static struct rhashtable nfqnl_packet_map __read_mostly;
72 
73 /* Helper to initialize the composite key; memset() zeroes padding so hashing the raw bytes is deterministic */
74 static inline void nfqnl_init_key(struct nfqnl_packet_key *key,
75 				  struct net *net, u32 packet_id, u16 queue_num)
76 {
77 	memset(key, 0, sizeof(*key));
78 	write_pnet(&key->net, net);
79 	key->packet_id = packet_id;
80 	key->queue_num = queue_num;
81 }
82 
83 struct nfqnl_instance {
84 	struct hlist_node hlist;		/* global list of queues */
85 	struct rcu_head rcu;
86 
87 	u32 peer_portid;
88 	unsigned int queue_maxlen;
89 	unsigned int copy_range;
90 	unsigned int queue_dropped;
91 	unsigned int queue_user_dropped;
92 
93 
94 	u_int16_t queue_num;			/* number of this queue */
95 	u_int8_t copy_mode;
96 	u_int32_t flags;			/* Set using NFQA_CFG_FLAGS */
97 /*
98  * Following fields are dirtied for each queued packet,
99  * keep them in same cache line if possible.
100  */
101 	spinlock_t	lock	____cacheline_aligned_in_smp;
102 	unsigned int	queue_total;
103 	unsigned int	id_sequence;		/* 'sequence' of pkt ids */
104 	struct list_head queue_list;		/* packets in queue */
105 };
106 
107 typedef int (*nfqnl_cmpfn)(struct nf_queue_entry *, unsigned long);
108 
109 static unsigned int nfnl_queue_net_id __read_mostly;
110 
111 #define INSTANCE_BUCKETS	16
112 struct nfnl_queue_net {
113 	spinlock_t instances_lock;
114 	struct hlist_head instance_table[INSTANCE_BUCKETS];
115 };
116 
117 static struct nfnl_queue_net *nfnl_queue_pernet(struct net *net)
118 {
119 	return net_generic(net, nfnl_queue_net_id);
120 }
121 
122 static inline u_int8_t instance_hashfn(u_int16_t queue_num)
123 {
124 	return ((queue_num >> 8) ^ queue_num) % INSTANCE_BUCKETS;
125 }
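/* Example: queue_num 0x1234 hashes as (0x0012 ^ 0x1234) % 16 = 0x1226 % 16
 * = bucket 6; with 16 buckets only the low four bits survive, so the low
 * nibbles of both bytes of the queue number get mixed.
 */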
126 
127 /* Extract composite key from nf_queue_entry for hashing */
128 static u32 nfqnl_packet_obj_hashfn(const void *data, u32 len, u32 seed)
129 {
130 	const struct nf_queue_entry *entry = data;
131 	struct nfqnl_packet_key key;
132 
133 	nfqnl_init_key(&key, entry->state.net, entry->id, entry->queue_num);
134 
135 	return jhash2((u32 *)&key, sizeof(key) / sizeof(u32), seed);
136 }
137 
138 /* Compare stack-allocated key against entry */
139 static int nfqnl_packet_obj_cmpfn(struct rhashtable_compare_arg *arg,
140 				  const void *obj)
141 {
142 	const struct nfqnl_packet_key *key = arg->key;
143 	const struct nf_queue_entry *entry = obj;
144 
145 	return !net_eq(entry->state.net, read_pnet(&key->net)) ||
146 	       entry->queue_num != key->queue_num ||
147 	       entry->id != key->packet_id;
148 }
149 
150 static const struct rhashtable_params nfqnl_rhashtable_params = {
151 	.head_offset = offsetof(struct nf_queue_entry, hash_node),
152 	.key_len = sizeof(struct nfqnl_packet_key),
153 	.obj_hashfn = nfqnl_packet_obj_hashfn,
154 	.obj_cmpfn = nfqnl_packet_obj_cmpfn,
155 	.automatic_shrinking = true,
156 	.min_size = NFQNL_HASH_MIN,
157 	.max_size = NFQNL_HASH_MAX,
158 };
159 
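/* Lookup under RCU; callers hold either rcu_read_lock() or
 * q->instances_lock (as instance_create() below does).
 */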
160 static struct nfqnl_instance *
161 instance_lookup(struct nfnl_queue_net *q, u_int16_t queue_num)
162 {
163 	struct hlist_head *head;
164 	struct nfqnl_instance *inst;
165 
166 	head = &q->instance_table[instance_hashfn(queue_num)];
167 	hlist_for_each_entry_rcu(inst, head, hlist) {
168 		if (inst->queue_num == queue_num)
169 			return inst;
170 	}
171 	return NULL;
172 }
173 
174 static struct nfqnl_instance *
175 instance_create(struct nfnl_queue_net *q, u_int16_t queue_num, u32 portid)
176 {
177 	struct nfqnl_instance *inst;
178 	unsigned int h;
179 	int err;
180 
181 	inst = kzalloc_obj(*inst, GFP_KERNEL_ACCOUNT);
182 	if (!inst)
183 		return ERR_PTR(-ENOMEM);
184 
185 	inst->queue_num = queue_num;
186 	inst->peer_portid = portid;
187 	inst->queue_maxlen = NFQNL_QMAX_DEFAULT;
188 	inst->copy_range = NFQNL_MAX_COPY_RANGE;
189 	inst->copy_mode = NFQNL_COPY_NONE;
190 	spin_lock_init(&inst->lock);
191 	INIT_LIST_HEAD(&inst->queue_list);
192 
193 	spin_lock(&q->instances_lock);
194 	if (instance_lookup(q, queue_num)) {
195 		err = -EEXIST;
196 		goto out_unlock;
197 	}
198 
199 	if (!try_module_get(THIS_MODULE)) {
200 		err = -EAGAIN;
201 		goto out_unlock;
202 	}
203 
204 	h = instance_hashfn(queue_num);
205 	hlist_add_head_rcu(&inst->hlist, &q->instance_table[h]);
206 
207 	spin_unlock(&q->instances_lock);
208 
209 	return inst;
210 
211 out_unlock:
212 	spin_unlock(&q->instances_lock);
213 	kfree(inst);
214 	return ERR_PTR(err);
215 }
216 
217 static void nfqnl_flush(struct nfqnl_instance *queue, nfqnl_cmpfn cmpfn,
218 			unsigned long data);
219 
220 static void
221 instance_destroy_rcu(struct rcu_head *head)
222 {
223 	struct nfqnl_instance *inst = container_of(head, struct nfqnl_instance,
224 						   rcu);
225 
226 	rcu_read_lock();
227 	nfqnl_flush(inst, NULL, 0);
228 	rcu_read_unlock();
229 	kfree(inst);
230 	module_put(THIS_MODULE);
231 }
232 
233 static void
234 __instance_destroy(struct nfqnl_instance *inst)
235 {
236 	hlist_del_rcu(&inst->hlist);
237 	call_rcu(&inst->rcu, instance_destroy_rcu);
238 }
239 
240 static void
241 instance_destroy(struct nfnl_queue_net *q, struct nfqnl_instance *inst)
242 {
243 	spin_lock(&q->instances_lock);
244 	__instance_destroy(inst);
245 	spin_unlock(&q->instances_lock);
246 }
247 
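/* Both helpers below expect queue->lock to be held by the caller. */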
248 static int
249 __enqueue_entry(struct nfqnl_instance *queue, struct nf_queue_entry *entry)
250 {
251 	int err;
252 
253 	entry->queue_num = queue->queue_num;
254 
255 	err = rhashtable_insert_fast(&nfqnl_packet_map, &entry->hash_node,
256 				     nfqnl_rhashtable_params);
257 	if (unlikely(err))
258 		return err;
259 
260 	list_add_tail(&entry->list, &queue->queue_list);
261 	queue->queue_total++;
262 
263 	return 0;
264 }
265 
266 static void
267 __dequeue_entry(struct nfqnl_instance *queue, struct nf_queue_entry *entry)
268 {
269 	rhashtable_remove_fast(&nfqnl_packet_map, &entry->hash_node,
270 			       nfqnl_rhashtable_params);
271 	list_del(&entry->list);
272 	queue->queue_total--;
273 }
274 
275 static struct nf_queue_entry *
276 find_dequeue_entry(struct nfqnl_instance *queue, unsigned int id,
277 		   struct net *net)
278 {
279 	struct nfqnl_packet_key key;
280 	struct nf_queue_entry *entry;
281 
282 	nfqnl_init_key(&key, net, id, queue->queue_num);
283 
284 	spin_lock_bh(&queue->lock);
285 	entry = rhashtable_lookup_fast(&nfqnl_packet_map, &key,
286 				       nfqnl_rhashtable_params);
287 
288 	if (entry)
289 		__dequeue_entry(queue, entry);
290 
291 	spin_unlock_bh(&queue->lock);
292 
293 	return entry;
294 }
295 
296 static unsigned int nf_iterate(struct sk_buff *skb,
297 			       struct nf_hook_state *state,
298 			       const struct nf_hook_entries *hooks,
299 			       unsigned int *index)
300 {
301 	const struct nf_hook_entry *hook;
302 	unsigned int verdict, i = *index;
303 
304 	while (i < hooks->num_hook_entries) {
305 		hook = &hooks->hooks[i];
306 repeat:
307 		verdict = nf_hook_entry_hookfn(hook, skb, state);
308 		if (verdict != NF_ACCEPT) {
309 			*index = i;
310 			if (verdict != NF_REPEAT)
311 				return verdict;
312 			goto repeat;
313 		}
314 		i++;
315 	}
316 
317 	*index = i;
318 	return NF_ACCEPT;
319 }
320 
321 static struct nf_hook_entries *nf_hook_entries_head(const struct net *net, u8 pf, u8 hooknum)
322 {
323 	switch (pf) {
324 #ifdef CONFIG_NETFILTER_FAMILY_BRIDGE
325 	case NFPROTO_BRIDGE:
326 		return rcu_dereference(net->nf.hooks_bridge[hooknum]);
327 #endif
328 	case NFPROTO_IPV4:
329 		return rcu_dereference(net->nf.hooks_ipv4[hooknum]);
330 	case NFPROTO_IPV6:
331 		return rcu_dereference(net->nf.hooks_ipv6[hooknum]);
332 	default:
333 		WARN_ON_ONCE(1);
334 		return NULL;
335 	}
336 
337 	return NULL;
338 }
339 
340 static int nf_ip_reroute(struct sk_buff *skb, const struct nf_queue_entry *entry)
341 {
342 #ifdef CONFIG_INET
343 	const struct ip_rt_info *rt_info = nf_queue_entry_reroute(entry);
344 
345 	if (entry->state.hook == NF_INET_LOCAL_OUT) {
346 		const struct iphdr *iph = ip_hdr(skb);
347 
348 		if (!(iph->tos == rt_info->tos &&
349 		      skb->mark == rt_info->mark &&
350 		      iph->daddr == rt_info->daddr &&
351 		      iph->saddr == rt_info->saddr))
352 			return ip_route_me_harder(entry->state.net, entry->state.sk,
353 						  skb, RTN_UNSPEC);
354 	}
355 #endif
356 	return 0;
357 }
358 
359 static int nf_reroute(struct sk_buff *skb, struct nf_queue_entry *entry)
360 {
361 	const struct nf_ipv6_ops *v6ops;
362 	int ret = 0;
363 
364 	switch (entry->state.pf) {
365 	case AF_INET:
366 		ret = nf_ip_reroute(skb, entry);
367 		break;
368 	case AF_INET6:
369 		v6ops = rcu_dereference(nf_ipv6_ops);
370 		if (v6ops)
371 			ret = v6ops->reroute(skb, entry);
372 		break;
373 	}
374 	return ret;
375 }
376 
377 /* caller must hold rcu read-side lock */
378 static void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
379 {
380 	const struct nf_hook_entry *hook_entry;
381 	const struct nf_hook_entries *hooks;
382 	struct sk_buff *skb = entry->skb;
383 	const struct net *net;
384 	unsigned int i;
385 	int err;
386 	u8 pf;
387 
388 	net = entry->state.net;
389 	pf = entry->state.pf;
390 
391 	hooks = nf_hook_entries_head(net, pf, entry->state.hook);
392 
393 	i = entry->hook_index;
394 	if (!hooks || i >= hooks->num_hook_entries) {
395 		kfree_skb_reason(skb, SKB_DROP_REASON_NETFILTER_DROP);
396 		nf_queue_entry_free(entry);
397 		return;
398 	}
399 
400 	hook_entry = &hooks->hooks[i];
401 
402 	/* Continue traversal iff userspace said ok... */
403 	if (verdict == NF_REPEAT)
404 		verdict = nf_hook_entry_hookfn(hook_entry, skb, &entry->state);
405 
406 	if (verdict == NF_ACCEPT) {
407 		if (nf_reroute(skb, entry) < 0)
408 			verdict = NF_DROP;
409 	}
410 
411 	if (verdict == NF_ACCEPT) {
412 next_hook:
413 		++i;
414 		verdict = nf_iterate(skb, &entry->state, hooks, &i);
415 	}
416 
417 	switch (verdict & NF_VERDICT_MASK) {
418 	case NF_ACCEPT:
419 	case NF_STOP:
420 		local_bh_disable();
421 		entry->state.okfn(entry->state.net, entry->state.sk, skb);
422 		local_bh_enable();
423 		break;
424 	case NF_QUEUE:
425 		err = nf_queue(skb, &entry->state, i, verdict);
426 		if (err == 1)
427 			goto next_hook;
428 		break;
429 	case NF_STOLEN:
430 		break;
431 	default:
432 		kfree_skb(skb);
433 	}
434 
435 	nf_queue_entry_free(entry);
436 }
437 
438 /* return true if the entry has an unconfirmed conntrack attached that isn't owned by us
439  * exclusively.
440  */
441 static bool nf_ct_drop_unconfirmed(const struct nf_queue_entry *entry, bool *is_unconfirmed)
442 {
443 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
444 	struct nf_conn *ct = (void *)skb_nfct(entry->skb);
445 
446 	if (!ct || nf_ct_is_confirmed(ct))
447 		return false;
448 
449 	if (is_unconfirmed)
450 		*is_unconfirmed = true;
451 
452 	/* in some cases skb_clone() can occur after initial conntrack
453 	 * pickup, but conntrack assumes exclusive skb->_nfct ownership for
454 	 * unconfirmed entries.
455 	 *
456 	 * This happens for br_netfilter and with ip multicast routing.
457 	 * This can't be solved with serialization here because one clone
458 	 * could have been queued for local delivery or could be transmitted
459 	 * in parallel on another CPU.
460 	 */
461 	return refcount_read(&ct->ct_general.use) > 1;
462 #endif
463 	return false;
464 }
465 
466 static void nfqnl_reinject(struct nf_queue_entry *entry, unsigned int verdict)
467 {
468 	const struct nf_ct_hook *ct_hook;
469 
470 	if (verdict == NF_ACCEPT ||
471 	    verdict == NF_REPEAT ||
472 	    verdict == NF_STOP) {
473 		unsigned int ct_verdict = verdict;
474 
475 		rcu_read_lock();
476 		ct_hook = rcu_dereference(nf_ct_hook);
477 		if (ct_hook)
478 			ct_verdict = ct_hook->update(entry->state.net, entry->skb);
479 		rcu_read_unlock();
480 
481 		switch (ct_verdict & NF_VERDICT_MASK) {
482 		case NF_ACCEPT:
483 			/* follow userspace verdict, could be REPEAT */
484 			break;
485 		case NF_STOLEN:
486 			nf_queue_entry_free(entry);
487 			return;
488 		default:
489 			verdict = ct_verdict & NF_VERDICT_MASK;
490 			break;
491 		}
492 	}
493 
494 	if (verdict != NF_DROP && entry->nf_ct_is_unconfirmed) {
495 		/* If first queued segment was already reinjected then
496 		 * there is a good chance the ct entry is now confirmed.
497 		 *
498 		 * Handle the rare cases:
499 		 *  - out-of-order verdict
500 		 *  - threaded userspace reinjecting in parallel
501 		 *  - first segment was dropped
502 		 *
503 		 * In all of those cases we can't handle this packet
504 		 * because we can't be sure that another CPU won't modify
505 		 * nf_conn->ext in parallel which isn't allowed.
506 		 */
507 		if (nf_ct_drop_unconfirmed(entry, NULL))
508 			verdict = NF_DROP;
509 	}
510 
511 	nf_reinject(entry, verdict);
512 }
513 
514 static void
515 nfqnl_flush(struct nfqnl_instance *queue, nfqnl_cmpfn cmpfn, unsigned long data)
516 {
517 	struct nf_queue_entry *entry, *next;
518 
519 	spin_lock_bh(&queue->lock);
520 	list_for_each_entry_safe(entry, next, &queue->queue_list, list) {
521 		if (!cmpfn || cmpfn(entry, data)) {
522 			__dequeue_entry(queue, entry);
523 			nfqnl_reinject(entry, NF_DROP);
524 		}
525 	}
526 	spin_unlock_bh(&queue->lock);
527 }
528 
529 static int
530 nfqnl_put_packet_info(struct sk_buff *nlskb, struct sk_buff *packet,
531 		      bool csum_verify)
532 {
533 	__u32 flags = 0;
534 
535 	if (packet->ip_summed == CHECKSUM_PARTIAL)
536 		flags = NFQA_SKB_CSUMNOTREADY;
537 	else if (csum_verify)
538 		flags = NFQA_SKB_CSUM_NOTVERIFIED;
539 
540 	if (skb_is_gso(packet))
541 		flags |= NFQA_SKB_GSO;
542 
543 	return flags ? nla_put_be32(nlskb, NFQA_SKB_INFO, htonl(flags)) : 0;
544 }
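/* Example: a GSO skb with CHECKSUM_PARTIAL is reported to userspace with
 * NFQA_SKB_INFO = NFQA_SKB_CSUMNOTREADY | NFQA_SKB_GSO.
 */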
545 
546 static int nfqnl_put_sk_uidgid(struct sk_buff *skb, struct sock *sk)
547 {
548 	const struct cred *cred;
549 
550 	if (!sk_fullsock(sk))
551 		return 0;
552 
553 	read_lock_bh(&sk->sk_callback_lock);
554 	if (sk->sk_socket && sk->sk_socket->file) {
555 		cred = sk->sk_socket->file->f_cred;
556 		if (nla_put_be32(skb, NFQA_UID,
557 		    htonl(from_kuid_munged(&init_user_ns, cred->fsuid))))
558 			goto nla_put_failure;
559 		if (nla_put_be32(skb, NFQA_GID,
560 		    htonl(from_kgid_munged(&init_user_ns, cred->fsgid))))
561 			goto nla_put_failure;
562 	}
563 	read_unlock_bh(&sk->sk_callback_lock);
564 	return 0;
565 
566 nla_put_failure:
567 	read_unlock_bh(&sk->sk_callback_lock);
568 	return -1;
569 }
570 
571 static int nfqnl_put_sk_classid(struct sk_buff *skb, struct sock *sk)
572 {
573 #if IS_ENABLED(CONFIG_CGROUP_NET_CLASSID)
574 	if (sk && sk_fullsock(sk)) {
575 		u32 classid = sock_cgroup_classid(&sk->sk_cgrp_data);
576 
577 		if (classid && nla_put_be32(skb, NFQA_CGROUP_CLASSID, htonl(classid)))
578 			return -1;
579 	}
580 #endif
581 	return 0;
582 }
583 
584 static int nfqnl_get_sk_secctx(struct sk_buff *skb, struct lsm_context *ctx)
585 {
586 	int seclen = 0;
587 #if IS_ENABLED(CONFIG_NETWORK_SECMARK)
588 
589 	if (!skb || !sk_fullsock(skb->sk))
590 		return 0;
591 
592 	read_lock_bh(&skb->sk->sk_callback_lock);
593 
594 	if (skb->secmark)
595 		seclen = security_secid_to_secctx(skb->secmark, ctx);
596 	read_unlock_bh(&skb->sk->sk_callback_lock);
597 #endif
598 	return seclen;
599 }
600 
601 static u32 nfqnl_get_bridge_size(struct nf_queue_entry *entry)
602 {
603 	struct sk_buff *entskb = entry->skb;
604 	u32 nlalen = 0;
605 
606 	if (entry->state.pf != PF_BRIDGE || !skb_mac_header_was_set(entskb))
607 		return 0;
608 
609 	if (skb_vlan_tag_present(entskb))
610 		nlalen += nla_total_size(nla_total_size(sizeof(__be16)) +
611 					 nla_total_size(sizeof(__be16)));
612 
613 	if (entskb->network_header > entskb->mac_header)
614 		nlalen += nla_total_size((entskb->network_header -
615 					  entskb->mac_header));
616 
617 	return nlalen;
618 }
619 
620 static int nfqnl_put_bridge(struct nf_queue_entry *entry, struct sk_buff *skb)
621 {
622 	struct sk_buff *entskb = entry->skb;
623 
624 	if (entry->state.pf != PF_BRIDGE || !skb_mac_header_was_set(entskb))
625 		return 0;
626 
627 	if (skb_vlan_tag_present(entskb)) {
628 		struct nlattr *nest;
629 
630 		nest = nla_nest_start(skb, NFQA_VLAN);
631 		if (!nest)
632 			goto nla_put_failure;
633 
634 		if (nla_put_be16(skb, NFQA_VLAN_TCI, htons(entskb->vlan_tci)) ||
635 		    nla_put_be16(skb, NFQA_VLAN_PROTO, entskb->vlan_proto))
636 			goto nla_put_failure;
637 
638 		nla_nest_end(skb, nest);
639 	}
640 
641 	if (entskb->mac_header < entskb->network_header) {
642 		int len = (int)(entskb->network_header - entskb->mac_header);
643 
644 		if (nla_put(skb, NFQA_L2HDR, len, skb_mac_header(entskb)))
645 			goto nla_put_failure;
646 	}
647 
648 	return 0;
649 
650 nla_put_failure:
651 	return -1;
652 }
653 
654 static int nf_queue_checksum_help(struct sk_buff *entskb)
655 {
656 	if (skb_csum_is_sctp(entskb))
657 		return skb_crc32c_csum_help(entskb);
658 
659 	return skb_checksum_help(entskb);
660 }
661 
662 static struct sk_buff *
663 nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
664 			   struct nf_queue_entry *entry,
665 			   __be32 **packet_id_ptr)
666 {
667 	size_t size;
668 	size_t data_len = 0, cap_len = 0;
669 	unsigned int hlen = 0;
670 	struct sk_buff *skb;
671 	struct nlattr *nla;
672 	struct nfqnl_msg_packet_hdr *pmsg;
673 	struct nlmsghdr *nlh;
674 	struct sk_buff *entskb = entry->skb;
675 	struct net_device *indev;
676 	struct net_device *outdev;
677 	struct nf_conn *ct = NULL;
678 	enum ip_conntrack_info ctinfo = 0;
679 	const struct nfnl_ct_hook *nfnl_ct;
680 	bool csum_verify;
681 	struct lsm_context ctx = { NULL, 0, 0 };
682 	int seclen = 0;
683 	ktime_t tstamp;
684 
685 	size = nlmsg_total_size(sizeof(struct nfgenmsg))
686 		+ nla_total_size(sizeof(struct nfqnl_msg_packet_hdr))
687 		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
688 		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
689 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
690 		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
691 		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
692 #endif
693 		+ nla_total_size(sizeof(u_int32_t))	/* mark */
694 		+ nla_total_size(sizeof(u_int32_t))	/* priority */
695 		+ nla_total_size(sizeof(struct nfqnl_msg_packet_hw))
696 		+ nla_total_size(sizeof(u_int32_t))	/* skbinfo */
697 #if IS_ENABLED(CONFIG_CGROUP_NET_CLASSID)
698 		+ nla_total_size(sizeof(u_int32_t))	/* classid */
699 #endif
700 		+ nla_total_size(sizeof(u_int32_t));	/* cap_len */
701 
702 	tstamp = skb_tstamp_cond(entskb, false);
703 	if (tstamp)
704 		size += nla_total_size(sizeof(struct nfqnl_msg_packet_timestamp));
705 
706 	size += nfqnl_get_bridge_size(entry);
707 
708 	if (entry->state.hook <= NF_INET_FORWARD ||
709 	   (entry->state.hook == NF_INET_POST_ROUTING && entskb->sk == NULL))
710 		csum_verify = !skb_csum_unnecessary(entskb);
711 	else
712 		csum_verify = false;
713 
714 	outdev = entry->state.out;
715 
716 	switch ((enum nfqnl_config_mode)READ_ONCE(queue->copy_mode)) {
717 	case NFQNL_COPY_META:
718 	case NFQNL_COPY_NONE:
719 		break;
720 
721 	case NFQNL_COPY_PACKET:
722 		if (!(queue->flags & NFQA_CFG_F_GSO) &&
723 		    entskb->ip_summed == CHECKSUM_PARTIAL &&
724 		    nf_queue_checksum_help(entskb))
725 			return NULL;
726 
727 		data_len = READ_ONCE(queue->copy_range);
728 		if (data_len > entskb->len)
729 			data_len = entskb->len;
730 
731 		hlen = skb_zerocopy_headlen(entskb);
732 		hlen = min_t(unsigned int, hlen, data_len);
733 		size += sizeof(struct nlattr) + hlen;
734 		cap_len = entskb->len;
735 		break;
736 	}
737 
738 	nfnl_ct = rcu_dereference(nfnl_ct_hook);
739 
740 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
741 	if (queue->flags & NFQA_CFG_F_CONNTRACK) {
742 		if (nfnl_ct != NULL) {
743 			ct = nf_ct_get(entskb, &ctinfo);
744 			if (ct != NULL)
745 				size += nfnl_ct->build_size(ct);
746 		}
747 	}
748 #endif
749 
750 	if (queue->flags & NFQA_CFG_F_UID_GID) {
751 		size += (nla_total_size(sizeof(u_int32_t))	/* uid */
752 			+ nla_total_size(sizeof(u_int32_t)));	/* gid */
753 	}
754 
755 	if ((queue->flags & NFQA_CFG_F_SECCTX) && entskb->sk) {
756 		seclen = nfqnl_get_sk_secctx(entskb, &ctx);
757 		if (seclen < 0)
758 			return NULL;
759 		if (seclen)
760 			size += nla_total_size(seclen);
761 	}
762 
763 	skb = alloc_skb(size, GFP_ATOMIC);
764 	if (!skb) {
765 		skb_tx_error(entskb);
766 		goto nlmsg_failure;
767 	}
768 
769 	nlh = nfnl_msg_put(skb, 0, 0,
770 			   nfnl_msg_type(NFNL_SUBSYS_QUEUE, NFQNL_MSG_PACKET),
771 			   0, entry->state.pf, NFNETLINK_V0,
772 			   htons(queue->queue_num));
773 	if (!nlh) {
774 		skb_tx_error(entskb);
775 		kfree_skb(skb);
776 		goto nlmsg_failure;
777 	}
778 
779 	nla = __nla_reserve(skb, NFQA_PACKET_HDR, sizeof(*pmsg));
780 	pmsg = nla_data(nla);
781 	pmsg->hw_protocol	= entskb->protocol;
782 	pmsg->hook		= entry->state.hook;
783 	*packet_id_ptr		= &pmsg->packet_id;
784 
785 	indev = entry->state.in;
786 	if (indev) {
787 #if !IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
788 		if (nla_put_be32(skb, NFQA_IFINDEX_INDEV, htonl(indev->ifindex)))
789 			goto nla_put_failure;
790 #else
791 		if (entry->state.pf == PF_BRIDGE) {
792 			/* Case 1: indev is physical input device, we need to
793 			 * look for bridge group (when called from
794 			 * netfilter_bridge) */
795 			if (nla_put_be32(skb, NFQA_IFINDEX_PHYSINDEV,
796 					 htonl(indev->ifindex)) ||
797 			/* this is the bridge group "brX" */
798 			/* rcu_read_lock()ed by __nf_queue */
799 			    nla_put_be32(skb, NFQA_IFINDEX_INDEV,
800 					 htonl(br_port_get_rcu(indev)->br->dev->ifindex)))
801 				goto nla_put_failure;
802 		} else {
803 			int physinif;
804 
805 			/* Case 2: indev is bridge group, we need to look for
806 			 * physical device (when called from ipv4) */
807 			if (nla_put_be32(skb, NFQA_IFINDEX_INDEV,
808 					 htonl(indev->ifindex)))
809 				goto nla_put_failure;
810 
811 			physinif = nf_bridge_get_physinif(entskb);
812 			if (physinif &&
813 			    nla_put_be32(skb, NFQA_IFINDEX_PHYSINDEV,
814 					 htonl(physinif)))
815 				goto nla_put_failure;
816 		}
817 #endif
818 	}
819 
820 	if (outdev) {
821 #if !IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
822 		if (nla_put_be32(skb, NFQA_IFINDEX_OUTDEV, htonl(outdev->ifindex)))
823 			goto nla_put_failure;
824 #else
825 		if (entry->state.pf == PF_BRIDGE) {
826 			/* Case 1: outdev is physical output device, we need to
827 			 * look for bridge group (when called from
828 			 * netfilter_bridge) */
829 			if (nla_put_be32(skb, NFQA_IFINDEX_PHYSOUTDEV,
830 					 htonl(outdev->ifindex)) ||
831 			/* this is the bridge group "brX" */
832 			/* rcu_read_lock()ed by __nf_queue */
833 			    nla_put_be32(skb, NFQA_IFINDEX_OUTDEV,
834 					 htonl(br_port_get_rcu(outdev)->br->dev->ifindex)))
835 				goto nla_put_failure;
836 		} else {
837 			int physoutif;
838 
839 			/* Case 2: outdev is bridge group, we need to look for
840 			 * physical output device (when called from ipv4) */
841 			if (nla_put_be32(skb, NFQA_IFINDEX_OUTDEV,
842 					 htonl(outdev->ifindex)))
843 				goto nla_put_failure;
844 
845 			physoutif = nf_bridge_get_physoutif(entskb);
846 			if (physoutif &&
847 			    nla_put_be32(skb, NFQA_IFINDEX_PHYSOUTDEV,
848 					 htonl(physoutif)))
849 				goto nla_put_failure;
850 		}
851 #endif
852 	}
853 
854 	if (entskb->mark &&
855 	    nla_put_be32(skb, NFQA_MARK, htonl(entskb->mark)))
856 		goto nla_put_failure;
857 
858 	if (entskb->priority &&
859 	    nla_put_be32(skb, NFQA_PRIORITY, htonl(entskb->priority)))
860 		goto nla_put_failure;
861 
862 	if (indev && entskb->dev &&
863 	    skb_mac_header_was_set(entskb) &&
864 	    skb_mac_header_len(entskb) != 0) {
865 		struct nfqnl_msg_packet_hw phw;
866 		int len;
867 
868 		memset(&phw, 0, sizeof(phw));
869 		len = dev_parse_header(entskb, phw.hw_addr);
870 		if (len) {
871 			phw.hw_addrlen = htons(len);
872 			if (nla_put(skb, NFQA_HWADDR, sizeof(phw), &phw))
873 				goto nla_put_failure;
874 		}
875 	}
876 
877 	if (nfqnl_put_bridge(entry, skb) < 0)
878 		goto nla_put_failure;
879 
880 	if (entry->state.hook <= NF_INET_FORWARD && tstamp) {
881 		struct nfqnl_msg_packet_timestamp ts;
882 		struct timespec64 kts = ktime_to_timespec64(tstamp);
883 
884 		ts.sec = cpu_to_be64(kts.tv_sec);
885 		ts.usec = cpu_to_be64(kts.tv_nsec / NSEC_PER_USEC);
886 
887 		if (nla_put(skb, NFQA_TIMESTAMP, sizeof(ts), &ts))
888 			goto nla_put_failure;
889 	}
890 
891 	if ((queue->flags & NFQA_CFG_F_UID_GID) && entskb->sk &&
892 	    nfqnl_put_sk_uidgid(skb, entskb->sk) < 0)
893 		goto nla_put_failure;
894 
895 	if (nfqnl_put_sk_classid(skb, entskb->sk) < 0)
896 		goto nla_put_failure;
897 
898 	if (seclen > 0 && nla_put(skb, NFQA_SECCTX, ctx.len, ctx.context))
899 		goto nla_put_failure;
900 
901 	if (ct && nfnl_ct->build(skb, ct, ctinfo, NFQA_CT, NFQA_CT_INFO) < 0)
902 		goto nla_put_failure;
903 
904 	if (cap_len > data_len &&
905 	    nla_put_be32(skb, NFQA_CAP_LEN, htonl(cap_len)))
906 		goto nla_put_failure;
907 
908 	if (nfqnl_put_packet_info(skb, entskb, csum_verify))
909 		goto nla_put_failure;
910 
911 	if (data_len) {
912 		struct nlattr *nla;
913 
914 		if (skb_tailroom(skb) < sizeof(*nla) + hlen)
915 			goto nla_put_failure;
916 
917 		nla = skb_put(skb, sizeof(*nla));
918 		nla->nla_type = NFQA_PAYLOAD;
919 		nla->nla_len = nla_attr_size(data_len);
920 
921 		if (skb_zerocopy(skb, entskb, data_len, hlen))
922 			goto nla_put_failure;
923 	}
924 
925 	nlh->nlmsg_len = skb->len;
926 	if (seclen >= 0)
927 		security_release_secctx(&ctx);
928 	return skb;
929 
930 nla_put_failure:
931 	skb_tx_error(entskb);
932 	kfree_skb(skb);
933 	net_err_ratelimited("nf_queue: error creating packet message\n");
934 nlmsg_failure:
935 	if (seclen >= 0)
936 		security_release_secctx(&ctx);
937 	return NULL;
938 }
939 
940 static int
941 __nfqnl_enqueue_packet(struct net *net, struct nfqnl_instance *queue,
942 			struct nf_queue_entry *entry)
943 {
944 	struct sk_buff *nskb;
945 	int err = -ENOBUFS;
946 	__be32 *packet_id_ptr;
947 	int failopen = 0;
948 
949 	nskb = nfqnl_build_packet_message(net, queue, entry, &packet_id_ptr);
950 	if (nskb == NULL) {
951 		err = -ENOMEM;
952 		goto err_out;
953 	}
954 	spin_lock_bh(&queue->lock);
955 
956 	if (queue->queue_total >= queue->queue_maxlen)
957 		goto err_out_queue_drop;
958 
959 	entry->id = ++queue->id_sequence;
960 	*packet_id_ptr = htonl(entry->id);
961 
962 	/* Insert into the hash BEFORE unicast; if insertion fails, don't send to userspace. */
963 	err = __enqueue_entry(queue, entry);
964 	if (unlikely(err))
965 		goto err_out_queue_drop;
966 
967 	/* nfnetlink_unicast will either free the nskb or add it to a socket */
968 	err = nfnetlink_unicast(nskb, net, queue->peer_portid);
969 	if (err < 0) {
970 		/* Unicast failed - remove entry we just inserted */
971 		__dequeue_entry(queue, entry);
972 
973 		if (queue->flags & NFQA_CFG_F_FAIL_OPEN) {
974 			failopen = 1;
975 			err = 0;
976 		} else {
977 			queue->queue_user_dropped++;
978 		}
979 		goto err_out_unlock;
980 	}
981 
982 	spin_unlock_bh(&queue->lock);
983 	return 0;
984 
985 err_out_queue_drop:
986 	if (queue->flags & NFQA_CFG_F_FAIL_OPEN) {
987 		failopen = 1;
988 		err = 0;
989 	} else {
990 		queue->queue_dropped++;
991 
992 		if (queue->queue_total >= queue->queue_maxlen)
993 			net_warn_ratelimited("nf_queue: full at %d entries, dropping packet(s)\n",
994 					     queue->queue_total);
995 		else
996 			net_warn_ratelimited("nf_queue: hash insert failed: %d\n", err);
997 	}
998 	kfree_skb(nskb);
999 err_out_unlock:
1000 	spin_unlock_bh(&queue->lock);
1001 	if (failopen)
1002 		nfqnl_reinject(entry, NF_ACCEPT);
1003 err_out:
1004 	return err;
1005 }
1006 
1007 static struct nf_queue_entry *
1008 nf_queue_entry_dup(struct nf_queue_entry *e)
1009 {
1010 	struct nf_queue_entry *entry = kmemdup(e, e->size, GFP_ATOMIC);
1011 
1012 	if (!entry)
1013 		return NULL;
1014 
1015 	if (nf_queue_entry_get_refs(entry))
1016 		return entry;
1017 
1018 	kfree(entry);
1019 	return NULL;
1020 }
1021 
1022 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
1023 /* When called from bridge netfilter, skb->data must point to the MAC
1024  * header before calling skb_gso_segment(); otherwise the original MAC
1025  * header is lost and the segmented skbs are sent to the wrong destination.
1026  */
1027 static void nf_bridge_adjust_skb_data(struct sk_buff *skb)
1028 {
1029 	if (nf_bridge_info_get(skb))
1030 		__skb_push(skb, skb->network_header - skb->mac_header);
1031 }
1032 
1033 static void nf_bridge_adjust_segmented_data(struct sk_buff *skb)
1034 {
1035 	if (nf_bridge_info_get(skb))
1036 		__skb_pull(skb, skb->network_header - skb->mac_header);
1037 }
1038 #else
1039 #define nf_bridge_adjust_skb_data(s) do {} while (0)
1040 #define nf_bridge_adjust_segmented_data(s) do {} while (0)
1041 #endif
1042 
1043 static int
1044 __nfqnl_enqueue_packet_gso(struct net *net, struct nfqnl_instance *queue,
1045 			   struct sk_buff *skb, struct nf_queue_entry *entry)
1046 {
1047 	int ret = -ENOMEM;
1048 	struct nf_queue_entry *entry_seg;
1049 
1050 	nf_bridge_adjust_segmented_data(skb);
1051 
1052 	if (skb->next == NULL) { /* last packet, no need to copy entry */
1053 		struct sk_buff *gso_skb = entry->skb;
1054 		entry->skb = skb;
1055 		ret = __nfqnl_enqueue_packet(net, queue, entry);
1056 		if (ret)
1057 			entry->skb = gso_skb;
1058 		return ret;
1059 	}
1060 
1061 	skb_mark_not_on_list(skb);
1062 
1063 	entry_seg = nf_queue_entry_dup(entry);
1064 	if (entry_seg) {
1065 		entry_seg->skb = skb;
1066 		ret = __nfqnl_enqueue_packet(net, queue, entry_seg);
1067 		if (ret)
1068 			nf_queue_entry_free(entry_seg);
1069 	}
1070 	return ret;
1071 }
1072 
1073 static int
1074 nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
1075 {
1076 	struct sk_buff *skb, *segs, *nskb;
1077 	bool ct_is_unconfirmed = false;
1078 	struct nfqnl_instance *queue;
1079 	unsigned int queued;
1080 	int err = -ENOBUFS;
1081 	struct net *net = entry->state.net;
1082 	struct nfnl_queue_net *q = nfnl_queue_pernet(net);
1083 
1084 	/* rcu_read_lock()ed by nf_hook_thresh */
1085 	queue = instance_lookup(q, queuenum);
1086 	if (!queue)
1087 		return -ESRCH;
1088 
1089 	if (queue->copy_mode == NFQNL_COPY_NONE)
1090 		return -EINVAL;
1091 
1092 	skb = entry->skb;
1093 
1094 	switch (entry->state.pf) {
1095 	case NFPROTO_IPV4:
1096 		skb->protocol = htons(ETH_P_IP);
1097 		break;
1098 	case NFPROTO_IPV6:
1099 		skb->protocol = htons(ETH_P_IPV6);
1100 		break;
1101 	}
1102 
1103 	/* Check if someone already holds another reference to
1104 	 * unconfirmed ct.  If so, we cannot queue the skb:
1105 	 * concurrent modifications of nf_conn->ext are not
1106 	 * allowed and we cannot rule out that another CPU is
1107 	 * processing the same nf_conn entry in parallel.
1108 	 */
1109 	if (nf_ct_drop_unconfirmed(entry, &ct_is_unconfirmed))
1110 		return -EINVAL;
1111 
1112 	if (!skb_is_gso(skb) || ((queue->flags & NFQA_CFG_F_GSO) && !skb_is_gso_sctp(skb)))
1113 		return __nfqnl_enqueue_packet(net, queue, entry);
1114 
1115 	nf_bridge_adjust_skb_data(skb);
1116 	segs = skb_gso_segment(skb, 0);
1117 	/* Does not use PTR_ERR to limit the number of error codes that can be
1118 	 * returned by nf_queue.  For instance, callers rely on -ESRCH to
1119 	 * mean 'ignore this hook'.
1120 	 */
1121 	if (IS_ERR_OR_NULL(segs))
1122 		goto out_err;
1123 	queued = 0;
1124 	err = 0;
1125 
1126 	skb_list_walk_safe(segs, segs, nskb) {
1127 		if (ct_is_unconfirmed && queued > 0) {
1128 			/* skb_gso_segment() increments the ct refcount.
1129 			 * This is a problem for unconfirmed (not in hash)
1130 			 * entries, those can race when reinjections happen
1131 			 * in parallel.
1132 			 *
1133 			 * Annotate this for all queued entries except the
1134 			 * first one.
1135 			 *
1136 			 * As long as the first one is reinjected first it
1137 			 * will do the confirmation for us.
1138 			 */
1139 			entry->nf_ct_is_unconfirmed = ct_is_unconfirmed;
1140 		}
1141 
1142 		if (err == 0)
1143 			err = __nfqnl_enqueue_packet_gso(net, queue,
1144 							segs, entry);
1145 		if (err == 0)
1146 			queued++;
1147 		else
1148 			kfree_skb(segs);
1149 	}
1150 
1151 	if (queued) {
1152 		if (err) /* some segments are already queued */
1153 			nf_queue_entry_free(entry);
1154 		kfree_skb(skb);
1155 		return 0;
1156 	}
1157  out_err:
1158 	nf_bridge_adjust_segmented_data(skb);
1159 	return err;
1160 }
1161 
1162 static int
1163 nfqnl_mangle(void *data, unsigned int data_len, struct nf_queue_entry *e, int diff)
1164 {
1165 	struct sk_buff *nskb;
1166 
1167 	if (diff < 0) {
1168 		unsigned int min_len = skb_transport_offset(e->skb);
1169 
1170 		if (data_len < min_len)
1171 			return -EINVAL;
1172 
1173 		if (pskb_trim(e->skb, data_len))
1174 			return -ENOMEM;
1175 	} else if (diff > 0) {
1176 		if (data_len > 0xFFFF)
1177 			return -EINVAL;
1178 		if (diff > skb_tailroom(e->skb)) {
1179 			nskb = skb_copy_expand(e->skb, skb_headroom(e->skb),
1180 					       diff, GFP_ATOMIC);
1181 			if (!nskb)
1182 				return -ENOMEM;
1183 			kfree_skb(e->skb);
1184 			e->skb = nskb;
1185 		}
1186 		skb_put(e->skb, diff);
1187 	}
1188 	if (skb_ensure_writable(e->skb, data_len))
1189 		return -ENOMEM;
1190 	skb_copy_to_linear_data(e->skb, data, data_len);
1191 	e->skb->ip_summed = CHECKSUM_NONE;
1192 	return 0;
1193 }
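/* Example: replacing a 100 byte queued skb with a 120 byte payload gives
 * diff = +20; skb_put() grows the skb in place when tailroom suffices,
 * otherwise skb_copy_expand() reallocates first.  An 80 byte replacement
 * gives diff = -20 and pskb_trim() shrinks the skb after the
 * transport-offset sanity check.
 */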
1194 
1195 static int
1196 nfqnl_set_mode(struct nfqnl_instance *queue,
1197 	       unsigned char mode, unsigned int range)
1198 {
1199 	int status = 0;
1200 
1201 	spin_lock_bh(&queue->lock);
1202 	switch (mode) {
1203 	case NFQNL_COPY_NONE:
1204 	case NFQNL_COPY_META:
1205 		queue->copy_mode = mode;
1206 		queue->copy_range = 0;
1207 		break;
1208 
1209 	case NFQNL_COPY_PACKET:
1210 		queue->copy_mode = mode;
1211 		if (range == 0 || range > NFQNL_MAX_COPY_RANGE)
1212 			queue->copy_range = NFQNL_MAX_COPY_RANGE;
1213 		else
1214 			queue->copy_range = range;
1215 		break;
1216 
1217 	default:
1218 		status = -EINVAL;
1219 
1220 	}
1221 	spin_unlock_bh(&queue->lock);
1222 
1223 	return status;
1224 }
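/* Userspace counterpart (illustrative sketch using libnetfilter_queue; the
 * call below is not part of this file):
 *
 *	nfq_set_mode(qh, NFQNL_COPY_PACKET, 0xffff);
 *
 * The range is clamped here to NFQNL_MAX_COPY_RANGE (65531) however large a
 * value userspace requests; the request reaches this function through
 * NFQA_CFG_PARAMS in nfqnl_recv_config().
 */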
1225 
1226 static int
1227 dev_cmp(struct nf_queue_entry *entry, unsigned long ifindex)
1228 {
1229 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
1230 	int physinif, physoutif;
1231 
1232 	physinif = nf_bridge_get_physinif(entry->skb);
1233 	physoutif = nf_bridge_get_physoutif(entry->skb);
1234 
1235 	if (physinif == ifindex || physoutif == ifindex)
1236 		return 1;
1237 #endif
1238 	if (entry->state.in)
1239 		if (entry->state.in->ifindex == ifindex)
1240 			return 1;
1241 	if (entry->state.out)
1242 		if (entry->state.out->ifindex == ifindex)
1243 			return 1;
1244 
1245 	return 0;
1246 }
1247 
1248 /* drop all packets with either indev or outdev == ifindex from all queue
1249  * instances */
1250 static void
1251 nfqnl_dev_drop(struct net *net, int ifindex)
1252 {
1253 	int i;
1254 	struct nfnl_queue_net *q = nfnl_queue_pernet(net);
1255 
1256 	rcu_read_lock();
1257 
1258 	for (i = 0; i < INSTANCE_BUCKETS; i++) {
1259 		struct nfqnl_instance *inst;
1260 		struct hlist_head *head = &q->instance_table[i];
1261 
1262 		hlist_for_each_entry_rcu(inst, head, hlist)
1263 			nfqnl_flush(inst, dev_cmp, ifindex);
1264 	}
1265 
1266 	rcu_read_unlock();
1267 }
1268 
1269 static int
1270 nfqnl_rcv_dev_event(struct notifier_block *this,
1271 		    unsigned long event, void *ptr)
1272 {
1273 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
1274 
1275 	/* Drop any packets associated with the downed device */
1276 	if (event == NETDEV_DOWN)
1277 		nfqnl_dev_drop(dev_net(dev), dev->ifindex);
1278 	return NOTIFY_DONE;
1279 }
1280 
1281 static struct notifier_block nfqnl_dev_notifier = {
1282 	.notifier_call	= nfqnl_rcv_dev_event,
1283 };
1284 
1285 static void nfqnl_nf_hook_drop(struct net *net)
1286 {
1287 	struct nfnl_queue_net *q = nfnl_queue_pernet(net);
1288 	int i;
1289 
1290 	/* This function is also called on net namespace error unwind,
1291 	 * when pernet_ops->init() failed and ->exit() functions of the
1292 	 * previous pernet_ops gets called.
1293 	 *
1294 	 * This may result in a call to nfqnl_nf_hook_drop() before
1295 	 * struct nfnl_queue_net was allocated.
1296 	 */
1297 	if (!q)
1298 		return;
1299 
1300 	for (i = 0; i < INSTANCE_BUCKETS; i++) {
1301 		struct nfqnl_instance *inst;
1302 		struct hlist_head *head = &q->instance_table[i];
1303 
1304 		hlist_for_each_entry_rcu(inst, head, hlist)
1305 			nfqnl_flush(inst, NULL, 0);
1306 	}
1307 }
1308 
1309 static int
1310 nfqnl_rcv_nl_event(struct notifier_block *this,
1311 		   unsigned long event, void *ptr)
1312 {
1313 	struct netlink_notify *n = ptr;
1314 	struct nfnl_queue_net *q = nfnl_queue_pernet(n->net);
1315 
1316 	if (event == NETLINK_URELEASE && n->protocol == NETLINK_NETFILTER) {
1317 		int i;
1318 
1319 		/* destroy all instances for this portid */
1320 		spin_lock(&q->instances_lock);
1321 		for (i = 0; i < INSTANCE_BUCKETS; i++) {
1322 			struct hlist_node *t2;
1323 			struct nfqnl_instance *inst;
1324 			struct hlist_head *head = &q->instance_table[i];
1325 
1326 			hlist_for_each_entry_safe(inst, t2, head, hlist) {
1327 				if (n->portid == inst->peer_portid)
1328 					__instance_destroy(inst);
1329 			}
1330 		}
1331 		spin_unlock(&q->instances_lock);
1332 	}
1333 	return NOTIFY_DONE;
1334 }
1335 
1336 static struct notifier_block nfqnl_rtnl_notifier = {
1337 	.notifier_call	= nfqnl_rcv_nl_event,
1338 };
1339 
1340 static const struct nla_policy nfqa_vlan_policy[NFQA_VLAN_MAX + 1] = {
1341 	[NFQA_VLAN_TCI]		= { .type = NLA_U16},
1342 	[NFQA_VLAN_PROTO]	= { .type = NLA_U16},
1343 };
1344 
1345 static const struct nla_policy nfqa_verdict_policy[NFQA_MAX+1] = {
1346 	[NFQA_VERDICT_HDR]	= { .len = sizeof(struct nfqnl_msg_verdict_hdr) },
1347 	[NFQA_MARK]		= { .type = NLA_U32 },
1348 	[NFQA_PAYLOAD]		= { .type = NLA_UNSPEC },
1349 	[NFQA_CT]		= { .type = NLA_UNSPEC },
1350 	[NFQA_EXP]		= { .type = NLA_UNSPEC },
1351 	[NFQA_VLAN]		= { .type = NLA_NESTED },
1352 	[NFQA_PRIORITY]		= { .type = NLA_U32 },
1353 };
1354 
1355 static const struct nla_policy nfqa_verdict_batch_policy[NFQA_MAX+1] = {
1356 	[NFQA_VERDICT_HDR]	= { .len = sizeof(struct nfqnl_msg_verdict_hdr) },
1357 	[NFQA_MARK]		= { .type = NLA_U32 },
1358 	[NFQA_PRIORITY]		= { .type = NLA_U32 },
1359 };
1360 
1361 static struct nfqnl_instance *
1362 verdict_instance_lookup(struct nfnl_queue_net *q, u16 queue_num, u32 nlportid)
1363 {
1364 	struct nfqnl_instance *queue;
1365 
1366 	queue = instance_lookup(q, queue_num);
1367 	if (!queue)
1368 		return ERR_PTR(-ENODEV);
1369 
1370 	if (queue->peer_portid != nlportid)
1371 		return ERR_PTR(-EPERM);
1372 
1373 	return queue;
1374 }
1375 
1376 static struct nfqnl_msg_verdict_hdr*
1377 verdicthdr_get(const struct nlattr * const nfqa[])
1378 {
1379 	struct nfqnl_msg_verdict_hdr *vhdr;
1380 	unsigned int verdict;
1381 
1382 	if (!nfqa[NFQA_VERDICT_HDR])
1383 		return NULL;
1384 
1385 	vhdr = nla_data(nfqa[NFQA_VERDICT_HDR]);
1386 	verdict = ntohl(vhdr->verdict) & NF_VERDICT_MASK;
1387 	if (verdict > NF_MAX_VERDICT || verdict == NF_STOLEN)
1388 		return NULL;
1389 	return vhdr;
1390 }
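/* Only the low verdict bits are range-checked above because NF_QUEUE
 * verdicts carry a target queue number in the upper bits (see
 * NF_VERDICT_QBITS); masking with NF_VERDICT_MASK strips that before the
 * comparison.
 */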
1391 
1392 static int nfq_id_after(unsigned int id, unsigned int max)
1393 {
1394 	return (int)(id - max) > 0;
1395 }
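/* Serial-number style comparison that survives id_sequence wrap-around.
 * Example: id = 5, max = 0xfffffffe gives (int)(5 - 0xfffffffe) = 7 > 0,
 * so id 5 counts as "after" max even though it is numerically smaller.
 */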
1396 
1397 static int nfqnl_recv_verdict_batch(struct sk_buff *skb,
1398 				    const struct nfnl_info *info,
1399 				    const struct nlattr * const nfqa[])
1400 {
1401 	struct nfnl_queue_net *q = nfnl_queue_pernet(info->net);
1402 	u16 queue_num = ntohs(info->nfmsg->res_id);
1403 	struct nf_queue_entry *entry, *tmp;
1404 	struct nfqnl_msg_verdict_hdr *vhdr;
1405 	struct nfqnl_instance *queue;
1406 	unsigned int verdict, maxid;
1407 	LIST_HEAD(batch_list);
1408 
1409 	queue = verdict_instance_lookup(q, queue_num,
1410 					NETLINK_CB(skb).portid);
1411 	if (IS_ERR(queue))
1412 		return PTR_ERR(queue);
1413 
1414 	vhdr = verdicthdr_get(nfqa);
1415 	if (!vhdr)
1416 		return -EINVAL;
1417 
1418 	verdict = ntohl(vhdr->verdict);
1419 	maxid = ntohl(vhdr->id);
1420 
1421 	spin_lock_bh(&queue->lock);
1422 
1423 	list_for_each_entry_safe(entry, tmp, &queue->queue_list, list) {
1424 		if (nfq_id_after(entry->id, maxid))
1425 			break;
1426 		__dequeue_entry(queue, entry);
1427 		list_add_tail(&entry->list, &batch_list);
1428 	}
1429 
1430 	spin_unlock_bh(&queue->lock);
1431 
1432 	if (list_empty(&batch_list))
1433 		return -ENOENT;
1434 
1435 	list_for_each_entry_safe(entry, tmp, &batch_list, list) {
1436 		if (nfqa[NFQA_MARK])
1437 			entry->skb->mark = ntohl(nla_get_be32(nfqa[NFQA_MARK]));
1438 
1439 		if (nfqa[NFQA_PRIORITY])
1440 			entry->skb->priority = ntohl(nla_get_be32(nfqa[NFQA_PRIORITY]));
1441 
1442 		nfqnl_reinject(entry, verdict);
1443 	}
1444 	return 0;
1445 }
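/* Batch semantics: one NFQNL_MSG_VERDICT_BATCH with id = 100 and verdict
 * NF_ACCEPT accepts every still-queued packet whose id is <= 100 (per the
 * wrap-around-safe comparison above); later ids remain queued.
 */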
1446 
1447 static struct nf_conn *nfqnl_ct_parse(const struct nfnl_ct_hook *nfnl_ct,
1448 				      const struct nlmsghdr *nlh,
1449 				      const struct nlattr * const nfqa[],
1450 				      struct nf_queue_entry *entry,
1451 				      enum ip_conntrack_info *ctinfo)
1452 {
1453 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
1454 	struct nf_conn *ct;
1455 
1456 	ct = nf_ct_get(entry->skb, ctinfo);
1457 	if (ct == NULL)
1458 		return NULL;
1459 
1460 	if (nfnl_ct->parse(nfqa[NFQA_CT], ct) < 0)
1461 		return NULL;
1462 
1463 	if (nfqa[NFQA_EXP])
1464 		nfnl_ct->attach_expect(nfqa[NFQA_EXP], ct,
1465 				      NETLINK_CB(entry->skb).portid,
1466 				      nlmsg_report(nlh));
1467 	return ct;
1468 #else
1469 	return NULL;
1470 #endif
1471 }
1472 
1473 static int nfqa_parse_bridge(struct nf_queue_entry *entry,
1474 			     const struct nlattr * const nfqa[])
1475 {
1476 	if (nfqa[NFQA_VLAN]) {
1477 		struct nlattr *tb[NFQA_VLAN_MAX + 1];
1478 		int err;
1479 
1480 		err = nla_parse_nested_deprecated(tb, NFQA_VLAN_MAX,
1481 						  nfqa[NFQA_VLAN],
1482 						  nfqa_vlan_policy, NULL);
1483 		if (err < 0)
1484 			return err;
1485 
1486 		if (!tb[NFQA_VLAN_TCI] || !tb[NFQA_VLAN_PROTO])
1487 			return -EINVAL;
1488 
1489 		__vlan_hwaccel_put_tag(entry->skb,
1490 			nla_get_be16(tb[NFQA_VLAN_PROTO]),
1491 			ntohs(nla_get_be16(tb[NFQA_VLAN_TCI])));
1492 	}
1493 
1494 	if (nfqa[NFQA_L2HDR]) {
1495 		int mac_header_len = entry->skb->network_header -
1496 			entry->skb->mac_header;
1497 
1498 		if (mac_header_len != nla_len(nfqa[NFQA_L2HDR]))
1499 			return -EINVAL;
1500 		else if (mac_header_len > 0)
1501 			memcpy(skb_mac_header(entry->skb),
1502 			       nla_data(nfqa[NFQA_L2HDR]),
1503 			       mac_header_len);
1504 	}
1505 
1506 	return 0;
1507 }
1508 
1509 static int nfqnl_recv_verdict(struct sk_buff *skb, const struct nfnl_info *info,
1510 			      const struct nlattr * const nfqa[])
1511 {
1512 	struct nfnl_queue_net *q = nfnl_queue_pernet(info->net);
1513 	u_int16_t queue_num = ntohs(info->nfmsg->res_id);
1514 	const struct nfnl_ct_hook *nfnl_ct;
1515 	struct nfqnl_msg_verdict_hdr *vhdr;
1516 	enum ip_conntrack_info ctinfo;
1517 	struct nfqnl_instance *queue;
1518 	struct nf_queue_entry *entry;
1519 	struct nf_conn *ct = NULL;
1520 	unsigned int verdict;
1521 	int err;
1522 
1523 	queue = verdict_instance_lookup(q, queue_num,
1524 					NETLINK_CB(skb).portid);
1525 	if (IS_ERR(queue))
1526 		return PTR_ERR(queue);
1527 
1528 	vhdr = verdicthdr_get(nfqa);
1529 	if (!vhdr)
1530 		return -EINVAL;
1531 
1532 	verdict = ntohl(vhdr->verdict);
1533 
1534 	entry = find_dequeue_entry(queue, ntohl(vhdr->id), info->net);
1535 	if (entry == NULL)
1536 		return -ENOENT;
1537 
1538 	/* rcu lock already held from nfnl->call_rcu. */
1539 	nfnl_ct = rcu_dereference(nfnl_ct_hook);
1540 
1541 	if (nfqa[NFQA_CT]) {
1542 		if (nfnl_ct != NULL)
1543 			ct = nfqnl_ct_parse(nfnl_ct, info->nlh, nfqa, entry,
1544 					    &ctinfo);
1545 	}
1546 
1547 	if (entry->state.pf == PF_BRIDGE) {
1548 		err = nfqa_parse_bridge(entry, nfqa);
1549 		if (err < 0) {
1550 			nfqnl_reinject(entry, NF_DROP);
1551 			return err;
1552 		}
1553 	}
1554 
1555 	if (nfqa[NFQA_PAYLOAD]) {
1556 		u16 payload_len = nla_len(nfqa[NFQA_PAYLOAD]);
1557 		int diff = payload_len - entry->skb->len;
1558 
1559 		if (nfqnl_mangle(nla_data(nfqa[NFQA_PAYLOAD]),
1560 				 payload_len, entry, diff) < 0)
1561 			verdict = NF_DROP;
1562 
1563 		if (ct && diff)
1564 			nfnl_ct->seq_adjust(entry->skb, ct, ctinfo, diff);
1565 	}
1566 
1567 	if (nfqa[NFQA_MARK])
1568 		entry->skb->mark = ntohl(nla_get_be32(nfqa[NFQA_MARK]));
1569 
1570 	if (nfqa[NFQA_PRIORITY])
1571 		entry->skb->priority = ntohl(nla_get_be32(nfqa[NFQA_PRIORITY]));
1572 
1573 	nfqnl_reinject(entry, verdict);
1574 	return 0;
1575 }
1576 
1577 static int nfqnl_recv_unsupp(struct sk_buff *skb, const struct nfnl_info *info,
1578 			     const struct nlattr * const cda[])
1579 {
1580 	return -ENOTSUPP;
1581 }
1582 
1583 static const struct nla_policy nfqa_cfg_policy[NFQA_CFG_MAX+1] = {
1584 	[NFQA_CFG_CMD]		= { .len = sizeof(struct nfqnl_msg_config_cmd) },
1585 	[NFQA_CFG_PARAMS]	= { .len = sizeof(struct nfqnl_msg_config_params) },
1586 	[NFQA_CFG_QUEUE_MAXLEN]	= { .type = NLA_U32 },
1587 	[NFQA_CFG_MASK]		= { .type = NLA_U32 },
1588 	[NFQA_CFG_FLAGS]	= { .type = NLA_U32 },
1589 };
1590 
1591 static const struct nf_queue_handler nfqh = {
1592 	.outfn		= nfqnl_enqueue_packet,
1593 	.nf_hook_drop	= nfqnl_nf_hook_drop,
1594 };
1595 
1596 static int nfqnl_recv_config(struct sk_buff *skb, const struct nfnl_info *info,
1597 			     const struct nlattr * const nfqa[])
1598 {
1599 	struct nfnl_queue_net *q = nfnl_queue_pernet(info->net);
1600 	u_int16_t queue_num = ntohs(info->nfmsg->res_id);
1601 	struct nfqnl_msg_config_cmd *cmd = NULL;
1602 	struct nfqnl_instance *queue;
1603 	__u32 flags = 0, mask = 0;
1604 
1605 	WARN_ON_ONCE(!lockdep_nfnl_is_held(NFNL_SUBSYS_QUEUE));
1606 
1607 	if (nfqa[NFQA_CFG_CMD]) {
1608 		cmd = nla_data(nfqa[NFQA_CFG_CMD]);
1609 
1610 		/* Obsolete commands without queue context */
1611 		switch (cmd->command) {
1612 		case NFQNL_CFG_CMD_PF_BIND: return 0;
1613 		case NFQNL_CFG_CMD_PF_UNBIND: return 0;
1614 		}
1615 	}
1616 
1617 	/* Check that we support these flags in the first place; their
1618 	 * dependencies must be present too so atomicity is not broken.
1619 	 */
1620 	if (nfqa[NFQA_CFG_FLAGS]) {
1621 		if (!nfqa[NFQA_CFG_MASK]) {
1622 			/* A mask is needed to specify which flags are being
1623 			 * changed.
1624 			 */
1625 			return -EINVAL;
1626 		}
1627 
1628 		flags = ntohl(nla_get_be32(nfqa[NFQA_CFG_FLAGS]));
1629 		mask = ntohl(nla_get_be32(nfqa[NFQA_CFG_MASK]));
1630 
1631 		if (flags >= NFQA_CFG_F_MAX)
1632 			return -EOPNOTSUPP;
1633 
1634 #if !IS_ENABLED(CONFIG_NETWORK_SECMARK)
1635 		if (flags & mask & NFQA_CFG_F_SECCTX)
1636 			return -EOPNOTSUPP;
1637 #endif
1638 		if ((flags & mask & NFQA_CFG_F_CONNTRACK) &&
1639 		    !rcu_access_pointer(nfnl_ct_hook)) {
1640 #ifdef CONFIG_MODULES
1641 			nfnl_unlock(NFNL_SUBSYS_QUEUE);
1642 			request_module("ip_conntrack_netlink");
1643 			nfnl_lock(NFNL_SUBSYS_QUEUE);
1644 			if (rcu_access_pointer(nfnl_ct_hook))
1645 				return -EAGAIN;
1646 #endif
1647 			return -EOPNOTSUPP;
1648 		}
1649 	}
1650 
1651 	/* Lookup queue under RCU. After peer_portid check (or for new queue
1652 	 * in BIND case), the queue is owned by the socket sending this message.
1653 	 * A socket cannot simultaneously send a message and close, so while
1654 	 * processing this CONFIG message, nfqnl_rcv_nl_event() (triggered by
1655 	 * socket close) cannot destroy this queue. Safe to use without RCU.
1656 	 */
1657 	rcu_read_lock();
1658 	queue = instance_lookup(q, queue_num);
1659 	if (queue && queue->peer_portid != NETLINK_CB(skb).portid) {
1660 		rcu_read_unlock();
1661 		return -EPERM;
1662 	}
1663 	rcu_read_unlock();
1664 
1665 	if (cmd != NULL) {
1666 		switch (cmd->command) {
1667 		case NFQNL_CFG_CMD_BIND:
1668 			if (queue)
1669 				return -EBUSY;
1670 			queue = instance_create(q, queue_num, NETLINK_CB(skb).portid);
1671 			if (IS_ERR(queue))
1672 				return PTR_ERR(queue);
1673 			break;
1674 		case NFQNL_CFG_CMD_UNBIND:
1675 			if (!queue)
1676 				return -ENODEV;
1677 			instance_destroy(q, queue);
1678 			return 0;
1679 		case NFQNL_CFG_CMD_PF_BIND:
1680 		case NFQNL_CFG_CMD_PF_UNBIND:
1681 			break;
1682 		default:
1683 			return -EOPNOTSUPP;
1684 		}
1685 	}
1686 
1687 	if (!queue)
1688 		return -ENODEV;
1689 
1690 	if (nfqa[NFQA_CFG_PARAMS]) {
1691 		struct nfqnl_msg_config_params *params =
1692 			nla_data(nfqa[NFQA_CFG_PARAMS]);
1693 
1694 		nfqnl_set_mode(queue, params->copy_mode,
1695 				ntohl(params->copy_range));
1696 	}
1697 
1698 	if (nfqa[NFQA_CFG_QUEUE_MAXLEN]) {
1699 		__be32 *queue_maxlen = nla_data(nfqa[NFQA_CFG_QUEUE_MAXLEN]);
1700 
1701 		spin_lock_bh(&queue->lock);
1702 		queue->queue_maxlen = ntohl(*queue_maxlen);
1703 		spin_unlock_bh(&queue->lock);
1704 	}
1705 
1706 	if (nfqa[NFQA_CFG_FLAGS]) {
1707 		spin_lock_bh(&queue->lock);
1708 		queue->flags &= ~mask;
1709 		queue->flags |= flags & mask;
1710 		spin_unlock_bh(&queue->lock);
1711 	}
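	/* Example: flags = NFQA_CFG_F_GSO with
	 * mask = NFQA_CFG_F_GSO | NFQA_CFG_F_FAIL_OPEN sets GSO, clears
	 * FAIL_OPEN and leaves all other flag bits untouched.
	 */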
1712 
1713 	return 0;
1714 }
1715 
1716 static const struct nfnl_callback nfqnl_cb[NFQNL_MSG_MAX] = {
1717 	[NFQNL_MSG_PACKET]	= {
1718 		.call		= nfqnl_recv_unsupp,
1719 		.type		= NFNL_CB_RCU,
1720 		.attr_count	= NFQA_MAX,
1721 	},
1722 	[NFQNL_MSG_VERDICT]	= {
1723 		.call		= nfqnl_recv_verdict,
1724 		.type		= NFNL_CB_RCU,
1725 		.attr_count	= NFQA_MAX,
1726 		.policy		= nfqa_verdict_policy
1727 	},
1728 	[NFQNL_MSG_CONFIG]	= {
1729 		.call		= nfqnl_recv_config,
1730 		.type		= NFNL_CB_MUTEX,
1731 		.attr_count	= NFQA_CFG_MAX,
1732 		.policy		= nfqa_cfg_policy
1733 	},
1734 	[NFQNL_MSG_VERDICT_BATCH] = {
1735 		.call		= nfqnl_recv_verdict_batch,
1736 		.type		= NFNL_CB_RCU,
1737 		.attr_count	= NFQA_MAX,
1738 		.policy		= nfqa_verdict_batch_policy
1739 	},
1740 };
1741 
1742 static const struct nfnetlink_subsystem nfqnl_subsys = {
1743 	.name		= "nf_queue",
1744 	.subsys_id	= NFNL_SUBSYS_QUEUE,
1745 	.cb_count	= NFQNL_MSG_MAX,
1746 	.cb		= nfqnl_cb,
1747 };
1748 
1749 #ifdef CONFIG_PROC_FS
1750 struct iter_state {
1751 	struct seq_net_private p;
1752 	unsigned int bucket;
1753 };
1754 
1755 static struct hlist_node *get_first(struct seq_file *seq)
1756 {
1757 	struct iter_state *st = seq->private;
1758 	struct net *net;
1759 	struct nfnl_queue_net *q;
1760 
1761 	if (!st)
1762 		return NULL;
1763 
1764 	net = seq_file_net(seq);
1765 	q = nfnl_queue_pernet(net);
1766 	for (st->bucket = 0; st->bucket < INSTANCE_BUCKETS; st->bucket++) {
1767 		if (!hlist_empty(&q->instance_table[st->bucket]))
1768 			return q->instance_table[st->bucket].first;
1769 	}
1770 	return NULL;
1771 }
1772 
1773 static struct hlist_node *get_next(struct seq_file *seq, struct hlist_node *h)
1774 {
1775 	struct iter_state *st = seq->private;
1776 	struct net *net = seq_file_net(seq);
1777 
1778 	h = h->next;
1779 	while (!h) {
1780 		struct nfnl_queue_net *q;
1781 
1782 		if (++st->bucket >= INSTANCE_BUCKETS)
1783 			return NULL;
1784 
1785 		q = nfnl_queue_pernet(net);
1786 		h = q->instance_table[st->bucket].first;
1787 	}
1788 	return h;
1789 }
1790 
1791 static struct hlist_node *get_idx(struct seq_file *seq, loff_t pos)
1792 {
1793 	struct hlist_node *head;
1794 	head = get_first(seq);
1795 
1796 	if (head)
1797 		while (pos && (head = get_next(seq, head)))
1798 			pos--;
1799 	return pos ? NULL : head;
1800 }
1801 
1802 static void *seq_start(struct seq_file *s, loff_t *pos)
1803 	__acquires(nfnl_queue_pernet(seq_file_net(s))->instances_lock)
1804 {
1805 	spin_lock(&nfnl_queue_pernet(seq_file_net(s))->instances_lock);
1806 	return get_idx(s, *pos);
1807 }
1808 
1809 static void *seq_next(struct seq_file *s, void *v, loff_t *pos)
1810 {
1811 	(*pos)++;
1812 	return get_next(s, v);
1813 }
1814 
1815 static void seq_stop(struct seq_file *s, void *v)
1816 	__releases(nfnl_queue_pernet(seq_file_net(s))->instances_lock)
1817 {
1818 	spin_unlock(&nfnl_queue_pernet(seq_file_net(s))->instances_lock);
1819 }
1820 
1821 static int seq_show(struct seq_file *s, void *v)
1822 {
1823 	const struct nfqnl_instance *inst = v;
1824 
1825 	seq_printf(s, "%5u %6u %5u %1u %5u %5u %5u %8u %2d\n",
1826 		   inst->queue_num,
1827 		   inst->peer_portid, inst->queue_total,
1828 		   inst->copy_mode, inst->copy_range,
1829 		   inst->queue_dropped, inst->queue_user_dropped,
1830 		   inst->id_sequence, 1);
1831 	return 0;
1832 }
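/* Example /proc/net/netfilter/nfnetlink_queue line, matching the format
 * string above (queue, portid, queued, copy mode, copy range, dropped,
 * user dropped, id sequence, literal 1):
 *
 *	    0   3251     0 2 65531     0     0      827  1
 */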
1833 
1834 static const struct seq_operations nfqnl_seq_ops = {
1835 	.start	= seq_start,
1836 	.next	= seq_next,
1837 	.stop	= seq_stop,
1838 	.show	= seq_show,
1839 };
1840 #endif /* PROC_FS */
1841 
1842 static int __net_init nfnl_queue_net_init(struct net *net)
1843 {
1844 	unsigned int i;
1845 	struct nfnl_queue_net *q = nfnl_queue_pernet(net);
1846 
1847 	for (i = 0; i < INSTANCE_BUCKETS; i++)
1848 		INIT_HLIST_HEAD(&q->instance_table[i]);
1849 
1850 	spin_lock_init(&q->instances_lock);
1851 
1852 #ifdef CONFIG_PROC_FS
1853 	if (!proc_create_net("nfnetlink_queue", 0440, net->nf.proc_netfilter,
1854 			&nfqnl_seq_ops, sizeof(struct iter_state)))
1855 		return -ENOMEM;
1856 #endif
1857 	return 0;
1858 }
1859 
1860 static void __net_exit nfnl_queue_net_exit(struct net *net)
1861 {
1862 	struct nfnl_queue_net *q = nfnl_queue_pernet(net);
1863 	unsigned int i;
1864 
1865 #ifdef CONFIG_PROC_FS
1866 	remove_proc_entry("nfnetlink_queue", net->nf.proc_netfilter);
1867 #endif
1868 	for (i = 0; i < INSTANCE_BUCKETS; i++)
1869 		WARN_ON_ONCE(!hlist_empty(&q->instance_table[i]));
1870 }
1871 
1872 static struct pernet_operations nfnl_queue_net_ops = {
1873 	.init		= nfnl_queue_net_init,
1874 	.exit		= nfnl_queue_net_exit,
1875 	.id		= &nfnl_queue_net_id,
1876 	.size		= sizeof(struct nfnl_queue_net),
1877 };
1878 
1879 static int __init nfnetlink_queue_init(void)
1880 {
1881 	int status;
1882 
1883 	status = rhashtable_init(&nfqnl_packet_map, &nfqnl_rhashtable_params);
1884 	if (status < 0)
1885 		return status;
1886 
1887 	status = register_pernet_subsys(&nfnl_queue_net_ops);
1888 	if (status < 0) {
1889 		pr_err("failed to register pernet ops\n");
1890 		goto cleanup_rhashtable;
1891 	}
1892 
1893 	netlink_register_notifier(&nfqnl_rtnl_notifier);
1894 	status = nfnetlink_subsys_register(&nfqnl_subsys);
1895 	if (status < 0) {
1896 		pr_err("failed to create netlink socket\n");
1897 		goto cleanup_netlink_notifier;
1898 	}
1899 
1900 	status = register_netdevice_notifier(&nfqnl_dev_notifier);
1901 	if (status < 0) {
1902 		pr_err("failed to register netdevice notifier\n");
1903 		goto cleanup_netlink_subsys;
1904 	}
1905 
1906 	nf_register_queue_handler(&nfqh);
1907 
1908 	return status;
1909 
1910 cleanup_netlink_subsys:
1911 	nfnetlink_subsys_unregister(&nfqnl_subsys);
1912 cleanup_netlink_notifier:
1913 	netlink_unregister_notifier(&nfqnl_rtnl_notifier);
1914 	unregister_pernet_subsys(&nfnl_queue_net_ops);
1915 cleanup_rhashtable:
1916 	rhashtable_destroy(&nfqnl_packet_map);
1917 	return status;
1918 }
1919 
1920 static void __exit nfnetlink_queue_fini(void)
1921 {
1922 	nf_unregister_queue_handler();
1923 	unregister_netdevice_notifier(&nfqnl_dev_notifier);
1924 	nfnetlink_subsys_unregister(&nfqnl_subsys);
1925 	netlink_unregister_notifier(&nfqnl_rtnl_notifier);
1926 	unregister_pernet_subsys(&nfnl_queue_net_ops);
1927 
1928 	rhashtable_destroy(&nfqnl_packet_map);
1929 
1930 	rcu_barrier(); /* Wait for completion of call_rcu()'s */
1931 }
1932 
1933 MODULE_DESCRIPTION("netfilter packet queue handler");
1934 MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
1935 MODULE_LICENSE("GPL");
1936 MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_QUEUE);
1937 
1938 module_init(nfnetlink_queue_init);
1939 module_exit(nfnetlink_queue_fini);
1940