xref: /linux/include/net/inet_frag.h (revision d91517839e5d95adc0cf4b28caa7af62a71de526)
#ifndef __NET_FRAG_H__
#define __NET_FRAG_H__

#include <linux/percpu_counter.h>

struct netns_frags {
	int			nqueues;
	struct list_head	lru_list;
	spinlock_t		lru_lock;

	/* The percpu_counter "mem" needs to be cacheline aligned.
	 * mem.count must not share a cacheline with other writers.
	 */
	struct percpu_counter   mem ____cacheline_aligned_in_smp;

	/* sysctls */
	int			timeout;
	int			high_thresh;
	int			low_thresh;
};
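
/* Illustrative sketch (not part of this header): a protocol's per-netns
 * setup fills in the sysctl fields before calling inet_frags_init_net().
 * The values below are assumptions modeled on the IPv4 defaults of this
 * era, not definitive:
 *
 *	net->ipv4.frags.high_thresh = 256 * 1024;
 *	net->ipv4.frags.low_thresh  = 192 * 1024;
 *	net->ipv4.frags.timeout     = IP_FRAG_TIME;
 *	inet_frags_init_net(&net->ipv4.frags);
 */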

struct inet_frag_queue {
	spinlock_t		lock;
	struct timer_list	timer;      /* when will this queue expire? */
	struct list_head	lru_list;   /* lru list member */
	struct hlist_node	list;       /* hash bucket chain member */
	atomic_t		refcnt;
	struct sk_buff		*fragments; /* list of received fragments */
	struct sk_buff		*fragments_tail;
	ktime_t			stamp;
	int			len;        /* total length of orig datagram */
	int			meat;       /* length of received fragments */
	__u8			last_in;    /* first/last segment arrived? */

#define INET_FRAG_COMPLETE	4
#define INET_FRAG_FIRST_IN	2
#define INET_FRAG_LAST_IN	1

	u16			max_size;   /* maximum received fragment size */

	struct netns_frags	*net;
};
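
/* Illustrative sketch (assumption, modeled on the IPv4 reassembly path):
 * a datagram is complete once both the first and last fragments have
 * arrived and the received data covers the original length, i.e.
 *
 *	if (q->last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
 *	    q->meat == q->len)
 *		... reassemble ...
 */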

#define INETFRAGS_HASHSZ	1024

/* Averaged:
 * max_depth = default ipfrag_high_thresh / INETFRAGS_HASHSZ /
 *	       (SKB_TRUESIZE(0) + sizeof(struct ipq or struct frag_queue)),
 *	       rounded up
 */
#define INETFRAGS_MAXDEPTH		128

struct inet_frag_bucket {
	struct hlist_head	chain;
	spinlock_t		chain_lock;
};

struct inet_frags {
	struct inet_frag_bucket	hash[INETFRAGS_HASHSZ];
	/* This rwlock is a global lock (separate per IPv4, IPv6 and
	 * netfilter). It is important to keep it on a separate cacheline.
	 * It is primarily a rebuild-protection rwlock.
	 */
	rwlock_t		lock ____cacheline_aligned_in_smp;
	int			secret_interval;
	struct timer_list	secret_timer;

	/* The first call to hashfn is responsible for initializing
	 * rnd. This is best done with net_get_random_once.
	 */
	u32			rnd;
	int			qsize;

	unsigned int		(*hashfn)(struct inet_frag_queue *);
	bool			(*match)(struct inet_frag_queue *q, void *arg);
	void			(*constructor)(struct inet_frag_queue *q,
						void *arg);
	void			(*destructor)(struct inet_frag_queue *);
	void			(*skb_free)(struct sk_buff *);
	void			(*frag_expire)(unsigned long data);
};

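/* Illustrative sketch (assumption, modeled on the IPv4 setup in
 * ip_fragment.c): a protocol fills in the ops table once at boot and
 * registers it with inet_frags_init(). The ip4_* names are examples:
 *
 *	ip4_frags.hashfn	= ip4_hashfn;
 *	ip4_frags.constructor	= ip4_frag_init;
 *	ip4_frags.destructor	= ip4_frag_free;
 *	ip4_frags.skb_free	= NULL;
 *	ip4_frags.qsize		= sizeof(struct ipq);
 *	ip4_frags.match		= ip4_frag_match;
 *	ip4_frags.frag_expire	= ip_expire;
 *	ip4_frags.secret_interval = 10 * 60 * HZ;
 *	inet_frags_init(&ip4_frags);
 */
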
void inet_frags_init(struct inet_frags *);
void inet_frags_fini(struct inet_frags *);

void inet_frags_init_net(struct netns_frags *nf);
void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f);

void inet_frag_kill(struct inet_frag_queue *q, struct inet_frags *f);
void inet_frag_destroy(struct inet_frag_queue *q,
				struct inet_frags *f, int *work);
int inet_frag_evictor(struct netns_frags *nf, struct inet_frags *f, bool force);
struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
		struct inet_frags *f, void *key, unsigned int hash)
	__releases(&f->lock);
void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q,
				   const char *prefix);

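/* Illustrative sketch (assumption, modeled on the IPv4 ip_find() caller):
 * inet_frag_find() is entered with f->lock read-held and releases it, as
 * the __releases() annotation above documents:
 *
 *	read_lock(&ip4_frags.lock);
 *	hash = ipqhashfn(iph->id, iph->saddr, iph->daddr, iph->protocol);
 *	q = inet_frag_find(&net->ipv4.frags, &ip4_frags, &arg, hash);
 *	if (IS_ERR_OR_NULL(q)) {
 *		inet_frag_maybe_warn_overflow(q, pr_fmt());
 *		return NULL;
 *	}
 */
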
static inline void inet_frag_put(struct inet_frag_queue *q, struct inet_frags *f)
{
	if (atomic_dec_and_test(&q->refcnt))
		inet_frag_destroy(q, f, NULL);
}

/* Memory Tracking Functions. */

/* The default percpu_counter batch size is not big enough to scale to
 * the fragmentation memory accounting sizes.
 * The memory footprint of a 64K datagram's worth of fragments is approx:
 *  (44 fragments * 2944 truesize) + frag_queue struct(200) = 129736 bytes
 */
static unsigned int frag_percpu_counter_batch = 130000;

static inline int frag_mem_limit(struct netns_frags *nf)
{
	return percpu_counter_read(&nf->mem);
}

static inline void sub_frag_mem_limit(struct inet_frag_queue *q, int i)
{
	__percpu_counter_add(&q->net->mem, -i, frag_percpu_counter_batch);
}

static inline void add_frag_mem_limit(struct inet_frag_queue *q, int i)
{
	__percpu_counter_add(&q->net->mem, i, frag_percpu_counter_batch);
}

static inline void init_frag_mem_limit(struct netns_frags *nf)
{
	percpu_counter_init(&nf->mem, 0);
}

static inline int sum_frag_mem_limit(struct netns_frags *nf)
{
	int res;

	local_bh_disable();
	res = percpu_counter_sum_positive(&nf->mem);
	local_bh_enable();

	return res;
}

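/* Illustrative sketch (assumption, modeled on inet_frag_evictor()): the
 * eviction path pairs frag_mem_limit() with the netns_frags thresholds,
 * along the lines of:
 *
 *	if (!force && frag_mem_limit(nf) <= nf->high_thresh)
 *		return 0;
 *	work = frag_mem_limit(nf) - nf->low_thresh;
 *	while (work > 0 || force)
 *		... evict the least recently used queue ...
 */
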
/* Mark @q most recently used by moving it to the LRU tail. */
static inline void inet_frag_lru_move(struct inet_frag_queue *q)
{
	spin_lock(&q->net->lru_lock);
	if (!list_empty(&q->lru_list))
		list_move_tail(&q->lru_list, &q->net->lru_list);
	spin_unlock(&q->net->lru_lock);
}

/* Unlink @q from the LRU list and drop the queue count. */
static inline void inet_frag_lru_del(struct inet_frag_queue *q)
{
	spin_lock(&q->net->lru_lock);
	list_del_init(&q->lru_list);
	q->net->nqueues--;
	spin_unlock(&q->net->lru_lock);
}

/* Link a new queue onto the LRU tail and bump the queue count. */
static inline void inet_frag_lru_add(struct netns_frags *nf,
				     struct inet_frag_queue *q)
{
	spin_lock(&nf->lru_lock);
	list_add_tail(&q->lru_list, &nf->lru_list);
	q->net->nqueues++;
	spin_unlock(&nf->lru_lock);
}

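/* Illustrative lifecycle (assumption, based on how the reassembly paths
 * use these helpers): inet_frag_lru_add() when a new queue is interned,
 * inet_frag_lru_move() each time a further fragment arrives, and
 * inet_frag_lru_del() from inet_frag_kill() when the queue completes,
 * times out or is evicted.
 */
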
/* RFC 3168 support:
 * We want to check the ECN values of all fragments, to detect invalid
 * combinations. In ipq->ecn, we store the OR of each fragment's
 * ip4_frag_ecn() value.
 */
#define	IPFRAG_ECN_NOT_ECT	0x01 /* one frag had ECN_NOT_ECT */
#define	IPFRAG_ECN_ECT_1	0x02 /* one frag had ECN_ECT_1 */
#define	IPFRAG_ECN_ECT_0	0x04 /* one frag had ECN_ECT_0 */
#define	IPFRAG_ECN_CE		0x08 /* one frag had ECN_CE */

extern const u8 ip_frag_ecn_table[16];

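/* Illustrative sketch (assumption, modeled on the IPv4 reassembly path):
 * the accumulated OR of the per-fragment flags indexes this table to
 * pick the ECN field of the reassembled datagram, with 0xff marking an
 * invalid combination:
 *
 *	ecn = ip_frag_ecn_table[qp->ecn];
 *	if (unlikely(ecn == 0xff))
 *		goto out_fail;
 */
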
#endif