#ifndef __NET_FRAG_H__
#define __NET_FRAG_H__

#include <linux/percpu_counter.h>

struct netns_frags {
	int			nqueues;
	struct list_head	lru_list;
	spinlock_t		lru_lock;

	/* The percpu_counter "mem" needs to be cacheline aligned.
	 * mem.count must not share a cacheline with other writers.
	 */
	struct percpu_counter	mem ____cacheline_aligned_in_smp;

	/* sysctls */
	int			timeout;
	int			high_thresh;
	int			low_thresh;
};

struct inet_frag_queue {
	spinlock_t		lock;
	struct timer_list	timer;		/* when will this queue expire? */
	struct list_head	lru_list;	/* lru list member */
	struct hlist_node	list;
	atomic_t		refcnt;
	struct sk_buff		*fragments;	/* list of received fragments */
	struct sk_buff		*fragments_tail;
	ktime_t			stamp;
	int			len;		/* total length of orig datagram */
	int			meat;
	__u8			last_in;	/* first/last segment arrived? */

#define INET_FRAG_COMPLETE	4
#define INET_FRAG_FIRST_IN	2
#define INET_FRAG_LAST_IN	1

	u16			max_size;

	struct netns_frags	*net;
};

#define INETFRAGS_HASHSZ	64

/* averaged:
 * max_depth = default ipfrag_high_thresh / INETFRAGS_HASHSZ /
 *	       rounded up (SKB_TRUELEN(0) + sizeof(struct ipq or
 *	       struct frag_queue))
 */
#define INETFRAGS_MAXDEPTH	128

struct inet_frags {
	struct hlist_head	hash[INETFRAGS_HASHSZ];
	/* This rwlock is a global lock (separate per IPv4, IPv6 and
	 * netfilter). Important to keep this on a separate cacheline.
	 */
	rwlock_t		lock ____cacheline_aligned_in_smp;
	int			secret_interval;
	struct timer_list	secret_timer;
	u32			rnd;
	int			qsize;

	unsigned int		(*hashfn)(struct inet_frag_queue *);
	bool			(*match)(struct inet_frag_queue *q, void *arg);
	void			(*constructor)(struct inet_frag_queue *q,
					       void *arg);
	void			(*destructor)(struct inet_frag_queue *);
	void			(*skb_free)(struct sk_buff *);
	void			(*frag_expire)(unsigned long data);
};

void inet_frags_init(struct inet_frags *);
void inet_frags_fini(struct inet_frags *);

void inet_frags_init_net(struct netns_frags *nf);
void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f);

void inet_frag_kill(struct inet_frag_queue *q, struct inet_frags *f);
void inet_frag_destroy(struct inet_frag_queue *q,
		       struct inet_frags *f, int *work);
int inet_frag_evictor(struct netns_frags *nf, struct inet_frags *f, bool force);
struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
		struct inet_frags *f, void *key, unsigned int hash)
	__releases(&f->lock);
void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q,
				   const char *prefix);

static inline void inet_frag_put(struct inet_frag_queue *q, struct inet_frags *f)
{
	if (atomic_dec_and_test(&q->refcnt))
		inet_frag_destroy(q, f, NULL);
}
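
/*
 * Usage note (illustrative sketch, not part of the original header): the
 * lookup/refcount pattern callers of this API follow.  A protocol's
 * reassembly code takes the per-family hash read-lock, calls
 * inet_frag_find() (which returns the queue with an elevated refcount and
 * releases f->lock, per the __releases() annotation above), works on the
 * queue under q->lock, and finally drops its reference with
 * inet_frag_put().  The "my_frags", "my_arg" and my_hashfn() identifiers
 * below are made up for the example; only the inet_frag_*() calls are real.
 *
 *	struct inet_frag_queue *q;
 *	unsigned int hash;
 *
 *	read_lock(&my_frags.lock);
 *	hash = my_hashfn(&my_arg);
 *	q = inet_frag_find(nf, &my_frags, &my_arg, hash);
 *	if (IS_ERR_OR_NULL(q)) {
 *		inet_frag_maybe_warn_overflow(q, __func__);
 *		return -ENOMEM;
 *	}
 *
 *	spin_lock(&q->lock);
 *	... queue the fragment, update q->meat and q->len ...
 *	spin_unlock(&q->lock);
 *
 *	inet_frag_put(q, &my_frags);
 */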

/* Memory Tracking Functions. */

/* The default percpu_counter batch size is not big enough to scale to
 * fragmentation mem acct sizes.
 * The mem size of a 64K fragment is approx:
 * (44 fragments * 2944 truesize) + frag_queue struct(200) = 129736 bytes
 */
static unsigned int frag_percpu_counter_batch = 130000;

static inline int frag_mem_limit(struct netns_frags *nf)
{
	return percpu_counter_read(&nf->mem);
}

static inline void sub_frag_mem_limit(struct inet_frag_queue *q, int i)
{
	__percpu_counter_add(&q->net->mem, -i, frag_percpu_counter_batch);
}

static inline void add_frag_mem_limit(struct inet_frag_queue *q, int i)
{
	__percpu_counter_add(&q->net->mem, i, frag_percpu_counter_batch);
}

static inline void init_frag_mem_limit(struct netns_frags *nf)
{
	percpu_counter_init(&nf->mem, 0);
}

static inline int sum_frag_mem_limit(struct netns_frags *nf)
{
	int res;

	local_bh_disable();
	res = percpu_counter_sum_positive(&nf->mem);
	local_bh_enable();

	return res;
}

static inline void inet_frag_lru_move(struct inet_frag_queue *q)
{
	spin_lock(&q->net->lru_lock);
	list_move_tail(&q->lru_list, &q->net->lru_list);
	spin_unlock(&q->net->lru_lock);
}

static inline void inet_frag_lru_del(struct inet_frag_queue *q)
{
	spin_lock(&q->net->lru_lock);
	list_del(&q->lru_list);
	spin_unlock(&q->net->lru_lock);
}

static inline void inet_frag_lru_add(struct netns_frags *nf,
				     struct inet_frag_queue *q)
{
	spin_lock(&nf->lru_lock);
	list_add_tail(&q->lru_list, &nf->lru_list);
	spin_unlock(&nf->lru_lock);
}
#endif
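
/*
 * Usage note (illustrative sketch appended for documentation; not part of
 * the original header): how the memory-accounting and LRU helpers above are
 * typically combined by a reassembly implementation.  The surrounding
 * fragment-queueing code is assumed; only the helper calls are real.
 *
 *	// A newly created queue is charged for its own struct and placed on
 *	// the per-netns LRU list:
 *	add_frag_mem_limit(q, f->qsize);
 *	inet_frag_lru_add(nf, q);
 *
 *	// Each received fragment skb is charged by its truesize and the
 *	// queue is refreshed on the LRU:
 *	add_frag_mem_limit(q, skb->truesize);
 *	inet_frag_lru_move(q);
 *
 *	// Destruction/eviction undoes the accounting and unlinks the queue:
 *	sub_frag_mem_limit(q, skb->truesize);	// per queued fragment
 *	sub_frag_mem_limit(q, f->qsize);	// for the queue itself
 *	inet_frag_lru_del(q);
 *
 * frag_mem_limit() is typically compared against nf->high_thresh and
 * nf->low_thresh to decide when inet_frag_evictor() should reclaim the
 * oldest queues.
 */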