/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Generic INET transport hashtables
 *
 * Authors:	Lotsa people, from code originally in tcp
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/wait.h>

#include <net/inet_connection_sock.h>
#include <net/inet_hashtables.h>
#include <net/ip.h>

/*
 * Allocate and initialize a new local port bind bucket.
 * The bindhash mutex for snum's hash chain must be held here.
 */
struct inet_bind_bucket *inet_bind_bucket_create(kmem_cache_t *cachep,
						 struct inet_bind_hashbucket *head,
						 const unsigned short snum)
{
	struct inet_bind_bucket *tb = kmem_cache_alloc(cachep, SLAB_ATOMIC);

	if (tb != NULL) {
		tb->port      = snum;
		tb->fastreuse = 0;
		INIT_HLIST_HEAD(&tb->owners);
		hlist_add_head(&tb->node, &head->chain);
	}
	return tb;
}

/*
 * Caller must hold hashbucket lock for this tb with local BH disabled
 */
void inet_bind_bucket_destroy(kmem_cache_t *cachep, struct inet_bind_bucket *tb)
{
	if (hlist_empty(&tb->owners)) {
		__hlist_del(&tb->node);
		kmem_cache_free(cachep, tb);
	}
}

void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
		    const unsigned short snum)
{
	inet_sk(sk)->num = snum;
	sk_add_bind_node(sk, &tb->owners);
	inet_csk(sk)->icsk_bind_hash = tb;
}

/*
 * Get rid of any references to a local port held by the given sock.
 */
static void __inet_put_port(struct inet_hashinfo *hashinfo, struct sock *sk)
{
	const int bhash = inet_bhashfn(inet_sk(sk)->num, hashinfo->bhash_size);
	struct inet_bind_hashbucket *head = &hashinfo->bhash[bhash];
	struct inet_bind_bucket *tb;

	spin_lock(&head->lock);
	tb = inet_csk(sk)->icsk_bind_hash;
	__sk_del_bind_node(sk);
	inet_csk(sk)->icsk_bind_hash = NULL;
	inet_sk(sk)->num = 0;
	inet_bind_bucket_destroy(hashinfo->bind_bucket_cachep, tb);
	spin_unlock(&head->lock);
}

void inet_put_port(struct inet_hashinfo *hashinfo, struct sock *sk)
{
	local_bh_disable();
	__inet_put_port(hashinfo, sk);
	local_bh_enable();
}

EXPORT_SYMBOL(inet_put_port);
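/*
 * For reference, a bind-time caller such as inet_csk_get_port() is expected
 * to use the helpers above while holding the bhash chain lock with local
 * BHs disabled, roughly along these lines (hashinfo, sk and snum stand for
 * the caller's own variables):
 *
 *	struct inet_bind_hashbucket *head =
 *		&hashinfo->bhash[inet_bhashfn(snum, hashinfo->bhash_size)];
 *	struct inet_bind_bucket *tb;
 *	struct hlist_node *node;
 *
 *	spin_lock_bh(&head->lock);
 *	inet_bind_bucket_for_each(tb, node, &head->chain)
 *		if (tb->port == snum)
 *			goto tb_found;
 *	tb = inet_bind_bucket_create(hashinfo->bind_bucket_cachep, head, snum);
 *	if (tb == NULL) {
 *		spin_unlock_bh(&head->lock);
 *		return -ENOMEM;
 *	}
 * tb_found:
 *	inet_bind_hash(sk, tb, snum);
 *	spin_unlock_bh(&head->lock);
 *	return 0;
 *
 * inet_put_port() undoes this once the socket releases the port.
 */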
/*
 * Waiting here without WQ_FLAG_EXCLUSIVE is fine on UP, but it can be very
 * bad on SMP: when several writers sleep and the reader wakes them up, all
 * but one immediately hit the write lock and grab all the CPUs. An exclusive
 * sleep solves this, _but_ remember that it adds useless work on UP machines
 * (a wakeup on each exclusive lock release). It should really be ifdefed.
 */
void inet_listen_wlock(struct inet_hashinfo *hashinfo)
{
	write_lock(&hashinfo->lhash_lock);

	if (atomic_read(&hashinfo->lhash_users)) {
		DEFINE_WAIT(wait);

		for (;;) {
			prepare_to_wait_exclusive(&hashinfo->lhash_wait,
						  &wait, TASK_UNINTERRUPTIBLE);
			if (!atomic_read(&hashinfo->lhash_users))
				break;
			write_unlock_bh(&hashinfo->lhash_lock);
			schedule();
			write_lock_bh(&hashinfo->lhash_lock);
		}

		finish_wait(&hashinfo->lhash_wait, &wait);
	}
}

EXPORT_SYMBOL(inet_listen_wlock);

/*
 * Don't inline this cruft. There are some nice properties to exploit here:
 * the BSD API does not allow a listening sock to specify the remote port or
 * the remote address for the connection, so always assume both are
 * wildcarded during the search since they can never be otherwise.
 */
struct sock *__inet_lookup_listener(const struct hlist_head *head, const u32 daddr,
				    const unsigned short hnum, const int dif)
{
	struct sock *result = NULL, *sk;
	const struct hlist_node *node;
	int hiscore = -1;

	sk_for_each(sk, node, head) {
		const struct inet_sock *inet = inet_sk(sk);

		if (inet->num == hnum && !ipv6_only_sock(sk)) {
			const __u32 rcv_saddr = inet->rcv_saddr;
			int score = sk->sk_family == PF_INET ? 1 : 0;

			if (rcv_saddr) {
				if (rcv_saddr != daddr)
					continue;
				score += 2;
			}
			if (sk->sk_bound_dev_if) {
				if (sk->sk_bound_dev_if != dif)
					continue;
				score += 2;
			}
			if (score == 5)
				return sk;
			if (score > hiscore) {
				hiscore	= score;
				result	= sk;
			}
		}
	}
	return result;
}

EXPORT_SYMBOL_GPL(__inet_lookup_listener);
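/*
 * Callers are expected to pick the listening chain and pin the result
 * themselves, roughly along these lines (hashinfo, daddr, hnum and dif
 * stand for the caller's own values):
 *
 *	struct sock *sk;
 *
 *	read_lock(&hashinfo->lhash_lock);
 *	sk = __inet_lookup_listener(&hashinfo->listening_hash[inet_lhashfn(hnum)],
 *				    daddr, hnum, dif);
 *	if (sk)
 *		sock_hold(sk);
 *	read_unlock(&hashinfo->lhash_lock);
 *
 * With the scoring above, an IPv4 listener on INADDR_ANY scores 1, one
 * bound to daddr scores 3, and one bound to both daddr and the incoming
 * device scores 5 and ends the walk immediately.
 */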
/* called with local bh disabled */
static int __inet_check_established(struct inet_timewait_death_row *death_row,
				    struct sock *sk, __u16 lport,
				    struct inet_timewait_sock **twp)
{
	struct inet_hashinfo *hinfo = death_row->hashinfo;
	struct inet_sock *inet = inet_sk(sk);
	u32 daddr = inet->rcv_saddr;
	u32 saddr = inet->daddr;
	int dif = sk->sk_bound_dev_if;
	INET_ADDR_COOKIE(acookie, saddr, daddr)
	const __u32 ports = INET_COMBINED_PORTS(inet->dport, lport);
	unsigned int hash = inet_ehashfn(daddr, lport, saddr, inet->dport);
	struct inet_ehash_bucket *head = inet_ehash_bucket(hinfo, hash);
	struct sock *sk2;
	const struct hlist_node *node;
	struct inet_timewait_sock *tw;

	prefetch(head->chain.first);
	write_lock(&head->lock);

	/* Check TIME-WAIT sockets first. */
	sk_for_each(sk2, node, &(head + hinfo->ehash_size)->chain) {
		tw = inet_twsk(sk2);

		if (INET_TW_MATCH(sk2, hash, acookie, saddr, daddr, ports, dif)) {
			if (twsk_unique(sk, sk2, twp))
				goto unique;
			else
				goto not_unique;
		}
	}
	tw = NULL;

	/* And established part... */
	sk_for_each(sk2, node, &head->chain) {
		if (INET_MATCH(sk2, hash, acookie, saddr, daddr, ports, dif))
			goto not_unique;
	}

unique:
	/* Must record num and sport now. Otherwise we will see
	 * a socket with a funny identity in the hash table. */
	inet->num = lport;
	inet->sport = htons(lport);
	sk->sk_hash = hash;
	BUG_TRAP(sk_unhashed(sk));
	__sk_add_node(sk, &head->chain);
	sock_prot_inc_use(sk->sk_prot);
	write_unlock(&head->lock);

	if (twp) {
		*twp = tw;
		NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED);
	} else if (tw) {
		/* Silly. Should hash-dance instead... */
		inet_twsk_deschedule(tw, death_row);
		NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED);

		inet_twsk_put(tw);
	}

	return 0;

not_unique:
	write_unlock(&head->lock);
	return -EADDRNOTAVAIL;
}

static inline u32 inet_sk_port_offset(const struct sock *sk)
{
	const struct inet_sock *inet = inet_sk(sk);
	return secure_ipv4_port_ephemeral(inet->rcv_saddr, inet->daddr,
					  inet->dport);
}

/*
 * Bind a port for a connect operation and hash it.
 */
int inet_hash_connect(struct inet_timewait_death_row *death_row,
		      struct sock *sk)
{
	struct inet_hashinfo *hinfo = death_row->hashinfo;
	const unsigned short snum = inet_sk(sk)->num;
	struct inet_bind_hashbucket *head;
	struct inet_bind_bucket *tb;
	int ret;

	if (!snum) {
		int low = sysctl_local_port_range[0];
		int high = sysctl_local_port_range[1];
		int range = high - low;
		int i;
		int port;
		static u32 hint;
		u32 offset = hint + inet_sk_port_offset(sk);
		struct hlist_node *node;
		struct inet_timewait_sock *tw = NULL;

		local_bh_disable();
		for (i = 1; i <= range; i++) {
			port = low + (i + offset) % range;
			head = &hinfo->bhash[inet_bhashfn(port, hinfo->bhash_size)];
			spin_lock(&head->lock);

			/* Does not bother with rcv_saddr checks,
			 * because the established check is already
			 * unique enough.
			 */
			inet_bind_bucket_for_each(tb, node, &head->chain) {
				if (tb->port == port) {
					BUG_TRAP(!hlist_empty(&tb->owners));
					if (tb->fastreuse >= 0)
						goto next_port;
					if (!__inet_check_established(death_row,
								      sk, port,
								      &tw))
						goto ok;
					goto next_port;
				}
			}

			tb = inet_bind_bucket_create(hinfo->bind_bucket_cachep, head, port);
			if (!tb) {
				spin_unlock(&head->lock);
				break;
			}
			tb->fastreuse = -1;
			goto ok;

		next_port:
			spin_unlock(&head->lock);
		}
		local_bh_enable();

		return -EADDRNOTAVAIL;

ok:
		hint += i;

		/* Head lock still held and bh's disabled */
		inet_bind_hash(sk, tb, port);
		if (sk_unhashed(sk)) {
			inet_sk(sk)->sport = htons(port);
			__inet_hash(hinfo, sk, 0);
		}
		spin_unlock(&head->lock);

		if (tw) {
			inet_twsk_deschedule(tw, death_row);
			inet_twsk_put(tw);
		}

		ret = 0;
		goto out;
	}

	head = &hinfo->bhash[inet_bhashfn(snum, hinfo->bhash_size)];
	tb   = inet_csk(sk)->icsk_bind_hash;
	spin_lock_bh(&head->lock);
	if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) {
		__inet_hash(hinfo, sk, 0);
		spin_unlock_bh(&head->lock);
		return 0;
	} else {
		spin_unlock(&head->lock);
		/* No definite answer... Walk to established hash table */
		ret = __inet_check_established(death_row, sk, snum, NULL);
out:
		local_bh_enable();
		return ret;
	}
}

EXPORT_SYMBOL_GPL(inet_hash_connect);
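/*
 * Note on the ephemeral port walk in inet_hash_connect(): the search starts
 * at hint + inet_sk_port_offset(sk), a pseudo-random per-destination offset,
 * and then tries every port in the local range once.  For example, with
 * low = 32768, high = 61000 (range = 28232) and the offset landing on 1000,
 * the candidates are 33769, 33770, ..., 60999, then 32768, 32769, ... up to
 * 33768.  On success, hint is advanced by the number of ports probed so the
 * next unbound connect() continues further along the range rather than
 * re-probing the same ports.
 */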