/* net/ipv4/inet_connection_sock.c (revision f8324e20f8289dffc646d64366332e05eaacab25) */
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Support for INET connection oriented protocols.
 *
 * Authors:	See the TCP sources
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/jhash.h>

#include <net/inet_connection_sock.h>
#include <net/inet_hashtables.h>
#include <net/inet_timewait_sock.h>
#include <net/ip.h>
#include <net/route.h>
#include <net/tcp_states.h>
#include <net/xfrm.h>

#ifdef INET_CSK_DEBUG
const char inet_csk_timer_bug_msg[] = "inet_csk BUG: unknown timer value\n";
EXPORT_SYMBOL(inet_csk_timer_bug_msg);
#endif

/*
 * This struct holds the first and last local port number.
 */
struct local_ports sysctl_local_ports __read_mostly = {
	.lock = SEQLOCK_UNLOCKED,
	.range = { 32768, 61000 },
};

unsigned long *sysctl_local_reserved_ports;
EXPORT_SYMBOL(sysctl_local_reserved_ports);

void inet_get_local_port_range(int *low, int *high)
{
	unsigned seq;
	do {
		seq = read_seqbegin(&sysctl_local_ports.lock);

		*low = sysctl_local_ports.range[0];
		*high = sysctl_local_ports.range[1];
	} while (read_seqretry(&sysctl_local_ports.lock, seq));
}
EXPORT_SYMBOL(inet_get_local_port_range);
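
/*
 * For reference, a sketch (assumed from the sysctl write path in
 * net/ipv4/sysctl_net_ipv4.c; details may differ, and low/high here are
 * illustrative locals) of how the range is updated under the write side
 * of the same seqlock, which is what makes the retry loop above necessary:
 *
 *	write_seqlock(&sysctl_local_ports.lock);
 *	sysctl_local_ports.range[0] = low;
 *	sysctl_local_ports.range[1] = high;
 *	write_sequnlock(&sysctl_local_ports.lock);
 */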

int inet_csk_bind_conflict(const struct sock *sk,
			   const struct inet_bind_bucket *tb)
{
	const __be32 sk_rcv_saddr = inet_rcv_saddr(sk);
	struct sock *sk2;
	struct hlist_node *node;
	int reuse = sk->sk_reuse;

	/*
	 * Unlike other sk lookup places we do not check
	 * for sk_net here, since _all_ the socks listed
	 * in the tb->owners list belong to the same net - the
	 * one this bucket belongs to.
	 */

	sk_for_each_bound(sk2, node, &tb->owners) {
		if (sk != sk2 &&
		    !inet_v6_ipv6only(sk2) &&
		    (!sk->sk_bound_dev_if ||
		     !sk2->sk_bound_dev_if ||
		     sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
			if (!reuse || !sk2->sk_reuse ||
			    sk2->sk_state == TCP_LISTEN) {
				const __be32 sk2_rcv_saddr = inet_rcv_saddr(sk2);
				if (!sk2_rcv_saddr || !sk_rcv_saddr ||
				    sk2_rcv_saddr == sk_rcv_saddr)
					break;
			}
		}
	}
	return node != NULL;
}

EXPORT_SYMBOL_GPL(inet_csk_bind_conflict);
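
/*
 * Address families reach this test through icsk_af_ops->bind_conflict.
 * For plain IPv4 TCP the hookup lives in ipv4_specific in
 * net/ipv4/tcp_ipv4.c and looks roughly like this (sketch, other
 * members elided):
 *
 *	const struct inet_connection_sock_af_ops ipv4_specific = {
 *		...
 *		.bind_conflict = inet_csk_bind_conflict,
 *	};
 */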

/* Obtain a reference to a local port for the given sock;
 * if snum is zero, select any available local port.
 */
int inet_csk_get_port(struct sock *sk, unsigned short snum)
{
	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
	struct inet_bind_hashbucket *head;
	struct hlist_node *node;
	struct inet_bind_bucket *tb;
	int ret, attempts = 5;
	struct net *net = sock_net(sk);
	int smallest_size = -1, smallest_rover;

	local_bh_disable();
	if (!snum) {
		int remaining, rover, low, high;

again:
		inet_get_local_port_range(&low, &high);
		remaining = (high - low) + 1;
		smallest_rover = rover = net_random() % remaining + low;

		smallest_size = -1;
		do {
			if (inet_is_reserved_local_port(rover))
				goto next_nolock;
			head = &hashinfo->bhash[inet_bhashfn(net, rover,
					hashinfo->bhash_size)];
			spin_lock(&head->lock);
			inet_bind_bucket_for_each(tb, node, &head->chain)
				if (net_eq(ib_net(tb), net) && tb->port == rover) {
					if (tb->fastreuse > 0 &&
					    sk->sk_reuse &&
					    sk->sk_state != TCP_LISTEN &&
					    (tb->num_owners < smallest_size || smallest_size == -1)) {
						smallest_size = tb->num_owners;
						smallest_rover = rover;
						if (atomic_read(&hashinfo->bsockets) > (high - low) + 1) {
							spin_unlock(&head->lock);
							snum = smallest_rover;
							goto have_snum;
						}
					}
					goto next;
				}
			break;
		next:
			spin_unlock(&head->lock);
		next_nolock:
			if (++rover > high)
				rover = low;
		} while (--remaining > 0);

		/* Exhausted local port range during search?  It is not
		 * possible for us to be holding one of the bind hash
		 * locks if this test triggers, because if 'remaining'
		 * drops to zero, we broke out of the do/while loop at
		 * the top level, not from the 'break;' statement.
		 */
		ret = 1;
		if (remaining <= 0) {
			if (smallest_size != -1) {
				snum = smallest_rover;
				goto have_snum;
			}
			goto fail;
		}
		/* OK, here is the one we will use.  HEAD is
		 * non-NULL and we hold its lock.
		 */
		snum = rover;
	} else {
have_snum:
		head = &hashinfo->bhash[inet_bhashfn(net, snum,
				hashinfo->bhash_size)];
		spin_lock(&head->lock);
		inet_bind_bucket_for_each(tb, node, &head->chain)
			if (net_eq(ib_net(tb), net) && tb->port == snum)
				goto tb_found;
	}
	tb = NULL;
	goto tb_not_found;
tb_found:
	if (!hlist_empty(&tb->owners)) {
		if (tb->fastreuse > 0 &&
		    sk->sk_reuse && sk->sk_state != TCP_LISTEN &&
		    smallest_size == -1) {
			goto success;
		} else {
			ret = 1;
			if (inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb)) {
				if (sk->sk_reuse && sk->sk_state != TCP_LISTEN &&
				    smallest_size != -1 && --attempts >= 0) {
					spin_unlock(&head->lock);
					goto again;
				}
				goto fail_unlock;
			}
		}
	}
tb_not_found:
	ret = 1;
	if (!tb && (tb = inet_bind_bucket_create(hashinfo->bind_bucket_cachep,
					net, head, snum)) == NULL)
		goto fail_unlock;
	if (hlist_empty(&tb->owners)) {
		if (sk->sk_reuse && sk->sk_state != TCP_LISTEN)
			tb->fastreuse = 1;
		else
			tb->fastreuse = 0;
	} else if (tb->fastreuse &&
		   (!sk->sk_reuse || sk->sk_state == TCP_LISTEN))
		tb->fastreuse = 0;
success:
	if (!inet_csk(sk)->icsk_bind_hash)
		inet_bind_hash(sk, tb, snum);
	WARN_ON(inet_csk(sk)->icsk_bind_hash != tb);
	ret = 0;

fail_unlock:
	spin_unlock(&head->lock);
fail:
	local_bh_enable();
	return ret;
}

EXPORT_SYMBOL_GPL(inet_csk_get_port);
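
/*
 * Protocols reach this through sk->sk_prot->get_port (e.g. tcp_prot
 * sets .get_port = inet_csk_get_port), so a bind() or the listen path
 * below ends up doing roughly:
 *
 *	if (sk->sk_prot->get_port(sk, snum))
 *		return -EADDRINUSE;
 */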

/*
 * Wait for an incoming connection, avoid race conditions. This must be called
 * with the socket locked.
 */
static int inet_csk_wait_for_connect(struct sock *sk, long timeo)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	DEFINE_WAIT(wait);
	int err;

	/*
	 * True wake-one mechanism for incoming connections: only
	 * one process gets woken up, not the 'whole herd'.
	 * Since we do not 'race & poll' for established sockets
	 * anymore, the common case will execute the loop only once.
	 *
	 * Subtle issue: "add_wait_queue_exclusive()" will be added
	 * after any current non-exclusive waiters, and we know that
	 * it will always _stay_ after any new non-exclusive waiters
	 * because all non-exclusive waiters are added at the
	 * beginning of the wait-queue. As such, it's ok to "drop"
	 * our exclusiveness temporarily when we get woken up without
	 * having to remove and re-insert us on the wait queue.
	 */
	for (;;) {
		prepare_to_wait_exclusive(sk_sleep(sk), &wait,
					  TASK_INTERRUPTIBLE);
		release_sock(sk);
		if (reqsk_queue_empty(&icsk->icsk_accept_queue))
			timeo = schedule_timeout(timeo);
		lock_sock(sk);
		err = 0;
		if (!reqsk_queue_empty(&icsk->icsk_accept_queue))
			break;
		err = -EINVAL;
		if (sk->sk_state != TCP_LISTEN)
			break;
		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			break;
		err = -EAGAIN;
		if (!timeo)
			break;
	}
	finish_wait(sk_sleep(sk), &wait);
	return err;
}

/*
 * This will accept the next outstanding connection.
 */
struct sock *inet_csk_accept(struct sock *sk, int flags, int *err)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct sock *newsk;
	int error;

	lock_sock(sk);

	/* We need to make sure that this socket is listening,
	 * and that it has something pending.
	 */
	error = -EINVAL;
	if (sk->sk_state != TCP_LISTEN)
		goto out_err;

	/* Find already established connection */
	if (reqsk_queue_empty(&icsk->icsk_accept_queue)) {
		long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

		/* If this is a non-blocking socket, don't sleep */
		error = -EAGAIN;
		if (!timeo)
			goto out_err;

		error = inet_csk_wait_for_connect(sk, timeo);
		if (error)
			goto out_err;
	}

	newsk = reqsk_queue_get_child(&icsk->icsk_accept_queue, sk);
	WARN_ON(newsk->sk_state == TCP_SYN_RECV);
out:
	release_sock(sk);
	return newsk;
out_err:
	newsk = NULL;
	*err = error;
	goto out;
}

EXPORT_SYMBOL(inet_csk_accept);
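
/*
 * Socket-level accept() gets here via sk->sk_prot->accept; the AF_INET
 * layer (inet_accept in net/ipv4/af_inet.c) does roughly:
 *
 *	struct sock *sk2 = sk1->sk_prot->accept(sk1, flags, &err);
 *	if (!sk2)
 *		goto do_err;
 */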

/*
 * Using different timers for retransmit, delayed acks and probes.
 * We may wish to use just one timer maintaining a list of expire jiffies
 * to optimize.
 */
void inet_csk_init_xmit_timers(struct sock *sk,
			       void (*retransmit_handler)(unsigned long),
			       void (*delack_handler)(unsigned long),
			       void (*keepalive_handler)(unsigned long))
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	setup_timer(&icsk->icsk_retransmit_timer, retransmit_handler,
			(unsigned long)sk);
	setup_timer(&icsk->icsk_delack_timer, delack_handler,
			(unsigned long)sk);
	setup_timer(&sk->sk_timer, keepalive_handler, (unsigned long)sk);
	icsk->icsk_pending = icsk->icsk_ack.pending = 0;
}

EXPORT_SYMBOL(inet_csk_init_xmit_timers);
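
/*
 * TCP, for example, installs its handlers with a one-line wrapper
 * (tcp_init_xmit_timers in net/ipv4/tcp_timer.c), roughly:
 *
 *	inet_csk_init_xmit_timers(sk, &tcp_write_timer,
 *				  &tcp_delack_timer, &tcp_keepalive_timer);
 */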

void inet_csk_clear_xmit_timers(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	icsk->icsk_pending = icsk->icsk_ack.pending = icsk->icsk_ack.blocked = 0;

	sk_stop_timer(sk, &icsk->icsk_retransmit_timer);
	sk_stop_timer(sk, &icsk->icsk_delack_timer);
	sk_stop_timer(sk, &sk->sk_timer);
}

EXPORT_SYMBOL(inet_csk_clear_xmit_timers);

void inet_csk_delete_keepalive_timer(struct sock *sk)
{
	sk_stop_timer(sk, &sk->sk_timer);
}

EXPORT_SYMBOL(inet_csk_delete_keepalive_timer);

void inet_csk_reset_keepalive_timer(struct sock *sk, unsigned long len)
{
	sk_reset_timer(sk, &sk->sk_timer, jiffies + len);
}

EXPORT_SYMBOL(inet_csk_reset_keepalive_timer);

struct dst_entry *inet_csk_route_req(struct sock *sk,
				     const struct request_sock *req)
{
	struct rtable *rt;
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct ip_options *opt = inet_rsk(req)->opt;
	struct flowi fl = { .oif = sk->sk_bound_dev_if,
			    .mark = sk->sk_mark,
			    .nl_u = { .ip4_u =
				      { .daddr = ((opt && opt->srr) ?
						  opt->faddr :
						  ireq->rmt_addr),
					.saddr = ireq->loc_addr,
					.tos = RT_CONN_FLAGS(sk) } },
			    .proto = sk->sk_protocol,
			    .flags = inet_sk_flowi_flags(sk),
			    .uli_u = { .ports =
				       { .sport = inet_sk(sk)->inet_sport,
					 .dport = ireq->rmt_port } } };
	struct net *net = sock_net(sk);

	security_req_classify_flow(req, &fl);
	if (ip_route_output_flow(net, &rt, &fl, sk, 0))
		goto no_route;
	if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway)
		goto route_err;
	return &rt->u.dst;

route_err:
	ip_rt_put(rt);
no_route:
	IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES);
	return NULL;
}

EXPORT_SYMBOL_GPL(inet_csk_route_req);
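
/*
 * Callers use the returned route to answer a SYN; TCP's SYN-ACK path
 * (tcp_v4_send_synack in net/ipv4/tcp_ipv4.c) does roughly:
 *
 *	struct dst_entry *dst = inet_csk_route_req(sk, req);
 *	if (!dst)
 *		return -1;
 */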

static inline u32 inet_synq_hash(const __be32 raddr, const __be16 rport,
				 const u32 rnd, const u32 synq_hsize)
{
	return jhash_2words((__force u32)raddr, (__force u32)rport, rnd) & (synq_hsize - 1);
}
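
/*
 * nr_table_entries is always a power of two (reqsk_queue_alloc rounds
 * it up), so the mask above is the usual cheap substitute for a
 * modulo; e.g. for a 512-entry table:
 *
 *	hash & (512 - 1) == hash % 512
 */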

#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
#define AF_INET_FAMILY(fam) ((fam) == AF_INET)
#else
#define AF_INET_FAMILY(fam) 1
#endif

struct request_sock *inet_csk_search_req(const struct sock *sk,
					 struct request_sock ***prevp,
					 const __be16 rport, const __be32 raddr,
					 const __be32 laddr)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
	struct request_sock *req, **prev;

	for (prev = &lopt->syn_table[inet_synq_hash(raddr, rport, lopt->hash_rnd,
						    lopt->nr_table_entries)];
	     (req = *prev) != NULL;
	     prev = &req->dl_next) {
		const struct inet_request_sock *ireq = inet_rsk(req);

		if (ireq->rmt_port == rport &&
		    ireq->rmt_addr == raddr &&
		    ireq->loc_addr == laddr &&
		    AF_INET_FAMILY(req->rsk_ops->family)) {
			WARN_ON(req->sk);
			*prevp = prev;
			break;
		}
	}

	return req;
}

EXPORT_SYMBOL_GPL(inet_csk_search_req);
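
/*
 * The typical caller is the handshake-completion path; TCP's
 * tcp_v4_hnd_req looks up the pending request roughly as:
 *
 *	struct request_sock **prev;
 *	struct request_sock *req = inet_csk_search_req(sk, &prev, th->source,
 *						       iph->saddr, iph->daddr);
 */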

void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
				   unsigned long timeout)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
	const u32 h = inet_synq_hash(inet_rsk(req)->rmt_addr, inet_rsk(req)->rmt_port,
				     lopt->hash_rnd, lopt->nr_table_entries);

	reqsk_queue_hash_req(&icsk->icsk_accept_queue, h, req, timeout);
	inet_csk_reqsk_queue_added(sk, timeout);
}

/* The only thing we need from tcp.h */
extern int sysctl_tcp_synack_retries;

EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_hash_add);
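
/*
 * A listener queues a new embryonic request here after sending the
 * SYN-ACK; TCP's connection-request path, for instance, ends with
 * roughly:
 *
 *	inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
 */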

/* Decide when to expire the request and when to resend SYN-ACK */
static inline void syn_ack_recalc(struct request_sock *req, const int thresh,
				  const int max_retries,
				  const u8 rskq_defer_accept,
				  int *expire, int *resend)
{
	if (!rskq_defer_accept) {
		*expire = req->retrans >= thresh;
		*resend = 1;
		return;
	}
	*expire = req->retrans >= thresh &&
		  (!inet_rsk(req)->acked || req->retrans >= max_retries);
	/*
	 * Do not resend while waiting for data after ACK;
	 * start to resend at the end of the deferring period to give
	 * a last chance for data or an ACK to create an established socket.
	 */
	*resend = !inet_rsk(req)->acked ||
		  req->retrans >= rskq_defer_accept - 1;
}
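
/*
 * Worked example (hypothetical numbers): with thresh == 5,
 * max_retries == 5 and rskq_defer_accept == 3, a request whose peer
 * has already ACKed (inet_rsk(req)->acked) is not resent until
 * req->retrans reaches rskq_defer_accept - 1 == 2, and expires only
 * once req->retrans >= 5; an unACKed request is resent every round
 * and expires as soon as req->retrans >= thresh.
 */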

void inet_csk_reqsk_queue_prune(struct sock *parent,
				const unsigned long interval,
				const unsigned long timeout,
				const unsigned long max_rto)
{
	struct inet_connection_sock *icsk = inet_csk(parent);
	struct request_sock_queue *queue = &icsk->icsk_accept_queue;
	struct listen_sock *lopt = queue->listen_opt;
	int max_retries = icsk->icsk_syn_retries ? : sysctl_tcp_synack_retries;
	int thresh = max_retries;
	unsigned long now = jiffies;
	struct request_sock **reqp, *req;
	int i, budget;

	if (lopt == NULL || lopt->qlen == 0)
		return;

	/* Normally all the openreqs are young and become mature
	 * (i.e. converted to established socket) within the first timeout.
	 * If synack was not acknowledged for 3 seconds, it means
	 * one of the following things: synack was lost, ack was lost,
	 * rtt is high or nobody planned to ack (i.e. synflood).
	 * When the server is somewhat loaded, the queue is populated with old
	 * open requests, reducing the effective size of the queue.
	 * When the server is well loaded, the queue size reduces to zero
	 * after several minutes of work. That is not a synflood,
	 * it is normal operation. The solution is pruning
	 * too-old entries, overriding the normal timeout, when
	 * the situation becomes dangerous.
	 *
	 * Essentially, we reserve half of the room for young
	 * embryos; and abort old ones without pity, if old
	 * ones are about to clog our table.
	 */
	if (lopt->qlen >> (lopt->max_qlen_log - 1)) {
		int young = lopt->qlen_young << 1;

		while (thresh > 2) {
			if (lopt->qlen < young)
				break;
			thresh--;
			young <<= 1;
		}
	}

	if (queue->rskq_defer_accept)
		max_retries = queue->rskq_defer_accept;

	budget = 2 * (lopt->nr_table_entries / (timeout / interval));
	i = lopt->clock_hand;

	do {
		reqp = &lopt->syn_table[i];
		while ((req = *reqp) != NULL) {
			if (time_after_eq(now, req->expires)) {
				int expire = 0, resend = 0;

				syn_ack_recalc(req, thresh, max_retries,
					       queue->rskq_defer_accept,
					       &expire, &resend);
				if (req->rsk_ops->syn_ack_timeout)
					req->rsk_ops->syn_ack_timeout(parent, req);
				if (!expire &&
				    (!resend ||
				     !req->rsk_ops->rtx_syn_ack(parent, req, NULL) ||
				     inet_rsk(req)->acked)) {
					unsigned long timeo;

					if (req->retrans++ == 0)
						lopt->qlen_young--;
					timeo = min((timeout << req->retrans), max_rto);
					req->expires = now + timeo;
					reqp = &req->dl_next;
					continue;
				}

				/* Drop this request */
				inet_csk_reqsk_queue_unlink(parent, req, reqp);
				reqsk_queue_removed(queue, req);
				reqsk_free(req);
				continue;
			}
			reqp = &req->dl_next;
		}

		i = (i + 1) & (lopt->nr_table_entries - 1);

	} while (--budget > 0);

	lopt->clock_hand = i;

	if (lopt->qlen)
		inet_csk_reset_keepalive_timer(parent, interval);
}

EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_prune);
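
/*
 * TCP drives this from its listen-state timer (tcp_synack_timer in
 * net/ipv4/tcp_timer.c), roughly:
 *
 *	inet_csk_reqsk_queue_prune(sk, TCP_SYNQ_INTERVAL,
 *				   TCP_TIMEOUT_INIT, TCP_RTO_MAX);
 */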

struct sock *inet_csk_clone(struct sock *sk, const struct request_sock *req,
			    const gfp_t priority)
{
	struct sock *newsk = sk_clone(sk, priority);

	if (newsk != NULL) {
		struct inet_connection_sock *newicsk = inet_csk(newsk);

		newsk->sk_state = TCP_SYN_RECV;
		newicsk->icsk_bind_hash = NULL;

		inet_sk(newsk)->inet_dport = inet_rsk(req)->rmt_port;
		inet_sk(newsk)->inet_num = ntohs(inet_rsk(req)->loc_port);
		inet_sk(newsk)->inet_sport = inet_rsk(req)->loc_port;
		newsk->sk_write_space = sk_stream_write_space;

		newicsk->icsk_retransmits = 0;
		newicsk->icsk_backoff	  = 0;
		newicsk->icsk_probes_out  = 0;

		/* Deinitialize accept_queue to trap illegal accesses. */
		memset(&newicsk->icsk_accept_queue, 0, sizeof(newicsk->icsk_accept_queue));

		security_inet_csk_clone(newsk, req);
	}
	return newsk;
}

EXPORT_SYMBOL_GPL(inet_csk_clone);
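
/*
 * Protocols call this once a handshake completes; e.g. TCP's
 * tcp_create_openreq_child (net/ipv4/tcp_minisocks.c) starts with
 * roughly:
 *
 *	struct sock *newsk = inet_csk_clone(sk, req, GFP_ATOMIC);
 */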

/*
 * At this point, there should be no process reference to this
 * socket, and thus no user references at all.  Therefore we
 * can assume the socket waitqueue is inactive and nobody will
 * try to jump onto it.
 */
void inet_csk_destroy_sock(struct sock *sk)
{
	WARN_ON(sk->sk_state != TCP_CLOSE);
	WARN_ON(!sock_flag(sk, SOCK_DEAD));

	/* It cannot be in hash table! */
	WARN_ON(!sk_unhashed(sk));

	/* If inet_sk(sk)->inet_num is nonzero, the socket must be bound */
	WARN_ON(inet_sk(sk)->inet_num && !inet_csk(sk)->icsk_bind_hash);

	sk->sk_prot->destroy(sk);

	sk_stream_kill_queues(sk);

	xfrm_sk_free_policy(sk);

	sk_refcnt_debug_release(sk);

	percpu_counter_dec(sk->sk_prot->orphan_count);
	sock_put(sk);
}

EXPORT_SYMBOL(inet_csk_destroy_sock);

int inet_csk_listen_start(struct sock *sk, const int nr_table_entries)
{
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	int rc = reqsk_queue_alloc(&icsk->icsk_accept_queue, nr_table_entries);

	if (rc != 0)
		return rc;

	sk->sk_max_ack_backlog = 0;
	sk->sk_ack_backlog = 0;
	inet_csk_delack_init(sk);

	/* There is a race window here: we announce ourselves as listening,
	 * but this transition has not yet been validated by get_port().
	 * It is OK, because the socket enters the hash table only
	 * after validation is complete.
	 */
	sk->sk_state = TCP_LISTEN;
	if (!sk->sk_prot->get_port(sk, inet->inet_num)) {
		inet->inet_sport = htons(inet->inet_num);

		sk_dst_reset(sk);
		sk->sk_prot->hash(sk);

		return 0;
	}

	sk->sk_state = TCP_CLOSE;
	__reqsk_queue_destroy(&icsk->icsk_accept_queue);
	return -EADDRINUSE;
}

EXPORT_SYMBOL_GPL(inet_csk_listen_start);
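
/*
 * The socket-level listen() reaches this through the AF_INET layer;
 * inet_listen (net/ipv4/af_inet.c) does roughly:
 *
 *	if (old_state != TCP_LISTEN) {
 *		err = inet_csk_listen_start(sk, backlog);
 *		if (err)
 *			goto out;
 *	}
 */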

/*
 *	This routine closes sockets which have been at least partially
 *	opened, but not yet accepted.
 */
void inet_csk_listen_stop(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct request_sock *acc_req;
	struct request_sock *req;

	inet_csk_delete_keepalive_timer(sk);

	/* make all the listen_opt local to us */
	acc_req = reqsk_queue_yank_acceptq(&icsk->icsk_accept_queue);

	/* Following the specs, it would be better either to send a FIN
	 * (and enter FIN-WAIT-1; that is a normal close)
	 * or to send an active reset (abort).
	 * Certainly, it is pretty dangerous during a synflood, but that is
	 * a bad justification for our negligence 8)
	 * To be honest, we are not able to make either
	 * of the variants now.			--ANK
	 */
	reqsk_queue_destroy(&icsk->icsk_accept_queue);

	while ((req = acc_req) != NULL) {
		struct sock *child = req->sk;

		acc_req = req->dl_next;

		local_bh_disable();
		bh_lock_sock(child);
		WARN_ON(sock_owned_by_user(child));
		sock_hold(child);

		sk->sk_prot->disconnect(child, O_NONBLOCK);

		sock_orphan(child);

		percpu_counter_inc(sk->sk_prot->orphan_count);

		inet_csk_destroy_sock(child);

		bh_unlock_sock(child);
		local_bh_enable();
		sock_put(child);

		sk_acceptq_removed(sk);
		__reqsk_free(req);
	}
	WARN_ON(sk->sk_ack_backlog);
}

EXPORT_SYMBOL_GPL(inet_csk_listen_stop);

void inet_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr)
{
	struct sockaddr_in *sin = (struct sockaddr_in *)uaddr;
	const struct inet_sock *inet = inet_sk(sk);

	sin->sin_family		= AF_INET;
	sin->sin_addr.s_addr	= inet->inet_daddr;
	sin->sin_port		= inet->inet_dport;
}

EXPORT_SYMBOL_GPL(inet_csk_addr2sockaddr);

#ifdef CONFIG_COMPAT
int inet_csk_compat_getsockopt(struct sock *sk, int level, int optname,
			       char __user *optval, int __user *optlen)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_af_ops->compat_getsockopt != NULL)
		return icsk->icsk_af_ops->compat_getsockopt(sk, level, optname,
							    optval, optlen);
	return icsk->icsk_af_ops->getsockopt(sk, level, optname,
					     optval, optlen);
}

EXPORT_SYMBOL_GPL(inet_csk_compat_getsockopt);

int inet_csk_compat_setsockopt(struct sock *sk, int level, int optname,
			       char __user *optval, unsigned int optlen)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_af_ops->compat_setsockopt != NULL)
		return icsk->icsk_af_ops->compat_setsockopt(sk, level, optname,
							    optval, optlen);
	return icsk->icsk_af_ops->setsockopt(sk, level, optname,
					     optval, optlen);
}

EXPORT_SYMBOL_GPL(inet_csk_compat_setsockopt);
#endif