xref: /linux/net/ipv4/inet_connection_sock.c (revision c537b994505099b7197e7d3125b942ecbcc51eb6)
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Support for INET connection-oriented protocols.
 *
 * Authors:	See the TCP sources
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/jhash.h>

#include <net/inet_connection_sock.h>
#include <net/inet_hashtables.h>
#include <net/inet_timewait_sock.h>
#include <net/ip.h>
#include <net/route.h>
#include <net/tcp_states.h>
#include <net/xfrm.h>

#ifdef INET_CSK_DEBUG
const char inet_csk_timer_bug_msg[] = "inet_csk BUG: unknown timer value\n";
EXPORT_SYMBOL(inet_csk_timer_bug_msg);
#endif

/*
 * This array holds the first and last local port number.
 * For high-usage systems, use sysctl to change this to
 * 32768-61000
 */
int sysctl_local_port_range[2] = { 1024, 4999 };
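
/*
 * For example, a high-usage system might widen the range at runtime
 * (illustrative command, not part of this file):
 *
 *	# sysctl -w net.ipv4.ip_local_port_range="32768 61000"
 */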

int inet_csk_bind_conflict(const struct sock *sk,
			   const struct inet_bind_bucket *tb)
{
	const __be32 sk_rcv_saddr = inet_rcv_saddr(sk);
	struct sock *sk2;
	struct hlist_node *node;
	int reuse = sk->sk_reuse;

	sk_for_each_bound(sk2, node, &tb->owners) {
		if (sk != sk2 &&
		    !inet_v6_ipv6only(sk2) &&
		    (!sk->sk_bound_dev_if ||
		     !sk2->sk_bound_dev_if ||
		     sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
			if (!reuse || !sk2->sk_reuse ||
			    sk2->sk_state == TCP_LISTEN) {
				const __be32 sk2_rcv_saddr = inet_rcv_saddr(sk2);
				if (!sk2_rcv_saddr || !sk_rcv_saddr ||
				    sk2_rcv_saddr == sk_rcv_saddr)
					break;
			}
		}
	}
	return node != NULL;
}

EXPORT_SYMBOL_GPL(inet_csk_bind_conflict);
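
/*
 * In short: a port can be shared when the sockets are bound to different
 * devices or different local addresses, or when every socket involved sets
 * SO_REUSEADDR and none of them is listening.  A rough userspace sketch of
 * the reuse case (illustrative only, not part of this file):
 *
 *	int on = 1;
 *	setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on));
 *	bind(fd, (struct sockaddr *)&addr, sizeof(addr));
 */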

/* Obtain a reference to a local port for the given sock,
 * if snum is zero it means select any available local port.
 */
int inet_csk_get_port(struct inet_hashinfo *hashinfo,
		      struct sock *sk, unsigned short snum,
		      int (*bind_conflict)(const struct sock *sk,
					   const struct inet_bind_bucket *tb))
{
	struct inet_bind_hashbucket *head;
	struct hlist_node *node;
	struct inet_bind_bucket *tb;
	int ret;

	local_bh_disable();
	if (!snum) {
		int low = sysctl_local_port_range[0];
		int high = sysctl_local_port_range[1];
		int remaining = (high - low) + 1;
		int rover = net_random() % (high - low) + low;

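		/*
		 * Probe for a free port starting at a pseudo-random point
		 * in the range and walking linearly with wrap-around; a
		 * successful probe breaks out of the loop below while
		 * still holding head->lock for the chosen bucket.
		 */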
		do {
			head = &hashinfo->bhash[inet_bhashfn(rover, hashinfo->bhash_size)];
			spin_lock(&head->lock);
			inet_bind_bucket_for_each(tb, node, &head->chain)
				if (tb->port == rover)
					goto next;
			break;
		next:
			spin_unlock(&head->lock);
			if (++rover > high)
				rover = low;
		} while (--remaining > 0);

		/* Exhausted local port range during search?  It is not
		 * possible for us to be holding one of the bind hash
		 * locks if this test triggers, because if 'remaining'
		 * drops to zero, we broke out of the do/while loop at
		 * the top level, not from the 'break;' statement.
		 */
		ret = 1;
		if (remaining <= 0)
			goto fail;

		/* OK, here is the one we will use.  HEAD is
		 * non-NULL and we hold its lock.
		 */
		snum = rover;
	} else {
		head = &hashinfo->bhash[inet_bhashfn(snum, hashinfo->bhash_size)];
		spin_lock(&head->lock);
		inet_bind_bucket_for_each(tb, node, &head->chain)
			if (tb->port == snum)
				goto tb_found;
	}
	tb = NULL;
	goto tb_not_found;
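/*
 * tb->fastreuse caches whether every current owner of this bucket allows
 * address reuse, letting the common SO_REUSEADDR rebind below succeed
 * without walking the owner list via bind_conflict().
 */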
tb_found:
	if (!hlist_empty(&tb->owners)) {
		if (sk->sk_reuse > 1)
			goto success;
		if (tb->fastreuse > 0 &&
		    sk->sk_reuse && sk->sk_state != TCP_LISTEN) {
			goto success;
		} else {
			ret = 1;
			if (bind_conflict(sk, tb))
				goto fail_unlock;
		}
	}
tb_not_found:
	ret = 1;
	if (!tb && (tb = inet_bind_bucket_create(hashinfo->bind_bucket_cachep, head, snum)) == NULL)
		goto fail_unlock;
	if (hlist_empty(&tb->owners)) {
		if (sk->sk_reuse && sk->sk_state != TCP_LISTEN)
			tb->fastreuse = 1;
		else
			tb->fastreuse = 0;
	} else if (tb->fastreuse &&
		   (!sk->sk_reuse || sk->sk_state == TCP_LISTEN))
		tb->fastreuse = 0;
success:
	if (!inet_csk(sk)->icsk_bind_hash)
		inet_bind_hash(sk, tb, snum);
	BUG_TRAP(inet_csk(sk)->icsk_bind_hash == tb);
	ret = 0;

fail_unlock:
	spin_unlock(&head->lock);
fail:
	local_bh_enable();
	return ret;
}

EXPORT_SYMBOL_GPL(inet_csk_get_port);

/*
 * Wait for an incoming connection, avoid race conditions. This must be called
 * with the socket locked.
 */
static int inet_csk_wait_for_connect(struct sock *sk, long timeo)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	DEFINE_WAIT(wait);
	int err;

	/*
	 * True wake-one mechanism for incoming connections: only
	 * one process gets woken up, not the 'whole herd'.
	 * Since we do not 'race & poll' for established sockets
	 * anymore, the common case will execute the loop only once.
	 *
	 * Subtle issue: an exclusive waiter added by
	 * "add_wait_queue_exclusive()" goes after any current
	 * non-exclusive waiters, and we know that it will always
	 * _stay_ after any new non-exclusive waiters because all
	 * non-exclusive waiters are added at the beginning of the
	 * wait-queue. As such, it's ok to "drop" our exclusiveness
	 * temporarily when we get woken up without having to remove
	 * and re-insert us on the wait queue.
	 */
	for (;;) {
		prepare_to_wait_exclusive(sk->sk_sleep, &wait,
					  TASK_INTERRUPTIBLE);
		release_sock(sk);
		if (reqsk_queue_empty(&icsk->icsk_accept_queue))
			timeo = schedule_timeout(timeo);
		lock_sock(sk);
		err = 0;
		if (!reqsk_queue_empty(&icsk->icsk_accept_queue))
			break;
		err = -EINVAL;
		if (sk->sk_state != TCP_LISTEN)
			break;
		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			break;
		err = -EAGAIN;
		if (!timeo)
			break;
	}
	finish_wait(sk->sk_sleep, &wait);
	return err;
}

/*
 * This will accept the next outstanding connection.
 */
struct sock *inet_csk_accept(struct sock *sk, int flags, int *err)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct sock *newsk;
	int error;

	lock_sock(sk);

	/* We need to make sure that this socket is listening,
	 * and that it has something pending.
	 */
	error = -EINVAL;
	if (sk->sk_state != TCP_LISTEN)
		goto out_err;

	/* Find already established connection */
	if (reqsk_queue_empty(&icsk->icsk_accept_queue)) {
		long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

		/* If this is a non-blocking socket, don't sleep */
		error = -EAGAIN;
		if (!timeo)
			goto out_err;

		error = inet_csk_wait_for_connect(sk, timeo);
		if (error)
			goto out_err;
	}

	newsk = reqsk_queue_get_child(&icsk->icsk_accept_queue, sk);
	BUG_TRAP(newsk->sk_state != TCP_SYN_RECV);
out:
	release_sock(sk);
	return newsk;
out_err:
	newsk = NULL;
	*err = error;
	goto out;
}

EXPORT_SYMBOL(inet_csk_accept);
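
/*
 * Note: this is the protocol-independent back end of accept(2); for TCP,
 * for instance, a userspace accept() reaches it through inet_accept() and
 * sk->sk_prot->accept() (illustrative call chain, details vary by protocol).
 */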

/*
 * Using different timers for retransmit, delayed acks and probes.
 * We may wish to use just one timer maintaining a list of expiry
 * jiffies to optimize.
 */
void inet_csk_init_xmit_timers(struct sock *sk,
			       void (*retransmit_handler)(unsigned long),
			       void (*delack_handler)(unsigned long),
			       void (*keepalive_handler)(unsigned long))
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	init_timer(&icsk->icsk_retransmit_timer);
	init_timer(&icsk->icsk_delack_timer);
	init_timer(&sk->sk_timer);

	icsk->icsk_retransmit_timer.function = retransmit_handler;
	icsk->icsk_delack_timer.function     = delack_handler;
	sk->sk_timer.function		     = keepalive_handler;

	icsk->icsk_retransmit_timer.data =
		icsk->icsk_delack_timer.data =
			sk->sk_timer.data  = (unsigned long)sk;

	icsk->icsk_pending = icsk->icsk_ack.pending = 0;
}

EXPORT_SYMBOL(inet_csk_init_xmit_timers);
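
/*
 * Each handler receives the socket through the timer's data field set
 * above, so a typical handler begins like this (sketch with a hypothetical
 * name, not a handler from this file):
 *
 *	static void xxx_retransmit_handler(unsigned long data)
 *	{
 *		struct sock *sk = (struct sock *)data;
 *		...
 *	}
 */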

void inet_csk_clear_xmit_timers(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	icsk->icsk_pending = icsk->icsk_ack.pending = icsk->icsk_ack.blocked = 0;

	sk_stop_timer(sk, &icsk->icsk_retransmit_timer);
	sk_stop_timer(sk, &icsk->icsk_delack_timer);
	sk_stop_timer(sk, &sk->sk_timer);
}

EXPORT_SYMBOL(inet_csk_clear_xmit_timers);

void inet_csk_delete_keepalive_timer(struct sock *sk)
{
	sk_stop_timer(sk, &sk->sk_timer);
}

EXPORT_SYMBOL(inet_csk_delete_keepalive_timer);

void inet_csk_reset_keepalive_timer(struct sock *sk, unsigned long len)
{
	sk_reset_timer(sk, &sk->sk_timer, jiffies + len);
}

EXPORT_SYMBOL(inet_csk_reset_keepalive_timer);

struct dst_entry *inet_csk_route_req(struct sock *sk,
				     const struct request_sock *req)
{
	struct rtable *rt;
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct ip_options *opt = inet_rsk(req)->opt;
	struct flowi fl = { .oif = sk->sk_bound_dev_if,
			    .nl_u = { .ip4_u =
				      { .daddr = ((opt && opt->srr) ?
						  opt->faddr :
						  ireq->rmt_addr),
					.saddr = ireq->loc_addr,
					.tos = RT_CONN_FLAGS(sk) } },
			    .proto = sk->sk_protocol,
			    .uli_u = { .ports =
				       { .sport = inet_sk(sk)->sport,
					 .dport = ireq->rmt_port } } };

	security_req_classify_flow(req, &fl);
	if (ip_route_output_flow(&rt, &fl, sk, 0)) {
		IP_INC_STATS_BH(IPSTATS_MIB_OUTNOROUTES);
		return NULL;
	}
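	/*
	 * With a strict source route, the next hop must be the first hop
	 * recorded in the option; if routing selected a different gateway,
	 * the packet cannot legally be sent, so treat it as no route.
	 */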
	if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway) {
		ip_rt_put(rt);
		IP_INC_STATS_BH(IPSTATS_MIB_OUTNOROUTES);
		return NULL;
	}
	return &rt->u.dst;
}

EXPORT_SYMBOL_GPL(inet_csk_route_req);

static inline u32 inet_synq_hash(const __be32 raddr, const __be16 rport,
				 const u32 rnd, const u32 synq_hsize)
{
	return jhash_2words((__force u32)raddr, (__force u32)rport, rnd) & (synq_hsize - 1);
}
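
/*
 * synq_hsize (lopt->nr_table_entries) is a power of two, so masking with
 * (synq_hsize - 1) reduces the 32-bit jhash to a table index; rnd is the
 * per-listener hash_rnd that keeps remote hosts from aiming at one chain.
 */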

#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
#define AF_INET_FAMILY(fam) ((fam) == AF_INET)
#else
#define AF_INET_FAMILY(fam) 1
#endif

struct request_sock *inet_csk_search_req(const struct sock *sk,
					 struct request_sock ***prevp,
					 const __be16 rport, const __be32 raddr,
					 const __be32 laddr)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
	struct request_sock *req, **prev;

	for (prev = &lopt->syn_table[inet_synq_hash(raddr, rport, lopt->hash_rnd,
						    lopt->nr_table_entries)];
	     (req = *prev) != NULL;
	     prev = &req->dl_next) {
		const struct inet_request_sock *ireq = inet_rsk(req);

		if (ireq->rmt_port == rport &&
		    ireq->rmt_addr == raddr &&
		    ireq->loc_addr == laddr &&
		    AF_INET_FAMILY(req->rsk_ops->family)) {
			BUG_TRAP(!req->sk);
			*prevp = prev;
			break;
		}
	}

	return req;
}

EXPORT_SYMBOL_GPL(inet_csk_search_req);

void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
				   unsigned long timeout)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
	const u32 h = inet_synq_hash(inet_rsk(req)->rmt_addr, inet_rsk(req)->rmt_port,
				     lopt->hash_rnd, lopt->nr_table_entries);

	reqsk_queue_hash_req(&icsk->icsk_accept_queue, h, req, timeout);
	inet_csk_reqsk_queue_added(sk, timeout);
}

/* Only thing we need from tcp.h */
extern int sysctl_tcp_synack_retries;

EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_hash_add);

void inet_csk_reqsk_queue_prune(struct sock *parent,
				const unsigned long interval,
				const unsigned long timeout,
				const unsigned long max_rto)
{
	struct inet_connection_sock *icsk = inet_csk(parent);
	struct request_sock_queue *queue = &icsk->icsk_accept_queue;
	struct listen_sock *lopt = queue->listen_opt;
	int max_retries = icsk->icsk_syn_retries ? : sysctl_tcp_synack_retries;
	int thresh = max_retries;
	unsigned long now = jiffies;
	struct request_sock **reqp, *req;
	int i, budget;

	if (lopt == NULL || lopt->qlen == 0)
		return;

	/* Normally all the openreqs are young and become mature
	 * (i.e. are converted to established sockets) within the first
	 * timeout. If a synack was not acknowledged for 3 seconds, it
	 * means one of the following: the synack was lost, the ack was
	 * lost, the rtt is high, or nobody planned to ack at all
	 * (i.e. a synflood).
	 * When the server is moderately loaded, the queue fills with old
	 * open requests, reducing its effective size.
	 * When the server is heavily loaded, the queue size drops to zero
	 * after several minutes of work. That is not a synflood, it is
	 * normal operation. The solution is to prune entries that are too
	 * old, overriding the normal timeout, when the situation becomes
	 * dangerous.
	 *
	 * Essentially, we reserve half of the room for young
	 * embryos and abort old ones without pity if they are
	 * about to clog our table.
	 */
	if (lopt->qlen >> (lopt->max_qlen_log - 1)) {
		int young = (lopt->qlen_young << 1);

		while (thresh > 2) {
			if (lopt->qlen < young)
				break;
			thresh--;
			young <<= 1;
		}
	}
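	/*
	 * Worked example (illustrative numbers): max_retries = 5,
	 * max_qlen_log = 8 (maximum queue length 256), qlen = 192 and
	 * qlen_young = 24.  The queue is over half full, young starts
	 * at 48, and since qlen >= 48, 96 and 192, thresh falls
	 * 5 -> 4 -> 3 -> 2: old entries now get at most two synack
	 * retransmits before being dropped.
	 */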

	if (queue->rskq_defer_accept)
		max_retries = queue->rskq_defer_accept;

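	/*
	 * Visit only a slice of the hash table per timer tick: firing
	 * every 'interval', a budget of
	 * 2 * nr_table_entries / (timeout / interval) buckets lets the
	 * clock hand sweep the whole table about twice per 'timeout'.
	 */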
	budget = 2 * (lopt->nr_table_entries / (timeout / interval));
	i = lopt->clock_hand;

	do {
		reqp = &lopt->syn_table[i];
		while ((req = *reqp) != NULL) {
			if (time_after_eq(now, req->expires)) {
				if ((req->retrans < thresh ||
				     (inet_rsk(req)->acked && req->retrans < max_retries))
				    && !req->rsk_ops->rtx_syn_ack(parent, req, NULL)) {
					unsigned long timeo;

					if (req->retrans++ == 0)
						lopt->qlen_young--;
					timeo = min((timeout << req->retrans), max_rto);
					req->expires = now + timeo;
					reqp = &req->dl_next;
					continue;
				}

				/* Drop this request */
				inet_csk_reqsk_queue_unlink(parent, req, reqp);
				reqsk_queue_removed(queue, req);
				reqsk_free(req);
				continue;
			}
			reqp = &req->dl_next;
		}

		i = (i + 1) & (lopt->nr_table_entries - 1);

	} while (--budget > 0);

	lopt->clock_hand = i;

	if (lopt->qlen)
		inet_csk_reset_keepalive_timer(parent, interval);
}

EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_prune);

struct sock *inet_csk_clone(struct sock *sk, const struct request_sock *req,
			    const gfp_t priority)
{
	struct sock *newsk = sk_clone(sk, priority);

	if (newsk != NULL) {
		struct inet_connection_sock *newicsk = inet_csk(newsk);

		newsk->sk_state = TCP_SYN_RECV;
		newicsk->icsk_bind_hash = NULL;

		inet_sk(newsk)->dport = inet_rsk(req)->rmt_port;
		newsk->sk_write_space = sk_stream_write_space;

		newicsk->icsk_retransmits = 0;
		newicsk->icsk_backoff	  = 0;
		newicsk->icsk_probes_out  = 0;

		/* Deinitialize accept_queue to trap illegal accesses. */
		memset(&newicsk->icsk_accept_queue, 0, sizeof(newicsk->icsk_accept_queue));

		security_inet_csk_clone(newsk, req);
	}
	return newsk;
}

EXPORT_SYMBOL_GPL(inet_csk_clone);

/*
 * At this point, there should be no process reference to this
 * socket, and thus no user references at all.  Therefore we
 * can assume the socket waitqueue is inactive and nobody will
 * try to jump onto it.
 */
void inet_csk_destroy_sock(struct sock *sk)
{
	BUG_TRAP(sk->sk_state == TCP_CLOSE);
	BUG_TRAP(sock_flag(sk, SOCK_DEAD));

	/* It cannot be in hash table! */
	BUG_TRAP(sk_unhashed(sk));

	/* If inet_sk(sk)->num is non-zero, it must be bound */
	BUG_TRAP(!inet_sk(sk)->num || inet_csk(sk)->icsk_bind_hash);

	sk->sk_prot->destroy(sk);

	sk_stream_kill_queues(sk);

	xfrm_sk_free_policy(sk);

	sk_refcnt_debug_release(sk);

	atomic_dec(sk->sk_prot->orphan_count);
	sock_put(sk);
}

EXPORT_SYMBOL(inet_csk_destroy_sock);

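/*
 * Back end of listen(2): allocate the SYN queue, flip the socket to
 * TCP_LISTEN, and revalidate the local port before hashing the socket
 * into the listening table.
 */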
int inet_csk_listen_start(struct sock *sk, const int nr_table_entries)
{
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	int rc = reqsk_queue_alloc(&icsk->icsk_accept_queue, nr_table_entries);

	if (rc != 0)
		return rc;

	sk->sk_max_ack_backlog = 0;
	sk->sk_ack_backlog = 0;
	inet_csk_delack_init(sk);

	/* There is a race window here: we announce ourselves as listening,
	 * but this transition has not yet been validated by get_port().
	 * It is OK, because this socket enters the hash table only
	 * after validation is complete.
	 */
	sk->sk_state = TCP_LISTEN;
	if (!sk->sk_prot->get_port(sk, inet->num)) {
		inet->sport = htons(inet->num);

		sk_dst_reset(sk);
		sk->sk_prot->hash(sk);

		return 0;
	}

	sk->sk_state = TCP_CLOSE;
	__reqsk_queue_destroy(&icsk->icsk_accept_queue);
	return -EADDRINUSE;
}

EXPORT_SYMBOL_GPL(inet_csk_listen_start);

/*
 *	This routine closes sockets which have been at least partially
 *	opened, but not yet accepted.
 */
void inet_csk_listen_stop(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct request_sock *acc_req;
	struct request_sock *req;

	inet_csk_delete_keepalive_timer(sk);

	/* make all the listen_opt local to us */
	acc_req = reqsk_queue_yank_acceptq(&icsk->icsk_accept_queue);

	/* Following the specs, it would be better either to send a FIN
	 * (and enter FIN-WAIT-1, it is a normal close)
	 * or to send an active reset (abort).
	 * Certainly, it is pretty dangerous during a synflood, but that
	 * is a bad justification for our negligence 8)
	 * To be honest, we are not able to implement either
	 * of the variants now.			--ANK
	 */
	reqsk_queue_destroy(&icsk->icsk_accept_queue);

	while ((req = acc_req) != NULL) {
		struct sock *child = req->sk;

		acc_req = req->dl_next;

		local_bh_disable();
		bh_lock_sock(child);
		BUG_TRAP(!sock_owned_by_user(child));
		sock_hold(child);

		sk->sk_prot->disconnect(child, O_NONBLOCK);

		sock_orphan(child);

		atomic_inc(sk->sk_prot->orphan_count);

		inet_csk_destroy_sock(child);

		bh_unlock_sock(child);
		local_bh_enable();
		sock_put(child);

		sk_acceptq_removed(sk);
		__reqsk_free(req);
	}
	BUG_TRAP(!sk->sk_ack_backlog);
}

EXPORT_SYMBOL_GPL(inet_csk_listen_stop);

void inet_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr)
{
	struct sockaddr_in *sin = (struct sockaddr_in *)uaddr;
	const struct inet_sock *inet = inet_sk(sk);

	sin->sin_family		= AF_INET;
	sin->sin_addr.s_addr	= inet->daddr;
	sin->sin_port		= inet->dport;
}

EXPORT_SYMBOL_GPL(inet_csk_addr2sockaddr);

int inet_csk_ctl_sock_create(struct socket **sock, unsigned short family,
			     unsigned short type, unsigned char protocol)
{
	int rc = sock_create_kern(family, type, protocol, sock);

	if (rc == 0) {
		(*sock)->sk->sk_allocation = GFP_ATOMIC;
		inet_sk((*sock)->sk)->uc_ttl = -1;
		/*
		 * Unhash it so that IP input processing does not even see it;
		 * we do not want this socket to receive incoming packets.
		 */
		(*sock)->sk->sk_prot->unhash((*sock)->sk);
	}
	return rc;
}

EXPORT_SYMBOL_GPL(inet_csk_ctl_sock_create);

#ifdef CONFIG_COMPAT
int inet_csk_compat_getsockopt(struct sock *sk, int level, int optname,
			       char __user *optval, int __user *optlen)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_af_ops->compat_getsockopt != NULL)
		return icsk->icsk_af_ops->compat_getsockopt(sk, level, optname,
							    optval, optlen);
	return icsk->icsk_af_ops->getsockopt(sk, level, optname,
					     optval, optlen);
}

EXPORT_SYMBOL_GPL(inet_csk_compat_getsockopt);

int inet_csk_compat_setsockopt(struct sock *sk, int level, int optname,
			       char __user *optval, int optlen)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_af_ops->compat_setsockopt != NULL)
		return icsk->icsk_af_ops->compat_setsockopt(sk, level, optname,
							    optval, optlen);
	return icsk->icsk_af_ops->setsockopt(sk, level, optname,
					     optval, optlen);
}

EXPORT_SYMBOL_GPL(inet_csk_compat_setsockopt);
#endif