tcp_minisocks.c (65f27f38446e1976cc98fd3004b110fedcddd189) tcp_minisocks.c (cfb6eeb4c860592edd123fdea908d23c6ad1c7dc)
1/*
2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
5 *
6 * Implementation of the Transmission Control Protocol(TCP).
7 *
8 * Version: $Id: tcp_minisocks.c,v 1.15 2002/02/01 22:01:04 davem Exp $

--- 31 unchanged lines hidden (view full) ---

40struct inet_timewait_death_row tcp_death_row = {
41 .sysctl_max_tw_buckets = NR_FILE * 2,
42 .period = TCP_TIMEWAIT_LEN / INET_TWDR_TWKILL_SLOTS,
43 .death_lock = __SPIN_LOCK_UNLOCKED(tcp_death_row.death_lock),
44 .hashinfo = &tcp_hashinfo,
45 .tw_timer = TIMER_INITIALIZER(inet_twdr_hangman, 0,
46 (unsigned long)&tcp_death_row),
47 .twkill_work = __WORK_INITIALIZER(tcp_death_row.twkill_work,
1/*
2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
5 *
6 * Implementation of the Transmission Control Protocol(TCP).
7 *
8 * Version: $Id: tcp_minisocks.c,v 1.15 2002/02/01 22:01:04 davem Exp $

--- 31 unchanged lines hidden (view full) ---

40struct inet_timewait_death_row tcp_death_row = {
41 .sysctl_max_tw_buckets = NR_FILE * 2,
42 .period = TCP_TIMEWAIT_LEN / INET_TWDR_TWKILL_SLOTS,
43 .death_lock = __SPIN_LOCK_UNLOCKED(tcp_death_row.death_lock),
44 .hashinfo = &tcp_hashinfo,
45 .tw_timer = TIMER_INITIALIZER(inet_twdr_hangman, 0,
46 (unsigned long)&tcp_death_row),
47 .twkill_work = __WORK_INITIALIZER(tcp_death_row.twkill_work,
48 inet_twdr_twkill_work),
48 inet_twdr_twkill_work,
49 &tcp_death_row),
49/* Short-time timewait calendar */
50
51 .twcal_hand = -1,
52 .twcal_timer = TIMER_INITIALIZER(inet_twdr_twcal_tick, 0,
53 (unsigned long)&tcp_death_row),
54};
55
56EXPORT_SYMBOL_GPL(tcp_death_row);

--- 243 unchanged lines hidden (view full) ---

300
301 tw->tw_ipv6_offset = inet6_tw_offset(sk->sk_prot);
302 tw6 = inet6_twsk((struct sock *)tw);
303 ipv6_addr_copy(&tw6->tw_v6_daddr, &np->daddr);
304 ipv6_addr_copy(&tw6->tw_v6_rcv_saddr, &np->rcv_saddr);
305 tw->tw_ipv6only = np->ipv6only;
306 }
307#endif
50/* Short-time timewait calendar */
51
52 .twcal_hand = -1,
53 .twcal_timer = TIMER_INITIALIZER(inet_twdr_twcal_tick, 0,
54 (unsigned long)&tcp_death_row),
55};
56
57EXPORT_SYMBOL_GPL(tcp_death_row);

--- 243 unchanged lines hidden (view full) ---

301
302 tw->tw_ipv6_offset = inet6_tw_offset(sk->sk_prot);
303 tw6 = inet6_twsk((struct sock *)tw);
304 ipv6_addr_copy(&tw6->tw_v6_daddr, &np->daddr);
305 ipv6_addr_copy(&tw6->tw_v6_rcv_saddr, &np->rcv_saddr);
306 tw->tw_ipv6only = np->ipv6only;
307 }
308#endif
309
310#ifdef CONFIG_TCP_MD5SIG
311 /*
312 * The timewait bucket does not have the key DB from the
313 * sock structure. We just make a quick copy of the
314 * md5 key being used (if indeed we are using one)
315 * so the timewait ack generating code has the key.
316 */
317 do {
318 struct tcp_md5sig_key *key;
319 memset(tcptw->tw_md5_key, 0, sizeof(tcptw->tw_md5_key));
320 tcptw->tw_md5_keylen = 0;
321 key = tp->af_specific->md5_lookup(sk, sk);
322 if (key != NULL) {
323 memcpy(&tcptw->tw_md5_key, key->key, key->keylen);
324 tcptw->tw_md5_keylen = key->keylen;
325 if (tcp_alloc_md5sig_pool() == NULL)
326 BUG();
327 }
328 } while(0);
329#endif
330
308 /* Linkage updates. */
309 __inet_twsk_hashdance(tw, sk, &tcp_hashinfo);
310
311 /* Get the TIME_WAIT timeout firing. */
312 if (timeo < rto)
313 timeo = rto;
314
315 if (recycle_ok) {

--- 15 unchanged lines hidden (view full) ---

331 if (net_ratelimit())
332 printk(KERN_INFO "TCP: time wait bucket table overflow\n");
333 }
334
335 tcp_update_metrics(sk);
336 tcp_done(sk);
337}
338
331 /* Linkage updates. */
332 __inet_twsk_hashdance(tw, sk, &tcp_hashinfo);
333
334 /* Get the TIME_WAIT timeout firing. */
335 if (timeo < rto)
336 timeo = rto;
337
338 if (recycle_ok) {

--- 15 unchanged lines hidden (view full) ---

354 if (net_ratelimit())
355 printk(KERN_INFO "TCP: time wait bucket table overflow\n");
356 }
357
358 tcp_update_metrics(sk);
359 tcp_done(sk);
360}
361
/*
 * Destructor for a TIME_WAIT socket.  If the timewait bucket carried a
 * copy of an MD5 signing key (CONFIG_TCP_MD5SIG), drop the reference we
 * took on the shared md5sig crypto pool when the key was copied in.
 */
void tcp_twsk_destructor(struct sock *sk)
{
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_timewait_sock *tw = tcp_twsk(sk);

	/* A non-zero key length means tcp_time_wait() copied a key and
	 * pinned the md5sig pool; release that pin here. */
	if (tw->tw_md5_keylen)
		tcp_put_md5sig_pool();
#endif
}
370
371EXPORT_SYMBOL_GPL(tcp_twsk_destructor);
372
339/* This is not only more efficient than what we used to do, it eliminates
340 * a lot of code duplication between IPv4/IPv6 SYN recv processing. -DaveM
341 *
342 * Actually, we could lots of memory writes here. tp of listening
343 * socket contains all necessary default parameters.
344 */
345struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req, struct sk_buff *skb)
346{

--- 82 unchanged lines hidden (view full) ---

429 if (newtp->rx_opt.tstamp_ok) {
430 newtp->rx_opt.ts_recent = req->ts_recent;
431 newtp->rx_opt.ts_recent_stamp = xtime.tv_sec;
432 newtp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
433 } else {
434 newtp->rx_opt.ts_recent_stamp = 0;
435 newtp->tcp_header_len = sizeof(struct tcphdr);
436 }
373/* This is not only more efficient than what we used to do, it eliminates
374 * a lot of code duplication between IPv4/IPv6 SYN recv processing. -DaveM
375 *
376 * Actually, we could lots of memory writes here. tp of listening
377 * socket contains all necessary default parameters.
378 */
379struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req, struct sk_buff *skb)
380{

--- 82 unchanged lines hidden (view full) ---

463 if (newtp->rx_opt.tstamp_ok) {
464 newtp->rx_opt.ts_recent = req->ts_recent;
465 newtp->rx_opt.ts_recent_stamp = xtime.tv_sec;
466 newtp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
467 } else {
468 newtp->rx_opt.ts_recent_stamp = 0;
469 newtp->tcp_header_len = sizeof(struct tcphdr);
470 }
471#ifdef CONFIG_TCP_MD5SIG
472 newtp->md5sig_info = NULL; /*XXX*/
473 if (newtp->af_specific->md5_lookup(sk, newsk))
474 newtp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
475#endif
437 if (skb->len >= TCP_MIN_RCVMSS+newtp->tcp_header_len)
438 newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
439 newtp->rx_opt.mss_clamp = req->mss;
440 TCP_ECN_openreq_child(newtp, req);
441
442 TCP_INC_STATS_BH(TCP_MIB_PASSIVEOPENS);
443 }
444 return newsk;

--- 166 unchanged lines hidden (view full) ---

611 * the tests. THIS SEGMENT MUST MOVE SOCKET TO
612 * ESTABLISHED STATE. If it will be dropped after
613 * socket is created, wait for troubles.
614 */
615 child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb,
616 req, NULL);
617 if (child == NULL)
618 goto listen_overflow;
476 if (skb->len >= TCP_MIN_RCVMSS+newtp->tcp_header_len)
477 newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
478 newtp->rx_opt.mss_clamp = req->mss;
479 TCP_ECN_openreq_child(newtp, req);
480
481 TCP_INC_STATS_BH(TCP_MIB_PASSIVEOPENS);
482 }
483 return newsk;

--- 166 unchanged lines hidden (view full) ---

650 * the tests. THIS SEGMENT MUST MOVE SOCKET TO
651 * ESTABLISHED STATE. If it will be dropped after
652 * socket is created, wait for troubles.
653 */
654 child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb,
655 req, NULL);
656 if (child == NULL)
657 goto listen_overflow;
658#ifdef CONFIG_TCP_MD5SIG
659 else {
660 /* Copy over the MD5 key from the original socket */
661 struct tcp_md5sig_key *key;
662 struct tcp_sock *tp = tcp_sk(sk);
663 key = tp->af_specific->md5_lookup(sk, child);
664 if (key != NULL) {
665 /*
666 * We're using one, so create a matching key on the
667 * newsk structure. If we fail to get memory then we
668 * end up not copying the key across. Shucks.
669 */
670 char *newkey = kmalloc(key->keylen, GFP_ATOMIC);
671 if (newkey) {
672 if (!tcp_alloc_md5sig_pool())
673 BUG();
674 memcpy(newkey, key->key, key->keylen);
675 tp->af_specific->md5_add(child, child,
676 newkey,
677 key->keylen);
678 }
679 }
680 }
681#endif
619
620 inet_csk_reqsk_queue_unlink(sk, req, prev);
621 inet_csk_reqsk_queue_removed(sk, req);
622
623 inet_csk_reqsk_queue_add(sk, req, child);
624 return child;
625
626 listen_overflow:
627 if (!sysctl_tcp_abort_on_overflow) {
628 inet_rsk(req)->acked = 1;
629 return NULL;
630 }
631
632 embryonic_reset:
633 NET_INC_STATS_BH(LINUX_MIB_EMBRYONICRSTS);
634 if (!(flg & TCP_FLAG_RST))
682
683 inet_csk_reqsk_queue_unlink(sk, req, prev);
684 inet_csk_reqsk_queue_removed(sk, req);
685
686 inet_csk_reqsk_queue_add(sk, req, child);
687 return child;
688
689 listen_overflow:
690 if (!sysctl_tcp_abort_on_overflow) {
691 inet_rsk(req)->acked = 1;
692 return NULL;
693 }
694
695 embryonic_reset:
696 NET_INC_STATS_BH(LINUX_MIB_EMBRYONICRSTS);
697 if (!(flg & TCP_FLAG_RST))
635 req->rsk_ops->send_reset(skb);
698 req->rsk_ops->send_reset(sk, skb);
636
637 inet_csk_reqsk_queue_drop(sk, req, prev);
638 return NULL;
639}
640
641/*
642 * Queue segment on the new socket if the new socket is active,
643 * otherwise we just shortcircuit this and continue with

--- 32 unchanged lines hidden ---
699
700 inet_csk_reqsk_queue_drop(sk, req, prev);
701 return NULL;
702}
703
704/*
705 * Queue segment on the new socket if the new socket is active,
706 * otherwise we just shortcircuit this and continue with

--- 32 unchanged lines hidden ---