/*
 *  net/dccp/minisocks.c
 *
 *  An implementation of the DCCP protocol
 *  Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/dccp.h>
#include <linux/skbuff.h>
#include <linux/timer.h>

#include <net/sock.h>
#include <net/xfrm.h>
#include <net/inet_timewait_sock.h>

#include "ackvec.h"
#include "ccid.h"
#include "dccp.h"
#include "feat.h"

struct inet_timewait_death_row dccp_death_row = {
	.sysctl_max_tw_buckets = NR_FILE * 2,
	.period		= DCCP_TIMEWAIT_LEN / INET_TWDR_TWKILL_SLOTS,
	.death_lock	= SPIN_LOCK_UNLOCKED,
	.hashinfo	= &dccp_hashinfo,
	.tw_timer	= TIMER_INITIALIZER(inet_twdr_hangman, 0,
					    (unsigned long)&dccp_death_row),
	.twkill_work	= __WORK_INITIALIZER(dccp_death_row.twkill_work,
					     inet_twdr_twkill_work,
					     &dccp_death_row),
/* Short-time timewait calendar */

	.twcal_hand	= -1,
	.twcal_timer	= TIMER_INITIALIZER(inet_twdr_twcal_tick, 0,
					    (unsigned long)&dccp_death_row),
};

EXPORT_SYMBOL_GPL(dccp_death_row);

void dccp_time_wait(struct sock *sk, int state, int timeo)
{
	struct inet_timewait_sock *tw = NULL;

	if (dccp_death_row.tw_count < dccp_death_row.sysctl_max_tw_buckets)
		tw = inet_twsk_alloc(sk, state);

	if (tw != NULL) {
		const struct inet_connection_sock *icsk = inet_csk(sk);
		const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
		if (tw->tw_family == PF_INET6) {
			const struct ipv6_pinfo *np = inet6_sk(sk);
			struct inet6_timewait_sock *tw6;

			tw->tw_ipv6_offset = inet6_tw_offset(sk->sk_prot);
			tw6 = inet6_twsk((struct sock *)tw);
			ipv6_addr_copy(&tw6->tw_v6_daddr, &np->daddr);
			ipv6_addr_copy(&tw6->tw_v6_rcv_saddr, &np->rcv_saddr);
			tw->tw_ipv6only = np->ipv6only;
		}
#endif
		/* Linkage updates. */
		__inet_twsk_hashdance(tw, sk, &dccp_hashinfo);

		/* Get the TIME_WAIT timeout firing. */
		if (timeo < rto)
			timeo = rto;

		tw->tw_timeout = DCCP_TIMEWAIT_LEN;
		if (state == DCCP_TIME_WAIT)
			timeo = DCCP_TIMEWAIT_LEN;

		inet_twsk_schedule(tw, &dccp_death_row, timeo,
				   DCCP_TIMEWAIT_LEN);
		inet_twsk_put(tw);
	} else {
		/* Sorry, if we're out of memory, just CLOSE this
		 * socket up.  We've got bigger problems than
		 * non-graceful socket closings.
		 */
		LIMIT_NETDEBUG(KERN_INFO "DCCP: time wait bucket "
					 "table overflow\n");
	}

	dccp_done(sk);
}

struct sock *dccp_create_openreq_child(struct sock *sk,
				       const struct request_sock *req,
				       const struct sk_buff *skb)
{
	/*
	 * Step 3: Process LISTEN state
	 *
	 *   // Generate a new socket and switch to that socket
	 *   Set S := new socket for this port pair
	 */
	struct sock *newsk = inet_csk_clone(sk, req, GFP_ATOMIC);

	if (newsk != NULL) {
		const struct dccp_request_sock *dreq = dccp_rsk(req);
		struct inet_connection_sock *newicsk = inet_csk(newsk);
		struct dccp_sock *newdp = dccp_sk(newsk);
		struct dccp_minisock *newdmsk = dccp_msk(newsk);

		newdp->dccps_role	  = DCCP_ROLE_SERVER;
		newdp->dccps_hc_rx_ackvec = NULL;
		newdp->dccps_service_list = NULL;
		newdp->dccps_service	  = dreq->dreq_service;
		newicsk->icsk_rto	  = DCCP_TIMEOUT_INIT;
		do_gettimeofday(&newdp->dccps_epoch);

		if (dccp_feat_clone(sk, newsk))
			goto out_free;

		if (newdmsk->dccpms_send_ack_vector) {
			newdp->dccps_hc_rx_ackvec =
				dccp_ackvec_alloc(GFP_ATOMIC);
			if (unlikely(newdp->dccps_hc_rx_ackvec == NULL))
				goto out_free;
		}

		newdp->dccps_hc_rx_ccid =
			ccid_hc_rx_new(newdmsk->dccpms_rx_ccid,
				       newsk, GFP_ATOMIC);
		newdp->dccps_hc_tx_ccid =
			ccid_hc_tx_new(newdmsk->dccpms_tx_ccid,
				       newsk, GFP_ATOMIC);
		if (unlikely(newdp->dccps_hc_rx_ccid == NULL ||
			     newdp->dccps_hc_tx_ccid == NULL)) {
			dccp_ackvec_free(newdp->dccps_hc_rx_ackvec);
			ccid_hc_rx_delete(newdp->dccps_hc_rx_ccid, newsk);
			ccid_hc_tx_delete(newdp->dccps_hc_tx_ccid, newsk);
out_free:
			/* It is still a raw copy of the parent, so
			 * invalidate the destructor and do a plain
			 * sk_free() */
			newsk->sk_destruct = NULL;
			sk_free(newsk);
			return NULL;
		}

		/*
		 * Step 3: Process LISTEN state
		 *
		 *    Choose S.ISS (initial seqno) or set from Init Cookie
		 *    Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init
		 *    Cookie
		 */

		/* See dccp_v4_conn_request */
		newdmsk->dccpms_sequence_window = req->rcv_wnd;

		newdp->dccps_gar = newdp->dccps_isr = dreq->dreq_isr;
		dccp_update_gsr(newsk, dreq->dreq_isr);

		newdp->dccps_iss = dreq->dreq_iss;
		dccp_update_gss(newsk, dreq->dreq_iss);

		/*
		 * SWL and AWL are initially adjusted so that they are not
		 * less than the initial Sequence Numbers received and sent,
		 * respectively:
		 *	SWL := max(GSR + 1 - floor(W/4), ISR),
		 *	AWL := max(GSS - W' + 1, ISS).
		 * These adjustments MUST be applied only at the beginning of
		 * the connection.
		 */
		dccp_set_seqno(&newdp->dccps_swl,
			       max48(newdp->dccps_swl, newdp->dccps_isr));
		dccp_set_seqno(&newdp->dccps_awl,
			       max48(newdp->dccps_awl, newdp->dccps_iss));

		dccp_init_xmit_timers(newsk);

		DCCP_INC_STATS_BH(DCCP_MIB_PASSIVEOPENS);
	}
	return newsk;
}

EXPORT_SYMBOL_GPL(dccp_create_openreq_child);

/*
 * Process an incoming packet for RESPOND sockets represented
 * as a request_sock.
 */
struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb,
			    struct request_sock *req,
			    struct request_sock **prev)
{
	struct sock *child = NULL;

	/* Check for retransmitted REQUEST */
	if (dccp_hdr(skb)->dccph_type == DCCP_PKT_REQUEST) {
		if (after48(DCCP_SKB_CB(skb)->dccpd_seq,
			    dccp_rsk(req)->dreq_isr)) {
			struct dccp_request_sock *dreq = dccp_rsk(req);

			dccp_pr_debug("Retransmitted REQUEST\n");
			/* Send another RESPONSE packet */
			dccp_set_seqno(&dreq->dreq_iss, dreq->dreq_iss + 1);
			dccp_set_seqno(&dreq->dreq_isr,
				       DCCP_SKB_CB(skb)->dccpd_seq);
			req->rsk_ops->rtx_syn_ack(sk, req, NULL);
		}
		/* Network Duplicate, discard packet */
		return NULL;
	}

	DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_PACKET_ERROR;

	if (dccp_hdr(skb)->dccph_type != DCCP_PKT_ACK &&
	    dccp_hdr(skb)->dccph_type != DCCP_PKT_DATAACK)
		goto drop;

	/* Invalid ACK */
	if (DCCP_SKB_CB(skb)->dccpd_ack_seq != dccp_rsk(req)->dreq_iss) {
		dccp_pr_debug("Invalid ACK number: ack_seq=%llu, "
			      "dreq_iss=%llu\n",
			      (unsigned long long)
			      DCCP_SKB_CB(skb)->dccpd_ack_seq,
			      (unsigned long long)
			      dccp_rsk(req)->dreq_iss);
		goto drop;
	}

	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL);
	if (child == NULL)
		goto listen_overflow;

	/* FIXME: deal with options */

	inet_csk_reqsk_queue_unlink(sk, req, prev);
	inet_csk_reqsk_queue_removed(sk, req);
	inet_csk_reqsk_queue_add(sk, req, child);
out:
	return child;
listen_overflow:
	dccp_pr_debug("listen_overflow!\n");
	DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_TOO_BUSY;
drop:
	if (dccp_hdr(skb)->dccph_type != DCCP_PKT_RESET)
		req->rsk_ops->send_reset(skb);

	inet_csk_reqsk_queue_drop(sk, req, prev);
	goto out;
}

EXPORT_SYMBOL_GPL(dccp_check_req);

/*
 * Queue the segment on the new socket if the new socket is active,
 * otherwise we just short-circuit this and continue with
 * the new socket.
 */
int dccp_child_process(struct sock *parent, struct sock *child,
		       struct sk_buff *skb)
{
	int ret = 0;
	const int state = child->sk_state;

	if (!sock_owned_by_user(child)) {
		ret = dccp_rcv_state_process(child, skb, dccp_hdr(skb),
					     skb->len);

		/* Wake up the parent, send SIGIO */
		if (state == DCCP_RESPOND && child->sk_state != state)
			parent->sk_data_ready(parent, 0);
	} else {
		/* Alas, it is possible again, because we do a lookup
		 * in the main socket hash table and the lock on the
		 * listening socket does not protect us any more.
		 */
		sk_add_backlog(child, skb);
	}

	bh_unlock_sock(child);
	sock_put(child);
	return ret;
}

EXPORT_SYMBOL_GPL(dccp_child_process);