/*
 *  net/dccp/packet_history.c
 *
 *  Copyright (c) 2007   The University of Aberdeen, Scotland, UK
 *  Copyright (c) 2005-7 The University of Waikato, Hamilton, New Zealand.
 *
 *  An implementation of the DCCP protocol
 *
 *  This code has been developed by the University of Waikato WAND
 *  research group. For further information please see http://www.wand.net.nz/
 *  or e-mail Ian McDonald - ian.mcdonald@jandi.co.nz
 *
 *  This code also uses code from Lulea University, rereleased as GPL by its
 *  authors:
 *  Copyright (c) 2003 Nils-Erik Mattsson, Joacim Haggmark, Magnus Erixzon
 *
 *  Changes to meet Linux coding standards, to make it meet latest ccid3 draft
 *  and to make it work as a loadable module in the DCCP stack written by
 *  Arnaldo Carvalho de Melo <acme@conectiva.com.br>.
 *
 *  Copyright (c) 2005 Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/string.h>
#include <linux/slab.h>
#include "packet_history.h"
#include "../../dccp.h"

/**
 * tfrc_tx_hist_entry - Simple singly-linked TX history list
 * @next:  next oldest entry (LIFO order)
 * @seqno: sequence number of this entry
 * @stamp: send time of packet with sequence number @seqno
 */
struct tfrc_tx_hist_entry {
        struct tfrc_tx_hist_entry *next;
        u64                       seqno;
        ktime_t                   stamp;
};

/*
 * Transmitter History Routines
 */
static struct kmem_cache *tfrc_tx_hist_slab;

int __init tfrc_tx_packet_history_init(void)
{
        tfrc_tx_hist_slab = kmem_cache_create("tfrc_tx_hist",
                                              sizeof(struct tfrc_tx_hist_entry),
                                              0, SLAB_HWCACHE_ALIGN, NULL);
        return tfrc_tx_hist_slab == NULL ? -ENOBUFS : 0;
}

void tfrc_tx_packet_history_exit(void)
{
        if (tfrc_tx_hist_slab != NULL) {
                kmem_cache_destroy(tfrc_tx_hist_slab);
                tfrc_tx_hist_slab = NULL;
        }
}

static struct tfrc_tx_hist_entry *
        tfrc_tx_hist_find_entry(struct tfrc_tx_hist_entry *head, u64 seqno)
{
        while (head != NULL && head->seqno != seqno)
                head = head->next;

        return head;
}

int tfrc_tx_hist_add(struct tfrc_tx_hist_entry **headp, u64 seqno)
{
        struct tfrc_tx_hist_entry *entry = kmem_cache_alloc(tfrc_tx_hist_slab,
                                                            gfp_any());

        if (entry == NULL)
                return -ENOBUFS;
        entry->seqno = seqno;
        entry->stamp = ktime_get_real();
        entry->next  = *headp;
        *headp       = entry;
        return 0;
}
EXPORT_SYMBOL_GPL(tfrc_tx_hist_add);

void tfrc_tx_hist_purge(struct tfrc_tx_hist_entry **headp)
{
        struct tfrc_tx_hist_entry *head = *headp;

        while (head != NULL) {
                struct tfrc_tx_hist_entry *next = head->next;

                kmem_cache_free(tfrc_tx_hist_slab, head);
                head = next;
        }

        *headp = NULL;
}
EXPORT_SYMBOL_GPL(tfrc_tx_hist_purge);

u32 tfrc_tx_hist_rtt(struct tfrc_tx_hist_entry *head, const u64 seqno,
                     const ktime_t now)
{
        u32 rtt = 0;
        struct tfrc_tx_hist_entry *packet = tfrc_tx_hist_find_entry(head, seqno);

        if (packet != NULL) {
                rtt = ktime_us_delta(now, packet->stamp);
                /*
                 * Garbage-collect older (irrelevant) entries:
                 */
                tfrc_tx_hist_purge(&packet->next);
        }

        return rtt;
}
EXPORT_SYMBOL_GPL(tfrc_tx_hist_rtt);
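
/*
 * Usage sketch (illustrative only, not compiled): a sender such as CCID-3
 * records each packet at transmit time and later converts the sequence
 * number echoed by an incoming Acknowledgement into an RTT sample. All
 * names except the tfrc_tx_hist_*() calls below are hypothetical.
 */
#if 0
static u32 example_tx_rtt_sample(struct tfrc_tx_hist_entry **hist,
                                 u64 seqno_sent, u64 ackno)
{
        /* at transmit time: remember seqno together with the send stamp */
        if (tfrc_tx_hist_add(hist, seqno_sent) != 0)
                return 0;                       /* -ENOBUFS: no sample */

        /* ...later, when the Ack carrying `ackno' arrives: */
        return tfrc_tx_hist_rtt(*hist, ackno, ktime_get_real());
}
#endif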

/*
 * Receiver History Routines
 */
static struct kmem_cache *tfrc_rx_hist_slab;

int __init tfrc_rx_packet_history_init(void)
{
        tfrc_rx_hist_slab = kmem_cache_create("tfrc_rxh_cache",
                                              sizeof(struct tfrc_rx_hist_entry),
                                              0, SLAB_HWCACHE_ALIGN, NULL);
        return tfrc_rx_hist_slab == NULL ? -ENOBUFS : 0;
}

void tfrc_rx_packet_history_exit(void)
{
        if (tfrc_rx_hist_slab != NULL) {
                kmem_cache_destroy(tfrc_rx_hist_slab);
                tfrc_rx_hist_slab = NULL;
        }
}

static inline void tfrc_rx_hist_entry_from_skb(struct tfrc_rx_hist_entry *entry,
                                               const struct sk_buff *skb,
                                               const u64 ndp)
{
        const struct dccp_hdr *dh = dccp_hdr(skb);

        entry->tfrchrx_seqno  = DCCP_SKB_CB(skb)->dccpd_seq;
        entry->tfrchrx_ccval  = dh->dccph_ccval;
        entry->tfrchrx_type   = dh->dccph_type;
        entry->tfrchrx_ndp    = ndp;
        entry->tfrchrx_tstamp = ktime_get_real();
}

void tfrc_rx_hist_add_packet(struct tfrc_rx_hist *h,
                             const struct sk_buff *skb,
                             const u64 ndp)
{
        struct tfrc_rx_hist_entry *entry = tfrc_rx_hist_last_rcv(h);

        tfrc_rx_hist_entry_from_skb(entry, skb, ndp);
}
EXPORT_SYMBOL_GPL(tfrc_rx_hist_add_packet);

/* has the packet contained in skb been seen before? */
int tfrc_rx_hist_duplicate(struct tfrc_rx_hist *h, struct sk_buff *skb)
{
        const u64 seq = DCCP_SKB_CB(skb)->dccpd_seq;
        int i;

        if (dccp_delta_seqno(tfrc_rx_hist_loss_prev(h)->tfrchrx_seqno, seq) <= 0)
                return 1;

        for (i = 1; i <= h->loss_count; i++)
                if (tfrc_rx_hist_entry(h, i)->tfrchrx_seqno == seq)
                        return 1;

        return 0;
}
EXPORT_SYMBOL_GPL(tfrc_rx_hist_duplicate);
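
/*
 * Note on indexing (helpers assumed from packet_history.h): the RX history
 * is a ring of TFRC_NDUPACK + 1 = 4 entries. Entry 0 relative to
 * h->loss_start is `loss_prev', the last packet known to have been received
 * in sequence; entries 1..3 buffer packets that arrived after a hole was
 * detected. The index arithmetic is of the form
 *
 *      tfrc_rx_hist_index(h, n) == (h->loss_start + n) & TFRC_NDUPACK
 *
 * so that advancing loss_start recycles old slots instead of copying entries.
 */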

static void tfrc_rx_hist_swap(struct tfrc_rx_hist *h, const u8 a, const u8 b)
{
        const u8 idx_a = tfrc_rx_hist_index(h, a),
                 idx_b = tfrc_rx_hist_index(h, b);
        struct tfrc_rx_hist_entry *tmp = h->ring[idx_a];

        h->ring[idx_a] = h->ring[idx_b];
        h->ring[idx_b] = tmp;
}

/*
 * Private helper functions for loss detection.
 *
 * In the descriptions, `Si' refers to the sequence number of entry number i,
 * whose NDP count is `Ni' (lower case is used for variables).
 * Note: All __xxx_loss functions expect that a test against duplicates has
 *       been performed already: the seqno of the skb must not be less than
 *       the seqno of loss_prev; and it must not equal that of any valid
 *       history entry.
 */
static void __do_track_loss(struct tfrc_rx_hist *h, struct sk_buff *skb, u64 n1)
{
        u64 s0 = tfrc_rx_hist_loss_prev(h)->tfrchrx_seqno,
            s1 = DCCP_SKB_CB(skb)->dccpd_seq;

        if (!dccp_loss_free(s0, s1, n1)) {      /* gap between S0 and S1 */
                h->loss_count = 1;
                tfrc_rx_hist_entry_from_skb(tfrc_rx_hist_entry(h, 1), skb, n1);
        }
}

static void __one_after_loss(struct tfrc_rx_hist *h, struct sk_buff *skb, u64 n2)
{
        u64 s0 = tfrc_rx_hist_loss_prev(h)->tfrchrx_seqno,
            s1 = tfrc_rx_hist_entry(h, 1)->tfrchrx_seqno,
            s2 = DCCP_SKB_CB(skb)->dccpd_seq;

        if (likely(dccp_delta_seqno(s1, s2) > 0)) {     /* S1 < S2 */
                h->loss_count = 2;
                tfrc_rx_hist_entry_from_skb(tfrc_rx_hist_entry(h, 2), skb, n2);
                return;
        }

        /* S0 < S2 < S1 */

        if (dccp_loss_free(s0, s2, n2)) {
                u64 n1 = tfrc_rx_hist_entry(h, 1)->tfrchrx_ndp;

                if (dccp_loss_free(s2, s1, n1)) {
                        /* hole is filled: S0, S2, and S1 are consecutive */
                        h->loss_count = 0;
                        h->loss_start = tfrc_rx_hist_index(h, 1);
                } else
                        /* gap between S2 and S1: just update loss_prev */
                        tfrc_rx_hist_entry_from_skb(tfrc_rx_hist_loss_prev(h),
                                                    skb, n2);

        } else {        /* gap between S0 and S2 */
                /*
                 * Reorder history to insert S2 between S0 and S1
                 */
                tfrc_rx_hist_swap(h, 0, 3);
                h->loss_start = tfrc_rx_hist_index(h, 3);
                tfrc_rx_hist_entry_from_skb(tfrc_rx_hist_entry(h, 1), skb, n2);
                h->loss_count = 2;
        }
}
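
/*
 * Worked example (illustrative): suppose loss_prev holds S0 = 10 and packets
 * 11 and 12 are lost. When 13 arrives with n1 = 0, dccp_loss_free(10, 13, 0)
 * fails, so __do_track_loss stores S1 = 13 and sets loss_count = 1. If 11
 * arrives next (S0 < 11 < S1, ndp = 0), __one_after_loss sees that 11
 * directly follows S0 but that a gap to S1 = 13 remains (12 is still
 * missing); it therefore only advances loss_prev to 11, and loss_count
 * stays at 1.
 */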

/* return 1 if a new loss event has been identified */
static int __two_after_loss(struct tfrc_rx_hist *h, struct sk_buff *skb, u64 n3)
{
        u64 s0 = tfrc_rx_hist_loss_prev(h)->tfrchrx_seqno,
            s1 = tfrc_rx_hist_entry(h, 1)->tfrchrx_seqno,
            s2 = tfrc_rx_hist_entry(h, 2)->tfrchrx_seqno,
            s3 = DCCP_SKB_CB(skb)->dccpd_seq;

        if (likely(dccp_delta_seqno(s2, s3) > 0)) {     /* S2 < S3 */
                h->loss_count = 3;
                tfrc_rx_hist_entry_from_skb(tfrc_rx_hist_entry(h, 3), skb, n3);
                return 1;
        }

        /* S3 < S2 */

        if (dccp_delta_seqno(s1, s3) > 0) {             /* S1 < S3 < S2 */
                /*
                 * Reorder history to insert S3 between S1 and S2
                 */
                tfrc_rx_hist_swap(h, 2, 3);
                tfrc_rx_hist_entry_from_skb(tfrc_rx_hist_entry(h, 2), skb, n3);
                h->loss_count = 3;
                return 1;
        }

        /* S0 < S3 < S1 */

        if (dccp_loss_free(s0, s3, n3)) {
                u64 n1 = tfrc_rx_hist_entry(h, 1)->tfrchrx_ndp;

                if (dccp_loss_free(s3, s1, n1)) {
                        /* hole between S0 and S1 filled by S3 */
                        u64 n2 = tfrc_rx_hist_entry(h, 2)->tfrchrx_ndp;

                        if (dccp_loss_free(s1, s2, n2)) {
                                /* entire hole filled by S0, S3, S1, S2 */
                                h->loss_start = tfrc_rx_hist_index(h, 2);
                                h->loss_count = 0;
                        } else {
                                /* gap remains between S1 and S2 */
                                h->loss_start = tfrc_rx_hist_index(h, 1);
                                h->loss_count = 1;
                        }

                } else  /* gap exists between S3 and S1, loss_count stays at 2 */
                        tfrc_rx_hist_entry_from_skb(tfrc_rx_hist_loss_prev(h),
                                                    skb, n3);

                return 0;
        }

        /*
         * The remaining case: S0 < S3 < S1 < S2; gap between S0 and S3.
         * Reorder history to insert S3 between S0 and S1.
         */
        tfrc_rx_hist_swap(h, 0, 3);
        h->loss_start = tfrc_rx_hist_index(h, 3);
        tfrc_rx_hist_entry_from_skb(tfrc_rx_hist_entry(h, 1), skb, n3);
        h->loss_count = 3;

        return 1;
}

/* recycle RX history records to continue loss detection if necessary */
static void __three_after_loss(struct tfrc_rx_hist *h)
{
        /*
         * At this stage we know already that there is a gap between S0 and S1
         * (since S0 was the highest sequence number received before detecting
         * the loss). To recycle the loss record, it is thus only necessary to
         * check for other possible gaps between S1/S2 and between S2/S3.
         */
        u64 s1 = tfrc_rx_hist_entry(h, 1)->tfrchrx_seqno,
            s2 = tfrc_rx_hist_entry(h, 2)->tfrchrx_seqno,
            s3 = tfrc_rx_hist_entry(h, 3)->tfrchrx_seqno;
        u64 n2 = tfrc_rx_hist_entry(h, 2)->tfrchrx_ndp,
            n3 = tfrc_rx_hist_entry(h, 3)->tfrchrx_ndp;

        if (dccp_loss_free(s1, s2, n2)) {

                if (dccp_loss_free(s2, s3, n3)) {
                        /* no gap between S2 and S3: entire hole is filled */
                        h->loss_start = tfrc_rx_hist_index(h, 3);
                        h->loss_count = 0;
                } else {
                        /* gap between S2 and S3 */
                        h->loss_start = tfrc_rx_hist_index(h, 2);
                        h->loss_count = 1;
                }

        } else {        /* gap between S1 and S2 */
                h->loss_start = tfrc_rx_hist_index(h, 1);
                h->loss_count = 2;
        }
}
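
/*
 * Summary of the state machine above (for orientation, cf. RFC 4342): a hole
 * in the sequence space only becomes a loss event once TFRC_NDUPACK = 3
 * packets with higher sequence numbers have been received, mirroring TCP's
 * three-duplicate-ACK rule. Hence loss_count climbs 0 -> 1 -> 2 -> 3 via the
 * helpers above; at 3, tfrc_rx_handle_loss() below updates the loss-interval
 * database and __three_after_loss() recycles the records so that detection
 * can continue seamlessly.
 */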

/**
 * tfrc_rx_handle_loss - Loss detection and further processing
 * @h:   The non-empty RX history object
 * @lh:  Loss Intervals database to update
 * @skb: Currently received packet
 * @ndp: The NDP count belonging to @skb
 * @calc_first_li: Caller-dependent computation of first loss interval in @lh
 * @sk:  Used by @calc_first_li (see tfrc_lh_interval_add)
 *
 * Chooses action according to pending loss, updates LI database when a new
 * loss was detected, and does required post-processing. Returns 1 when caller
 * should send feedback, 0 otherwise.
 * Since it also takes care of reordering during loss detection and updates the
 * records accordingly, the caller should not perform any more RX history
 * operations when loss_count is greater than 0 after calling this function.
 */
int tfrc_rx_handle_loss(struct tfrc_rx_hist *h,
                        struct tfrc_loss_hist *lh,
                        struct sk_buff *skb, const u64 ndp,
                        u32 (*calc_first_li)(struct sock *), struct sock *sk)
{
        int is_new_loss = 0;

        if (h->loss_count == 0) {
                __do_track_loss(h, skb, ndp);
        } else if (h->loss_count == 1) {
                __one_after_loss(h, skb, ndp);
        } else if (h->loss_count != 2) {
                DCCP_BUG("invalid loss_count %d", h->loss_count);
        } else if (__two_after_loss(h, skb, ndp)) {
                /*
                 * Update Loss Interval database and recycle RX records
                 */
                is_new_loss = tfrc_lh_interval_add(lh, h, calc_first_li, sk);
                __three_after_loss(h);
        }
        return is_new_loss;
}
EXPORT_SYMBOL_GPL(tfrc_rx_handle_loss);

int tfrc_rx_hist_alloc(struct tfrc_rx_hist *h)
{
        int i;

        for (i = 0; i <= TFRC_NDUPACK; i++) {
                h->ring[i] = kmem_cache_alloc(tfrc_rx_hist_slab, GFP_ATOMIC);
                if (h->ring[i] == NULL)
                        goto out_free;
        }

        h->loss_count = h->loss_start = 0;
        return 0;

out_free:
        while (i-- != 0) {
                kmem_cache_free(tfrc_rx_hist_slab, h->ring[i]);
                h->ring[i] = NULL;
        }
        return -ENOBUFS;
}
EXPORT_SYMBOL_GPL(tfrc_rx_hist_alloc);

void tfrc_rx_hist_purge(struct tfrc_rx_hist *h)
{
        int i;

        for (i = 0; i <= TFRC_NDUPACK; ++i)
                if (h->ring[i] != NULL) {
                        kmem_cache_free(tfrc_rx_hist_slab, h->ring[i]);
                        h->ring[i] = NULL;
                }
}
EXPORT_SYMBOL_GPL(tfrc_rx_hist_purge);

/**
 * tfrc_rx_hist_rtt_last_s - reference entry to compute RTT samples against
 */
static inline struct tfrc_rx_hist_entry *
                        tfrc_rx_hist_rtt_last_s(const struct tfrc_rx_hist *h)
{
        return h->ring[0];
}

/**
 * tfrc_rx_hist_rtt_prev_s - previously suitable (wrt rtt_last_s) RTT-sampling entry
 */
static inline struct tfrc_rx_hist_entry *
                        tfrc_rx_hist_rtt_prev_s(const struct tfrc_rx_hist *h)
{
        return h->ring[h->rtt_sample_prev];
}
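
/*
 * Background for the sampling below (cf. RFC 4342, 8.1): the sender advances
 * its window counter (CCVal) every quarter of an RTT. A packet whose CCVal
 * exceeds that of the reference entry by exactly 4 was therefore sent about
 * one RTT later, so the difference of the two receive timestamps estimates
 * the RTT directly. For a smaller delta d in 1..3, the time difference spans
 * only roughly d/4 of an RTT, which is why a stored candidate is scaled by
 * 4/d. Worked example: delta CCVal = 2 and 18 ms between the two receptions
 * give an RTT estimate of 4/2 * 18 ms = 36 ms.
 */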
471 */ 472 DCCP_BUG("please report to dccp@vger.kernel.org" 473 " => prev = %u, last = %u", 474 tfrc_rx_hist_rtt_prev_s(h)->tfrchrx_ccval, 475 tfrc_rx_hist_rtt_last_s(h)->tfrchrx_ccval); 476 } else if (delta_v < 1) { 477 h->rtt_sample_prev = 1; 478 goto keep_ref_for_next_time; 479 } 480 481 } else if (delta_v == 4) /* optimal match */ 482 sample = ktime_to_us(net_timedelta(tfrc_rx_hist_rtt_last_s(h)->tfrchrx_tstamp)); 483 else { /* suboptimal match */ 484 h->rtt_sample_prev = 2; 485 goto keep_ref_for_next_time; 486 } 487 488 if (unlikely(sample > DCCP_SANE_RTT_MAX)) { 489 DCCP_WARN("RTT sample %u too large, using max\n", sample); 490 sample = DCCP_SANE_RTT_MAX; 491 } 492 493 h->rtt_sample_prev = 0; /* use current entry as next reference */ 494 keep_ref_for_next_time: 495 496 return sample; 497 } 498 EXPORT_SYMBOL_GPL(tfrc_rx_hist_sample_rtt); 499