/* net/rxrpc/skbuff.c: socket buffer destruction handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

#define select_skb_count(op) \
	((op) >= rxrpc_skb_tx_cleaned ? &rxrpc_n_tx_skbs : &rxrpc_n_rx_skbs)

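/*
 * Illustrative sketch (guarded out of the build): select_skb_count() picks
 * between the two global skb counters by trace op, which assumes that every
 * Tx-side value in enum rxrpc_skb_trace sorts at or after
 * rxrpc_skb_tx_cleaned.  The reporting helper below is hypothetical and only
 * shows how the counters it selects between can be read.
 */
#if 0
static void rxrpc_report_skb_counts(void)
{
	/* rxrpc_n_tx_skbs and rxrpc_n_rx_skbs are the counters that the
	 * helpers in this file increment and decrement. */
	pr_info("tx skbs: %d, rx skbs: %d\n",
		atomic_read(&rxrpc_n_tx_skbs),
		atomic_read(&rxrpc_n_rx_skbs));
}
#endif
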
/*
 * Note the allocation or reception of a socket buffer.
 */
void rxrpc_new_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
{
	const void *here = __builtin_return_address(0);
	int n = atomic_inc_return(select_skb_count(op));
	trace_rxrpc_skb(skb, op, atomic_read(&skb->users), n, here);
}

/*
 * Note the re-emergence of a socket buffer from a queue or buffer.
 */
void rxrpc_see_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
{
	const void *here = __builtin_return_address(0);
	if (skb) {
		int n = atomic_read(select_skb_count(op));
		trace_rxrpc_skb(skb, op, atomic_read(&skb->users), n, here);
	}
}

/*
 * Note the addition of a ref on a socket buffer.
 */
void rxrpc_get_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
{
	const void *here = __builtin_return_address(0);
	int n = atomic_inc_return(select_skb_count(op));
	trace_rxrpc_skb(skb, op, atomic_read(&skb->users), n, here);
	skb_get(skb);
}

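/*
 * Illustrative sketch (guarded out of the build): rxrpc_get_skb() takes an
 * extra reference on the skb and bumps the matching counter, so a caller
 * that parks the packet on another queue pairs it with rxrpc_free_skb()
 * later.  The function, queue and trace op chosen here are assumptions,
 * not taken from this file.
 */
#if 0
static void rxrpc_example_requeue(struct sk_buff *skb,
				  struct sk_buff_head *queue)
{
	/* Keep the packet alive and accounted while it sits on the queue. */
	rxrpc_get_skb(skb, rxrpc_skb_rx_got);
	skb_queue_tail(queue, skb);
}
#endif
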
/*
 * Note the destruction of a socket buffer.
 */
void rxrpc_free_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
{
	const void *here = __builtin_return_address(0);
	if (skb) {
		int n;
		CHECK_SLAB_OKAY(&skb->users);
		n = atomic_dec_return(select_skb_count(op));
		trace_rxrpc_skb(skb, op, atomic_read(&skb->users), n, here);
		kfree_skb(skb);
	}
}

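/*
 * Illustrative sketch (guarded out of the build): the intended pairing of
 * the helpers above.  A packet entering rxrpc accounting is noted with
 * rxrpc_new_skb() and handed back with rxrpc_free_skb(), which drops both
 * the reference and the counter.  The function and the trace op values are
 * assumptions for illustration.
 */
#if 0
static void rxrpc_example_input(struct sk_buff *skb)
{
	/* Account for the newly received packet. */
	rxrpc_new_skb(skb, rxrpc_skb_rx_received);

	/* ... process the packet ... */

	/* Release the reference and the accounting together. */
	rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
}
#endif
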
/*
 * Note the injected loss of a socket buffer.
 */
void rxrpc_lose_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
{
	const void *here = __builtin_return_address(0);
	if (skb) {
		int n;
		CHECK_SLAB_OKAY(&skb->users);
		n = atomic_dec_return(select_skb_count(op));
		trace_rxrpc_skb(skb, op, atomic_read(&skb->users), n, here);
		kfree_skb(skb);
	}
}

/*
 * Clear a queue of socket buffers.
 */
void rxrpc_purge_queue(struct sk_buff_head *list)
{
	const void *here = __builtin_return_address(0);
	struct sk_buff *skb;

	while ((skb = skb_dequeue(list)) != NULL) {
		int n = atomic_dec_return(select_skb_count(rxrpc_skb_rx_purged));
		trace_rxrpc_skb(skb, rxrpc_skb_rx_purged,
				atomic_read(&skb->users), n, here);
		kfree_skb(skb);
	}
}

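/*
 * Illustrative sketch (guarded out of the build): rxrpc_purge_queue() is
 * aimed at teardown paths that discard a whole sk_buff_head of pending
 * packets; each dequeued skb is traced as rxrpc_skb_rx_purged and freed.
 * The function and parameter names below are hypothetical.
 */
#if 0
static void rxrpc_example_teardown(struct sk_buff_head *pending)
{
	/* Drop every queued packet and its accounting in one pass. */
	rxrpc_purge_queue(pending);
}
#endif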