// SPDX-License-Identifier: GPL-2.0-or-later
/* RxRPC Tx data buffering.
 *
 * Copyright (C) 2022 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include "ar-internal.h"

static atomic_t rxrpc_txbuf_debug_ids;
atomic_t rxrpc_nr_txbuf;

/*
 * Allocate and partially initialise a data transmission buffer.
 */
struct rxrpc_txbuf *rxrpc_alloc_data_txbuf(struct rxrpc_call *call, size_t data_size,
					   size_t data_align, gfp_t gfp)
{
	struct rxrpc_txbuf *txb;
	size_t total, doff, jsize = sizeof(struct rxrpc_jumbo_header);
	void *buf;

	txb = kzalloc(sizeof(*txb), gfp);
	if (!txb)
		return NULL;

	/* We put a jumbo header in the buffer, but not a full wire header to
	 * avoid delayed-corruption problems with zerocopy.
	 */
	doff = round_up(jsize, data_align);
	total = doff + data_size;

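	/* Carve the data buffer out of the connection's pagefrag allocator,
	 * bumping the alignment up to at least a cacheline.
	 */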
	data_align = umax(data_align, L1_CACHE_BYTES);
	mutex_lock(&call->conn->tx_data_alloc_lock);
	buf = page_frag_alloc_align(&call->conn->tx_data_alloc, total, gfp,
				    data_align);
	mutex_unlock(&call->conn->tx_data_alloc_lock);
	if (!buf) {
		kfree(txb);
		return NULL;
	}

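	/* Fill in the buffer descriptor, pointing txb->data just past the
	 * space reserved for the jumbo header.
	 */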
	refcount_set(&txb->ref, 1);
	txb->call_debug_id = call->debug_id;
	txb->debug_id = atomic_inc_return(&rxrpc_txbuf_debug_ids);
	txb->alloc_size = data_size;
	txb->space = data_size;
	txb->offset = 0;
	txb->flags = call->conn->out_clientflag;
	txb->seq = call->send_top + 1;
	txb->data = buf + doff;

	trace_rxrpc_txbuf(txb->debug_id, txb->call_debug_id, txb->seq, 1,
			  rxrpc_txbuf_alloc_data);

	atomic_inc(&rxrpc_nr_txbuf);
	return txb;
}

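/*
 * Get a reference on a transmission buffer, noting the new refcount in the
 * tracing log.
 */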
void rxrpc_get_txbuf(struct rxrpc_txbuf *txb, enum rxrpc_txbuf_trace what)
{
	int r;

	__refcount_inc(&txb->ref, &r);
	trace_rxrpc_txbuf(txb->debug_id, txb->call_debug_id, txb->seq, r + 1, what);
}

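/*
 * Note the current state of a transmission buffer in the tracing log without
 * changing its refcount.
 */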
void rxrpc_see_txbuf(struct rxrpc_txbuf *txb, enum rxrpc_txbuf_trace what)
{
	int r = refcount_read(&txb->ref);

	trace_rxrpc_txbuf(txb->debug_id, txb->call_debug_id, txb->seq, r, what);
}

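/*
 * Free a transmission buffer, releasing the page fragment that holds its data
 * and decrementing the global txbuf count.
 */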
static void rxrpc_free_txbuf(struct rxrpc_txbuf *txb)
{
	trace_rxrpc_txbuf(txb->debug_id, txb->call_debug_id, txb->seq, 0,
			  rxrpc_txbuf_free);
	if (txb->data)
		page_frag_free(txb->data);
	kfree(txb);
	atomic_dec(&rxrpc_nr_txbuf);
}

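/*
 * Drop a reference on a transmission buffer, freeing it if that was the last
 * one.  The identifying details are sampled before the refcount is dropped as
 * the buffer may be deallocated as soon as the count reaches zero.
 */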
void rxrpc_put_txbuf(struct rxrpc_txbuf *txb, enum rxrpc_txbuf_trace what)
{
	unsigned int debug_id, call_debug_id;
	rxrpc_seq_t seq;
	bool dead;
	int r;

	if (txb) {
		debug_id = txb->debug_id;
		call_debug_id = txb->call_debug_id;
		seq = txb->seq;
		dead = __refcount_dec_and_test(&txb->ref, &r);
		trace_rxrpc_txbuf(debug_id, call_debug_id, seq, r - 1, what);
		if (dead)
			rxrpc_free_txbuf(txb);
	}
}