/* xref: /linux/net/rds/tcp_recv.c (revision 4b132aacb0768ac1e652cf517097ea6f237214b9) */
/*
 * Copyright (c) 2006, 2017 Oracle and/or its affiliates. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <net/tcp.h>
#include <trace/events/sock.h>

#include "rds.h"
#include "tcp.h"

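/* Each inbound message is assembled in one of these slab-allocated tincs. */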
static struct kmem_cache *rds_tcp_incoming_slab;

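/* Drop any payload skbs still queued on an incoming message. */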
static void rds_tcp_inc_purge(struct rds_incoming *inc)
{
	struct rds_tcp_incoming *tinc;

	tinc = container_of(inc, struct rds_tcp_incoming, ti_inc);
	rdsdebug("purging tinc %p inc %p\n", tinc, inc);
	skb_queue_purge(&tinc->ti_skb_list);
}

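/* Release an incoming message once its last reference has been dropped. */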
void rds_tcp_inc_free(struct rds_incoming *inc)
{
	struct rds_tcp_incoming *tinc;

	tinc = container_of(inc, struct rds_tcp_incoming, ti_inc);
	rds_tcp_inc_purge(inc);
	rdsdebug("freeing tinc %p inc %p\n", tinc, inc);
	kmem_cache_free(rds_tcp_incoming_slab, tinc);
}

/*
 * Copy the message payload out to userspace.  This walks the queued
 * skbs in order, copying each in chunks until either the iov fills up
 * or the data runs out.  Not the fastest scheme, but it keeps the
 * receive path simple.
 */
int rds_tcp_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to)
{
	struct rds_tcp_incoming *tinc;
	struct sk_buff *skb;
	int ret = 0;

	if (!iov_iter_count(to))
		goto out;

	tinc = container_of(inc, struct rds_tcp_incoming, ti_inc);

	skb_queue_walk(&tinc->ti_skb_list, skb) {
		unsigned long to_copy, skb_off;

		for (skb_off = 0; skb_off < skb->len; skb_off += to_copy) {
			to_copy = iov_iter_count(to);
			to_copy = min(to_copy, skb->len - skb_off);

			if (skb_copy_datagram_iter(skb, skb_off, to, to_copy))
				return -EFAULT;

			rds_stats_add(s_copy_to_user, to_copy);
			ret += to_copy;

			if (!iov_iter_count(to))
				goto out;
		}
	}
out:
	return ret;
}

/*
 * We have a series of skbs that have fragmented pieces of the congestion
 * bitmap.  They must add up to the exact size of the congestion bitmap.  We
 * use the skb helpers to copy those into the pages that make up the in-memory
 * congestion bitmap for the remote address of this connection.  We then tell
 * the congestion core that the bitmap has been changed so that it can wake up
 * sleepers.
 *
 * This is racing with sending paths which are using test_bit to see if the
 * bitmap indicates that their recipient is congested.
 */
static void rds_tcp_cong_recv(struct rds_connection *conn,
			      struct rds_tcp_incoming *tinc)
{
	struct sk_buff *skb;
	unsigned int to_copy, skb_off;
	unsigned int map_off;
	unsigned int map_page;
	struct rds_cong_map *map;
	int ret;

	/* catch completely corrupt packets */
	if (be32_to_cpu(tinc->ti_inc.i_hdr.h_len) != RDS_CONG_MAP_BYTES)
		return;

	map_page = 0;
	map_off = 0;
	map = conn->c_fcong;

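	/* Walk the fragments in order, filling the map pages as we go. */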
	skb_queue_walk(&tinc->ti_skb_list, skb) {
		skb_off = 0;
		while (skb_off < skb->len) {
			to_copy = min_t(unsigned int, PAGE_SIZE - map_off,
					skb->len - skb_off);

			BUG_ON(map_page >= RDS_CONG_MAP_PAGES);

			/* only returns 0 or -error */
			ret = skb_copy_bits(skb, skb_off,
				(void *)map->m_page_addrs[map_page] + map_off,
				to_copy);
			BUG_ON(ret != 0);

			skb_off += to_copy;
			map_off += to_copy;
			if (map_off == PAGE_SIZE) {
				map_off = 0;
				map_page++;
			}
		}
	}

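	/* Every portion may have changed; wake any congestion sleepers. */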
	rds_cong_map_updated(map, ~(u64) 0);
}

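/* Argument bundle passed through tcp_read_sock() to rds_tcp_data_recv(). */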
struct rds_tcp_desc_arg {
	struct rds_conn_path *conn_path;
	gfp_t gfp;
};

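/*
 * tcp_read_sock() actor: carve RDS messages out of the TCP byte stream.
 * t_tinc_hdr_rem and t_tinc_data_rem persist across calls, so a message
 * split across socket reads is reassembled incrementally.
 */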
static int rds_tcp_data_recv(read_descriptor_t *desc, struct sk_buff *skb,
			     unsigned int offset, size_t len)
{
	struct rds_tcp_desc_arg *arg = desc->arg.data;
	struct rds_conn_path *cp = arg->conn_path;
	struct rds_tcp_connection *tc = cp->cp_transport_data;
	struct rds_tcp_incoming *tinc = tc->t_tinc;
	struct sk_buff *clone;
	size_t left = len, to_copy;

	rdsdebug("tcp data tc %p skb %p offset %u len %zu\n", tc, skb, offset,
		 len);

	/*
	 * tcp_read_sock() interprets partial progress as an indication to stop
	 * processing.
	 */
	while (left) {
		if (!tinc) {
			tinc = kmem_cache_alloc(rds_tcp_incoming_slab,
						arg->gfp);
			if (!tinc) {
				desc->error = -ENOMEM;
				goto out;
			}
			tc->t_tinc = tinc;
			rdsdebug("allocated tinc %p\n", tinc);
			rds_inc_path_init(&tinc->ti_inc, cp,
					  &cp->cp_conn->c_faddr);
			tinc->ti_inc.i_rx_lat_trace[RDS_MSG_RX_HDR] =
					local_clock();

			/*
			 * XXX we might be able to use the __ variants when
			 * we've already serialized at a higher level.
			 */
			skb_queue_head_init(&tinc->ti_skb_list);
		}

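		/* First consume any bytes still owed to the message header. */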
		if (left && tc->t_tinc_hdr_rem) {
			to_copy = min(tc->t_tinc_hdr_rem, left);
			rdsdebug("copying %zu header from skb %p\n", to_copy,
				 skb);
			skb_copy_bits(skb, offset,
				      (char *)&tinc->ti_inc.i_hdr +
						sizeof(struct rds_header) -
						tc->t_tinc_hdr_rem,
				      to_copy);
			tc->t_tinc_hdr_rem -= to_copy;
			left -= to_copy;
			offset += to_copy;

			if (tc->t_tinc_hdr_rem == 0) {
				/* could be 0 for a 0 len message */
				tc->t_tinc_data_rem =
					be32_to_cpu(tinc->ti_inc.i_hdr.h_len);
				tinc->ti_inc.i_rx_lat_trace[RDS_MSG_RX_START] =
					local_clock();
			}
		}

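		/* Payload: carve this chunk out of the skb as its own clone. */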
		if (left && tc->t_tinc_data_rem) {
			to_copy = min(tc->t_tinc_data_rem, left);

			clone = pskb_extract(skb, offset, to_copy, arg->gfp);
			if (!clone) {
				desc->error = -ENOMEM;
				goto out;
			}

			skb_queue_tail(&tinc->ti_skb_list, clone);

			rdsdebug("skb %p data %p len %d off %u to_copy %zu -> "
				 "clone %p data %p len %d\n",
				 skb, skb->data, skb->len, offset, to_copy,
				 clone, clone->data, clone->len);

			tc->t_tinc_data_rem -= to_copy;
			left -= to_copy;
			offset += to_copy;
		}

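		/*
		 * Header and payload complete: deliver the message.  The
		 * congestion bitmap is consumed here; everything else goes
		 * up through the generic receive path.
		 */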
		if (tc->t_tinc_hdr_rem == 0 && tc->t_tinc_data_rem == 0) {
			struct rds_connection *conn = cp->cp_conn;

			if (tinc->ti_inc.i_hdr.h_flags == RDS_FLAG_CONG_BITMAP)
				rds_tcp_cong_recv(conn, tinc);
			else
				rds_recv_incoming(conn, &conn->c_faddr,
						  &conn->c_laddr,
						  &tinc->ti_inc,
						  arg->gfp);

			tc->t_tinc_hdr_rem = sizeof(struct rds_header);
			tc->t_tinc_data_rem = 0;
			tc->t_tinc = NULL;
			rds_inc_put(&tinc->ti_inc);
			tinc = NULL;
		}
	}
out:
	rdsdebug("returning len %zu left %zu skb len %d rx queue depth %d\n",
		 len, left, skb->len,
		 skb_queue_len(&tc->t_sock->sk->sk_receive_queue));
	return len - left;
}

/* the caller has to hold the sock lock */
static int rds_tcp_read_sock(struct rds_conn_path *cp, gfp_t gfp)
{
	struct rds_tcp_connection *tc = cp->cp_transport_data;
	struct socket *sock = tc->t_sock;
	read_descriptor_t desc;
	struct rds_tcp_desc_arg arg;

	/* It's like glib in the kernel! */
	arg.conn_path = cp;
	arg.gfp = gfp;
	desc.arg.data = &arg;
	desc.error = 0;
	/* nonzero: let tcp_read_sock() feed us more than one skb per call */
	desc.count = 1;

	tcp_read_sock(sock->sk, &desc, rds_tcp_data_recv);
	rdsdebug("tcp_read_sock for tc %p gfp 0x%x returned %d\n", tc, gfp,
		 desc.error);

	return desc.error;
}

/*
 * We hold the sock lock to serialize our rds_tcp_recv->tcp_read_sock from
 * data_ready.
 *
 * If we fail to allocate we're in trouble: blindly wait a while before
 * trying again, in the hope that the VM can free something up for us.
 */
int rds_tcp_recv_path(struct rds_conn_path *cp)
{
	struct rds_tcp_connection *tc = cp->cp_transport_data;
	struct socket *sock = tc->t_sock;
	int ret = 0;

	rdsdebug("recv worker path [%d] tc %p sock %p\n",
		 cp->cp_index, tc, sock);

	lock_sock(sock->sk);
	ret = rds_tcp_read_sock(cp, GFP_KERNEL);
	release_sock(sock->sk);

	return ret;
}

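/*
 * Our sk_data_ready callback.  We read with GFP_ATOMIC since we may be in
 * softirq context; on -ENOMEM the read is retried from the recv worker,
 * which can use GFP_KERNEL (see rds_tcp_recv_path() above).
 */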
void rds_tcp_data_ready(struct sock *sk)
{
	void (*ready)(struct sock *sk);
	struct rds_conn_path *cp;
	struct rds_tcp_connection *tc;

	trace_sk_data_ready(sk);
	rdsdebug("data ready sk %p\n", sk);

	read_lock_bh(&sk->sk_callback_lock);
	cp = sk->sk_user_data;
	if (!cp) { /* check for teardown race */
		ready = sk->sk_data_ready;
		goto out;
	}

	tc = cp->cp_transport_data;
	ready = tc->t_orig_data_ready;
	rds_tcp_stats_inc(s_tcp_data_ready_calls);

	if (rds_tcp_read_sock(cp, GFP_ATOMIC) == -ENOMEM) {
		rcu_read_lock();
		if (!rds_destroy_pending(cp->cp_conn))
			queue_delayed_work(rds_wq, &cp->cp_recv_w, 0);
		rcu_read_unlock();
	}
out:
	read_unlock_bh(&sk->sk_callback_lock);
	ready(sk);
}

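/* The tinc slab is created at transport init and freed at module exit. */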
int rds_tcp_recv_init(void)
{
	rds_tcp_incoming_slab = KMEM_CACHE(rds_tcp_incoming, 0);
	if (!rds_tcp_incoming_slab)
		return -ENOMEM;
	return 0;
}

void rds_tcp_recv_exit(void)
{
	kmem_cache_destroy(rds_tcp_incoming_slab);
}