xref: /linux/net/smc/smc_tx.c (revision fd639726bf15fca8ee1a00dce8e0096d0ad9bd18)
// SPDX-License-Identifier: GPL-2.0
/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * Manage send buffer.
 * Producer:
 * Copy user space data into the send buffer, if send buffer space is
 * available.
 * Consumer:
 * Trigger an RDMA write into the peer's RMBE and send CDC, if RMBE space
 * is available.
 *
 * Copyright IBM Corp. 2016
 *
 * Author(s):  Ursula Braun <ubraun@linux.vnet.ibm.com>
 */

#include <linux/net.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>
#include <linux/sched/signal.h>

#include <net/sock.h>

#include "smc.h"
#include "smc_wr.h"
#include "smc_cdc.h"
#include "smc_tx.h"

/* delay (in jiffies) before the tx worker retries; HZ jiffies == 1 second */
#define SMC_TX_WORK_DELAY	HZ

/***************************** sndbuf producer *******************************/

/* callback implementation for sk.sk_write_space()
 * to wake up sndbuf producers that blocked in smc_tx_wait_memory().
 * called under sk_socket lock.
 */
static void smc_tx_write_space(struct sock *sk)
{
	struct socket *sock = sk->sk_socket;
	struct smc_sock *smc = smc_sk(sk);
	struct socket_wq *wq;

	/* similar to sk_stream_write_space */
	if (atomic_read(&smc->conn.sndbuf_space) && sock) {
		clear_bit(SOCK_NOSPACE, &sock->flags);
		rcu_read_lock();
		wq = rcu_dereference(sk->sk_wq);
		if (skwq_has_sleeper(wq))
			wake_up_interruptible_poll(&wq->wait,
						   POLLOUT | POLLWRNORM |
						   POLLWRBAND);
		if (wq && wq->fasync_list && !(sk->sk_shutdown & SEND_SHUTDOWN))
			sock_wake_async(wq, SOCK_WAKE_SPACE, POLL_OUT);
		rcu_read_unlock();
	}
}

/* Wake up sndbuf producers that blocked in smc_tx_wait_memory().
 * Cf. tcp_data_snd_check()=>tcp_check_space()=>tcp_new_space().
 */
void smc_tx_sndbuf_nonfull(struct smc_sock *smc)
{
	if (smc->sk.sk_socket &&
	    test_bit(SOCK_NOSPACE, &smc->sk.sk_socket->flags))
		smc->sk.sk_write_space(&smc->sk);
}

/* blocks sndbuf producer until at least one byte of free space is available */
static int smc_tx_wait_memory(struct smc_sock *smc, int flags)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	struct smc_connection *conn = &smc->conn;
	struct sock *sk = &smc->sk;
	bool noblock;
	long timeo;
	int rc = 0;

	/* similar to sk_stream_wait_memory */
	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
	noblock = !timeo;
	add_wait_queue(sk_sleep(sk), &wait);
	while (1) {
		sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
		if (sk->sk_err ||
		    (sk->sk_shutdown & SEND_SHUTDOWN) ||
		    conn->local_tx_ctrl.conn_state_flags.peer_done_writing) {
			rc = -EPIPE;
			break;
		}
		if (conn->local_rx_ctrl.conn_state_flags.peer_conn_abort) {
			rc = -ECONNRESET;
			break;
		}
		if (!timeo) {
			if (noblock)
				set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
			rc = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			rc = sock_intr_errno(timeo);
			break;
		}
		sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
		if (atomic_read(&conn->sndbuf_space))
			break; /* at least 1 byte of free space available */
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		sk->sk_write_pending++;
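		/* sleep until woken via sk_write_space() (e.g. by
		 * smc_tx_sndbuf_nonfull() once the peer confirmed consumed
		 * data) or until the timeout expires
		 */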
		sk_wait_event(sk, &timeo,
			      sk->sk_err ||
			      (sk->sk_shutdown & SEND_SHUTDOWN) ||
			      smc_cdc_rxed_any_close_or_senddone(conn) ||
			      atomic_read(&conn->sndbuf_space),
			      &wait);
		sk->sk_write_pending--;
	}
	remove_wait_queue(sk_sleep(sk), &wait);
	return rc;
}

/* sndbuf producer: main API called by socket layer.
 * called under sock lock.
 */
int smc_tx_sendmsg(struct smc_sock *smc, struct msghdr *msg, size_t len)
{
	size_t copylen, send_done = 0, send_remaining = len;
	size_t chunk_len, chunk_off, chunk_len_sum;
	struct smc_connection *conn = &smc->conn;
	union smc_host_cursor prep;
	struct sock *sk = &smc->sk;
	char *sndbuf_base;
	int tx_cnt_prep;
	int writespace;
	int rc, chunk;

	/* This should be in poll */
	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) {
		rc = -EPIPE;
		goto out_err;
	}

	while (msg_data_left(msg)) {
		if (sk->sk_state == SMC_INIT)
			return -ENOTCONN;
		if (smc->sk.sk_shutdown & SEND_SHUTDOWN ||
		    (smc->sk.sk_err == ECONNABORTED) ||
		    conn->local_tx_ctrl.conn_state_flags.peer_conn_abort)
			return -EPIPE;
		if (smc_cdc_rxed_any_close(conn))
			return send_done ?: -ECONNRESET;

		if (!atomic_read(&conn->sndbuf_space)) {
			rc = smc_tx_wait_memory(smc, msg->msg_flags);
			if (rc) {
				if (send_done)
					return send_done;
				goto out_err;
			}
			continue;
		}

		/* initialize variables for 1st iteration of subsequent loop */
		/* could be just 1 byte, even after smc_tx_wait_memory above */
		writespace = atomic_read(&conn->sndbuf_space);
		/* not more than what user space asked for */
		copylen = min_t(size_t, send_remaining, writespace);
		/* determine start of sndbuf */
		sndbuf_base = conn->sndbuf_desc->cpu_addr;
		smc_curs_write(&prep,
			       smc_curs_read(&conn->tx_curs_prep, conn),
			       conn);
		tx_cnt_prep = prep.count;
		/* determine chunks where to write into sndbuf */
		/* either unwrapped case, or 1st chunk of wrapped case */
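		/* e.g. (illustrative numbers only): sndbuf_size=16,
		 * tx_cnt_prep=12, copylen=6 yields a 1st chunk of 4 bytes at
		 * offset 12 and a 2nd chunk of 2 bytes at offset 0
		 */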
		chunk_len = min_t(size_t,
				  copylen, conn->sndbuf_size - tx_cnt_prep);
		chunk_len_sum = chunk_len;
		chunk_off = tx_cnt_prep;
		smc_sndbuf_sync_sg_for_cpu(conn);
		for (chunk = 0; chunk < 2; chunk++) {
			rc = memcpy_from_msg(sndbuf_base + chunk_off,
					     msg, chunk_len);
			if (rc) {
				smc_sndbuf_sync_sg_for_device(conn);
				if (send_done)
					return send_done;
				goto out_err;
			}
			send_done += chunk_len;
			send_remaining -= chunk_len;

			if (chunk_len_sum == copylen)
				break; /* either on 1st or 2nd iteration */
			/* prepare next (== 2nd) iteration */
			chunk_len = copylen - chunk_len; /* remainder */
			chunk_len_sum += chunk_len;
			chunk_off = 0; /* modulo offset in send ring buffer */
		}
		smc_sndbuf_sync_sg_for_device(conn);
		/* update cursors */
		smc_curs_add(conn->sndbuf_size, &prep, copylen);
		smc_curs_write(&conn->tx_curs_prep,
			       smc_curs_read(&prep, conn),
			       conn);
		/* increased in send tasklet smc_cdc_tx_handler() */
		smp_mb__before_atomic();
		atomic_sub(copylen, &conn->sndbuf_space);
		/* guarantee 0 <= sndbuf_space <= sndbuf_size */
		smp_mb__after_atomic();
		/* since we just produced more new data into sndbuf,
		 * trigger sndbuf consumer: RDMA write into peer RMBE and CDC
		 */
		smc_tx_sndbuf_nonempty(conn);
	} /* while (msg_data_left(msg)) */

	return send_done;

out_err:
	rc = sk_stream_error(sk, msg->msg_flags, rc);
	/* make sure we wake any epoll edge trigger waiter */
	if (unlikely(rc == -EAGAIN))
		sk->sk_write_space(sk);
	return rc;
}

/***************************** sndbuf consumer *******************************/

/* sndbuf consumer: actual data transfer of one target chunk with RDMA write */
static int smc_tx_rdma_write(struct smc_connection *conn, int peer_rmbe_offset,
			     int num_sges, struct ib_sge sges[])
{
	struct smc_link_group *lgr = conn->lgr;
	struct ib_send_wr *failed_wr = NULL;
	struct ib_rdma_wr rdma_wr;
	struct smc_link *link;
	int rc;

	memset(&rdma_wr, 0, sizeof(rdma_wr));
	link = &lgr->lnk[SMC_SINGLE_LINK];
	rdma_wr.wr.wr_id = smc_wr_tx_get_next_wr_id(link);
	rdma_wr.wr.sg_list = sges;
	rdma_wr.wr.num_sge = num_sges;
	rdma_wr.wr.opcode = IB_WR_RDMA_WRITE;
	rdma_wr.remote_addr =
		lgr->rtokens[conn->rtoken_idx][SMC_SINGLE_LINK].dma_addr +
		/* RMBE within RMB */
		((conn->peer_conn_idx - 1) * conn->peer_rmbe_size) +
		/* offset within RMBE */
		peer_rmbe_offset;
	rdma_wr.rkey = lgr->rtokens[conn->rtoken_idx][SMC_SINGLE_LINK].rkey;
	rc = ib_post_send(link->roce_qp, &rdma_wr.wr, &failed_wr);
	if (rc)
		conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1;
	return rc;
}

/* sndbuf consumer */
static inline void smc_tx_advance_cursors(struct smc_connection *conn,
					  union smc_host_cursor *prod,
					  union smc_host_cursor *sent,
					  size_t len)
{
	smc_curs_add(conn->peer_rmbe_size, prod, len);
	/* increased in recv tasklet smc_cdc_msg_rcv() */
	smp_mb__before_atomic();
	/* data in flight reduces usable snd_wnd */
	atomic_sub(len, &conn->peer_rmbe_space);
	/* guarantee 0 <= peer_rmbe_space <= peer_rmbe_size */
	smp_mb__after_atomic();
	smc_curs_add(conn->sndbuf_size, sent, len);
}

/* sndbuf consumer: prepare all necessary (src & dst) chunks of the data
 * transmission; usable snd_wnd is the max transmit
 */
static int smc_tx_rdma_writes(struct smc_connection *conn)
{
	size_t src_off, src_len, dst_off, dst_len; /* current chunk values */
	size_t len, dst_len_sum, src_len_sum, dstchunk, srcchunk;
	union smc_host_cursor sent, prep, prod, cons;
	struct ib_sge sges[SMC_IB_MAX_SEND_SGE];
	struct smc_link_group *lgr = conn->lgr;
	int to_send, rmbespace;
	struct smc_link *link;
	dma_addr_t dma_addr;
	int num_sges;
	int rc;

	/* source: sndbuf */
	smc_curs_write(&sent, smc_curs_read(&conn->tx_curs_sent, conn), conn);
	smc_curs_write(&prep, smc_curs_read(&conn->tx_curs_prep, conn), conn);
	/* cf. wmem_alloc - (snd_max - snd_una) */
	to_send = smc_curs_diff(conn->sndbuf_size, &sent, &prep);
	if (to_send <= 0)
		return 0;

	/* destination: RMBE */
	/* cf. snd_wnd */
	rmbespace = atomic_read(&conn->peer_rmbe_space);
	if (rmbespace <= 0)
		return 0;
	smc_curs_write(&prod,
		       smc_curs_read(&conn->local_tx_ctrl.prod, conn),
		       conn);
	smc_curs_write(&cons,
		       smc_curs_read(&conn->local_rx_ctrl.cons, conn),
		       conn);

	/* if usable snd_wnd closes, ask peer to advertise it once it reopens */
	conn->local_tx_ctrl.prod_flags.write_blocked = (to_send >= rmbespace);
	/* cf. usable snd_wnd */
	len = min(to_send, rmbespace);

	/* initialize variables for first iteration of subsequent nested loop */
	link = &lgr->lnk[SMC_SINGLE_LINK];
	dst_off = prod.count;
	if (prod.wrap == cons.wrap) {
		/* the filled destination area is unwrapped,
		 * hence the available free destination space is wrapped
		 * and we need 2 destination chunks of sum len; start with 1st
		 * which is limited by what's available in sndbuf
		 */
		dst_len = min_t(size_t,
				conn->peer_rmbe_size - prod.count, len);
	} else {
		/* the filled destination area is wrapped,
		 * hence the available free destination space is unwrapped
		 * and we need a single destination chunk of entire len
		 */
		dst_len = len;
	}
	dst_len_sum = dst_len;
	src_off = sent.count;
	/* dst_len determines the maximum src_len */
	if (sent.count + dst_len <= conn->sndbuf_size) {
		/* unwrapped src case: single chunk of entire dst_len */
		src_len = dst_len;
	} else {
		/* wrapped src case: 2 chunks of sum dst_len; start with 1st: */
		src_len = conn->sndbuf_size - sent.count;
	}
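	/* e.g. (illustrative numbers only): sndbuf_size=16, sent.count=14,
	 * peer_rmbe_size=16, prod.count=10, len=6, prod.wrap == cons.wrap:
	 * one dst chunk of 6 bytes at offset 10, built from two src sges
	 * (2 bytes at offset 14, then 4 bytes at offset 0)
	 */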
	src_len_sum = src_len;
	dma_addr = sg_dma_address(conn->sndbuf_desc->sgt[SMC_SINGLE_LINK].sgl);
	for (dstchunk = 0; dstchunk < 2; dstchunk++) {
		num_sges = 0;
		for (srcchunk = 0; srcchunk < 2; srcchunk++) {
			sges[srcchunk].addr = dma_addr + src_off;
			sges[srcchunk].length = src_len;
			sges[srcchunk].lkey = link->roce_pd->local_dma_lkey;
			num_sges++;
			src_off += src_len;
			if (src_off >= conn->sndbuf_size)
				src_off -= conn->sndbuf_size;
						/* modulo in send ring */
			if (src_len_sum == dst_len)
				break; /* either on 1st or 2nd iteration */
			/* prepare next (== 2nd) iteration */
			src_len = dst_len - src_len; /* remainder */
			src_len_sum += src_len;
		}
		rc = smc_tx_rdma_write(conn, dst_off, num_sges, sges);
		if (rc)
			return rc;
		if (dst_len_sum == len)
			break; /* either on 1st or 2nd iteration */
		/* prepare next (== 2nd) iteration */
		dst_off = 0; /* modulo offset in RMBE ring buffer */
		dst_len = len - dst_len; /* remainder */
		dst_len_sum += dst_len;
		src_len = min_t(int,
				dst_len, conn->sndbuf_size - sent.count);
		src_len_sum = src_len;
	}

	smc_tx_advance_cursors(conn, &prod, &sent, len);
	/* update connection's cursors with advanced local cursors */
	smc_curs_write(&conn->local_tx_ctrl.prod,
		       smc_curs_read(&prod, conn),
		       conn);
							/* dst: peer RMBE */
	smc_curs_write(&conn->tx_curs_sent,
		       smc_curs_read(&sent, conn),
		       conn);
							/* src: local sndbuf */

	return 0;
}

/* Wake up sndbuf consumers from any context (IRQ or process)
 * since there is more data to transmit; usable snd_wnd as max transmit
 */
int smc_tx_sndbuf_nonempty(struct smc_connection *conn)
{
	struct smc_cdc_tx_pend *pend;
	struct smc_wr_buf *wr_buf;
	int rc;

	spin_lock_bh(&conn->send_lock);
	rc = smc_cdc_get_free_slot(conn, &wr_buf, &pend);
	if (rc < 0) {
		if (rc == -EBUSY) {
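			/* no free CDC send slot: retry from the tx worker
			 * later, unless the socket was already aborted
			 */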
			struct smc_sock *smc =
				container_of(conn, struct smc_sock, conn);

			if (smc->sk.sk_err == ECONNABORTED) {
				rc = sock_error(&smc->sk);
				goto out_unlock;
			}
			rc = 0;
			schedule_delayed_work(&conn->tx_work,
					      SMC_TX_WORK_DELAY);
		}
		goto out_unlock;
	}

	rc = smc_tx_rdma_writes(conn);
	if (rc) {
		smc_wr_tx_put_slot(&conn->lgr->lnk[SMC_SINGLE_LINK],
				   (struct smc_wr_tx_pend_priv *)pend);
		goto out_unlock;
	}

	rc = smc_cdc_msg_send(conn, wr_buf, pend);

out_unlock:
	spin_unlock_bh(&conn->send_lock);
	return rc;
}

/* Wake up sndbuf consumers from process context
 * since there is more data to transmit
 */
static void smc_tx_work(struct work_struct *work)
{
	struct smc_connection *conn = container_of(to_delayed_work(work),
						   struct smc_connection,
						   tx_work);
	struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
	int rc;

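	/* sock lock serializes against smc_tx_sendmsg() and other
	 * process-context users of this connection
	 */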
	lock_sock(&smc->sk);
	rc = smc_tx_sndbuf_nonempty(conn);
	if (!rc && conn->local_rx_ctrl.prod_flags.write_blocked &&
	    !atomic_read(&conn->bytes_to_rcv))
		conn->local_rx_ctrl.prod_flags.write_blocked = 0;
	release_sock(&smc->sk);
}

void smc_tx_consumer_update(struct smc_connection *conn)
{
	union smc_host_cursor cfed, cons;
	struct smc_cdc_tx_pend *pend;
	struct smc_wr_buf *wr_buf;
	int to_confirm, rc;

	smc_curs_write(&cons,
		       smc_curs_read(&conn->local_tx_ctrl.cons, conn),
		       conn);
	smc_curs_write(&cfed,
		       smc_curs_read(&conn->rx_curs_confirmed, conn),
		       conn);
	to_confirm = smc_curs_diff(conn->rmbe_size, &cfed, &cons);

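	/* send consumer cursor update if required:
	 * forced by an explicit peer request, or heuristically once more
	 * than rmbe_update_limit unconfirmed bytes accumulated and either
	 * half the RMBE is consumed or the producer reported write_blocked
	 */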
	if (conn->local_rx_ctrl.prod_flags.cons_curs_upd_req ||
	    ((to_confirm > conn->rmbe_update_limit) &&
	     ((to_confirm > (conn->rmbe_size / 2)) ||
	      conn->local_rx_ctrl.prod_flags.write_blocked))) {
		rc = smc_cdc_get_free_slot(conn, &wr_buf, &pend);
		if (!rc)
			rc = smc_cdc_msg_send(conn, wr_buf, pend);
		if (rc < 0) {
			schedule_delayed_work(&conn->tx_work,
					      SMC_TX_WORK_DELAY);
			return;
		}
		smc_curs_write(&conn->rx_curs_confirmed,
			       smc_curs_read(&conn->local_tx_ctrl.cons, conn),
			       conn);
		conn->local_rx_ctrl.prod_flags.cons_curs_upd_req = 0;
	}
	if (conn->local_rx_ctrl.prod_flags.write_blocked &&
	    !atomic_read(&conn->bytes_to_rcv))
		conn->local_rx_ctrl.prod_flags.write_blocked = 0;
}

/***************************** send initialize *******************************/

/* Initialize send properties on connection establishment. NB: not __init! */
void smc_tx_init(struct smc_sock *smc)
{
	smc->sk.sk_write_space = smc_tx_write_space;
	INIT_DELAYED_WORK(&smc->conn.tx_work, smc_tx_work);
	spin_lock_init(&smc->conn.send_lock);
}
496