// SPDX-License-Identifier: GPL-2.0
/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * Manage RMBE
 * copy new RMBE data into user space
 *
 * Copyright IBM Corp. 2016
 *
 * Author(s):  Ursula Braun <ubraun@linux.vnet.ibm.com>
 */

#include <linux/net.h>
#include <linux/rcupdate.h>
#include <linux/sched/signal.h>

#include <net/sock.h>
#include <trace/events/sock.h>

#include "smc.h"
#include "smc_core.h"
#include "smc_cdc.h"
#include "smc_tx.h" /* smc_tx_consumer_update() */
#include "smc_rx.h"
#include "smc_stats.h"
#include "smc_tracepoint.h"

/* callback implementation to wakeup consumers blocked with smc_rx_wait().
 * indirectly called by smc_cdc_msg_recv_action().
 */
static void smc_rx_wake_up(struct sock *sk)
{
        struct socket_wq *wq;

        trace_sk_data_ready(sk);

        /* derived from sock_def_readable() */
        /* called already in smc_listen_work() */
        rcu_read_lock();
        wq = rcu_dereference(sk->sk_wq);
        if (skwq_has_sleeper(wq))
                wake_up_interruptible_sync_poll(&wq->wait, EPOLLIN | EPOLLPRI |
                                                EPOLLRDNORM | EPOLLRDBAND);
        sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
        if ((sk->sk_shutdown == SHUTDOWN_MASK) ||
            (sk->sk_state == SMC_CLOSED))
                sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP);
        rcu_read_unlock();
}

/* Update consumer cursor
 *   @conn   connection to update
 *   @cons   consumer cursor
 *   @len    number of Bytes consumed
 *   Returns:
 *   1 if we should end our receive, 0 otherwise
 */
static int smc_rx_update_consumer(struct smc_sock *smc,
                                  union smc_host_cursor cons, size_t len)
{
        struct smc_connection *conn = &smc->conn;
        struct sock *sk = &smc->sk;
        bool force = false;
        int diff, rc = 0;

        smc_curs_add(conn->rmb_desc->len, &cons, len);

        /* did we process urgent data? */
        if (conn->urg_state == SMC_URG_VALID || conn->urg_rx_skip_pend) {
                diff = smc_curs_comp(conn->rmb_desc->len, &cons,
                                     &conn->urg_curs);
                if (sock_flag(sk, SOCK_URGINLINE)) {
                        if (diff == 0) {
                                force = true;
                                rc = 1;
                                conn->urg_state = SMC_URG_READ;
                        }
                } else {
                        if (diff == 1) {
                                /* skip urgent byte */
                                force = true;
                                smc_curs_add(conn->rmb_desc->len, &cons, 1);
                                conn->urg_rx_skip_pend = false;
                        } else if (diff < -1)
                                /* we read past urgent byte */
                                conn->urg_state = SMC_URG_READ;
                }
        }

        smc_curs_copy(&conn->local_tx_ctrl.cons, &cons, conn);

        /* send consumer cursor update if required */
        /* similar to advertising new TCP rcv_wnd if required */
        smc_tx_consumer_update(conn, force);

        return rc;
}
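/* Worked example for the cursor arithmetic above (illustrative values,
 * not taken from the original source): assume rmb_desc->len == 16384,
 * cons.count == 16380, and len == 8 bytes consumed. smc_curs_add()
 * wraps the cursor to count == 4 and increments its wrap counter. If
 * the peer signalled urgent data at urg_curs.count == 5 in the same
 * wrap, smc_curs_comp() returns diff == 1, i.e. the next unread byte
 * is the urgent byte: the non-URGINLINE branch skips it by advancing
 * the cursor one more byte and forces a consumer cursor update towards
 * the peer.
 */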
static void smc_rx_update_cons(struct smc_sock *smc, size_t len)
{
        struct smc_connection *conn = &smc->conn;
        union smc_host_cursor cons;

        smc_curs_copy(&cons, &conn->local_tx_ctrl.cons, conn);
        smc_rx_update_consumer(smc, cons, len);
}

struct smc_spd_priv {
        struct smc_sock *smc;
        size_t           len;
};

static void smc_rx_pipe_buf_release(struct pipe_inode_info *pipe,
                                    struct pipe_buffer *buf)
{
        struct smc_spd_priv *priv = (struct smc_spd_priv *)buf->private;
        struct smc_sock *smc = priv->smc;
        struct smc_connection *conn;
        struct sock *sk = &smc->sk;

        if (sk->sk_state == SMC_CLOSED ||
            sk->sk_state == SMC_PEERFINCLOSEWAIT ||
            sk->sk_state == SMC_APPFINCLOSEWAIT)
                goto out;
        conn = &smc->conn;
        lock_sock(sk);
        smc_rx_update_cons(smc, priv->len);
        release_sock(sk);
        if (atomic_sub_and_test(priv->len, &conn->splice_pending))
                smc_rx_wake_up(sk);
out:
        kfree(priv);
        put_page(buf->page);
        sock_put(sk);
}

static const struct pipe_buf_operations smc_pipe_ops = {
        .release = smc_rx_pipe_buf_release,
        .get = generic_pipe_buf_get
};

static void smc_rx_spd_release(struct splice_pipe_desc *spd,
                               unsigned int i)
{
        put_page(spd->pages[i]);
}

static int smc_rx_splice(struct pipe_inode_info *pipe, char *src, size_t len,
                         struct smc_sock *smc)
{
        struct smc_link_group *lgr = smc->conn.lgr;
        int offset = offset_in_page(src);
        struct partial_page *partial;
        struct splice_pipe_desc spd;
        struct smc_spd_priv **priv;
        struct page **pages;
        int bytes, nr_pages;
        int i;

        nr_pages = !lgr->is_smcd && smc->conn.rmb_desc->is_vm ?
                   PAGE_ALIGN(len + offset) / PAGE_SIZE : 1;

        pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
        if (!pages)
                goto out;
        partial = kcalloc(nr_pages, sizeof(*partial), GFP_KERNEL);
        if (!partial)
                goto out_page;
        priv = kcalloc(nr_pages, sizeof(*priv), GFP_KERNEL);
        if (!priv)
                goto out_part;
        for (i = 0; i < nr_pages; i++) {
                priv[i] = kzalloc(sizeof(**priv), GFP_KERNEL);
                if (!priv[i])
                        goto out_priv;
        }

        if (lgr->is_smcd ||
            (!lgr->is_smcd && !smc->conn.rmb_desc->is_vm)) {
                /* smcd or smcr that uses physically contiguous RMBs */
                priv[0]->len = len;
                priv[0]->smc = smc;
                partial[0].offset = src - (char *)smc->conn.rmb_desc->cpu_addr;
                partial[0].len = len;
                partial[0].private = (unsigned long)priv[0];
                pages[0] = smc->conn.rmb_desc->pages;
        } else {
                int size, left = len;
                void *buf = src;
                /* smcr that uses virtually contiguous RMBs */
                for (i = 0; i < nr_pages; i++) {
                        size = min_t(int, PAGE_SIZE - offset, left);
                        priv[i]->len = size;
                        priv[i]->smc = smc;
                        pages[i] = vmalloc_to_page(buf);
                        partial[i].offset = offset;
                        partial[i].len = size;
                        partial[i].private = (unsigned long)priv[i];
                        buf += size / sizeof(*buf);
                        left -= size;
                        offset = 0;
                }
        }
        spd.nr_pages_max = nr_pages;
        spd.nr_pages = nr_pages;
        spd.pages = pages;
        spd.partial = partial;
        spd.ops = &smc_pipe_ops;
        spd.spd_release = smc_rx_spd_release;

        bytes = splice_to_pipe(pipe, &spd);
        if (bytes > 0) {
                sock_hold(&smc->sk);
                if (!lgr->is_smcd && smc->conn.rmb_desc->is_vm) {
                        for (i = 0; i < PAGE_ALIGN(bytes + offset) / PAGE_SIZE; i++)
                                get_page(pages[i]);
                } else {
                        get_page(smc->conn.rmb_desc->pages);
                }
                atomic_add(bytes, &smc->conn.splice_pending);
        }
        kfree(priv);
        kfree(partial);
        kfree(pages);

        return bytes;

out_priv:
        for (i = (i - 1); i >= 0; i--)
                kfree(priv[i]);
        kfree(priv);
out_part:
        kfree(partial);
out_page:
        kfree(pages);
out:
        return -ENOMEM;
}
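/* Worked example for the page math in smc_rx_splice() above
 * (illustrative values, assuming a 4K PAGE_SIZE): splicing len == 6000
 * bytes from a virtually contiguous (is_vm) RMB at
 * offset_in_page(src) == 3000 needs
 * PAGE_ALIGN(6000 + 3000) / PAGE_SIZE == 3 pipe buffers: 1096 bytes
 * from the first page, 4096 from the second and 808 from the third.
 * SMC-D and physically contiguous SMC-R RMBs instead pass the single
 * rmb_desc->pages entry with the in-buffer offset.
 */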
static int smc_rx_data_available_and_no_splice_pend(struct smc_connection *conn)
{
        return atomic_read(&conn->bytes_to_rcv) &&
               !atomic_read(&conn->splice_pending);
}

/* blocks rcvbuf consumer until the fcrit condition holds, or timeout or
 * interrupted
 *   @smc    smc socket
 *   @timeo  pointer to max jiffies to wait; a value of 0 means do not block
 *   @fcrit  add'l criterion to evaluate as function pointer
 * Returns:
 * 1 if at least 1 byte available in rcvbuf or if socket error/shutdown.
 * 0 otherwise (nothing in rcvbuf nor timeout, e.g. interrupted).
 */
int smc_rx_wait(struct smc_sock *smc, long *timeo,
                int (*fcrit)(struct smc_connection *conn))
{
        DEFINE_WAIT_FUNC(wait, woken_wake_function);
        struct smc_connection *conn = &smc->conn;
        struct smc_cdc_conn_state_flags *cflags =
                                        &conn->local_tx_ctrl.conn_state_flags;
        struct sock *sk = &smc->sk;
        int rc;

        if (fcrit(conn))
                return 1;
        sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
        add_wait_queue(sk_sleep(sk), &wait);
        rc = sk_wait_event(sk, timeo,
                           sk->sk_err ||
                           cflags->peer_conn_abort ||
                           sk->sk_shutdown & RCV_SHUTDOWN ||
                           conn->killed ||
                           fcrit(conn),
                           &wait);
        remove_wait_queue(sk_sleep(sk), &wait);
        sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
        return rc;
}
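/* Minimal usage sketch for the fcrit hook above (illustration only; the
 * real callers in this file pass smc_rx_data_available or
 * smc_rx_data_available_and_no_splice_pend):
 *
 *      long timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
 *
 *      if (smc_rx_wait(smc, &timeo, smc_rx_data_available_and_no_splice_pend))
 *              ... readable data (or error/shutdown) - safe to copy ...
 *      else
 *              ... timed out or interrupted; timeo holds the remaining time ...
 */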
static int smc_rx_recv_urg(struct smc_sock *smc, struct msghdr *msg, int len,
                           int flags)
{
        struct smc_connection *conn = &smc->conn;
        union smc_host_cursor cons;
        struct sock *sk = &smc->sk;
        int rc = 0;

        if (sock_flag(sk, SOCK_URGINLINE) ||
            !(conn->urg_state == SMC_URG_VALID) ||
            conn->urg_state == SMC_URG_READ)
                return -EINVAL;

        SMC_STAT_INC(smc, urg_data_cnt);
        if (conn->urg_state == SMC_URG_VALID) {
                if (!(flags & MSG_PEEK))
                        smc->conn.urg_state = SMC_URG_READ;
                msg->msg_flags |= MSG_OOB;
                if (len > 0) {
                        if (!(flags & MSG_TRUNC))
                                rc = memcpy_to_msg(msg, &conn->urg_rx_byte, 1);
                        len = 1;
                        smc_curs_copy(&cons, &conn->local_tx_ctrl.cons, conn);
                        if (smc_curs_diff(conn->rmb_desc->len, &cons,
                                          &conn->urg_curs) > 1)
                                conn->urg_rx_skip_pend = true;
                        /* Urgent Byte was already accounted for, but trigger
                         * skipping the urgent byte in non-inline case
                         */
                        if (!(flags & MSG_PEEK))
                                smc_rx_update_consumer(smc, cons, 0);
                } else {
                        msg->msg_flags |= MSG_TRUNC;
                }

                return rc ? -EFAULT : len;
        }

        if (sk->sk_state == SMC_CLOSED || sk->sk_shutdown & RCV_SHUTDOWN)
                return 0;

        return -EAGAIN;
}

static bool smc_rx_recvmsg_data_available(struct smc_sock *smc)
{
        struct smc_connection *conn = &smc->conn;

        if (smc_rx_data_available(conn))
                return true;
        else if (conn->urg_state == SMC_URG_VALID)
                /* we received a single urgent Byte - skip */
                smc_rx_update_cons(smc, 0);
        return false;
}
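/* Illustrative userspace view of the urgent-data path above (assumed
 * example, not taken from the original source): after the peer sends
 * one byte with send(fd, "!", 1, MSG_OOB), a recv(fd, buf, 1, MSG_OOB)
 * reaches smc_rx_recv_urg(), returns 1 and sets MSG_OOB in msg_flags;
 * repeating the MSG_OOB read fails with -EINVAL because urg_state has
 * moved on to SMC_URG_READ. With SO_OOBINLINE set, MSG_OOB reads fail
 * with -EINVAL and the urgent byte is delivered within the normal data
 * stream instead.
 */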
/* smc_rx_recvmsg - receive data from RMBE
 * @msg:        copy data to receive buffer
 * @pipe:       copy data to pipe if set - indicates splice() call
 *
 * rcvbuf consumer: main API called by socket layer.
 * Called under sk lock.
 */
int smc_rx_recvmsg(struct smc_sock *smc, struct msghdr *msg,
                   struct pipe_inode_info *pipe, size_t len, int flags)
{
        size_t copylen, read_done = 0, read_remaining = len;
        size_t chunk_len, chunk_off, chunk_len_sum;
        struct smc_connection *conn = &smc->conn;
        int (*func)(struct smc_connection *conn);
        union smc_host_cursor cons;
        int readable, chunk;
        char *rcvbuf_base;
        struct sock *sk;
        int splbytes;
        long timeo;
        int target;             /* Read at least this many bytes */
        int rc;

        if (unlikely(flags & MSG_ERRQUEUE))
                return -EINVAL; /* future work for sk.sk_family == AF_SMC */

        sk = &smc->sk;
        if (sk->sk_state == SMC_LISTEN)
                return -ENOTCONN;
        if (flags & MSG_OOB)
                return smc_rx_recv_urg(smc, msg, len, flags);
        timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
        target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);

        readable = atomic_read(&conn->bytes_to_rcv);
        if (readable >= conn->rmb_desc->len)
                SMC_STAT_RMB_RX_FULL(smc, !conn->lnk);

        if (len < readable)
                SMC_STAT_RMB_RX_SIZE_SMALL(smc, !conn->lnk);
        /* we currently use 1 RMBE per RMB, so RMBE == RMB base addr */
        rcvbuf_base = conn->rx_off + conn->rmb_desc->cpu_addr;

        do { /* while (read_remaining) */
                if (read_done >= target || (pipe && read_done))
                        break;

                if (conn->killed)
                        break;

                if (smc_rx_recvmsg_data_available(smc))
                        goto copy;

                if (sk->sk_shutdown & RCV_SHUTDOWN) {
                        /* smc_cdc_msg_recv_action() could have run after
                         * above smc_rx_recvmsg_data_available()
                         */
                        if (smc_rx_recvmsg_data_available(smc))
                                goto copy;
                        break;
                }

                if (read_done) {
                        if (sk->sk_err ||
                            sk->sk_state == SMC_CLOSED ||
                            !timeo ||
                            signal_pending(current))
                                break;
                } else {
                        if (sk->sk_err) {
                                read_done = sock_error(sk);
                                break;
                        }
                        if (sk->sk_state == SMC_CLOSED) {
                                if (!sock_flag(sk, SOCK_DONE)) {
                                        /* This occurs when user tries to read
                                         * from never connected socket.
                                         */
                                        read_done = -ENOTCONN;
                                        break;
                                }
                                break;
                        }
                        if (!timeo)
                                return -EAGAIN;
                        if (signal_pending(current)) {
                                read_done = sock_intr_errno(timeo);
                                break;
                        }
                }

                if (!smc_rx_data_available(conn)) {
                        smc_rx_wait(smc, &timeo, smc_rx_data_available);
                        continue;
                }

copy:
                /* initialize variables for 1st iteration of subsequent loop */
                /* could be just 1 byte, even after waiting on data above */
                readable = atomic_read(&conn->bytes_to_rcv);
                splbytes = atomic_read(&conn->splice_pending);
                if (!readable || (msg && splbytes)) {
                        if (splbytes)
                                func = smc_rx_data_available_and_no_splice_pend;
                        else
                                func = smc_rx_data_available;
                        smc_rx_wait(smc, &timeo, func);
                        continue;
                }

                smc_curs_copy(&cons, &conn->local_tx_ctrl.cons, conn);
                /* subsequent splice() calls pick up where previous left off */
                if (splbytes)
                        smc_curs_add(conn->rmb_desc->len, &cons, splbytes);
                if (conn->urg_state == SMC_URG_VALID &&
                    sock_flag(&smc->sk, SOCK_URGINLINE) &&
                    readable > 1)
                        readable--;     /* always stop at urgent Byte */
                /* not more than what user space asked for */
                copylen = min_t(size_t, read_remaining, readable);
                /* determine chunks where to read from rcvbuf */
                /* either unwrapped case, or 1st chunk of wrapped case */
                chunk_len = min_t(size_t, copylen, conn->rmb_desc->len -
                                  cons.count);
                chunk_len_sum = chunk_len;
                chunk_off = cons.count;
                smc_rmb_sync_sg_for_cpu(conn);
                for (chunk = 0; chunk < 2; chunk++) {
                        if (!(flags & MSG_TRUNC)) {
                                if (msg) {
                                        rc = memcpy_to_msg(msg, rcvbuf_base +
                                                           chunk_off,
                                                           chunk_len);
                                } else {
                                        rc = smc_rx_splice(pipe, rcvbuf_base +
                                                           chunk_off, chunk_len,
                                                           smc);
                                }
                                if (rc < 0) {
                                        if (!read_done)
                                                read_done = -EFAULT;
                                        goto out;
                                }
                        }
                        read_remaining -= chunk_len;
                        read_done += chunk_len;

                        if (chunk_len_sum == copylen)
                                break; /* either on 1st or 2nd iteration */
                        /* prepare next (== 2nd) iteration */
                        chunk_len = copylen - chunk_len; /* remainder */
                        chunk_len_sum += chunk_len;
                        chunk_off = 0; /* modulo offset in recv ring buffer */
                }

                /* update cursors */
                if (!(flags & MSG_PEEK)) {
                        /* increased in recv tasklet smc_cdc_msg_rcv() */
                        smp_mb__before_atomic();
                        atomic_sub(copylen, &conn->bytes_to_rcv);
                        /* guarantee 0 <= bytes_to_rcv <= rmb_desc->len */
                        smp_mb__after_atomic();
                        if (msg && smc_rx_update_consumer(smc, cons, copylen))
                                goto out;
                }

                trace_smc_rx_recvmsg(smc, copylen);
        } while (read_remaining);
out:
        return read_done;
}
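/* Worked example for the two-chunk copy loop in smc_rx_recvmsg() above
 * (illustrative values): with rmb_desc->len == 16384,
 * cons.count == 16000 and copylen == 1000, the 1st iteration copies
 * chunk_len == 384 bytes from chunk_off == 16000 up to the end of the
 * ring buffer; the 2nd iteration copies the remaining 616 bytes from
 * chunk_off == 0. chunk_len_sum reaching copylen ends the loop, so at
 * most two chunks are ever needed.
 */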
/* Initialize receive properties on connection establishment. NB: not __init! */
void smc_rx_init(struct smc_sock *smc)
{
        smc->sk.sk_data_ready = smc_rx_wake_up;
        atomic_set(&smc->conn.splice_pending, 0);
        smc->conn.urg_state = SMC_URG_READ;
}