1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * linux/net/sunrpc/xprtsock.c 4 * 5 * Client-side transport implementation for sockets. 6 * 7 * TCP callback races fixes (C) 1998 Red Hat 8 * TCP send fixes (C) 1998 Red Hat 9 * TCP NFS related read + write fixes 10 * (C) 1999 Dave Airlie, University of Limerick, Ireland <airlied@linux.ie> 11 * 12 * Rewrite of larges part of the code in order to stabilize TCP stuff. 13 * Fix behaviour when socket buffer is full. 14 * (C) 1999 Trond Myklebust <trond.myklebust@fys.uio.no> 15 * 16 * IP socket transport implementation, (C) 2005 Chuck Lever <cel@netapp.com> 17 * 18 * IPv6 support contributed by Gilles Quillard, Bull Open Source, 2005. 19 * <gilles.quillard@bull.net> 20 */ 21 22 #include <linux/types.h> 23 #include <linux/string.h> 24 #include <linux/slab.h> 25 #include <linux/module.h> 26 #include <linux/capability.h> 27 #include <linux/pagemap.h> 28 #include <linux/errno.h> 29 #include <linux/socket.h> 30 #include <linux/in.h> 31 #include <linux/net.h> 32 #include <linux/mm.h> 33 #include <linux/un.h> 34 #include <linux/udp.h> 35 #include <linux/tcp.h> 36 #include <linux/sunrpc/clnt.h> 37 #include <linux/sunrpc/addr.h> 38 #include <linux/sunrpc/sched.h> 39 #include <linux/sunrpc/svcsock.h> 40 #include <linux/sunrpc/xprtsock.h> 41 #include <linux/file.h> 42 #ifdef CONFIG_SUNRPC_BACKCHANNEL 43 #include <linux/sunrpc/bc_xprt.h> 44 #endif 45 46 #include <net/sock.h> 47 #include <net/checksum.h> 48 #include <net/udp.h> 49 #include <net/tcp.h> 50 #include <net/tls.h> 51 #include <net/tls_prot.h> 52 #include <net/handshake.h> 53 54 #include <linux/bvec.h> 55 #include <linux/highmem.h> 56 #include <linux/uio.h> 57 #include <linux/sched/mm.h> 58 59 #include <trace/events/sock.h> 60 #include <trace/events/sunrpc.h> 61 62 #include "socklib.h" 63 #include "sunrpc.h" 64 65 static void xs_close(struct rpc_xprt *xprt); 66 static void xs_set_srcport(struct sock_xprt *transport, struct socket *sock); 67 static void xs_tcp_set_socket_timeouts(struct rpc_xprt *xprt, 68 struct socket *sock); 69 70 /* 71 * xprtsock tunables 72 */ 73 static unsigned int xprt_udp_slot_table_entries = RPC_DEF_SLOT_TABLE; 74 static unsigned int xprt_tcp_slot_table_entries = RPC_MIN_SLOT_TABLE; 75 static unsigned int xprt_max_tcp_slot_table_entries = RPC_MAX_SLOT_TABLE; 76 77 static unsigned int xprt_min_resvport = RPC_DEF_MIN_RESVPORT; 78 static unsigned int xprt_max_resvport = RPC_DEF_MAX_RESVPORT; 79 80 #define XS_TCP_LINGER_TO (15U * HZ) 81 static unsigned int xs_tcp_fin_timeout __read_mostly = XS_TCP_LINGER_TO; 82 83 /* 84 * We can register our own files under /proc/sys/sunrpc by 85 * calling register_sysctl() again. The files in that 86 * directory become the union of all files registered there. 87 * 88 * We simply need to make sure that we don't collide with 89 * someone else's file names! 90 */ 91 92 static unsigned int min_slot_table_size = RPC_MIN_SLOT_TABLE; 93 static unsigned int max_slot_table_size = RPC_MAX_SLOT_TABLE; 94 static unsigned int max_tcp_slot_table_limit = RPC_MAX_SLOT_TABLE_LIMIT; 95 static unsigned int xprt_min_resvport_limit = RPC_MIN_RESVPORT; 96 static unsigned int xprt_max_resvport_limit = RPC_MAX_RESVPORT; 97 98 static struct ctl_table_header *sunrpc_table_header; 99 100 static struct xprt_class xs_local_transport; 101 static struct xprt_class xs_udp_transport; 102 static struct xprt_class xs_tcp_transport; 103 static struct xprt_class xs_tcp_tls_transport; 104 static struct xprt_class xs_bc_tcp_transport; 105 106 /* 107 * FIXME: changing the UDP slot table size should also resize the UDP 108 * socket buffers for existing UDP transports 109 */ 110 static struct ctl_table xs_tunables_table[] = { 111 { 112 .procname = "udp_slot_table_entries", 113 .data = &xprt_udp_slot_table_entries, 114 .maxlen = sizeof(unsigned int), 115 .mode = 0644, 116 .proc_handler = proc_dointvec_minmax, 117 .extra1 = &min_slot_table_size, 118 .extra2 = &max_slot_table_size 119 }, 120 { 121 .procname = "tcp_slot_table_entries", 122 .data = &xprt_tcp_slot_table_entries, 123 .maxlen = sizeof(unsigned int), 124 .mode = 0644, 125 .proc_handler = proc_dointvec_minmax, 126 .extra1 = &min_slot_table_size, 127 .extra2 = &max_slot_table_size 128 }, 129 { 130 .procname = "tcp_max_slot_table_entries", 131 .data = &xprt_max_tcp_slot_table_entries, 132 .maxlen = sizeof(unsigned int), 133 .mode = 0644, 134 .proc_handler = proc_dointvec_minmax, 135 .extra1 = &min_slot_table_size, 136 .extra2 = &max_tcp_slot_table_limit 137 }, 138 { 139 .procname = "min_resvport", 140 .data = &xprt_min_resvport, 141 .maxlen = sizeof(unsigned int), 142 .mode = 0644, 143 .proc_handler = proc_dointvec_minmax, 144 .extra1 = &xprt_min_resvport_limit, 145 .extra2 = &xprt_max_resvport_limit 146 }, 147 { 148 .procname = "max_resvport", 149 .data = &xprt_max_resvport, 150 .maxlen = sizeof(unsigned int), 151 .mode = 0644, 152 .proc_handler = proc_dointvec_minmax, 153 .extra1 = &xprt_min_resvport_limit, 154 .extra2 = &xprt_max_resvport_limit 155 }, 156 { 157 .procname = "tcp_fin_timeout", 158 .data = &xs_tcp_fin_timeout, 159 .maxlen = sizeof(xs_tcp_fin_timeout), 160 .mode = 0644, 161 .proc_handler = proc_dointvec_jiffies, 162 }, 163 { }, 164 }; 165 166 /* 167 * Wait duration for a reply from the RPC portmapper. 168 */ 169 #define XS_BIND_TO (60U * HZ) 170 171 /* 172 * Delay if a UDP socket connect error occurs. This is most likely some 173 * kind of resource problem on the local host. 174 */ 175 #define XS_UDP_REEST_TO (2U * HZ) 176 177 /* 178 * The reestablish timeout allows clients to delay for a bit before attempting 179 * to reconnect to a server that just dropped our connection. 180 * 181 * We implement an exponential backoff when trying to reestablish a TCP 182 * transport connection with the server. Some servers like to drop a TCP 183 * connection when they are overworked, so we start with a short timeout and 184 * increase over time if the server is down or not responding. 185 */ 186 #define XS_TCP_INIT_REEST_TO (3U * HZ) 187 188 /* 189 * TCP idle timeout; client drops the transport socket if it is idle 190 * for this long. Note that we also timeout UDP sockets to prevent 191 * holding port numbers when there is no RPC traffic. 192 */ 193 #define XS_IDLE_DISC_TO (5U * 60 * HZ) 194 195 /* 196 * TLS handshake timeout. 197 */ 198 #define XS_TLS_HANDSHAKE_TO (10U * HZ) 199 200 #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) 201 # undef RPC_DEBUG_DATA 202 # define RPCDBG_FACILITY RPCDBG_TRANS 203 #endif 204 205 #ifdef RPC_DEBUG_DATA 206 static void xs_pktdump(char *msg, u32 *packet, unsigned int count) 207 { 208 u8 *buf = (u8 *) packet; 209 int j; 210 211 dprintk("RPC: %s\n", msg); 212 for (j = 0; j < count && j < 128; j += 4) { 213 if (!(j & 31)) { 214 if (j) 215 dprintk("\n"); 216 dprintk("0x%04x ", j); 217 } 218 dprintk("%02x%02x%02x%02x ", 219 buf[j], buf[j+1], buf[j+2], buf[j+3]); 220 } 221 dprintk("\n"); 222 } 223 #else 224 static inline void xs_pktdump(char *msg, u32 *packet, unsigned int count) 225 { 226 /* NOP */ 227 } 228 #endif 229 230 static inline struct rpc_xprt *xprt_from_sock(struct sock *sk) 231 { 232 return (struct rpc_xprt *) sk->sk_user_data; 233 } 234 235 static inline struct sockaddr *xs_addr(struct rpc_xprt *xprt) 236 { 237 return (struct sockaddr *) &xprt->addr; 238 } 239 240 static inline struct sockaddr_un *xs_addr_un(struct rpc_xprt *xprt) 241 { 242 return (struct sockaddr_un *) &xprt->addr; 243 } 244 245 static inline struct sockaddr_in *xs_addr_in(struct rpc_xprt *xprt) 246 { 247 return (struct sockaddr_in *) &xprt->addr; 248 } 249 250 static inline struct sockaddr_in6 *xs_addr_in6(struct rpc_xprt *xprt) 251 { 252 return (struct sockaddr_in6 *) &xprt->addr; 253 } 254 255 static void xs_format_common_peer_addresses(struct rpc_xprt *xprt) 256 { 257 struct sockaddr *sap = xs_addr(xprt); 258 struct sockaddr_in6 *sin6; 259 struct sockaddr_in *sin; 260 struct sockaddr_un *sun; 261 char buf[128]; 262 263 switch (sap->sa_family) { 264 case AF_LOCAL: 265 sun = xs_addr_un(xprt); 266 if (sun->sun_path[0]) { 267 strscpy(buf, sun->sun_path, sizeof(buf)); 268 } else { 269 buf[0] = '@'; 270 strscpy(buf+1, sun->sun_path+1, sizeof(buf)-1); 271 } 272 xprt->address_strings[RPC_DISPLAY_ADDR] = 273 kstrdup(buf, GFP_KERNEL); 274 break; 275 case AF_INET: 276 (void)rpc_ntop(sap, buf, sizeof(buf)); 277 xprt->address_strings[RPC_DISPLAY_ADDR] = 278 kstrdup(buf, GFP_KERNEL); 279 sin = xs_addr_in(xprt); 280 snprintf(buf, sizeof(buf), "%08x", ntohl(sin->sin_addr.s_addr)); 281 break; 282 case AF_INET6: 283 (void)rpc_ntop(sap, buf, sizeof(buf)); 284 xprt->address_strings[RPC_DISPLAY_ADDR] = 285 kstrdup(buf, GFP_KERNEL); 286 sin6 = xs_addr_in6(xprt); 287 snprintf(buf, sizeof(buf), "%pi6", &sin6->sin6_addr); 288 break; 289 default: 290 BUG(); 291 } 292 293 xprt->address_strings[RPC_DISPLAY_HEX_ADDR] = kstrdup(buf, GFP_KERNEL); 294 } 295 296 static void xs_format_common_peer_ports(struct rpc_xprt *xprt) 297 { 298 struct sockaddr *sap = xs_addr(xprt); 299 char buf[128]; 300 301 snprintf(buf, sizeof(buf), "%u", rpc_get_port(sap)); 302 xprt->address_strings[RPC_DISPLAY_PORT] = kstrdup(buf, GFP_KERNEL); 303 304 snprintf(buf, sizeof(buf), "%4hx", rpc_get_port(sap)); 305 xprt->address_strings[RPC_DISPLAY_HEX_PORT] = kstrdup(buf, GFP_KERNEL); 306 } 307 308 static void xs_format_peer_addresses(struct rpc_xprt *xprt, 309 const char *protocol, 310 const char *netid) 311 { 312 xprt->address_strings[RPC_DISPLAY_PROTO] = protocol; 313 xprt->address_strings[RPC_DISPLAY_NETID] = netid; 314 xs_format_common_peer_addresses(xprt); 315 xs_format_common_peer_ports(xprt); 316 } 317 318 static void xs_update_peer_port(struct rpc_xprt *xprt) 319 { 320 kfree(xprt->address_strings[RPC_DISPLAY_HEX_PORT]); 321 kfree(xprt->address_strings[RPC_DISPLAY_PORT]); 322 323 xs_format_common_peer_ports(xprt); 324 } 325 326 static void xs_free_peer_addresses(struct rpc_xprt *xprt) 327 { 328 unsigned int i; 329 330 for (i = 0; i < RPC_DISPLAY_MAX; i++) 331 switch (i) { 332 case RPC_DISPLAY_PROTO: 333 case RPC_DISPLAY_NETID: 334 continue; 335 default: 336 kfree(xprt->address_strings[i]); 337 } 338 } 339 340 static size_t 341 xs_alloc_sparse_pages(struct xdr_buf *buf, size_t want, gfp_t gfp) 342 { 343 size_t i,n; 344 345 if (!want || !(buf->flags & XDRBUF_SPARSE_PAGES)) 346 return want; 347 n = (buf->page_base + want + PAGE_SIZE - 1) >> PAGE_SHIFT; 348 for (i = 0; i < n; i++) { 349 if (buf->pages[i]) 350 continue; 351 buf->bvec[i].bv_page = buf->pages[i] = alloc_page(gfp); 352 if (!buf->pages[i]) { 353 i *= PAGE_SIZE; 354 return i > buf->page_base ? i - buf->page_base : 0; 355 } 356 } 357 return want; 358 } 359 360 static int 361 xs_sock_process_cmsg(struct socket *sock, struct msghdr *msg, 362 struct cmsghdr *cmsg, int ret) 363 { 364 if (cmsg->cmsg_level == SOL_TLS && 365 cmsg->cmsg_type == TLS_GET_RECORD_TYPE) { 366 u8 content_type = *((u8 *)CMSG_DATA(cmsg)); 367 368 switch (content_type) { 369 case TLS_RECORD_TYPE_DATA: 370 /* TLS sets EOR at the end of each application data 371 * record, even though there might be more frames 372 * waiting to be decrypted. 373 */ 374 msg->msg_flags &= ~MSG_EOR; 375 break; 376 case TLS_RECORD_TYPE_ALERT: 377 ret = -ENOTCONN; 378 break; 379 default: 380 ret = -EAGAIN; 381 } 382 } 383 return ret; 384 } 385 386 static int 387 xs_sock_recv_cmsg(struct socket *sock, struct msghdr *msg, int flags) 388 { 389 union { 390 struct cmsghdr cmsg; 391 u8 buf[CMSG_SPACE(sizeof(u8))]; 392 } u; 393 int ret; 394 395 msg->msg_control = &u; 396 msg->msg_controllen = sizeof(u); 397 ret = sock_recvmsg(sock, msg, flags); 398 if (msg->msg_controllen != sizeof(u)) 399 ret = xs_sock_process_cmsg(sock, msg, &u.cmsg, ret); 400 return ret; 401 } 402 403 static ssize_t 404 xs_sock_recvmsg(struct socket *sock, struct msghdr *msg, int flags, size_t seek) 405 { 406 ssize_t ret; 407 if (seek != 0) 408 iov_iter_advance(&msg->msg_iter, seek); 409 ret = xs_sock_recv_cmsg(sock, msg, flags); 410 return ret > 0 ? ret + seek : ret; 411 } 412 413 static ssize_t 414 xs_read_kvec(struct socket *sock, struct msghdr *msg, int flags, 415 struct kvec *kvec, size_t count, size_t seek) 416 { 417 iov_iter_kvec(&msg->msg_iter, ITER_DEST, kvec, 1, count); 418 return xs_sock_recvmsg(sock, msg, flags, seek); 419 } 420 421 static ssize_t 422 xs_read_bvec(struct socket *sock, struct msghdr *msg, int flags, 423 struct bio_vec *bvec, unsigned long nr, size_t count, 424 size_t seek) 425 { 426 iov_iter_bvec(&msg->msg_iter, ITER_DEST, bvec, nr, count); 427 return xs_sock_recvmsg(sock, msg, flags, seek); 428 } 429 430 static ssize_t 431 xs_read_discard(struct socket *sock, struct msghdr *msg, int flags, 432 size_t count) 433 { 434 iov_iter_discard(&msg->msg_iter, ITER_DEST, count); 435 return xs_sock_recv_cmsg(sock, msg, flags); 436 } 437 438 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 439 static void 440 xs_flush_bvec(const struct bio_vec *bvec, size_t count, size_t seek) 441 { 442 struct bvec_iter bi = { 443 .bi_size = count, 444 }; 445 struct bio_vec bv; 446 447 bvec_iter_advance(bvec, &bi, seek & PAGE_MASK); 448 for_each_bvec(bv, bvec, bi, bi) 449 flush_dcache_page(bv.bv_page); 450 } 451 #else 452 static inline void 453 xs_flush_bvec(const struct bio_vec *bvec, size_t count, size_t seek) 454 { 455 } 456 #endif 457 458 static ssize_t 459 xs_read_xdr_buf(struct socket *sock, struct msghdr *msg, int flags, 460 struct xdr_buf *buf, size_t count, size_t seek, size_t *read) 461 { 462 size_t want, seek_init = seek, offset = 0; 463 ssize_t ret; 464 465 want = min_t(size_t, count, buf->head[0].iov_len); 466 if (seek < want) { 467 ret = xs_read_kvec(sock, msg, flags, &buf->head[0], want, seek); 468 if (ret <= 0) 469 goto sock_err; 470 offset += ret; 471 if (offset == count || msg->msg_flags & (MSG_EOR|MSG_TRUNC)) 472 goto out; 473 if (ret != want) 474 goto out; 475 seek = 0; 476 } else { 477 seek -= want; 478 offset += want; 479 } 480 481 want = xs_alloc_sparse_pages( 482 buf, min_t(size_t, count - offset, buf->page_len), 483 GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN); 484 if (seek < want) { 485 ret = xs_read_bvec(sock, msg, flags, buf->bvec, 486 xdr_buf_pagecount(buf), 487 want + buf->page_base, 488 seek + buf->page_base); 489 if (ret <= 0) 490 goto sock_err; 491 xs_flush_bvec(buf->bvec, ret, seek + buf->page_base); 492 ret -= buf->page_base; 493 offset += ret; 494 if (offset == count || msg->msg_flags & (MSG_EOR|MSG_TRUNC)) 495 goto out; 496 if (ret != want) 497 goto out; 498 seek = 0; 499 } else { 500 seek -= want; 501 offset += want; 502 } 503 504 want = min_t(size_t, count - offset, buf->tail[0].iov_len); 505 if (seek < want) { 506 ret = xs_read_kvec(sock, msg, flags, &buf->tail[0], want, seek); 507 if (ret <= 0) 508 goto sock_err; 509 offset += ret; 510 if (offset == count || msg->msg_flags & (MSG_EOR|MSG_TRUNC)) 511 goto out; 512 if (ret != want) 513 goto out; 514 } else if (offset < seek_init) 515 offset = seek_init; 516 ret = -EMSGSIZE; 517 out: 518 *read = offset - seek_init; 519 return ret; 520 sock_err: 521 offset += seek; 522 goto out; 523 } 524 525 static void 526 xs_read_header(struct sock_xprt *transport, struct xdr_buf *buf) 527 { 528 if (!transport->recv.copied) { 529 if (buf->head[0].iov_len >= transport->recv.offset) 530 memcpy(buf->head[0].iov_base, 531 &transport->recv.xid, 532 transport->recv.offset); 533 transport->recv.copied = transport->recv.offset; 534 } 535 } 536 537 static bool 538 xs_read_stream_request_done(struct sock_xprt *transport) 539 { 540 return transport->recv.fraghdr & cpu_to_be32(RPC_LAST_STREAM_FRAGMENT); 541 } 542 543 static void 544 xs_read_stream_check_eor(struct sock_xprt *transport, 545 struct msghdr *msg) 546 { 547 if (xs_read_stream_request_done(transport)) 548 msg->msg_flags |= MSG_EOR; 549 } 550 551 static ssize_t 552 xs_read_stream_request(struct sock_xprt *transport, struct msghdr *msg, 553 int flags, struct rpc_rqst *req) 554 { 555 struct xdr_buf *buf = &req->rq_private_buf; 556 size_t want, read; 557 ssize_t ret; 558 559 xs_read_header(transport, buf); 560 561 want = transport->recv.len - transport->recv.offset; 562 if (want != 0) { 563 ret = xs_read_xdr_buf(transport->sock, msg, flags, buf, 564 transport->recv.copied + want, 565 transport->recv.copied, 566 &read); 567 transport->recv.offset += read; 568 transport->recv.copied += read; 569 } 570 571 if (transport->recv.offset == transport->recv.len) 572 xs_read_stream_check_eor(transport, msg); 573 574 if (want == 0) 575 return 0; 576 577 switch (ret) { 578 default: 579 break; 580 case -EFAULT: 581 case -EMSGSIZE: 582 msg->msg_flags |= MSG_TRUNC; 583 return read; 584 case 0: 585 return -ESHUTDOWN; 586 } 587 return ret < 0 ? ret : read; 588 } 589 590 static size_t 591 xs_read_stream_headersize(bool isfrag) 592 { 593 if (isfrag) 594 return sizeof(__be32); 595 return 3 * sizeof(__be32); 596 } 597 598 static ssize_t 599 xs_read_stream_header(struct sock_xprt *transport, struct msghdr *msg, 600 int flags, size_t want, size_t seek) 601 { 602 struct kvec kvec = { 603 .iov_base = &transport->recv.fraghdr, 604 .iov_len = want, 605 }; 606 return xs_read_kvec(transport->sock, msg, flags, &kvec, want, seek); 607 } 608 609 #if defined(CONFIG_SUNRPC_BACKCHANNEL) 610 static ssize_t 611 xs_read_stream_call(struct sock_xprt *transport, struct msghdr *msg, int flags) 612 { 613 struct rpc_xprt *xprt = &transport->xprt; 614 struct rpc_rqst *req; 615 ssize_t ret; 616 617 /* Is this transport associated with the backchannel? */ 618 if (!xprt->bc_serv) 619 return -ESHUTDOWN; 620 621 /* Look up and lock the request corresponding to the given XID */ 622 req = xprt_lookup_bc_request(xprt, transport->recv.xid); 623 if (!req) { 624 printk(KERN_WARNING "Callback slot table overflowed\n"); 625 return -ESHUTDOWN; 626 } 627 if (transport->recv.copied && !req->rq_private_buf.len) 628 return -ESHUTDOWN; 629 630 ret = xs_read_stream_request(transport, msg, flags, req); 631 if (msg->msg_flags & (MSG_EOR|MSG_TRUNC)) 632 xprt_complete_bc_request(req, transport->recv.copied); 633 else 634 req->rq_private_buf.len = transport->recv.copied; 635 636 return ret; 637 } 638 #else /* CONFIG_SUNRPC_BACKCHANNEL */ 639 static ssize_t 640 xs_read_stream_call(struct sock_xprt *transport, struct msghdr *msg, int flags) 641 { 642 return -ESHUTDOWN; 643 } 644 #endif /* CONFIG_SUNRPC_BACKCHANNEL */ 645 646 static ssize_t 647 xs_read_stream_reply(struct sock_xprt *transport, struct msghdr *msg, int flags) 648 { 649 struct rpc_xprt *xprt = &transport->xprt; 650 struct rpc_rqst *req; 651 ssize_t ret = 0; 652 653 /* Look up and lock the request corresponding to the given XID */ 654 spin_lock(&xprt->queue_lock); 655 req = xprt_lookup_rqst(xprt, transport->recv.xid); 656 if (!req || (transport->recv.copied && !req->rq_private_buf.len)) { 657 msg->msg_flags |= MSG_TRUNC; 658 goto out; 659 } 660 xprt_pin_rqst(req); 661 spin_unlock(&xprt->queue_lock); 662 663 ret = xs_read_stream_request(transport, msg, flags, req); 664 665 spin_lock(&xprt->queue_lock); 666 if (msg->msg_flags & (MSG_EOR|MSG_TRUNC)) 667 xprt_complete_rqst(req->rq_task, transport->recv.copied); 668 else 669 req->rq_private_buf.len = transport->recv.copied; 670 xprt_unpin_rqst(req); 671 out: 672 spin_unlock(&xprt->queue_lock); 673 return ret; 674 } 675 676 static ssize_t 677 xs_read_stream(struct sock_xprt *transport, int flags) 678 { 679 struct msghdr msg = { 0 }; 680 size_t want, read = 0; 681 ssize_t ret = 0; 682 683 if (transport->recv.len == 0) { 684 want = xs_read_stream_headersize(transport->recv.copied != 0); 685 ret = xs_read_stream_header(transport, &msg, flags, want, 686 transport->recv.offset); 687 if (ret <= 0) 688 goto out_err; 689 transport->recv.offset = ret; 690 if (transport->recv.offset != want) 691 return transport->recv.offset; 692 transport->recv.len = be32_to_cpu(transport->recv.fraghdr) & 693 RPC_FRAGMENT_SIZE_MASK; 694 transport->recv.offset -= sizeof(transport->recv.fraghdr); 695 read = ret; 696 } 697 698 switch (be32_to_cpu(transport->recv.calldir)) { 699 default: 700 msg.msg_flags |= MSG_TRUNC; 701 break; 702 case RPC_CALL: 703 ret = xs_read_stream_call(transport, &msg, flags); 704 break; 705 case RPC_REPLY: 706 ret = xs_read_stream_reply(transport, &msg, flags); 707 } 708 if (msg.msg_flags & MSG_TRUNC) { 709 transport->recv.calldir = cpu_to_be32(-1); 710 transport->recv.copied = -1; 711 } 712 if (ret < 0) 713 goto out_err; 714 read += ret; 715 if (transport->recv.offset < transport->recv.len) { 716 if (!(msg.msg_flags & MSG_TRUNC)) 717 return read; 718 msg.msg_flags = 0; 719 ret = xs_read_discard(transport->sock, &msg, flags, 720 transport->recv.len - transport->recv.offset); 721 if (ret <= 0) 722 goto out_err; 723 transport->recv.offset += ret; 724 read += ret; 725 if (transport->recv.offset != transport->recv.len) 726 return read; 727 } 728 if (xs_read_stream_request_done(transport)) { 729 trace_xs_stream_read_request(transport); 730 transport->recv.copied = 0; 731 } 732 transport->recv.offset = 0; 733 transport->recv.len = 0; 734 return read; 735 out_err: 736 return ret != 0 ? ret : -ESHUTDOWN; 737 } 738 739 static __poll_t xs_poll_socket(struct sock_xprt *transport) 740 { 741 return transport->sock->ops->poll(transport->file, transport->sock, 742 NULL); 743 } 744 745 static bool xs_poll_socket_readable(struct sock_xprt *transport) 746 { 747 __poll_t events = xs_poll_socket(transport); 748 749 return (events & (EPOLLIN | EPOLLRDNORM)) && !(events & EPOLLRDHUP); 750 } 751 752 static void xs_poll_check_readable(struct sock_xprt *transport) 753 { 754 755 clear_bit(XPRT_SOCK_DATA_READY, &transport->sock_state); 756 if (test_bit(XPRT_SOCK_IGNORE_RECV, &transport->sock_state)) 757 return; 758 if (!xs_poll_socket_readable(transport)) 759 return; 760 if (!test_and_set_bit(XPRT_SOCK_DATA_READY, &transport->sock_state)) 761 queue_work(xprtiod_workqueue, &transport->recv_worker); 762 } 763 764 static void xs_stream_data_receive(struct sock_xprt *transport) 765 { 766 size_t read = 0; 767 ssize_t ret = 0; 768 769 mutex_lock(&transport->recv_mutex); 770 if (transport->sock == NULL) 771 goto out; 772 for (;;) { 773 ret = xs_read_stream(transport, MSG_DONTWAIT); 774 if (ret < 0) 775 break; 776 read += ret; 777 cond_resched(); 778 } 779 if (ret == -ESHUTDOWN) 780 kernel_sock_shutdown(transport->sock, SHUT_RDWR); 781 else 782 xs_poll_check_readable(transport); 783 out: 784 mutex_unlock(&transport->recv_mutex); 785 trace_xs_stream_read_data(&transport->xprt, ret, read); 786 } 787 788 static void xs_stream_data_receive_workfn(struct work_struct *work) 789 { 790 struct sock_xprt *transport = 791 container_of(work, struct sock_xprt, recv_worker); 792 unsigned int pflags = memalloc_nofs_save(); 793 794 xs_stream_data_receive(transport); 795 memalloc_nofs_restore(pflags); 796 } 797 798 static void 799 xs_stream_reset_connect(struct sock_xprt *transport) 800 { 801 transport->recv.offset = 0; 802 transport->recv.len = 0; 803 transport->recv.copied = 0; 804 transport->xmit.offset = 0; 805 } 806 807 static void 808 xs_stream_start_connect(struct sock_xprt *transport) 809 { 810 transport->xprt.stat.connect_count++; 811 transport->xprt.stat.connect_start = jiffies; 812 } 813 814 #define XS_SENDMSG_FLAGS (MSG_DONTWAIT | MSG_NOSIGNAL) 815 816 /** 817 * xs_nospace - handle transmit was incomplete 818 * @req: pointer to RPC request 819 * @transport: pointer to struct sock_xprt 820 * 821 */ 822 static int xs_nospace(struct rpc_rqst *req, struct sock_xprt *transport) 823 { 824 struct rpc_xprt *xprt = &transport->xprt; 825 struct sock *sk = transport->inet; 826 int ret = -EAGAIN; 827 828 trace_rpc_socket_nospace(req, transport); 829 830 /* Protect against races with write_space */ 831 spin_lock(&xprt->transport_lock); 832 833 /* Don't race with disconnect */ 834 if (xprt_connected(xprt)) { 835 /* wait for more buffer space */ 836 set_bit(XPRT_SOCK_NOSPACE, &transport->sock_state); 837 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); 838 sk->sk_write_pending++; 839 xprt_wait_for_buffer_space(xprt); 840 } else 841 ret = -ENOTCONN; 842 843 spin_unlock(&xprt->transport_lock); 844 return ret; 845 } 846 847 static int xs_sock_nospace(struct rpc_rqst *req) 848 { 849 struct sock_xprt *transport = 850 container_of(req->rq_xprt, struct sock_xprt, xprt); 851 struct sock *sk = transport->inet; 852 int ret = -EAGAIN; 853 854 lock_sock(sk); 855 if (!sock_writeable(sk)) 856 ret = xs_nospace(req, transport); 857 release_sock(sk); 858 return ret; 859 } 860 861 static int xs_stream_nospace(struct rpc_rqst *req, bool vm_wait) 862 { 863 struct sock_xprt *transport = 864 container_of(req->rq_xprt, struct sock_xprt, xprt); 865 struct sock *sk = transport->inet; 866 int ret = -EAGAIN; 867 868 if (vm_wait) 869 return -ENOBUFS; 870 lock_sock(sk); 871 if (!sk_stream_memory_free(sk)) 872 ret = xs_nospace(req, transport); 873 release_sock(sk); 874 return ret; 875 } 876 877 static int xs_stream_prepare_request(struct rpc_rqst *req, struct xdr_buf *buf) 878 { 879 return xdr_alloc_bvec(buf, rpc_task_gfp_mask()); 880 } 881 882 /* 883 * Determine if the previous message in the stream was aborted before it 884 * could complete transmission. 885 */ 886 static bool 887 xs_send_request_was_aborted(struct sock_xprt *transport, struct rpc_rqst *req) 888 { 889 return transport->xmit.offset != 0 && req->rq_bytes_sent == 0; 890 } 891 892 /* 893 * Return the stream record marker field for a record of length < 2^31-1 894 */ 895 static rpc_fraghdr 896 xs_stream_record_marker(struct xdr_buf *xdr) 897 { 898 if (!xdr->len) 899 return 0; 900 return cpu_to_be32(RPC_LAST_STREAM_FRAGMENT | (u32)xdr->len); 901 } 902 903 /** 904 * xs_local_send_request - write an RPC request to an AF_LOCAL socket 905 * @req: pointer to RPC request 906 * 907 * Return values: 908 * 0: The request has been sent 909 * EAGAIN: The socket was blocked, please call again later to 910 * complete the request 911 * ENOTCONN: Caller needs to invoke connect logic then call again 912 * other: Some other error occurred, the request was not sent 913 */ 914 static int xs_local_send_request(struct rpc_rqst *req) 915 { 916 struct rpc_xprt *xprt = req->rq_xprt; 917 struct sock_xprt *transport = 918 container_of(xprt, struct sock_xprt, xprt); 919 struct xdr_buf *xdr = &req->rq_snd_buf; 920 rpc_fraghdr rm = xs_stream_record_marker(xdr); 921 unsigned int msglen = rm ? req->rq_slen + sizeof(rm) : req->rq_slen; 922 struct msghdr msg = { 923 .msg_flags = XS_SENDMSG_FLAGS, 924 }; 925 bool vm_wait; 926 unsigned int sent; 927 int status; 928 929 /* Close the stream if the previous transmission was incomplete */ 930 if (xs_send_request_was_aborted(transport, req)) { 931 xprt_force_disconnect(xprt); 932 return -ENOTCONN; 933 } 934 935 xs_pktdump("packet data:", 936 req->rq_svec->iov_base, req->rq_svec->iov_len); 937 938 vm_wait = sk_stream_is_writeable(transport->inet) ? true : false; 939 940 req->rq_xtime = ktime_get(); 941 status = xprt_sock_sendmsg(transport->sock, &msg, xdr, 942 transport->xmit.offset, rm, &sent); 943 dprintk("RPC: %s(%u) = %d\n", 944 __func__, xdr->len - transport->xmit.offset, status); 945 946 if (likely(sent > 0) || status == 0) { 947 transport->xmit.offset += sent; 948 req->rq_bytes_sent = transport->xmit.offset; 949 if (likely(req->rq_bytes_sent >= msglen)) { 950 req->rq_xmit_bytes_sent += transport->xmit.offset; 951 transport->xmit.offset = 0; 952 return 0; 953 } 954 status = -EAGAIN; 955 vm_wait = false; 956 } 957 958 switch (status) { 959 case -EAGAIN: 960 status = xs_stream_nospace(req, vm_wait); 961 break; 962 default: 963 dprintk("RPC: sendmsg returned unrecognized error %d\n", 964 -status); 965 fallthrough; 966 case -EPIPE: 967 xprt_force_disconnect(xprt); 968 status = -ENOTCONN; 969 } 970 971 return status; 972 } 973 974 /** 975 * xs_udp_send_request - write an RPC request to a UDP socket 976 * @req: pointer to RPC request 977 * 978 * Return values: 979 * 0: The request has been sent 980 * EAGAIN: The socket was blocked, please call again later to 981 * complete the request 982 * ENOTCONN: Caller needs to invoke connect logic then call again 983 * other: Some other error occurred, the request was not sent 984 */ 985 static int xs_udp_send_request(struct rpc_rqst *req) 986 { 987 struct rpc_xprt *xprt = req->rq_xprt; 988 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); 989 struct xdr_buf *xdr = &req->rq_snd_buf; 990 struct msghdr msg = { 991 .msg_name = xs_addr(xprt), 992 .msg_namelen = xprt->addrlen, 993 .msg_flags = XS_SENDMSG_FLAGS, 994 }; 995 unsigned int sent; 996 int status; 997 998 xs_pktdump("packet data:", 999 req->rq_svec->iov_base, 1000 req->rq_svec->iov_len); 1001 1002 if (!xprt_bound(xprt)) 1003 return -ENOTCONN; 1004 1005 if (!xprt_request_get_cong(xprt, req)) 1006 return -EBADSLT; 1007 1008 status = xdr_alloc_bvec(xdr, rpc_task_gfp_mask()); 1009 if (status < 0) 1010 return status; 1011 req->rq_xtime = ktime_get(); 1012 status = xprt_sock_sendmsg(transport->sock, &msg, xdr, 0, 0, &sent); 1013 1014 dprintk("RPC: xs_udp_send_request(%u) = %d\n", 1015 xdr->len, status); 1016 1017 /* firewall is blocking us, don't return -EAGAIN or we end up looping */ 1018 if (status == -EPERM) 1019 goto process_status; 1020 1021 if (status == -EAGAIN && sock_writeable(transport->inet)) 1022 status = -ENOBUFS; 1023 1024 if (sent > 0 || status == 0) { 1025 req->rq_xmit_bytes_sent += sent; 1026 if (sent >= req->rq_slen) 1027 return 0; 1028 /* Still some bytes left; set up for a retry later. */ 1029 status = -EAGAIN; 1030 } 1031 1032 process_status: 1033 switch (status) { 1034 case -ENOTSOCK: 1035 status = -ENOTCONN; 1036 /* Should we call xs_close() here? */ 1037 break; 1038 case -EAGAIN: 1039 status = xs_sock_nospace(req); 1040 break; 1041 case -ENETUNREACH: 1042 case -ENOBUFS: 1043 case -EPIPE: 1044 case -ECONNREFUSED: 1045 case -EPERM: 1046 /* When the server has died, an ICMP port unreachable message 1047 * prompts ECONNREFUSED. */ 1048 break; 1049 default: 1050 dprintk("RPC: sendmsg returned unrecognized error %d\n", 1051 -status); 1052 } 1053 1054 return status; 1055 } 1056 1057 /** 1058 * xs_tcp_send_request - write an RPC request to a TCP socket 1059 * @req: pointer to RPC request 1060 * 1061 * Return values: 1062 * 0: The request has been sent 1063 * EAGAIN: The socket was blocked, please call again later to 1064 * complete the request 1065 * ENOTCONN: Caller needs to invoke connect logic then call again 1066 * other: Some other error occurred, the request was not sent 1067 * 1068 * XXX: In the case of soft timeouts, should we eventually give up 1069 * if sendmsg is not able to make progress? 1070 */ 1071 static int xs_tcp_send_request(struct rpc_rqst *req) 1072 { 1073 struct rpc_xprt *xprt = req->rq_xprt; 1074 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); 1075 struct xdr_buf *xdr = &req->rq_snd_buf; 1076 rpc_fraghdr rm = xs_stream_record_marker(xdr); 1077 unsigned int msglen = rm ? req->rq_slen + sizeof(rm) : req->rq_slen; 1078 struct msghdr msg = { 1079 .msg_flags = XS_SENDMSG_FLAGS, 1080 }; 1081 bool vm_wait; 1082 unsigned int sent; 1083 int status; 1084 1085 /* Close the stream if the previous transmission was incomplete */ 1086 if (xs_send_request_was_aborted(transport, req)) { 1087 if (transport->sock != NULL) 1088 kernel_sock_shutdown(transport->sock, SHUT_RDWR); 1089 return -ENOTCONN; 1090 } 1091 if (!transport->inet) 1092 return -ENOTCONN; 1093 1094 xs_pktdump("packet data:", 1095 req->rq_svec->iov_base, 1096 req->rq_svec->iov_len); 1097 1098 if (test_bit(XPRT_SOCK_UPD_TIMEOUT, &transport->sock_state)) 1099 xs_tcp_set_socket_timeouts(xprt, transport->sock); 1100 1101 xs_set_srcport(transport, transport->sock); 1102 1103 /* Continue transmitting the packet/record. We must be careful 1104 * to cope with writespace callbacks arriving _after_ we have 1105 * called sendmsg(). */ 1106 req->rq_xtime = ktime_get(); 1107 tcp_sock_set_cork(transport->inet, true); 1108 1109 vm_wait = sk_stream_is_writeable(transport->inet) ? true : false; 1110 1111 do { 1112 status = xprt_sock_sendmsg(transport->sock, &msg, xdr, 1113 transport->xmit.offset, rm, &sent); 1114 1115 dprintk("RPC: xs_tcp_send_request(%u) = %d\n", 1116 xdr->len - transport->xmit.offset, status); 1117 1118 /* If we've sent the entire packet, immediately 1119 * reset the count of bytes sent. */ 1120 transport->xmit.offset += sent; 1121 req->rq_bytes_sent = transport->xmit.offset; 1122 if (likely(req->rq_bytes_sent >= msglen)) { 1123 req->rq_xmit_bytes_sent += transport->xmit.offset; 1124 transport->xmit.offset = 0; 1125 if (atomic_long_read(&xprt->xmit_queuelen) == 1) 1126 tcp_sock_set_cork(transport->inet, false); 1127 return 0; 1128 } 1129 1130 WARN_ON_ONCE(sent == 0 && status == 0); 1131 1132 if (sent > 0) 1133 vm_wait = false; 1134 1135 } while (status == 0); 1136 1137 switch (status) { 1138 case -ENOTSOCK: 1139 status = -ENOTCONN; 1140 /* Should we call xs_close() here? */ 1141 break; 1142 case -EAGAIN: 1143 status = xs_stream_nospace(req, vm_wait); 1144 break; 1145 case -ECONNRESET: 1146 case -ECONNREFUSED: 1147 case -ENOTCONN: 1148 case -EADDRINUSE: 1149 case -ENOBUFS: 1150 case -EPIPE: 1151 break; 1152 default: 1153 dprintk("RPC: sendmsg returned unrecognized error %d\n", 1154 -status); 1155 } 1156 1157 return status; 1158 } 1159 1160 static void xs_save_old_callbacks(struct sock_xprt *transport, struct sock *sk) 1161 { 1162 transport->old_data_ready = sk->sk_data_ready; 1163 transport->old_state_change = sk->sk_state_change; 1164 transport->old_write_space = sk->sk_write_space; 1165 transport->old_error_report = sk->sk_error_report; 1166 } 1167 1168 static void xs_restore_old_callbacks(struct sock_xprt *transport, struct sock *sk) 1169 { 1170 sk->sk_data_ready = transport->old_data_ready; 1171 sk->sk_state_change = transport->old_state_change; 1172 sk->sk_write_space = transport->old_write_space; 1173 sk->sk_error_report = transport->old_error_report; 1174 } 1175 1176 static void xs_sock_reset_state_flags(struct rpc_xprt *xprt) 1177 { 1178 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); 1179 1180 clear_bit(XPRT_SOCK_DATA_READY, &transport->sock_state); 1181 clear_bit(XPRT_SOCK_WAKE_ERROR, &transport->sock_state); 1182 clear_bit(XPRT_SOCK_WAKE_WRITE, &transport->sock_state); 1183 clear_bit(XPRT_SOCK_WAKE_DISCONNECT, &transport->sock_state); 1184 clear_bit(XPRT_SOCK_NOSPACE, &transport->sock_state); 1185 } 1186 1187 static void xs_run_error_worker(struct sock_xprt *transport, unsigned int nr) 1188 { 1189 set_bit(nr, &transport->sock_state); 1190 queue_work(xprtiod_workqueue, &transport->error_worker); 1191 } 1192 1193 static void xs_sock_reset_connection_flags(struct rpc_xprt *xprt) 1194 { 1195 xprt->connect_cookie++; 1196 smp_mb__before_atomic(); 1197 clear_bit(XPRT_CLOSE_WAIT, &xprt->state); 1198 clear_bit(XPRT_CLOSING, &xprt->state); 1199 xs_sock_reset_state_flags(xprt); 1200 smp_mb__after_atomic(); 1201 } 1202 1203 /** 1204 * xs_error_report - callback to handle TCP socket state errors 1205 * @sk: socket 1206 * 1207 * Note: we don't call sock_error() since there may be a rpc_task 1208 * using the socket, and so we don't want to clear sk->sk_err. 1209 */ 1210 static void xs_error_report(struct sock *sk) 1211 { 1212 struct sock_xprt *transport; 1213 struct rpc_xprt *xprt; 1214 1215 if (!(xprt = xprt_from_sock(sk))) 1216 return; 1217 1218 transport = container_of(xprt, struct sock_xprt, xprt); 1219 transport->xprt_err = -sk->sk_err; 1220 if (transport->xprt_err == 0) 1221 return; 1222 dprintk("RPC: xs_error_report client %p, error=%d...\n", 1223 xprt, -transport->xprt_err); 1224 trace_rpc_socket_error(xprt, sk->sk_socket, transport->xprt_err); 1225 1226 /* barrier ensures xprt_err is set before XPRT_SOCK_WAKE_ERROR */ 1227 smp_mb__before_atomic(); 1228 xs_run_error_worker(transport, XPRT_SOCK_WAKE_ERROR); 1229 } 1230 1231 static void xs_reset_transport(struct sock_xprt *transport) 1232 { 1233 struct socket *sock = transport->sock; 1234 struct sock *sk = transport->inet; 1235 struct rpc_xprt *xprt = &transport->xprt; 1236 struct file *filp = transport->file; 1237 1238 if (sk == NULL) 1239 return; 1240 /* 1241 * Make sure we're calling this in a context from which it is safe 1242 * to call __fput_sync(). In practice that means rpciod and the 1243 * system workqueue. 1244 */ 1245 if (!(current->flags & PF_WQ_WORKER)) { 1246 WARN_ON_ONCE(1); 1247 set_bit(XPRT_CLOSE_WAIT, &xprt->state); 1248 return; 1249 } 1250 1251 if (atomic_read(&transport->xprt.swapper)) 1252 sk_clear_memalloc(sk); 1253 1254 tls_handshake_cancel(sk); 1255 1256 kernel_sock_shutdown(sock, SHUT_RDWR); 1257 1258 mutex_lock(&transport->recv_mutex); 1259 lock_sock(sk); 1260 transport->inet = NULL; 1261 transport->sock = NULL; 1262 transport->file = NULL; 1263 1264 sk->sk_user_data = NULL; 1265 1266 xs_restore_old_callbacks(transport, sk); 1267 xprt_clear_connected(xprt); 1268 xs_sock_reset_connection_flags(xprt); 1269 /* Reset stream record info */ 1270 xs_stream_reset_connect(transport); 1271 release_sock(sk); 1272 mutex_unlock(&transport->recv_mutex); 1273 1274 trace_rpc_socket_close(xprt, sock); 1275 __fput_sync(filp); 1276 1277 xprt_disconnect_done(xprt); 1278 } 1279 1280 /** 1281 * xs_close - close a socket 1282 * @xprt: transport 1283 * 1284 * This is used when all requests are complete; ie, no DRC state remains 1285 * on the server we want to save. 1286 * 1287 * The caller _must_ be holding XPRT_LOCKED in order to avoid issues with 1288 * xs_reset_transport() zeroing the socket from underneath a writer. 1289 */ 1290 static void xs_close(struct rpc_xprt *xprt) 1291 { 1292 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); 1293 1294 dprintk("RPC: xs_close xprt %p\n", xprt); 1295 1296 xs_reset_transport(transport); 1297 xprt->reestablish_timeout = 0; 1298 } 1299 1300 static void xs_inject_disconnect(struct rpc_xprt *xprt) 1301 { 1302 dprintk("RPC: injecting transport disconnect on xprt=%p\n", 1303 xprt); 1304 xprt_disconnect_done(xprt); 1305 } 1306 1307 static void xs_xprt_free(struct rpc_xprt *xprt) 1308 { 1309 xs_free_peer_addresses(xprt); 1310 xprt_free(xprt); 1311 } 1312 1313 /** 1314 * xs_destroy - prepare to shutdown a transport 1315 * @xprt: doomed transport 1316 * 1317 */ 1318 static void xs_destroy(struct rpc_xprt *xprt) 1319 { 1320 struct sock_xprt *transport = container_of(xprt, 1321 struct sock_xprt, xprt); 1322 dprintk("RPC: xs_destroy xprt %p\n", xprt); 1323 1324 cancel_delayed_work_sync(&transport->connect_worker); 1325 xs_close(xprt); 1326 cancel_work_sync(&transport->recv_worker); 1327 cancel_work_sync(&transport->error_worker); 1328 xs_xprt_free(xprt); 1329 module_put(THIS_MODULE); 1330 } 1331 1332 /** 1333 * xs_udp_data_read_skb - receive callback for UDP sockets 1334 * @xprt: transport 1335 * @sk: socket 1336 * @skb: skbuff 1337 * 1338 */ 1339 static void xs_udp_data_read_skb(struct rpc_xprt *xprt, 1340 struct sock *sk, 1341 struct sk_buff *skb) 1342 { 1343 struct rpc_task *task; 1344 struct rpc_rqst *rovr; 1345 int repsize, copied; 1346 u32 _xid; 1347 __be32 *xp; 1348 1349 repsize = skb->len; 1350 if (repsize < 4) { 1351 dprintk("RPC: impossible RPC reply size %d!\n", repsize); 1352 return; 1353 } 1354 1355 /* Copy the XID from the skb... */ 1356 xp = skb_header_pointer(skb, 0, sizeof(_xid), &_xid); 1357 if (xp == NULL) 1358 return; 1359 1360 /* Look up and lock the request corresponding to the given XID */ 1361 spin_lock(&xprt->queue_lock); 1362 rovr = xprt_lookup_rqst(xprt, *xp); 1363 if (!rovr) 1364 goto out_unlock; 1365 xprt_pin_rqst(rovr); 1366 xprt_update_rtt(rovr->rq_task); 1367 spin_unlock(&xprt->queue_lock); 1368 task = rovr->rq_task; 1369 1370 if ((copied = rovr->rq_private_buf.buflen) > repsize) 1371 copied = repsize; 1372 1373 /* Suck it into the iovec, verify checksum if not done by hw. */ 1374 if (csum_partial_copy_to_xdr(&rovr->rq_private_buf, skb)) { 1375 spin_lock(&xprt->queue_lock); 1376 __UDPX_INC_STATS(sk, UDP_MIB_INERRORS); 1377 goto out_unpin; 1378 } 1379 1380 1381 spin_lock(&xprt->transport_lock); 1382 xprt_adjust_cwnd(xprt, task, copied); 1383 spin_unlock(&xprt->transport_lock); 1384 spin_lock(&xprt->queue_lock); 1385 xprt_complete_rqst(task, copied); 1386 __UDPX_INC_STATS(sk, UDP_MIB_INDATAGRAMS); 1387 out_unpin: 1388 xprt_unpin_rqst(rovr); 1389 out_unlock: 1390 spin_unlock(&xprt->queue_lock); 1391 } 1392 1393 static void xs_udp_data_receive(struct sock_xprt *transport) 1394 { 1395 struct sk_buff *skb; 1396 struct sock *sk; 1397 int err; 1398 1399 mutex_lock(&transport->recv_mutex); 1400 sk = transport->inet; 1401 if (sk == NULL) 1402 goto out; 1403 for (;;) { 1404 skb = skb_recv_udp(sk, MSG_DONTWAIT, &err); 1405 if (skb == NULL) 1406 break; 1407 xs_udp_data_read_skb(&transport->xprt, sk, skb); 1408 consume_skb(skb); 1409 cond_resched(); 1410 } 1411 xs_poll_check_readable(transport); 1412 out: 1413 mutex_unlock(&transport->recv_mutex); 1414 } 1415 1416 static void xs_udp_data_receive_workfn(struct work_struct *work) 1417 { 1418 struct sock_xprt *transport = 1419 container_of(work, struct sock_xprt, recv_worker); 1420 unsigned int pflags = memalloc_nofs_save(); 1421 1422 xs_udp_data_receive(transport); 1423 memalloc_nofs_restore(pflags); 1424 } 1425 1426 /** 1427 * xs_data_ready - "data ready" callback for sockets 1428 * @sk: socket with data to read 1429 * 1430 */ 1431 static void xs_data_ready(struct sock *sk) 1432 { 1433 struct rpc_xprt *xprt; 1434 1435 trace_sk_data_ready(sk); 1436 1437 xprt = xprt_from_sock(sk); 1438 if (xprt != NULL) { 1439 struct sock_xprt *transport = container_of(xprt, 1440 struct sock_xprt, xprt); 1441 1442 trace_xs_data_ready(xprt); 1443 1444 transport->old_data_ready(sk); 1445 1446 if (test_bit(XPRT_SOCK_IGNORE_RECV, &transport->sock_state)) 1447 return; 1448 1449 /* Any data means we had a useful conversation, so 1450 * then we don't need to delay the next reconnect 1451 */ 1452 if (xprt->reestablish_timeout) 1453 xprt->reestablish_timeout = 0; 1454 if (!test_and_set_bit(XPRT_SOCK_DATA_READY, &transport->sock_state)) 1455 queue_work(xprtiod_workqueue, &transport->recv_worker); 1456 } 1457 } 1458 1459 /* 1460 * Helper function to force a TCP close if the server is sending 1461 * junk and/or it has put us in CLOSE_WAIT 1462 */ 1463 static void xs_tcp_force_close(struct rpc_xprt *xprt) 1464 { 1465 xprt_force_disconnect(xprt); 1466 } 1467 1468 #if defined(CONFIG_SUNRPC_BACKCHANNEL) 1469 static size_t xs_tcp_bc_maxpayload(struct rpc_xprt *xprt) 1470 { 1471 return PAGE_SIZE; 1472 } 1473 #endif /* CONFIG_SUNRPC_BACKCHANNEL */ 1474 1475 /** 1476 * xs_local_state_change - callback to handle AF_LOCAL socket state changes 1477 * @sk: socket whose state has changed 1478 * 1479 */ 1480 static void xs_local_state_change(struct sock *sk) 1481 { 1482 struct rpc_xprt *xprt; 1483 struct sock_xprt *transport; 1484 1485 if (!(xprt = xprt_from_sock(sk))) 1486 return; 1487 transport = container_of(xprt, struct sock_xprt, xprt); 1488 if (sk->sk_shutdown & SHUTDOWN_MASK) { 1489 clear_bit(XPRT_CONNECTED, &xprt->state); 1490 /* Trigger the socket release */ 1491 xs_run_error_worker(transport, XPRT_SOCK_WAKE_DISCONNECT); 1492 } 1493 } 1494 1495 /** 1496 * xs_tcp_state_change - callback to handle TCP socket state changes 1497 * @sk: socket whose state has changed 1498 * 1499 */ 1500 static void xs_tcp_state_change(struct sock *sk) 1501 { 1502 struct rpc_xprt *xprt; 1503 struct sock_xprt *transport; 1504 1505 if (!(xprt = xprt_from_sock(sk))) 1506 return; 1507 dprintk("RPC: xs_tcp_state_change client %p...\n", xprt); 1508 dprintk("RPC: state %x conn %d dead %d zapped %d sk_shutdown %d\n", 1509 sk->sk_state, xprt_connected(xprt), 1510 sock_flag(sk, SOCK_DEAD), 1511 sock_flag(sk, SOCK_ZAPPED), 1512 sk->sk_shutdown); 1513 1514 transport = container_of(xprt, struct sock_xprt, xprt); 1515 trace_rpc_socket_state_change(xprt, sk->sk_socket); 1516 switch (sk->sk_state) { 1517 case TCP_ESTABLISHED: 1518 if (!xprt_test_and_set_connected(xprt)) { 1519 xprt->connect_cookie++; 1520 clear_bit(XPRT_SOCK_CONNECTING, &transport->sock_state); 1521 xprt_clear_connecting(xprt); 1522 1523 xprt->stat.connect_count++; 1524 xprt->stat.connect_time += (long)jiffies - 1525 xprt->stat.connect_start; 1526 xs_run_error_worker(transport, XPRT_SOCK_WAKE_PENDING); 1527 } 1528 break; 1529 case TCP_FIN_WAIT1: 1530 /* The client initiated a shutdown of the socket */ 1531 xprt->connect_cookie++; 1532 xprt->reestablish_timeout = 0; 1533 set_bit(XPRT_CLOSING, &xprt->state); 1534 smp_mb__before_atomic(); 1535 clear_bit(XPRT_CONNECTED, &xprt->state); 1536 clear_bit(XPRT_CLOSE_WAIT, &xprt->state); 1537 smp_mb__after_atomic(); 1538 break; 1539 case TCP_CLOSE_WAIT: 1540 /* The server initiated a shutdown of the socket */ 1541 xprt->connect_cookie++; 1542 clear_bit(XPRT_CONNECTED, &xprt->state); 1543 xs_run_error_worker(transport, XPRT_SOCK_WAKE_DISCONNECT); 1544 fallthrough; 1545 case TCP_CLOSING: 1546 /* 1547 * If the server closed down the connection, make sure that 1548 * we back off before reconnecting 1549 */ 1550 if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO) 1551 xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO; 1552 break; 1553 case TCP_LAST_ACK: 1554 set_bit(XPRT_CLOSING, &xprt->state); 1555 smp_mb__before_atomic(); 1556 clear_bit(XPRT_CONNECTED, &xprt->state); 1557 smp_mb__after_atomic(); 1558 break; 1559 case TCP_CLOSE: 1560 if (test_and_clear_bit(XPRT_SOCK_CONNECTING, 1561 &transport->sock_state)) 1562 xprt_clear_connecting(xprt); 1563 clear_bit(XPRT_CLOSING, &xprt->state); 1564 /* Trigger the socket release */ 1565 xs_run_error_worker(transport, XPRT_SOCK_WAKE_DISCONNECT); 1566 } 1567 } 1568 1569 static void xs_write_space(struct sock *sk) 1570 { 1571 struct sock_xprt *transport; 1572 struct rpc_xprt *xprt; 1573 1574 if (!sk->sk_socket) 1575 return; 1576 clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags); 1577 1578 if (unlikely(!(xprt = xprt_from_sock(sk)))) 1579 return; 1580 transport = container_of(xprt, struct sock_xprt, xprt); 1581 if (!test_and_clear_bit(XPRT_SOCK_NOSPACE, &transport->sock_state)) 1582 return; 1583 xs_run_error_worker(transport, XPRT_SOCK_WAKE_WRITE); 1584 sk->sk_write_pending--; 1585 } 1586 1587 /** 1588 * xs_udp_write_space - callback invoked when socket buffer space 1589 * becomes available 1590 * @sk: socket whose state has changed 1591 * 1592 * Called when more output buffer space is available for this socket. 1593 * We try not to wake our writers until they can make "significant" 1594 * progress, otherwise we'll waste resources thrashing kernel_sendmsg 1595 * with a bunch of small requests. 1596 */ 1597 static void xs_udp_write_space(struct sock *sk) 1598 { 1599 /* from net/core/sock.c:sock_def_write_space */ 1600 if (sock_writeable(sk)) 1601 xs_write_space(sk); 1602 } 1603 1604 /** 1605 * xs_tcp_write_space - callback invoked when socket buffer space 1606 * becomes available 1607 * @sk: socket whose state has changed 1608 * 1609 * Called when more output buffer space is available for this socket. 1610 * We try not to wake our writers until they can make "significant" 1611 * progress, otherwise we'll waste resources thrashing kernel_sendmsg 1612 * with a bunch of small requests. 1613 */ 1614 static void xs_tcp_write_space(struct sock *sk) 1615 { 1616 /* from net/core/stream.c:sk_stream_write_space */ 1617 if (sk_stream_is_writeable(sk)) 1618 xs_write_space(sk); 1619 } 1620 1621 static void xs_udp_do_set_buffer_size(struct rpc_xprt *xprt) 1622 { 1623 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); 1624 struct sock *sk = transport->inet; 1625 1626 if (transport->rcvsize) { 1627 sk->sk_userlocks |= SOCK_RCVBUF_LOCK; 1628 sk->sk_rcvbuf = transport->rcvsize * xprt->max_reqs * 2; 1629 } 1630 if (transport->sndsize) { 1631 sk->sk_userlocks |= SOCK_SNDBUF_LOCK; 1632 sk->sk_sndbuf = transport->sndsize * xprt->max_reqs * 2; 1633 sk->sk_write_space(sk); 1634 } 1635 } 1636 1637 /** 1638 * xs_udp_set_buffer_size - set send and receive limits 1639 * @xprt: generic transport 1640 * @sndsize: requested size of send buffer, in bytes 1641 * @rcvsize: requested size of receive buffer, in bytes 1642 * 1643 * Set socket send and receive buffer size limits. 1644 */ 1645 static void xs_udp_set_buffer_size(struct rpc_xprt *xprt, size_t sndsize, size_t rcvsize) 1646 { 1647 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); 1648 1649 transport->sndsize = 0; 1650 if (sndsize) 1651 transport->sndsize = sndsize + 1024; 1652 transport->rcvsize = 0; 1653 if (rcvsize) 1654 transport->rcvsize = rcvsize + 1024; 1655 1656 xs_udp_do_set_buffer_size(xprt); 1657 } 1658 1659 /** 1660 * xs_udp_timer - called when a retransmit timeout occurs on a UDP transport 1661 * @xprt: controlling transport 1662 * @task: task that timed out 1663 * 1664 * Adjust the congestion window after a retransmit timeout has occurred. 1665 */ 1666 static void xs_udp_timer(struct rpc_xprt *xprt, struct rpc_task *task) 1667 { 1668 spin_lock(&xprt->transport_lock); 1669 xprt_adjust_cwnd(xprt, task, -ETIMEDOUT); 1670 spin_unlock(&xprt->transport_lock); 1671 } 1672 1673 static int xs_get_random_port(void) 1674 { 1675 unsigned short min = xprt_min_resvport, max = xprt_max_resvport; 1676 unsigned short range; 1677 unsigned short rand; 1678 1679 if (max < min) 1680 return -EADDRINUSE; 1681 range = max - min + 1; 1682 rand = get_random_u32_below(range); 1683 return rand + min; 1684 } 1685 1686 static unsigned short xs_sock_getport(struct socket *sock) 1687 { 1688 struct sockaddr_storage buf; 1689 unsigned short port = 0; 1690 1691 if (kernel_getsockname(sock, (struct sockaddr *)&buf) < 0) 1692 goto out; 1693 switch (buf.ss_family) { 1694 case AF_INET6: 1695 port = ntohs(((struct sockaddr_in6 *)&buf)->sin6_port); 1696 break; 1697 case AF_INET: 1698 port = ntohs(((struct sockaddr_in *)&buf)->sin_port); 1699 } 1700 out: 1701 return port; 1702 } 1703 1704 /** 1705 * xs_set_port - reset the port number in the remote endpoint address 1706 * @xprt: generic transport 1707 * @port: new port number 1708 * 1709 */ 1710 static void xs_set_port(struct rpc_xprt *xprt, unsigned short port) 1711 { 1712 dprintk("RPC: setting port for xprt %p to %u\n", xprt, port); 1713 1714 rpc_set_port(xs_addr(xprt), port); 1715 xs_update_peer_port(xprt); 1716 } 1717 1718 static void xs_set_srcport(struct sock_xprt *transport, struct socket *sock) 1719 { 1720 if (transport->srcport == 0 && transport->xprt.reuseport) 1721 transport->srcport = xs_sock_getport(sock); 1722 } 1723 1724 static int xs_get_srcport(struct sock_xprt *transport) 1725 { 1726 int port = transport->srcport; 1727 1728 if (port == 0 && transport->xprt.resvport) 1729 port = xs_get_random_port(); 1730 return port; 1731 } 1732 1733 static unsigned short xs_sock_srcport(struct rpc_xprt *xprt) 1734 { 1735 struct sock_xprt *sock = container_of(xprt, struct sock_xprt, xprt); 1736 unsigned short ret = 0; 1737 mutex_lock(&sock->recv_mutex); 1738 if (sock->sock) 1739 ret = xs_sock_getport(sock->sock); 1740 mutex_unlock(&sock->recv_mutex); 1741 return ret; 1742 } 1743 1744 static int xs_sock_srcaddr(struct rpc_xprt *xprt, char *buf, size_t buflen) 1745 { 1746 struct sock_xprt *sock = container_of(xprt, struct sock_xprt, xprt); 1747 union { 1748 struct sockaddr sa; 1749 struct sockaddr_storage st; 1750 } saddr; 1751 int ret = -ENOTCONN; 1752 1753 mutex_lock(&sock->recv_mutex); 1754 if (sock->sock) { 1755 ret = kernel_getsockname(sock->sock, &saddr.sa); 1756 if (ret >= 0) 1757 ret = snprintf(buf, buflen, "%pISc", &saddr.sa); 1758 } 1759 mutex_unlock(&sock->recv_mutex); 1760 return ret; 1761 } 1762 1763 static unsigned short xs_next_srcport(struct sock_xprt *transport, unsigned short port) 1764 { 1765 if (transport->srcport != 0) 1766 transport->srcport = 0; 1767 if (!transport->xprt.resvport) 1768 return 0; 1769 if (port <= xprt_min_resvport || port > xprt_max_resvport) 1770 return xprt_max_resvport; 1771 return --port; 1772 } 1773 static int xs_bind(struct sock_xprt *transport, struct socket *sock) 1774 { 1775 struct sockaddr_storage myaddr; 1776 int err, nloop = 0; 1777 int port = xs_get_srcport(transport); 1778 unsigned short last; 1779 1780 /* 1781 * If we are asking for any ephemeral port (i.e. port == 0 && 1782 * transport->xprt.resvport == 0), don't bind. Let the local 1783 * port selection happen implicitly when the socket is used 1784 * (for example at connect time). 1785 * 1786 * This ensures that we can continue to establish TCP 1787 * connections even when all local ephemeral ports are already 1788 * a part of some TCP connection. This makes no difference 1789 * for UDP sockets, but also doesn't harm them. 1790 * 1791 * If we're asking for any reserved port (i.e. port == 0 && 1792 * transport->xprt.resvport == 1) xs_get_srcport above will 1793 * ensure that port is non-zero and we will bind as needed. 1794 */ 1795 if (port <= 0) 1796 return port; 1797 1798 memcpy(&myaddr, &transport->srcaddr, transport->xprt.addrlen); 1799 do { 1800 rpc_set_port((struct sockaddr *)&myaddr, port); 1801 err = kernel_bind(sock, (struct sockaddr *)&myaddr, 1802 transport->xprt.addrlen); 1803 if (err == 0) { 1804 if (transport->xprt.reuseport) 1805 transport->srcport = port; 1806 break; 1807 } 1808 last = port; 1809 port = xs_next_srcport(transport, port); 1810 if (port > last) 1811 nloop++; 1812 } while (err == -EADDRINUSE && nloop != 2); 1813 1814 if (myaddr.ss_family == AF_INET) 1815 dprintk("RPC: %s %pI4:%u: %s (%d)\n", __func__, 1816 &((struct sockaddr_in *)&myaddr)->sin_addr, 1817 port, err ? "failed" : "ok", err); 1818 else 1819 dprintk("RPC: %s %pI6:%u: %s (%d)\n", __func__, 1820 &((struct sockaddr_in6 *)&myaddr)->sin6_addr, 1821 port, err ? "failed" : "ok", err); 1822 return err; 1823 } 1824 1825 /* 1826 * We don't support autobind on AF_LOCAL sockets 1827 */ 1828 static void xs_local_rpcbind(struct rpc_task *task) 1829 { 1830 xprt_set_bound(task->tk_xprt); 1831 } 1832 1833 static void xs_local_set_port(struct rpc_xprt *xprt, unsigned short port) 1834 { 1835 } 1836 1837 #ifdef CONFIG_DEBUG_LOCK_ALLOC 1838 static struct lock_class_key xs_key[3]; 1839 static struct lock_class_key xs_slock_key[3]; 1840 1841 static inline void xs_reclassify_socketu(struct socket *sock) 1842 { 1843 struct sock *sk = sock->sk; 1844 1845 sock_lock_init_class_and_name(sk, "slock-AF_LOCAL-RPC", 1846 &xs_slock_key[0], "sk_lock-AF_LOCAL-RPC", &xs_key[0]); 1847 } 1848 1849 static inline void xs_reclassify_socket4(struct socket *sock) 1850 { 1851 struct sock *sk = sock->sk; 1852 1853 sock_lock_init_class_and_name(sk, "slock-AF_INET-RPC", 1854 &xs_slock_key[1], "sk_lock-AF_INET-RPC", &xs_key[1]); 1855 } 1856 1857 static inline void xs_reclassify_socket6(struct socket *sock) 1858 { 1859 struct sock *sk = sock->sk; 1860 1861 sock_lock_init_class_and_name(sk, "slock-AF_INET6-RPC", 1862 &xs_slock_key[2], "sk_lock-AF_INET6-RPC", &xs_key[2]); 1863 } 1864 1865 static inline void xs_reclassify_socket(int family, struct socket *sock) 1866 { 1867 if (WARN_ON_ONCE(!sock_allow_reclassification(sock->sk))) 1868 return; 1869 1870 switch (family) { 1871 case AF_LOCAL: 1872 xs_reclassify_socketu(sock); 1873 break; 1874 case AF_INET: 1875 xs_reclassify_socket4(sock); 1876 break; 1877 case AF_INET6: 1878 xs_reclassify_socket6(sock); 1879 break; 1880 } 1881 } 1882 #else 1883 static inline void xs_reclassify_socket(int family, struct socket *sock) 1884 { 1885 } 1886 #endif 1887 1888 static void xs_dummy_setup_socket(struct work_struct *work) 1889 { 1890 } 1891 1892 static struct socket *xs_create_sock(struct rpc_xprt *xprt, 1893 struct sock_xprt *transport, int family, int type, 1894 int protocol, bool reuseport) 1895 { 1896 struct file *filp; 1897 struct socket *sock; 1898 int err; 1899 1900 err = __sock_create(xprt->xprt_net, family, type, protocol, &sock, 1); 1901 if (err < 0) { 1902 dprintk("RPC: can't create %d transport socket (%d).\n", 1903 protocol, -err); 1904 goto out; 1905 } 1906 xs_reclassify_socket(family, sock); 1907 1908 if (reuseport) 1909 sock_set_reuseport(sock->sk); 1910 1911 err = xs_bind(transport, sock); 1912 if (err) { 1913 sock_release(sock); 1914 goto out; 1915 } 1916 1917 filp = sock_alloc_file(sock, O_NONBLOCK, NULL); 1918 if (IS_ERR(filp)) 1919 return ERR_CAST(filp); 1920 transport->file = filp; 1921 1922 return sock; 1923 out: 1924 return ERR_PTR(err); 1925 } 1926 1927 static int xs_local_finish_connecting(struct rpc_xprt *xprt, 1928 struct socket *sock) 1929 { 1930 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, 1931 xprt); 1932 1933 if (!transport->inet) { 1934 struct sock *sk = sock->sk; 1935 1936 lock_sock(sk); 1937 1938 xs_save_old_callbacks(transport, sk); 1939 1940 sk->sk_user_data = xprt; 1941 sk->sk_data_ready = xs_data_ready; 1942 sk->sk_write_space = xs_udp_write_space; 1943 sk->sk_state_change = xs_local_state_change; 1944 sk->sk_error_report = xs_error_report; 1945 sk->sk_use_task_frag = false; 1946 1947 xprt_clear_connected(xprt); 1948 1949 /* Reset to new socket */ 1950 transport->sock = sock; 1951 transport->inet = sk; 1952 1953 release_sock(sk); 1954 } 1955 1956 xs_stream_start_connect(transport); 1957 1958 return kernel_connect(sock, xs_addr(xprt), xprt->addrlen, 0); 1959 } 1960 1961 /** 1962 * xs_local_setup_socket - create AF_LOCAL socket, connect to a local endpoint 1963 * @transport: socket transport to connect 1964 */ 1965 static int xs_local_setup_socket(struct sock_xprt *transport) 1966 { 1967 struct rpc_xprt *xprt = &transport->xprt; 1968 struct file *filp; 1969 struct socket *sock; 1970 int status; 1971 1972 status = __sock_create(xprt->xprt_net, AF_LOCAL, 1973 SOCK_STREAM, 0, &sock, 1); 1974 if (status < 0) { 1975 dprintk("RPC: can't create AF_LOCAL " 1976 "transport socket (%d).\n", -status); 1977 goto out; 1978 } 1979 xs_reclassify_socket(AF_LOCAL, sock); 1980 1981 filp = sock_alloc_file(sock, O_NONBLOCK, NULL); 1982 if (IS_ERR(filp)) { 1983 status = PTR_ERR(filp); 1984 goto out; 1985 } 1986 transport->file = filp; 1987 1988 dprintk("RPC: worker connecting xprt %p via AF_LOCAL to %s\n", 1989 xprt, xprt->address_strings[RPC_DISPLAY_ADDR]); 1990 1991 status = xs_local_finish_connecting(xprt, sock); 1992 trace_rpc_socket_connect(xprt, sock, status); 1993 switch (status) { 1994 case 0: 1995 dprintk("RPC: xprt %p connected to %s\n", 1996 xprt, xprt->address_strings[RPC_DISPLAY_ADDR]); 1997 xprt->stat.connect_count++; 1998 xprt->stat.connect_time += (long)jiffies - 1999 xprt->stat.connect_start; 2000 xprt_set_connected(xprt); 2001 break; 2002 case -ENOBUFS: 2003 break; 2004 case -ENOENT: 2005 dprintk("RPC: xprt %p: socket %s does not exist\n", 2006 xprt, xprt->address_strings[RPC_DISPLAY_ADDR]); 2007 break; 2008 case -ECONNREFUSED: 2009 dprintk("RPC: xprt %p: connection refused for %s\n", 2010 xprt, xprt->address_strings[RPC_DISPLAY_ADDR]); 2011 break; 2012 default: 2013 printk(KERN_ERR "%s: unhandled error (%d) connecting to %s\n", 2014 __func__, -status, 2015 xprt->address_strings[RPC_DISPLAY_ADDR]); 2016 } 2017 2018 out: 2019 xprt_clear_connecting(xprt); 2020 xprt_wake_pending_tasks(xprt, status); 2021 return status; 2022 } 2023 2024 static void xs_local_connect(struct rpc_xprt *xprt, struct rpc_task *task) 2025 { 2026 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); 2027 int ret; 2028 2029 if (transport->file) 2030 goto force_disconnect; 2031 2032 if (RPC_IS_ASYNC(task)) { 2033 /* 2034 * We want the AF_LOCAL connect to be resolved in the 2035 * filesystem namespace of the process making the rpc 2036 * call. Thus we connect synchronously. 2037 * 2038 * If we want to support asynchronous AF_LOCAL calls, 2039 * we'll need to figure out how to pass a namespace to 2040 * connect. 2041 */ 2042 rpc_task_set_rpc_status(task, -ENOTCONN); 2043 goto out_wake; 2044 } 2045 ret = xs_local_setup_socket(transport); 2046 if (ret && !RPC_IS_SOFTCONN(task)) 2047 msleep_interruptible(15000); 2048 return; 2049 force_disconnect: 2050 xprt_force_disconnect(xprt); 2051 out_wake: 2052 xprt_clear_connecting(xprt); 2053 xprt_wake_pending_tasks(xprt, -ENOTCONN); 2054 } 2055 2056 #if IS_ENABLED(CONFIG_SUNRPC_SWAP) 2057 /* 2058 * Note that this should be called with XPRT_LOCKED held, or recv_mutex 2059 * held, or when we otherwise know that we have exclusive access to the 2060 * socket, to guard against races with xs_reset_transport. 2061 */ 2062 static void xs_set_memalloc(struct rpc_xprt *xprt) 2063 { 2064 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, 2065 xprt); 2066 2067 /* 2068 * If there's no sock, then we have nothing to set. The 2069 * reconnecting process will get it for us. 2070 */ 2071 if (!transport->inet) 2072 return; 2073 if (atomic_read(&xprt->swapper)) 2074 sk_set_memalloc(transport->inet); 2075 } 2076 2077 /** 2078 * xs_enable_swap - Tag this transport as being used for swap. 2079 * @xprt: transport to tag 2080 * 2081 * Take a reference to this transport on behalf of the rpc_clnt, and 2082 * optionally mark it for swapping if it wasn't already. 2083 */ 2084 static int 2085 xs_enable_swap(struct rpc_xprt *xprt) 2086 { 2087 struct sock_xprt *xs = container_of(xprt, struct sock_xprt, xprt); 2088 2089 mutex_lock(&xs->recv_mutex); 2090 if (atomic_inc_return(&xprt->swapper) == 1 && 2091 xs->inet) 2092 sk_set_memalloc(xs->inet); 2093 mutex_unlock(&xs->recv_mutex); 2094 return 0; 2095 } 2096 2097 /** 2098 * xs_disable_swap - Untag this transport as being used for swap. 2099 * @xprt: transport to tag 2100 * 2101 * Drop a "swapper" reference to this xprt on behalf of the rpc_clnt. If the 2102 * swapper refcount goes to 0, untag the socket as a memalloc socket. 2103 */ 2104 static void 2105 xs_disable_swap(struct rpc_xprt *xprt) 2106 { 2107 struct sock_xprt *xs = container_of(xprt, struct sock_xprt, xprt); 2108 2109 mutex_lock(&xs->recv_mutex); 2110 if (atomic_dec_and_test(&xprt->swapper) && 2111 xs->inet) 2112 sk_clear_memalloc(xs->inet); 2113 mutex_unlock(&xs->recv_mutex); 2114 } 2115 #else 2116 static void xs_set_memalloc(struct rpc_xprt *xprt) 2117 { 2118 } 2119 2120 static int 2121 xs_enable_swap(struct rpc_xprt *xprt) 2122 { 2123 return -EINVAL; 2124 } 2125 2126 static void 2127 xs_disable_swap(struct rpc_xprt *xprt) 2128 { 2129 } 2130 #endif 2131 2132 static void xs_udp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock) 2133 { 2134 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); 2135 2136 if (!transport->inet) { 2137 struct sock *sk = sock->sk; 2138 2139 lock_sock(sk); 2140 2141 xs_save_old_callbacks(transport, sk); 2142 2143 sk->sk_user_data = xprt; 2144 sk->sk_data_ready = xs_data_ready; 2145 sk->sk_write_space = xs_udp_write_space; 2146 sk->sk_use_task_frag = false; 2147 2148 xprt_set_connected(xprt); 2149 2150 /* Reset to new socket */ 2151 transport->sock = sock; 2152 transport->inet = sk; 2153 2154 xs_set_memalloc(xprt); 2155 2156 release_sock(sk); 2157 } 2158 xs_udp_do_set_buffer_size(xprt); 2159 2160 xprt->stat.connect_start = jiffies; 2161 } 2162 2163 static void xs_udp_setup_socket(struct work_struct *work) 2164 { 2165 struct sock_xprt *transport = 2166 container_of(work, struct sock_xprt, connect_worker.work); 2167 struct rpc_xprt *xprt = &transport->xprt; 2168 struct socket *sock; 2169 int status = -EIO; 2170 unsigned int pflags = current->flags; 2171 2172 if (atomic_read(&xprt->swapper)) 2173 current->flags |= PF_MEMALLOC; 2174 sock = xs_create_sock(xprt, transport, 2175 xs_addr(xprt)->sa_family, SOCK_DGRAM, 2176 IPPROTO_UDP, false); 2177 if (IS_ERR(sock)) 2178 goto out; 2179 2180 dprintk("RPC: worker connecting xprt %p via %s to " 2181 "%s (port %s)\n", xprt, 2182 xprt->address_strings[RPC_DISPLAY_PROTO], 2183 xprt->address_strings[RPC_DISPLAY_ADDR], 2184 xprt->address_strings[RPC_DISPLAY_PORT]); 2185 2186 xs_udp_finish_connecting(xprt, sock); 2187 trace_rpc_socket_connect(xprt, sock, 0); 2188 status = 0; 2189 out: 2190 xprt_clear_connecting(xprt); 2191 xprt_unlock_connect(xprt, transport); 2192 xprt_wake_pending_tasks(xprt, status); 2193 current_restore_flags(pflags, PF_MEMALLOC); 2194 } 2195 2196 /** 2197 * xs_tcp_shutdown - gracefully shut down a TCP socket 2198 * @xprt: transport 2199 * 2200 * Initiates a graceful shutdown of the TCP socket by calling the 2201 * equivalent of shutdown(SHUT_RDWR); 2202 */ 2203 static void xs_tcp_shutdown(struct rpc_xprt *xprt) 2204 { 2205 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); 2206 struct socket *sock = transport->sock; 2207 int skst = transport->inet ? transport->inet->sk_state : TCP_CLOSE; 2208 2209 if (sock == NULL) 2210 return; 2211 if (!xprt->reuseport) { 2212 xs_close(xprt); 2213 return; 2214 } 2215 switch (skst) { 2216 case TCP_FIN_WAIT1: 2217 case TCP_FIN_WAIT2: 2218 case TCP_LAST_ACK: 2219 break; 2220 case TCP_ESTABLISHED: 2221 case TCP_CLOSE_WAIT: 2222 kernel_sock_shutdown(sock, SHUT_RDWR); 2223 trace_rpc_socket_shutdown(xprt, sock); 2224 break; 2225 default: 2226 xs_reset_transport(transport); 2227 } 2228 } 2229 2230 static void xs_tcp_set_socket_timeouts(struct rpc_xprt *xprt, 2231 struct socket *sock) 2232 { 2233 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); 2234 unsigned int keepidle; 2235 unsigned int keepcnt; 2236 unsigned int timeo; 2237 2238 spin_lock(&xprt->transport_lock); 2239 keepidle = DIV_ROUND_UP(xprt->timeout->to_initval, HZ); 2240 keepcnt = xprt->timeout->to_retries + 1; 2241 timeo = jiffies_to_msecs(xprt->timeout->to_initval) * 2242 (xprt->timeout->to_retries + 1); 2243 clear_bit(XPRT_SOCK_UPD_TIMEOUT, &transport->sock_state); 2244 spin_unlock(&xprt->transport_lock); 2245 2246 /* TCP Keepalive options */ 2247 sock_set_keepalive(sock->sk); 2248 tcp_sock_set_keepidle(sock->sk, keepidle); 2249 tcp_sock_set_keepintvl(sock->sk, keepidle); 2250 tcp_sock_set_keepcnt(sock->sk, keepcnt); 2251 2252 /* TCP user timeout (see RFC5482) */ 2253 tcp_sock_set_user_timeout(sock->sk, timeo); 2254 } 2255 2256 static void xs_tcp_set_connect_timeout(struct rpc_xprt *xprt, 2257 unsigned long connect_timeout, 2258 unsigned long reconnect_timeout) 2259 { 2260 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); 2261 struct rpc_timeout to; 2262 unsigned long initval; 2263 2264 spin_lock(&xprt->transport_lock); 2265 if (reconnect_timeout < xprt->max_reconnect_timeout) 2266 xprt->max_reconnect_timeout = reconnect_timeout; 2267 if (connect_timeout < xprt->connect_timeout) { 2268 memcpy(&to, xprt->timeout, sizeof(to)); 2269 initval = DIV_ROUND_UP(connect_timeout, to.to_retries + 1); 2270 /* Arbitrary lower limit */ 2271 if (initval < XS_TCP_INIT_REEST_TO << 1) 2272 initval = XS_TCP_INIT_REEST_TO << 1; 2273 to.to_initval = initval; 2274 to.to_maxval = initval; 2275 memcpy(&transport->tcp_timeout, &to, 2276 sizeof(transport->tcp_timeout)); 2277 xprt->timeout = &transport->tcp_timeout; 2278 xprt->connect_timeout = connect_timeout; 2279 } 2280 set_bit(XPRT_SOCK_UPD_TIMEOUT, &transport->sock_state); 2281 spin_unlock(&xprt->transport_lock); 2282 } 2283 2284 static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock) 2285 { 2286 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); 2287 2288 if (!transport->inet) { 2289 struct sock *sk = sock->sk; 2290 2291 /* Avoid temporary address, they are bad for long-lived 2292 * connections such as NFS mounts. 2293 * RFC4941, section 3.6 suggests that: 2294 * Individual applications, which have specific 2295 * knowledge about the normal duration of connections, 2296 * MAY override this as appropriate. 2297 */ 2298 if (xs_addr(xprt)->sa_family == PF_INET6) { 2299 ip6_sock_set_addr_preferences(sk, 2300 IPV6_PREFER_SRC_PUBLIC); 2301 } 2302 2303 xs_tcp_set_socket_timeouts(xprt, sock); 2304 tcp_sock_set_nodelay(sk); 2305 2306 lock_sock(sk); 2307 2308 xs_save_old_callbacks(transport, sk); 2309 2310 sk->sk_user_data = xprt; 2311 sk->sk_data_ready = xs_data_ready; 2312 sk->sk_state_change = xs_tcp_state_change; 2313 sk->sk_write_space = xs_tcp_write_space; 2314 sk->sk_error_report = xs_error_report; 2315 sk->sk_use_task_frag = false; 2316 2317 /* socket options */ 2318 sock_reset_flag(sk, SOCK_LINGER); 2319 2320 xprt_clear_connected(xprt); 2321 2322 /* Reset to new socket */ 2323 transport->sock = sock; 2324 transport->inet = sk; 2325 2326 release_sock(sk); 2327 } 2328 2329 if (!xprt_bound(xprt)) 2330 return -ENOTCONN; 2331 2332 xs_set_memalloc(xprt); 2333 2334 xs_stream_start_connect(transport); 2335 2336 /* Tell the socket layer to start connecting... */ 2337 set_bit(XPRT_SOCK_CONNECTING, &transport->sock_state); 2338 return kernel_connect(sock, xs_addr(xprt), xprt->addrlen, O_NONBLOCK); 2339 } 2340 2341 /** 2342 * xs_tcp_setup_socket - create a TCP socket and connect to a remote endpoint 2343 * @work: queued work item 2344 * 2345 * Invoked by a work queue tasklet. 2346 */ 2347 static void xs_tcp_setup_socket(struct work_struct *work) 2348 { 2349 struct sock_xprt *transport = 2350 container_of(work, struct sock_xprt, connect_worker.work); 2351 struct socket *sock = transport->sock; 2352 struct rpc_xprt *xprt = &transport->xprt; 2353 int status; 2354 unsigned int pflags = current->flags; 2355 2356 if (atomic_read(&xprt->swapper)) 2357 current->flags |= PF_MEMALLOC; 2358 2359 if (xprt_connected(xprt)) 2360 goto out; 2361 if (test_and_clear_bit(XPRT_SOCK_CONNECT_SENT, 2362 &transport->sock_state) || 2363 !sock) { 2364 xs_reset_transport(transport); 2365 sock = xs_create_sock(xprt, transport, xs_addr(xprt)->sa_family, 2366 SOCK_STREAM, IPPROTO_TCP, true); 2367 if (IS_ERR(sock)) { 2368 xprt_wake_pending_tasks(xprt, PTR_ERR(sock)); 2369 goto out; 2370 } 2371 } 2372 2373 dprintk("RPC: worker connecting xprt %p via %s to " 2374 "%s (port %s)\n", xprt, 2375 xprt->address_strings[RPC_DISPLAY_PROTO], 2376 xprt->address_strings[RPC_DISPLAY_ADDR], 2377 xprt->address_strings[RPC_DISPLAY_PORT]); 2378 2379 status = xs_tcp_finish_connecting(xprt, sock); 2380 trace_rpc_socket_connect(xprt, sock, status); 2381 dprintk("RPC: %p connect status %d connected %d sock state %d\n", 2382 xprt, -status, xprt_connected(xprt), 2383 sock->sk->sk_state); 2384 switch (status) { 2385 case 0: 2386 case -EINPROGRESS: 2387 /* SYN_SENT! */ 2388 set_bit(XPRT_SOCK_CONNECT_SENT, &transport->sock_state); 2389 if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO) 2390 xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO; 2391 fallthrough; 2392 case -EALREADY: 2393 goto out_unlock; 2394 case -EADDRNOTAVAIL: 2395 /* Source port number is unavailable. Try a new one! */ 2396 transport->srcport = 0; 2397 status = -EAGAIN; 2398 break; 2399 case -EINVAL: 2400 /* Happens, for instance, if the user specified a link 2401 * local IPv6 address without a scope-id. 2402 */ 2403 case -ECONNREFUSED: 2404 case -ECONNRESET: 2405 case -ENETDOWN: 2406 case -ENETUNREACH: 2407 case -EHOSTUNREACH: 2408 case -EADDRINUSE: 2409 case -ENOBUFS: 2410 break; 2411 default: 2412 printk("%s: connect returned unhandled error %d\n", 2413 __func__, status); 2414 status = -EAGAIN; 2415 } 2416 2417 /* xs_tcp_force_close() wakes tasks with a fixed error code. 2418 * We need to wake them first to ensure the correct error code. 2419 */ 2420 xprt_wake_pending_tasks(xprt, status); 2421 xs_tcp_force_close(xprt); 2422 out: 2423 xprt_clear_connecting(xprt); 2424 out_unlock: 2425 xprt_unlock_connect(xprt, transport); 2426 current_restore_flags(pflags, PF_MEMALLOC); 2427 } 2428 2429 /* 2430 * Transfer the connected socket to @upper_transport, then mark that 2431 * xprt CONNECTED. 2432 */ 2433 static int xs_tcp_tls_finish_connecting(struct rpc_xprt *lower_xprt, 2434 struct sock_xprt *upper_transport) 2435 { 2436 struct sock_xprt *lower_transport = 2437 container_of(lower_xprt, struct sock_xprt, xprt); 2438 struct rpc_xprt *upper_xprt = &upper_transport->xprt; 2439 2440 if (!upper_transport->inet) { 2441 struct socket *sock = lower_transport->sock; 2442 struct sock *sk = sock->sk; 2443 2444 /* Avoid temporary address, they are bad for long-lived 2445 * connections such as NFS mounts. 2446 * RFC4941, section 3.6 suggests that: 2447 * Individual applications, which have specific 2448 * knowledge about the normal duration of connections, 2449 * MAY override this as appropriate. 2450 */ 2451 if (xs_addr(upper_xprt)->sa_family == PF_INET6) 2452 ip6_sock_set_addr_preferences(sk, IPV6_PREFER_SRC_PUBLIC); 2453 2454 xs_tcp_set_socket_timeouts(upper_xprt, sock); 2455 tcp_sock_set_nodelay(sk); 2456 2457 lock_sock(sk); 2458 2459 /* @sk is already connected, so it now has the RPC callbacks. 2460 * Reach into @lower_transport to save the original ones. 2461 */ 2462 upper_transport->old_data_ready = lower_transport->old_data_ready; 2463 upper_transport->old_state_change = lower_transport->old_state_change; 2464 upper_transport->old_write_space = lower_transport->old_write_space; 2465 upper_transport->old_error_report = lower_transport->old_error_report; 2466 sk->sk_user_data = upper_xprt; 2467 2468 /* socket options */ 2469 sock_reset_flag(sk, SOCK_LINGER); 2470 2471 xprt_clear_connected(upper_xprt); 2472 2473 upper_transport->sock = sock; 2474 upper_transport->inet = sk; 2475 upper_transport->file = lower_transport->file; 2476 2477 release_sock(sk); 2478 2479 /* Reset lower_transport before shutting down its clnt */ 2480 mutex_lock(&lower_transport->recv_mutex); 2481 lower_transport->inet = NULL; 2482 lower_transport->sock = NULL; 2483 lower_transport->file = NULL; 2484 2485 xprt_clear_connected(lower_xprt); 2486 xs_sock_reset_connection_flags(lower_xprt); 2487 xs_stream_reset_connect(lower_transport); 2488 mutex_unlock(&lower_transport->recv_mutex); 2489 } 2490 2491 if (!xprt_bound(upper_xprt)) 2492 return -ENOTCONN; 2493 2494 xs_set_memalloc(upper_xprt); 2495 2496 if (!xprt_test_and_set_connected(upper_xprt)) { 2497 upper_xprt->connect_cookie++; 2498 clear_bit(XPRT_SOCK_CONNECTING, &upper_transport->sock_state); 2499 xprt_clear_connecting(upper_xprt); 2500 2501 upper_xprt->stat.connect_count++; 2502 upper_xprt->stat.connect_time += (long)jiffies - 2503 upper_xprt->stat.connect_start; 2504 xs_run_error_worker(upper_transport, XPRT_SOCK_WAKE_PENDING); 2505 } 2506 return 0; 2507 } 2508 2509 /** 2510 * xs_tls_handshake_done - TLS handshake completion handler 2511 * @data: address of xprt to wake 2512 * @status: status of handshake 2513 * @peerid: serial number of key containing the remote's identity 2514 * 2515 */ 2516 static void xs_tls_handshake_done(void *data, int status, key_serial_t peerid) 2517 { 2518 struct rpc_xprt *lower_xprt = data; 2519 struct sock_xprt *lower_transport = 2520 container_of(lower_xprt, struct sock_xprt, xprt); 2521 2522 lower_transport->xprt_err = status ? -EACCES : 0; 2523 complete(&lower_transport->handshake_done); 2524 xprt_put(lower_xprt); 2525 } 2526 2527 static int xs_tls_handshake_sync(struct rpc_xprt *lower_xprt, struct xprtsec_parms *xprtsec) 2528 { 2529 struct sock_xprt *lower_transport = 2530 container_of(lower_xprt, struct sock_xprt, xprt); 2531 struct tls_handshake_args args = { 2532 .ta_sock = lower_transport->sock, 2533 .ta_done = xs_tls_handshake_done, 2534 .ta_data = xprt_get(lower_xprt), 2535 .ta_peername = lower_xprt->servername, 2536 }; 2537 struct sock *sk = lower_transport->inet; 2538 int rc; 2539 2540 init_completion(&lower_transport->handshake_done); 2541 set_bit(XPRT_SOCK_IGNORE_RECV, &lower_transport->sock_state); 2542 lower_transport->xprt_err = -ETIMEDOUT; 2543 switch (xprtsec->policy) { 2544 case RPC_XPRTSEC_TLS_ANON: 2545 rc = tls_client_hello_anon(&args, GFP_KERNEL); 2546 if (rc) 2547 goto out_put_xprt; 2548 break; 2549 case RPC_XPRTSEC_TLS_X509: 2550 args.ta_my_cert = xprtsec->cert_serial; 2551 args.ta_my_privkey = xprtsec->privkey_serial; 2552 rc = tls_client_hello_x509(&args, GFP_KERNEL); 2553 if (rc) 2554 goto out_put_xprt; 2555 break; 2556 default: 2557 rc = -EACCES; 2558 goto out_put_xprt; 2559 } 2560 2561 rc = wait_for_completion_interruptible_timeout(&lower_transport->handshake_done, 2562 XS_TLS_HANDSHAKE_TO); 2563 if (rc <= 0) { 2564 if (!tls_handshake_cancel(sk)) { 2565 if (rc == 0) 2566 rc = -ETIMEDOUT; 2567 goto out_put_xprt; 2568 } 2569 } 2570 2571 rc = lower_transport->xprt_err; 2572 2573 out: 2574 xs_stream_reset_connect(lower_transport); 2575 clear_bit(XPRT_SOCK_IGNORE_RECV, &lower_transport->sock_state); 2576 return rc; 2577 2578 out_put_xprt: 2579 xprt_put(lower_xprt); 2580 goto out; 2581 } 2582 2583 /** 2584 * xs_tcp_tls_setup_socket - establish a TLS session on a TCP socket 2585 * @work: queued work item 2586 * 2587 * Invoked by a work queue tasklet. 2588 * 2589 * For RPC-with-TLS, there is a two-stage connection process. 2590 * 2591 * The "upper-layer xprt" is visible to the RPC consumer. Once it has 2592 * been marked connected, the consumer knows that a TCP connection and 2593 * a TLS session have been established. 2594 * 2595 * A "lower-layer xprt", created in this function, handles the mechanics 2596 * of connecting the TCP socket, performing the RPC_AUTH_TLS probe, and 2597 * then driving the TLS handshake. Once all that is complete, the upper 2598 * layer xprt is marked connected. 2599 */ 2600 static void xs_tcp_tls_setup_socket(struct work_struct *work) 2601 { 2602 struct sock_xprt *upper_transport = 2603 container_of(work, struct sock_xprt, connect_worker.work); 2604 struct rpc_clnt *upper_clnt = upper_transport->clnt; 2605 struct rpc_xprt *upper_xprt = &upper_transport->xprt; 2606 struct rpc_create_args args = { 2607 .net = upper_xprt->xprt_net, 2608 .protocol = upper_xprt->prot, 2609 .address = (struct sockaddr *)&upper_xprt->addr, 2610 .addrsize = upper_xprt->addrlen, 2611 .timeout = upper_clnt->cl_timeout, 2612 .servername = upper_xprt->servername, 2613 .program = upper_clnt->cl_program, 2614 .prognumber = upper_clnt->cl_prog, 2615 .version = upper_clnt->cl_vers, 2616 .authflavor = RPC_AUTH_TLS, 2617 .cred = upper_clnt->cl_cred, 2618 .xprtsec = { 2619 .policy = RPC_XPRTSEC_NONE, 2620 }, 2621 }; 2622 unsigned int pflags = current->flags; 2623 struct rpc_clnt *lower_clnt; 2624 struct rpc_xprt *lower_xprt; 2625 int status; 2626 2627 if (atomic_read(&upper_xprt->swapper)) 2628 current->flags |= PF_MEMALLOC; 2629 2630 xs_stream_start_connect(upper_transport); 2631 2632 /* This implicitly sends an RPC_AUTH_TLS probe */ 2633 lower_clnt = rpc_create(&args); 2634 if (IS_ERR(lower_clnt)) { 2635 trace_rpc_tls_unavailable(upper_clnt, upper_xprt); 2636 clear_bit(XPRT_SOCK_CONNECTING, &upper_transport->sock_state); 2637 xprt_clear_connecting(upper_xprt); 2638 xprt_wake_pending_tasks(upper_xprt, PTR_ERR(lower_clnt)); 2639 xs_run_error_worker(upper_transport, XPRT_SOCK_WAKE_PENDING); 2640 goto out_unlock; 2641 } 2642 2643 /* RPC_AUTH_TLS probe was successful. Try a TLS handshake on 2644 * the lower xprt. 2645 */ 2646 rcu_read_lock(); 2647 lower_xprt = rcu_dereference(lower_clnt->cl_xprt); 2648 rcu_read_unlock(); 2649 status = xs_tls_handshake_sync(lower_xprt, &upper_xprt->xprtsec); 2650 if (status) { 2651 trace_rpc_tls_not_started(upper_clnt, upper_xprt); 2652 goto out_close; 2653 } 2654 2655 status = xs_tcp_tls_finish_connecting(lower_xprt, upper_transport); 2656 if (status) 2657 goto out_close; 2658 2659 trace_rpc_socket_connect(upper_xprt, upper_transport->sock, 0); 2660 if (!xprt_test_and_set_connected(upper_xprt)) { 2661 upper_xprt->connect_cookie++; 2662 clear_bit(XPRT_SOCK_CONNECTING, &upper_transport->sock_state); 2663 xprt_clear_connecting(upper_xprt); 2664 2665 upper_xprt->stat.connect_count++; 2666 upper_xprt->stat.connect_time += (long)jiffies - 2667 upper_xprt->stat.connect_start; 2668 xs_run_error_worker(upper_transport, XPRT_SOCK_WAKE_PENDING); 2669 } 2670 rpc_shutdown_client(lower_clnt); 2671 2672 out_unlock: 2673 current_restore_flags(pflags, PF_MEMALLOC); 2674 upper_transport->clnt = NULL; 2675 xprt_unlock_connect(upper_xprt, upper_transport); 2676 return; 2677 2678 out_close: 2679 rpc_shutdown_client(lower_clnt); 2680 2681 /* xprt_force_disconnect() wakes tasks with a fixed tk_status code. 2682 * Wake them first here to ensure they get our tk_status code. 2683 */ 2684 xprt_wake_pending_tasks(upper_xprt, status); 2685 xs_tcp_force_close(upper_xprt); 2686 xprt_clear_connecting(upper_xprt); 2687 goto out_unlock; 2688 } 2689 2690 /** 2691 * xs_connect - connect a socket to a remote endpoint 2692 * @xprt: pointer to transport structure 2693 * @task: address of RPC task that manages state of connect request 2694 * 2695 * TCP: If the remote end dropped the connection, delay reconnecting. 2696 * 2697 * UDP socket connects are synchronous, but we use a work queue anyway 2698 * to guarantee that even unprivileged user processes can set up a 2699 * socket on a privileged port. 2700 * 2701 * If a UDP socket connect fails, the delay behavior here prevents 2702 * retry floods (hard mounts). 2703 */ 2704 static void xs_connect(struct rpc_xprt *xprt, struct rpc_task *task) 2705 { 2706 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); 2707 unsigned long delay = 0; 2708 2709 WARN_ON_ONCE(!xprt_lock_connect(xprt, task, transport)); 2710 2711 if (transport->sock != NULL) { 2712 dprintk("RPC: xs_connect delayed xprt %p for %lu " 2713 "seconds\n", xprt, xprt->reestablish_timeout / HZ); 2714 2715 delay = xprt_reconnect_delay(xprt); 2716 xprt_reconnect_backoff(xprt, XS_TCP_INIT_REEST_TO); 2717 2718 } else 2719 dprintk("RPC: xs_connect scheduled xprt %p\n", xprt); 2720 2721 transport->clnt = task->tk_client; 2722 queue_delayed_work(xprtiod_workqueue, 2723 &transport->connect_worker, 2724 delay); 2725 } 2726 2727 static void xs_wake_disconnect(struct sock_xprt *transport) 2728 { 2729 if (test_and_clear_bit(XPRT_SOCK_WAKE_DISCONNECT, &transport->sock_state)) 2730 xs_tcp_force_close(&transport->xprt); 2731 } 2732 2733 static void xs_wake_write(struct sock_xprt *transport) 2734 { 2735 if (test_and_clear_bit(XPRT_SOCK_WAKE_WRITE, &transport->sock_state)) 2736 xprt_write_space(&transport->xprt); 2737 } 2738 2739 static void xs_wake_error(struct sock_xprt *transport) 2740 { 2741 int sockerr; 2742 2743 if (!test_bit(XPRT_SOCK_WAKE_ERROR, &transport->sock_state)) 2744 return; 2745 mutex_lock(&transport->recv_mutex); 2746 if (transport->sock == NULL) 2747 goto out; 2748 if (!test_and_clear_bit(XPRT_SOCK_WAKE_ERROR, &transport->sock_state)) 2749 goto out; 2750 sockerr = xchg(&transport->xprt_err, 0); 2751 if (sockerr < 0) 2752 xprt_wake_pending_tasks(&transport->xprt, sockerr); 2753 out: 2754 mutex_unlock(&transport->recv_mutex); 2755 } 2756 2757 static void xs_wake_pending(struct sock_xprt *transport) 2758 { 2759 if (test_and_clear_bit(XPRT_SOCK_WAKE_PENDING, &transport->sock_state)) 2760 xprt_wake_pending_tasks(&transport->xprt, -EAGAIN); 2761 } 2762 2763 static void xs_error_handle(struct work_struct *work) 2764 { 2765 struct sock_xprt *transport = container_of(work, 2766 struct sock_xprt, error_worker); 2767 2768 xs_wake_disconnect(transport); 2769 xs_wake_write(transport); 2770 xs_wake_error(transport); 2771 xs_wake_pending(transport); 2772 } 2773 2774 /** 2775 * xs_local_print_stats - display AF_LOCAL socket-specific stats 2776 * @xprt: rpc_xprt struct containing statistics 2777 * @seq: output file 2778 * 2779 */ 2780 static void xs_local_print_stats(struct rpc_xprt *xprt, struct seq_file *seq) 2781 { 2782 long idle_time = 0; 2783 2784 if (xprt_connected(xprt)) 2785 idle_time = (long)(jiffies - xprt->last_used) / HZ; 2786 2787 seq_printf(seq, "\txprt:\tlocal %lu %lu %lu %ld %lu %lu %lu " 2788 "%llu %llu %lu %llu %llu\n", 2789 xprt->stat.bind_count, 2790 xprt->stat.connect_count, 2791 xprt->stat.connect_time / HZ, 2792 idle_time, 2793 xprt->stat.sends, 2794 xprt->stat.recvs, 2795 xprt->stat.bad_xids, 2796 xprt->stat.req_u, 2797 xprt->stat.bklog_u, 2798 xprt->stat.max_slots, 2799 xprt->stat.sending_u, 2800 xprt->stat.pending_u); 2801 } 2802 2803 /** 2804 * xs_udp_print_stats - display UDP socket-specific stats 2805 * @xprt: rpc_xprt struct containing statistics 2806 * @seq: output file 2807 * 2808 */ 2809 static void xs_udp_print_stats(struct rpc_xprt *xprt, struct seq_file *seq) 2810 { 2811 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); 2812 2813 seq_printf(seq, "\txprt:\tudp %u %lu %lu %lu %lu %llu %llu " 2814 "%lu %llu %llu\n", 2815 transport->srcport, 2816 xprt->stat.bind_count, 2817 xprt->stat.sends, 2818 xprt->stat.recvs, 2819 xprt->stat.bad_xids, 2820 xprt->stat.req_u, 2821 xprt->stat.bklog_u, 2822 xprt->stat.max_slots, 2823 xprt->stat.sending_u, 2824 xprt->stat.pending_u); 2825 } 2826 2827 /** 2828 * xs_tcp_print_stats - display TCP socket-specific stats 2829 * @xprt: rpc_xprt struct containing statistics 2830 * @seq: output file 2831 * 2832 */ 2833 static void xs_tcp_print_stats(struct rpc_xprt *xprt, struct seq_file *seq) 2834 { 2835 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); 2836 long idle_time = 0; 2837 2838 if (xprt_connected(xprt)) 2839 idle_time = (long)(jiffies - xprt->last_used) / HZ; 2840 2841 seq_printf(seq, "\txprt:\ttcp %u %lu %lu %lu %ld %lu %lu %lu " 2842 "%llu %llu %lu %llu %llu\n", 2843 transport->srcport, 2844 xprt->stat.bind_count, 2845 xprt->stat.connect_count, 2846 xprt->stat.connect_time / HZ, 2847 idle_time, 2848 xprt->stat.sends, 2849 xprt->stat.recvs, 2850 xprt->stat.bad_xids, 2851 xprt->stat.req_u, 2852 xprt->stat.bklog_u, 2853 xprt->stat.max_slots, 2854 xprt->stat.sending_u, 2855 xprt->stat.pending_u); 2856 } 2857 2858 /* 2859 * Allocate a bunch of pages for a scratch buffer for the rpc code. The reason 2860 * we allocate pages instead doing a kmalloc like rpc_malloc is because we want 2861 * to use the server side send routines. 2862 */ 2863 static int bc_malloc(struct rpc_task *task) 2864 { 2865 struct rpc_rqst *rqst = task->tk_rqstp; 2866 size_t size = rqst->rq_callsize; 2867 struct page *page; 2868 struct rpc_buffer *buf; 2869 2870 if (size > PAGE_SIZE - sizeof(struct rpc_buffer)) { 2871 WARN_ONCE(1, "xprtsock: large bc buffer request (size %zu)\n", 2872 size); 2873 return -EINVAL; 2874 } 2875 2876 page = alloc_page(GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN); 2877 if (!page) 2878 return -ENOMEM; 2879 2880 buf = page_address(page); 2881 buf->len = PAGE_SIZE; 2882 2883 rqst->rq_buffer = buf->data; 2884 rqst->rq_rbuffer = (char *)rqst->rq_buffer + rqst->rq_callsize; 2885 return 0; 2886 } 2887 2888 /* 2889 * Free the space allocated in the bc_alloc routine 2890 */ 2891 static void bc_free(struct rpc_task *task) 2892 { 2893 void *buffer = task->tk_rqstp->rq_buffer; 2894 struct rpc_buffer *buf; 2895 2896 buf = container_of(buffer, struct rpc_buffer, data); 2897 free_page((unsigned long)buf); 2898 } 2899 2900 static int bc_sendto(struct rpc_rqst *req) 2901 { 2902 struct xdr_buf *xdr = &req->rq_snd_buf; 2903 struct sock_xprt *transport = 2904 container_of(req->rq_xprt, struct sock_xprt, xprt); 2905 struct msghdr msg = { 2906 .msg_flags = 0, 2907 }; 2908 rpc_fraghdr marker = cpu_to_be32(RPC_LAST_STREAM_FRAGMENT | 2909 (u32)xdr->len); 2910 unsigned int sent = 0; 2911 int err; 2912 2913 req->rq_xtime = ktime_get(); 2914 err = xdr_alloc_bvec(xdr, rpc_task_gfp_mask()); 2915 if (err < 0) 2916 return err; 2917 err = xprt_sock_sendmsg(transport->sock, &msg, xdr, 0, marker, &sent); 2918 xdr_free_bvec(xdr); 2919 if (err < 0 || sent != (xdr->len + sizeof(marker))) 2920 return -EAGAIN; 2921 return sent; 2922 } 2923 2924 /** 2925 * bc_send_request - Send a backchannel Call on a TCP socket 2926 * @req: rpc_rqst containing Call message to be sent 2927 * 2928 * xpt_mutex ensures @rqstp's whole message is written to the socket 2929 * without interruption. 2930 * 2931 * Return values: 2932 * %0 if the message was sent successfully 2933 * %ENOTCONN if the message was not sent 2934 */ 2935 static int bc_send_request(struct rpc_rqst *req) 2936 { 2937 struct svc_xprt *xprt; 2938 int len; 2939 2940 /* 2941 * Get the server socket associated with this callback xprt 2942 */ 2943 xprt = req->rq_xprt->bc_xprt; 2944 2945 /* 2946 * Grab the mutex to serialize data as the connection is shared 2947 * with the fore channel 2948 */ 2949 mutex_lock(&xprt->xpt_mutex); 2950 if (test_bit(XPT_DEAD, &xprt->xpt_flags)) 2951 len = -ENOTCONN; 2952 else 2953 len = bc_sendto(req); 2954 mutex_unlock(&xprt->xpt_mutex); 2955 2956 if (len > 0) 2957 len = 0; 2958 2959 return len; 2960 } 2961 2962 /* 2963 * The close routine. Since this is client initiated, we do nothing 2964 */ 2965 2966 static void bc_close(struct rpc_xprt *xprt) 2967 { 2968 xprt_disconnect_done(xprt); 2969 } 2970 2971 /* 2972 * The xprt destroy routine. Again, because this connection is client 2973 * initiated, we do nothing 2974 */ 2975 2976 static void bc_destroy(struct rpc_xprt *xprt) 2977 { 2978 dprintk("RPC: bc_destroy xprt %p\n", xprt); 2979 2980 xs_xprt_free(xprt); 2981 module_put(THIS_MODULE); 2982 } 2983 2984 static const struct rpc_xprt_ops xs_local_ops = { 2985 .reserve_xprt = xprt_reserve_xprt, 2986 .release_xprt = xprt_release_xprt, 2987 .alloc_slot = xprt_alloc_slot, 2988 .free_slot = xprt_free_slot, 2989 .rpcbind = xs_local_rpcbind, 2990 .set_port = xs_local_set_port, 2991 .connect = xs_local_connect, 2992 .buf_alloc = rpc_malloc, 2993 .buf_free = rpc_free, 2994 .prepare_request = xs_stream_prepare_request, 2995 .send_request = xs_local_send_request, 2996 .wait_for_reply_request = xprt_wait_for_reply_request_def, 2997 .close = xs_close, 2998 .destroy = xs_destroy, 2999 .print_stats = xs_local_print_stats, 3000 .enable_swap = xs_enable_swap, 3001 .disable_swap = xs_disable_swap, 3002 }; 3003 3004 static const struct rpc_xprt_ops xs_udp_ops = { 3005 .set_buffer_size = xs_udp_set_buffer_size, 3006 .reserve_xprt = xprt_reserve_xprt_cong, 3007 .release_xprt = xprt_release_xprt_cong, 3008 .alloc_slot = xprt_alloc_slot, 3009 .free_slot = xprt_free_slot, 3010 .rpcbind = rpcb_getport_async, 3011 .set_port = xs_set_port, 3012 .connect = xs_connect, 3013 .get_srcaddr = xs_sock_srcaddr, 3014 .get_srcport = xs_sock_srcport, 3015 .buf_alloc = rpc_malloc, 3016 .buf_free = rpc_free, 3017 .send_request = xs_udp_send_request, 3018 .wait_for_reply_request = xprt_wait_for_reply_request_rtt, 3019 .timer = xs_udp_timer, 3020 .release_request = xprt_release_rqst_cong, 3021 .close = xs_close, 3022 .destroy = xs_destroy, 3023 .print_stats = xs_udp_print_stats, 3024 .enable_swap = xs_enable_swap, 3025 .disable_swap = xs_disable_swap, 3026 .inject_disconnect = xs_inject_disconnect, 3027 }; 3028 3029 static const struct rpc_xprt_ops xs_tcp_ops = { 3030 .reserve_xprt = xprt_reserve_xprt, 3031 .release_xprt = xprt_release_xprt, 3032 .alloc_slot = xprt_alloc_slot, 3033 .free_slot = xprt_free_slot, 3034 .rpcbind = rpcb_getport_async, 3035 .set_port = xs_set_port, 3036 .connect = xs_connect, 3037 .get_srcaddr = xs_sock_srcaddr, 3038 .get_srcport = xs_sock_srcport, 3039 .buf_alloc = rpc_malloc, 3040 .buf_free = rpc_free, 3041 .prepare_request = xs_stream_prepare_request, 3042 .send_request = xs_tcp_send_request, 3043 .wait_for_reply_request = xprt_wait_for_reply_request_def, 3044 .close = xs_tcp_shutdown, 3045 .destroy = xs_destroy, 3046 .set_connect_timeout = xs_tcp_set_connect_timeout, 3047 .print_stats = xs_tcp_print_stats, 3048 .enable_swap = xs_enable_swap, 3049 .disable_swap = xs_disable_swap, 3050 .inject_disconnect = xs_inject_disconnect, 3051 #ifdef CONFIG_SUNRPC_BACKCHANNEL 3052 .bc_setup = xprt_setup_bc, 3053 .bc_maxpayload = xs_tcp_bc_maxpayload, 3054 .bc_num_slots = xprt_bc_max_slots, 3055 .bc_free_rqst = xprt_free_bc_rqst, 3056 .bc_destroy = xprt_destroy_bc, 3057 #endif 3058 }; 3059 3060 /* 3061 * The rpc_xprt_ops for the server backchannel 3062 */ 3063 3064 static const struct rpc_xprt_ops bc_tcp_ops = { 3065 .reserve_xprt = xprt_reserve_xprt, 3066 .release_xprt = xprt_release_xprt, 3067 .alloc_slot = xprt_alloc_slot, 3068 .free_slot = xprt_free_slot, 3069 .buf_alloc = bc_malloc, 3070 .buf_free = bc_free, 3071 .send_request = bc_send_request, 3072 .wait_for_reply_request = xprt_wait_for_reply_request_def, 3073 .close = bc_close, 3074 .destroy = bc_destroy, 3075 .print_stats = xs_tcp_print_stats, 3076 .enable_swap = xs_enable_swap, 3077 .disable_swap = xs_disable_swap, 3078 .inject_disconnect = xs_inject_disconnect, 3079 }; 3080 3081 static int xs_init_anyaddr(const int family, struct sockaddr *sap) 3082 { 3083 static const struct sockaddr_in sin = { 3084 .sin_family = AF_INET, 3085 .sin_addr.s_addr = htonl(INADDR_ANY), 3086 }; 3087 static const struct sockaddr_in6 sin6 = { 3088 .sin6_family = AF_INET6, 3089 .sin6_addr = IN6ADDR_ANY_INIT, 3090 }; 3091 3092 switch (family) { 3093 case AF_LOCAL: 3094 break; 3095 case AF_INET: 3096 memcpy(sap, &sin, sizeof(sin)); 3097 break; 3098 case AF_INET6: 3099 memcpy(sap, &sin6, sizeof(sin6)); 3100 break; 3101 default: 3102 dprintk("RPC: %s: Bad address family\n", __func__); 3103 return -EAFNOSUPPORT; 3104 } 3105 return 0; 3106 } 3107 3108 static struct rpc_xprt *xs_setup_xprt(struct xprt_create *args, 3109 unsigned int slot_table_size, 3110 unsigned int max_slot_table_size) 3111 { 3112 struct rpc_xprt *xprt; 3113 struct sock_xprt *new; 3114 3115 if (args->addrlen > sizeof(xprt->addr)) { 3116 dprintk("RPC: xs_setup_xprt: address too large\n"); 3117 return ERR_PTR(-EBADF); 3118 } 3119 3120 xprt = xprt_alloc(args->net, sizeof(*new), slot_table_size, 3121 max_slot_table_size); 3122 if (xprt == NULL) { 3123 dprintk("RPC: xs_setup_xprt: couldn't allocate " 3124 "rpc_xprt\n"); 3125 return ERR_PTR(-ENOMEM); 3126 } 3127 3128 new = container_of(xprt, struct sock_xprt, xprt); 3129 mutex_init(&new->recv_mutex); 3130 memcpy(&xprt->addr, args->dstaddr, args->addrlen); 3131 xprt->addrlen = args->addrlen; 3132 if (args->srcaddr) 3133 memcpy(&new->srcaddr, args->srcaddr, args->addrlen); 3134 else { 3135 int err; 3136 err = xs_init_anyaddr(args->dstaddr->sa_family, 3137 (struct sockaddr *)&new->srcaddr); 3138 if (err != 0) { 3139 xprt_free(xprt); 3140 return ERR_PTR(err); 3141 } 3142 } 3143 3144 return xprt; 3145 } 3146 3147 static const struct rpc_timeout xs_local_default_timeout = { 3148 .to_initval = 10 * HZ, 3149 .to_maxval = 10 * HZ, 3150 .to_retries = 2, 3151 }; 3152 3153 /** 3154 * xs_setup_local - Set up transport to use an AF_LOCAL socket 3155 * @args: rpc transport creation arguments 3156 * 3157 * AF_LOCAL is a "tpi_cots_ord" transport, just like TCP 3158 */ 3159 static struct rpc_xprt *xs_setup_local(struct xprt_create *args) 3160 { 3161 struct sockaddr_un *sun = (struct sockaddr_un *)args->dstaddr; 3162 struct sock_xprt *transport; 3163 struct rpc_xprt *xprt; 3164 struct rpc_xprt *ret; 3165 3166 xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries, 3167 xprt_max_tcp_slot_table_entries); 3168 if (IS_ERR(xprt)) 3169 return xprt; 3170 transport = container_of(xprt, struct sock_xprt, xprt); 3171 3172 xprt->prot = 0; 3173 xprt->xprt_class = &xs_local_transport; 3174 xprt->max_payload = RPC_MAX_FRAGMENT_SIZE; 3175 3176 xprt->bind_timeout = XS_BIND_TO; 3177 xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO; 3178 xprt->idle_timeout = XS_IDLE_DISC_TO; 3179 3180 xprt->ops = &xs_local_ops; 3181 xprt->timeout = &xs_local_default_timeout; 3182 3183 INIT_WORK(&transport->recv_worker, xs_stream_data_receive_workfn); 3184 INIT_WORK(&transport->error_worker, xs_error_handle); 3185 INIT_DELAYED_WORK(&transport->connect_worker, xs_dummy_setup_socket); 3186 3187 switch (sun->sun_family) { 3188 case AF_LOCAL: 3189 if (sun->sun_path[0] != '/' && sun->sun_path[0] != '\0') { 3190 dprintk("RPC: bad AF_LOCAL address: %s\n", 3191 sun->sun_path); 3192 ret = ERR_PTR(-EINVAL); 3193 goto out_err; 3194 } 3195 xprt_set_bound(xprt); 3196 xs_format_peer_addresses(xprt, "local", RPCBIND_NETID_LOCAL); 3197 break; 3198 default: 3199 ret = ERR_PTR(-EAFNOSUPPORT); 3200 goto out_err; 3201 } 3202 3203 dprintk("RPC: set up xprt to %s via AF_LOCAL\n", 3204 xprt->address_strings[RPC_DISPLAY_ADDR]); 3205 3206 if (try_module_get(THIS_MODULE)) 3207 return xprt; 3208 ret = ERR_PTR(-EINVAL); 3209 out_err: 3210 xs_xprt_free(xprt); 3211 return ret; 3212 } 3213 3214 static const struct rpc_timeout xs_udp_default_timeout = { 3215 .to_initval = 5 * HZ, 3216 .to_maxval = 30 * HZ, 3217 .to_increment = 5 * HZ, 3218 .to_retries = 5, 3219 }; 3220 3221 /** 3222 * xs_setup_udp - Set up transport to use a UDP socket 3223 * @args: rpc transport creation arguments 3224 * 3225 */ 3226 static struct rpc_xprt *xs_setup_udp(struct xprt_create *args) 3227 { 3228 struct sockaddr *addr = args->dstaddr; 3229 struct rpc_xprt *xprt; 3230 struct sock_xprt *transport; 3231 struct rpc_xprt *ret; 3232 3233 xprt = xs_setup_xprt(args, xprt_udp_slot_table_entries, 3234 xprt_udp_slot_table_entries); 3235 if (IS_ERR(xprt)) 3236 return xprt; 3237 transport = container_of(xprt, struct sock_xprt, xprt); 3238 3239 xprt->prot = IPPROTO_UDP; 3240 xprt->xprt_class = &xs_udp_transport; 3241 /* XXX: header size can vary due to auth type, IPv6, etc. */ 3242 xprt->max_payload = (1U << 16) - (MAX_HEADER << 3); 3243 3244 xprt->bind_timeout = XS_BIND_TO; 3245 xprt->reestablish_timeout = XS_UDP_REEST_TO; 3246 xprt->idle_timeout = XS_IDLE_DISC_TO; 3247 3248 xprt->ops = &xs_udp_ops; 3249 3250 xprt->timeout = &xs_udp_default_timeout; 3251 3252 INIT_WORK(&transport->recv_worker, xs_udp_data_receive_workfn); 3253 INIT_WORK(&transport->error_worker, xs_error_handle); 3254 INIT_DELAYED_WORK(&transport->connect_worker, xs_udp_setup_socket); 3255 3256 switch (addr->sa_family) { 3257 case AF_INET: 3258 if (((struct sockaddr_in *)addr)->sin_port != htons(0)) 3259 xprt_set_bound(xprt); 3260 3261 xs_format_peer_addresses(xprt, "udp", RPCBIND_NETID_UDP); 3262 break; 3263 case AF_INET6: 3264 if (((struct sockaddr_in6 *)addr)->sin6_port != htons(0)) 3265 xprt_set_bound(xprt); 3266 3267 xs_format_peer_addresses(xprt, "udp", RPCBIND_NETID_UDP6); 3268 break; 3269 default: 3270 ret = ERR_PTR(-EAFNOSUPPORT); 3271 goto out_err; 3272 } 3273 3274 if (xprt_bound(xprt)) 3275 dprintk("RPC: set up xprt to %s (port %s) via %s\n", 3276 xprt->address_strings[RPC_DISPLAY_ADDR], 3277 xprt->address_strings[RPC_DISPLAY_PORT], 3278 xprt->address_strings[RPC_DISPLAY_PROTO]); 3279 else 3280 dprintk("RPC: set up xprt to %s (autobind) via %s\n", 3281 xprt->address_strings[RPC_DISPLAY_ADDR], 3282 xprt->address_strings[RPC_DISPLAY_PROTO]); 3283 3284 if (try_module_get(THIS_MODULE)) 3285 return xprt; 3286 ret = ERR_PTR(-EINVAL); 3287 out_err: 3288 xs_xprt_free(xprt); 3289 return ret; 3290 } 3291 3292 static const struct rpc_timeout xs_tcp_default_timeout = { 3293 .to_initval = 60 * HZ, 3294 .to_maxval = 60 * HZ, 3295 .to_retries = 2, 3296 }; 3297 3298 /** 3299 * xs_setup_tcp - Set up transport to use a TCP socket 3300 * @args: rpc transport creation arguments 3301 * 3302 */ 3303 static struct rpc_xprt *xs_setup_tcp(struct xprt_create *args) 3304 { 3305 struct sockaddr *addr = args->dstaddr; 3306 struct rpc_xprt *xprt; 3307 struct sock_xprt *transport; 3308 struct rpc_xprt *ret; 3309 unsigned int max_slot_table_size = xprt_max_tcp_slot_table_entries; 3310 3311 if (args->flags & XPRT_CREATE_INFINITE_SLOTS) 3312 max_slot_table_size = RPC_MAX_SLOT_TABLE_LIMIT; 3313 3314 xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries, 3315 max_slot_table_size); 3316 if (IS_ERR(xprt)) 3317 return xprt; 3318 transport = container_of(xprt, struct sock_xprt, xprt); 3319 3320 xprt->prot = IPPROTO_TCP; 3321 xprt->xprt_class = &xs_tcp_transport; 3322 xprt->max_payload = RPC_MAX_FRAGMENT_SIZE; 3323 3324 xprt->bind_timeout = XS_BIND_TO; 3325 xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO; 3326 xprt->idle_timeout = XS_IDLE_DISC_TO; 3327 3328 xprt->ops = &xs_tcp_ops; 3329 xprt->timeout = &xs_tcp_default_timeout; 3330 3331 xprt->max_reconnect_timeout = xprt->timeout->to_maxval; 3332 xprt->connect_timeout = xprt->timeout->to_initval * 3333 (xprt->timeout->to_retries + 1); 3334 3335 INIT_WORK(&transport->recv_worker, xs_stream_data_receive_workfn); 3336 INIT_WORK(&transport->error_worker, xs_error_handle); 3337 INIT_DELAYED_WORK(&transport->connect_worker, xs_tcp_setup_socket); 3338 3339 switch (addr->sa_family) { 3340 case AF_INET: 3341 if (((struct sockaddr_in *)addr)->sin_port != htons(0)) 3342 xprt_set_bound(xprt); 3343 3344 xs_format_peer_addresses(xprt, "tcp", RPCBIND_NETID_TCP); 3345 break; 3346 case AF_INET6: 3347 if (((struct sockaddr_in6 *)addr)->sin6_port != htons(0)) 3348 xprt_set_bound(xprt); 3349 3350 xs_format_peer_addresses(xprt, "tcp", RPCBIND_NETID_TCP6); 3351 break; 3352 default: 3353 ret = ERR_PTR(-EAFNOSUPPORT); 3354 goto out_err; 3355 } 3356 3357 if (xprt_bound(xprt)) 3358 dprintk("RPC: set up xprt to %s (port %s) via %s\n", 3359 xprt->address_strings[RPC_DISPLAY_ADDR], 3360 xprt->address_strings[RPC_DISPLAY_PORT], 3361 xprt->address_strings[RPC_DISPLAY_PROTO]); 3362 else 3363 dprintk("RPC: set up xprt to %s (autobind) via %s\n", 3364 xprt->address_strings[RPC_DISPLAY_ADDR], 3365 xprt->address_strings[RPC_DISPLAY_PROTO]); 3366 3367 if (try_module_get(THIS_MODULE)) 3368 return xprt; 3369 ret = ERR_PTR(-EINVAL); 3370 out_err: 3371 xs_xprt_free(xprt); 3372 return ret; 3373 } 3374 3375 /** 3376 * xs_setup_tcp_tls - Set up transport to use a TCP with TLS 3377 * @args: rpc transport creation arguments 3378 * 3379 */ 3380 static struct rpc_xprt *xs_setup_tcp_tls(struct xprt_create *args) 3381 { 3382 struct sockaddr *addr = args->dstaddr; 3383 struct rpc_xprt *xprt; 3384 struct sock_xprt *transport; 3385 struct rpc_xprt *ret; 3386 unsigned int max_slot_table_size = xprt_max_tcp_slot_table_entries; 3387 3388 if (args->flags & XPRT_CREATE_INFINITE_SLOTS) 3389 max_slot_table_size = RPC_MAX_SLOT_TABLE_LIMIT; 3390 3391 xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries, 3392 max_slot_table_size); 3393 if (IS_ERR(xprt)) 3394 return xprt; 3395 transport = container_of(xprt, struct sock_xprt, xprt); 3396 3397 xprt->prot = IPPROTO_TCP; 3398 xprt->xprt_class = &xs_tcp_transport; 3399 xprt->max_payload = RPC_MAX_FRAGMENT_SIZE; 3400 3401 xprt->bind_timeout = XS_BIND_TO; 3402 xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO; 3403 xprt->idle_timeout = XS_IDLE_DISC_TO; 3404 3405 xprt->ops = &xs_tcp_ops; 3406 xprt->timeout = &xs_tcp_default_timeout; 3407 3408 xprt->max_reconnect_timeout = xprt->timeout->to_maxval; 3409 xprt->connect_timeout = xprt->timeout->to_initval * 3410 (xprt->timeout->to_retries + 1); 3411 3412 INIT_WORK(&transport->recv_worker, xs_stream_data_receive_workfn); 3413 INIT_WORK(&transport->error_worker, xs_error_handle); 3414 3415 switch (args->xprtsec.policy) { 3416 case RPC_XPRTSEC_TLS_ANON: 3417 case RPC_XPRTSEC_TLS_X509: 3418 xprt->xprtsec = args->xprtsec; 3419 INIT_DELAYED_WORK(&transport->connect_worker, 3420 xs_tcp_tls_setup_socket); 3421 break; 3422 default: 3423 ret = ERR_PTR(-EACCES); 3424 goto out_err; 3425 } 3426 3427 switch (addr->sa_family) { 3428 case AF_INET: 3429 if (((struct sockaddr_in *)addr)->sin_port != htons(0)) 3430 xprt_set_bound(xprt); 3431 3432 xs_format_peer_addresses(xprt, "tcp", RPCBIND_NETID_TCP); 3433 break; 3434 case AF_INET6: 3435 if (((struct sockaddr_in6 *)addr)->sin6_port != htons(0)) 3436 xprt_set_bound(xprt); 3437 3438 xs_format_peer_addresses(xprt, "tcp", RPCBIND_NETID_TCP6); 3439 break; 3440 default: 3441 ret = ERR_PTR(-EAFNOSUPPORT); 3442 goto out_err; 3443 } 3444 3445 if (xprt_bound(xprt)) 3446 dprintk("RPC: set up xprt to %s (port %s) via %s\n", 3447 xprt->address_strings[RPC_DISPLAY_ADDR], 3448 xprt->address_strings[RPC_DISPLAY_PORT], 3449 xprt->address_strings[RPC_DISPLAY_PROTO]); 3450 else 3451 dprintk("RPC: set up xprt to %s (autobind) via %s\n", 3452 xprt->address_strings[RPC_DISPLAY_ADDR], 3453 xprt->address_strings[RPC_DISPLAY_PROTO]); 3454 3455 if (try_module_get(THIS_MODULE)) 3456 return xprt; 3457 ret = ERR_PTR(-EINVAL); 3458 out_err: 3459 xs_xprt_free(xprt); 3460 return ret; 3461 } 3462 3463 /** 3464 * xs_setup_bc_tcp - Set up transport to use a TCP backchannel socket 3465 * @args: rpc transport creation arguments 3466 * 3467 */ 3468 static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args) 3469 { 3470 struct sockaddr *addr = args->dstaddr; 3471 struct rpc_xprt *xprt; 3472 struct sock_xprt *transport; 3473 struct svc_sock *bc_sock; 3474 struct rpc_xprt *ret; 3475 3476 xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries, 3477 xprt_tcp_slot_table_entries); 3478 if (IS_ERR(xprt)) 3479 return xprt; 3480 transport = container_of(xprt, struct sock_xprt, xprt); 3481 3482 xprt->prot = IPPROTO_TCP; 3483 xprt->xprt_class = &xs_bc_tcp_transport; 3484 xprt->max_payload = RPC_MAX_FRAGMENT_SIZE; 3485 xprt->timeout = &xs_tcp_default_timeout; 3486 3487 /* backchannel */ 3488 xprt_set_bound(xprt); 3489 xprt->bind_timeout = 0; 3490 xprt->reestablish_timeout = 0; 3491 xprt->idle_timeout = 0; 3492 3493 xprt->ops = &bc_tcp_ops; 3494 3495 switch (addr->sa_family) { 3496 case AF_INET: 3497 xs_format_peer_addresses(xprt, "tcp", 3498 RPCBIND_NETID_TCP); 3499 break; 3500 case AF_INET6: 3501 xs_format_peer_addresses(xprt, "tcp", 3502 RPCBIND_NETID_TCP6); 3503 break; 3504 default: 3505 ret = ERR_PTR(-EAFNOSUPPORT); 3506 goto out_err; 3507 } 3508 3509 dprintk("RPC: set up xprt to %s (port %s) via %s\n", 3510 xprt->address_strings[RPC_DISPLAY_ADDR], 3511 xprt->address_strings[RPC_DISPLAY_PORT], 3512 xprt->address_strings[RPC_DISPLAY_PROTO]); 3513 3514 /* 3515 * Once we've associated a backchannel xprt with a connection, 3516 * we want to keep it around as long as the connection lasts, 3517 * in case we need to start using it for a backchannel again; 3518 * this reference won't be dropped until bc_xprt is destroyed. 3519 */ 3520 xprt_get(xprt); 3521 args->bc_xprt->xpt_bc_xprt = xprt; 3522 xprt->bc_xprt = args->bc_xprt; 3523 bc_sock = container_of(args->bc_xprt, struct svc_sock, sk_xprt); 3524 transport->sock = bc_sock->sk_sock; 3525 transport->inet = bc_sock->sk_sk; 3526 3527 /* 3528 * Since we don't want connections for the backchannel, we set 3529 * the xprt status to connected 3530 */ 3531 xprt_set_connected(xprt); 3532 3533 if (try_module_get(THIS_MODULE)) 3534 return xprt; 3535 3536 args->bc_xprt->xpt_bc_xprt = NULL; 3537 args->bc_xprt->xpt_bc_xps = NULL; 3538 xprt_put(xprt); 3539 ret = ERR_PTR(-EINVAL); 3540 out_err: 3541 xs_xprt_free(xprt); 3542 return ret; 3543 } 3544 3545 static struct xprt_class xs_local_transport = { 3546 .list = LIST_HEAD_INIT(xs_local_transport.list), 3547 .name = "named UNIX socket", 3548 .owner = THIS_MODULE, 3549 .ident = XPRT_TRANSPORT_LOCAL, 3550 .setup = xs_setup_local, 3551 .netid = { "" }, 3552 }; 3553 3554 static struct xprt_class xs_udp_transport = { 3555 .list = LIST_HEAD_INIT(xs_udp_transport.list), 3556 .name = "udp", 3557 .owner = THIS_MODULE, 3558 .ident = XPRT_TRANSPORT_UDP, 3559 .setup = xs_setup_udp, 3560 .netid = { "udp", "udp6", "" }, 3561 }; 3562 3563 static struct xprt_class xs_tcp_transport = { 3564 .list = LIST_HEAD_INIT(xs_tcp_transport.list), 3565 .name = "tcp", 3566 .owner = THIS_MODULE, 3567 .ident = XPRT_TRANSPORT_TCP, 3568 .setup = xs_setup_tcp, 3569 .netid = { "tcp", "tcp6", "" }, 3570 }; 3571 3572 static struct xprt_class xs_tcp_tls_transport = { 3573 .list = LIST_HEAD_INIT(xs_tcp_tls_transport.list), 3574 .name = "tcp-with-tls", 3575 .owner = THIS_MODULE, 3576 .ident = XPRT_TRANSPORT_TCP_TLS, 3577 .setup = xs_setup_tcp_tls, 3578 .netid = { "tcp", "tcp6", "" }, 3579 }; 3580 3581 static struct xprt_class xs_bc_tcp_transport = { 3582 .list = LIST_HEAD_INIT(xs_bc_tcp_transport.list), 3583 .name = "tcp NFSv4.1 backchannel", 3584 .owner = THIS_MODULE, 3585 .ident = XPRT_TRANSPORT_BC_TCP, 3586 .setup = xs_setup_bc_tcp, 3587 .netid = { "" }, 3588 }; 3589 3590 /** 3591 * init_socket_xprt - set up xprtsock's sysctls, register with RPC client 3592 * 3593 */ 3594 int init_socket_xprt(void) 3595 { 3596 if (!sunrpc_table_header) 3597 sunrpc_table_header = register_sysctl("sunrpc", xs_tunables_table); 3598 3599 xprt_register_transport(&xs_local_transport); 3600 xprt_register_transport(&xs_udp_transport); 3601 xprt_register_transport(&xs_tcp_transport); 3602 xprt_register_transport(&xs_tcp_tls_transport); 3603 xprt_register_transport(&xs_bc_tcp_transport); 3604 3605 return 0; 3606 } 3607 3608 /** 3609 * cleanup_socket_xprt - remove xprtsock's sysctls, unregister 3610 * 3611 */ 3612 void cleanup_socket_xprt(void) 3613 { 3614 if (sunrpc_table_header) { 3615 unregister_sysctl_table(sunrpc_table_header); 3616 sunrpc_table_header = NULL; 3617 } 3618 3619 xprt_unregister_transport(&xs_local_transport); 3620 xprt_unregister_transport(&xs_udp_transport); 3621 xprt_unregister_transport(&xs_tcp_transport); 3622 xprt_unregister_transport(&xs_tcp_tls_transport); 3623 xprt_unregister_transport(&xs_bc_tcp_transport); 3624 } 3625 3626 static int param_set_portnr(const char *val, const struct kernel_param *kp) 3627 { 3628 return param_set_uint_minmax(val, kp, 3629 RPC_MIN_RESVPORT, 3630 RPC_MAX_RESVPORT); 3631 } 3632 3633 static const struct kernel_param_ops param_ops_portnr = { 3634 .set = param_set_portnr, 3635 .get = param_get_uint, 3636 }; 3637 3638 #define param_check_portnr(name, p) \ 3639 __param_check(name, p, unsigned int); 3640 3641 module_param_named(min_resvport, xprt_min_resvport, portnr, 0644); 3642 module_param_named(max_resvport, xprt_max_resvport, portnr, 0644); 3643 3644 static int param_set_slot_table_size(const char *val, 3645 const struct kernel_param *kp) 3646 { 3647 return param_set_uint_minmax(val, kp, 3648 RPC_MIN_SLOT_TABLE, 3649 RPC_MAX_SLOT_TABLE); 3650 } 3651 3652 static const struct kernel_param_ops param_ops_slot_table_size = { 3653 .set = param_set_slot_table_size, 3654 .get = param_get_uint, 3655 }; 3656 3657 #define param_check_slot_table_size(name, p) \ 3658 __param_check(name, p, unsigned int); 3659 3660 static int param_set_max_slot_table_size(const char *val, 3661 const struct kernel_param *kp) 3662 { 3663 return param_set_uint_minmax(val, kp, 3664 RPC_MIN_SLOT_TABLE, 3665 RPC_MAX_SLOT_TABLE_LIMIT); 3666 } 3667 3668 static const struct kernel_param_ops param_ops_max_slot_table_size = { 3669 .set = param_set_max_slot_table_size, 3670 .get = param_get_uint, 3671 }; 3672 3673 #define param_check_max_slot_table_size(name, p) \ 3674 __param_check(name, p, unsigned int); 3675 3676 module_param_named(tcp_slot_table_entries, xprt_tcp_slot_table_entries, 3677 slot_table_size, 0644); 3678 module_param_named(tcp_max_slot_table_entries, xprt_max_tcp_slot_table_entries, 3679 max_slot_table_size, 0644); 3680 module_param_named(udp_slot_table_entries, xprt_udp_slot_table_entries, 3681 slot_table_size, 0644); 3682