/*
 *  linux/net/sunrpc/xprtsock.c
 *
 *  Client-side transport implementation for sockets.
 *
 *  TCP callback races fixes (C) 1998 Red Hat
 *  TCP send fixes (C) 1998 Red Hat
 *  TCP NFS related read + write fixes
 *   (C) 1999 Dave Airlie, University of Limerick, Ireland <airlied@linux.ie>
 *
 *  Rewrite of large parts of the code in order to stabilize TCP stuff.
 *  Fix behaviour when socket buffer is full.
 *   (C) 1999 Trond Myklebust <trond.myklebust@fys.uio.no>
 *
 *  IP socket transport implementation, (C) 2005 Chuck Lever <cel@netapp.com>
 *
 *  IPv6 support contributed by Gilles Quillard, Bull Open Source, 2005.
 *   <gilles.quillard@bull.net>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/capability.h>
#include <linux/pagemap.h>
#include <linux/errno.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/net.h>
#include <linux/mm.h>
#include <linux/un.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/addr.h>
#include <linux/sunrpc/sched.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/xprtsock.h>
#include <linux/file.h>
#ifdef CONFIG_SUNRPC_BACKCHANNEL
#include <linux/sunrpc/bc_xprt.h>
#endif

#include <net/sock.h>
#include <net/checksum.h>
#include <net/udp.h>
#include <net/tcp.h>

#include <trace/events/sunrpc.h>

#include "sunrpc.h"

static void xs_close(struct rpc_xprt *xprt);

/*
 * xprtsock tunables
 */
static unsigned int xprt_udp_slot_table_entries = RPC_DEF_SLOT_TABLE;
static unsigned int xprt_tcp_slot_table_entries = RPC_MIN_SLOT_TABLE;
static unsigned int xprt_max_tcp_slot_table_entries = RPC_MAX_SLOT_TABLE;

static unsigned int xprt_min_resvport = RPC_DEF_MIN_RESVPORT;
static unsigned int xprt_max_resvport = RPC_DEF_MAX_RESVPORT;

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)

#define XS_TCP_LINGER_TO	(15U * HZ)
static unsigned int xs_tcp_fin_timeout __read_mostly = XS_TCP_LINGER_TO;

/*
 * We can register our own files under /proc/sys/sunrpc by
 * calling register_sysctl_table() again.  The files in that
 * directory become the union of all files registered there.
 *
 * We simply need to make sure that we don't collide with
 * someone else's file names!
 */

static unsigned int min_slot_table_size = RPC_MIN_SLOT_TABLE;
static unsigned int max_slot_table_size = RPC_MAX_SLOT_TABLE;
static unsigned int max_tcp_slot_table_limit = RPC_MAX_SLOT_TABLE_LIMIT;
static unsigned int xprt_min_resvport_limit = RPC_MIN_RESVPORT;
static unsigned int xprt_max_resvport_limit = RPC_MAX_RESVPORT;

static struct ctl_table_header *sunrpc_table_header;

/*
 * FIXME: changing the UDP slot table size should also resize the UDP
 *        socket buffers for existing UDP transports
 */
static struct ctl_table xs_tunables_table[] = {
	{
		.procname	= "udp_slot_table_entries",
		.data		= &xprt_udp_slot_table_entries,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &min_slot_table_size,
		.extra2		= &max_slot_table_size
	},
	{
		.procname	= "tcp_slot_table_entries",
		.data		= &xprt_tcp_slot_table_entries,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &min_slot_table_size,
		.extra2		= &max_slot_table_size
	},
	{
		.procname	= "tcp_max_slot_table_entries",
		.data		= &xprt_max_tcp_slot_table_entries,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &min_slot_table_size,
		.extra2		= &max_tcp_slot_table_limit
	},
	{
		.procname	= "min_resvport",
		.data		= &xprt_min_resvport,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &xprt_min_resvport_limit,
		.extra2		= &xprt_max_resvport_limit
	},
	{
		.procname	= "max_resvport",
		.data		= &xprt_max_resvport,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &xprt_min_resvport_limit,
		.extra2		= &xprt_max_resvport_limit
	},
	{
		.procname	= "tcp_fin_timeout",
		.data		= &xs_tcp_fin_timeout,
		.maxlen		= sizeof(xs_tcp_fin_timeout),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{ },
};

static struct ctl_table sunrpc_table[] = {
	{
		.procname	= "sunrpc",
		.mode		= 0555,
		.child		= xs_tunables_table
	},
	{ },
};

#endif
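/*
 * Illustrative sketch (editor's note, not part of the original file):
 * once the table above has been registered via register_sysctl_table()
 * (done in the module init code, not shown in this excerpt), the
 * tunables appear under /proc/sys/sunrpc and can be inspected or
 * changed from userspace, e.g.
 *
 *	cat /proc/sys/sunrpc/udp_slot_table_entries
 *	echo 128 > /proc/sys/sunrpc/tcp_max_slot_table_entries
 *
 * Writes are clamped by proc_dointvec_minmax() to the extra1/extra2
 * bounds declared in each entry.
 */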
/*
 * Wait duration for a reply from the RPC portmapper.
 */
#define XS_BIND_TO		(60U * HZ)

/*
 * Delay if a UDP socket connect error occurs.  This is most likely some
 * kind of resource problem on the local host.
 */
#define XS_UDP_REEST_TO		(2U * HZ)

/*
 * The reestablish timeout allows clients to delay for a bit before attempting
 * to reconnect to a server that just dropped our connection.
 *
 * We implement an exponential backoff when trying to reestablish a TCP
 * transport connection with the server.  Some servers like to drop a TCP
 * connection when they are overworked, so we start with a short timeout and
 * increase over time if the server is down or not responding.
 */
#define XS_TCP_INIT_REEST_TO	(3U * HZ)
#define XS_TCP_MAX_REEST_TO	(5U * 60 * HZ)

/*
 * TCP idle timeout; client drops the transport socket if it is idle
 * for this long.  Note that we also timeout UDP sockets to prevent
 * holding port numbers when there is no RPC traffic.
 */
#define XS_IDLE_DISC_TO		(5U * 60 * HZ)
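/*
 * Worked example (illustrative): with an exponential (doubling) backoff
 * starting at XS_TCP_INIT_REEST_TO and capped at XS_TCP_MAX_REEST_TO,
 * successive reconnect delays would be roughly
 *
 *	3s -> 6s -> 12s -> 24s -> ... -> 300s (cap)
 *
 * The doubling itself happens in the connect path (not shown in this
 * excerpt); these macros only supply the initial value and the ceiling.
 */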
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# undef  RPC_DEBUG_DATA
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

#ifdef RPC_DEBUG_DATA
static void xs_pktdump(char *msg, u32 *packet, unsigned int count)
{
	u8 *buf = (u8 *) packet;
	int j;

	dprintk("RPC:       %s\n", msg);
	for (j = 0; j < count && j < 128; j += 4) {
		if (!(j & 31)) {
			if (j)
				dprintk("\n");
			dprintk("0x%04x ", j);
		}
		dprintk("%02x%02x%02x%02x ",
			buf[j], buf[j+1], buf[j+2], buf[j+3]);
	}
	dprintk("\n");
}
#else
static inline void xs_pktdump(char *msg, u32 *packet, unsigned int count)
{
	/* NOP */
}
#endif

static inline struct rpc_xprt *xprt_from_sock(struct sock *sk)
{
	return (struct rpc_xprt *) sk->sk_user_data;
}

static inline struct sockaddr *xs_addr(struct rpc_xprt *xprt)
{
	return (struct sockaddr *) &xprt->addr;
}

static inline struct sockaddr_un *xs_addr_un(struct rpc_xprt *xprt)
{
	return (struct sockaddr_un *) &xprt->addr;
}

static inline struct sockaddr_in *xs_addr_in(struct rpc_xprt *xprt)
{
	return (struct sockaddr_in *) &xprt->addr;
}

static inline struct sockaddr_in6 *xs_addr_in6(struct rpc_xprt *xprt)
{
	return (struct sockaddr_in6 *) &xprt->addr;
}

static void xs_format_common_peer_addresses(struct rpc_xprt *xprt)
{
	struct sockaddr *sap = xs_addr(xprt);
	struct sockaddr_in6 *sin6;
	struct sockaddr_in *sin;
	struct sockaddr_un *sun;
	char buf[128];

	switch (sap->sa_family) {
	case AF_LOCAL:
		sun = xs_addr_un(xprt);
		strlcpy(buf, sun->sun_path, sizeof(buf));
		xprt->address_strings[RPC_DISPLAY_ADDR] =
						kstrdup(buf, GFP_KERNEL);
		break;
	case AF_INET:
		(void)rpc_ntop(sap, buf, sizeof(buf));
		xprt->address_strings[RPC_DISPLAY_ADDR] =
						kstrdup(buf, GFP_KERNEL);
		sin = xs_addr_in(xprt);
		snprintf(buf, sizeof(buf), "%08x", ntohl(sin->sin_addr.s_addr));
		break;
	case AF_INET6:
		(void)rpc_ntop(sap, buf, sizeof(buf));
		xprt->address_strings[RPC_DISPLAY_ADDR] =
						kstrdup(buf, GFP_KERNEL);
		sin6 = xs_addr_in6(xprt);
		snprintf(buf, sizeof(buf), "%pi6", &sin6->sin6_addr);
		break;
	default:
		BUG();
	}

	xprt->address_strings[RPC_DISPLAY_HEX_ADDR] = kstrdup(buf, GFP_KERNEL);
}

static void xs_format_common_peer_ports(struct rpc_xprt *xprt)
{
	struct sockaddr *sap = xs_addr(xprt);
	char buf[128];

	snprintf(buf, sizeof(buf), "%u", rpc_get_port(sap));
	xprt->address_strings[RPC_DISPLAY_PORT] = kstrdup(buf, GFP_KERNEL);

	snprintf(buf, sizeof(buf), "%4hx", rpc_get_port(sap));
	xprt->address_strings[RPC_DISPLAY_HEX_PORT] = kstrdup(buf, GFP_KERNEL);
}

static void xs_format_peer_addresses(struct rpc_xprt *xprt,
				     const char *protocol,
				     const char *netid)
{
	xprt->address_strings[RPC_DISPLAY_PROTO] = protocol;
	xprt->address_strings[RPC_DISPLAY_NETID] = netid;
	xs_format_common_peer_addresses(xprt);
	xs_format_common_peer_ports(xprt);
}

static void xs_update_peer_port(struct rpc_xprt *xprt)
{
	kfree(xprt->address_strings[RPC_DISPLAY_HEX_PORT]);
	kfree(xprt->address_strings[RPC_DISPLAY_PORT]);

	xs_format_common_peer_ports(xprt);
}
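/*
 * Worked example (illustrative): for an AF_INET peer at 192.168.1.1,
 * port 2049, the helpers above produce
 *
 *	RPC_DISPLAY_ADDR	"192.168.1.1"
 *	RPC_DISPLAY_HEX_ADDR	"c0a80101"	(%08x of the address)
 *	RPC_DISPLAY_PORT	"2049"
 *	RPC_DISPLAY_HEX_PORT	" 801"		(%4hx of the port)
 */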
static void xs_free_peer_addresses(struct rpc_xprt *xprt)
{
	unsigned int i;

	for (i = 0; i < RPC_DISPLAY_MAX; i++)
		switch (i) {
		case RPC_DISPLAY_PROTO:
		case RPC_DISPLAY_NETID:
			continue;
		default:
			kfree(xprt->address_strings[i]);
		}
}

#define XS_SENDMSG_FLAGS	(MSG_DONTWAIT | MSG_NOSIGNAL)

static int xs_send_kvec(struct socket *sock, struct sockaddr *addr, int addrlen, struct kvec *vec, unsigned int base, int more)
{
	struct msghdr msg = {
		.msg_name	= addr,
		.msg_namelen	= addrlen,
		.msg_flags	= XS_SENDMSG_FLAGS | (more ? MSG_MORE : 0),
	};
	struct kvec iov = {
		.iov_base	= vec->iov_base + base,
		.iov_len	= vec->iov_len - base,
	};

	if (iov.iov_len != 0)
		return kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len);
	return kernel_sendmsg(sock, &msg, NULL, 0, 0);
}

static int xs_send_pagedata(struct socket *sock, struct xdr_buf *xdr, unsigned int base, int more, bool zerocopy, int *sent_p)
{
	ssize_t (*do_sendpage)(struct socket *sock, struct page *page,
			int offset, size_t size, int flags);
	struct page **ppage;
	unsigned int remainder;
	int err;

	remainder = xdr->page_len - base;
	base += xdr->page_base;
	ppage = xdr->pages + (base >> PAGE_SHIFT);
	base &= ~PAGE_MASK;
	do_sendpage = sock->ops->sendpage;
	if (!zerocopy)
		do_sendpage = sock_no_sendpage;
	for(;;) {
		unsigned int len = min_t(unsigned int, PAGE_SIZE - base, remainder);
		int flags = XS_SENDMSG_FLAGS;

		remainder -= len;
		if (more)
			flags |= MSG_MORE;
		if (remainder != 0)
			flags |= MSG_SENDPAGE_NOTLAST | MSG_MORE;
		err = do_sendpage(sock, *ppage, base, len, flags);
		if (remainder == 0 || err != len)
			break;
		*sent_p += err;
		ppage++;
		base = 0;
	}
	if (err > 0) {
		*sent_p += err;
		err = 0;
	}
	return err;
}

/**
 * xs_sendpages - write pages directly to a socket
 * @sock: socket to send on
 * @addr: UDP only -- address of destination
 * @addrlen: UDP only -- length of destination address
 * @xdr: buffer containing this request
 * @base: starting position in the buffer
 * @zerocopy: true if it is safe to use sendpage()
 * @sent_p: return the total number of bytes successfully queued for sending
 *
 */
static int xs_sendpages(struct socket *sock, struct sockaddr *addr, int addrlen, struct xdr_buf *xdr, unsigned int base, bool zerocopy, int *sent_p)
{
	unsigned int remainder = xdr->len - base;
	int err = 0;
	int sent = 0;

	if (unlikely(!sock))
		return -ENOTSOCK;

	if (base != 0) {
		addr = NULL;
		addrlen = 0;
	}

	if (base < xdr->head[0].iov_len || addr != NULL) {
		unsigned int len = xdr->head[0].iov_len - base;
		remainder -= len;
		err = xs_send_kvec(sock, addr, addrlen, &xdr->head[0], base, remainder != 0);
		if (remainder == 0 || err != len)
			goto out;
		*sent_p += err;
		base = 0;
	} else
		base -= xdr->head[0].iov_len;

	if (base < xdr->page_len) {
		unsigned int len = xdr->page_len - base;
		remainder -= len;
		err = xs_send_pagedata(sock, xdr, base, remainder != 0, zerocopy, &sent);
		*sent_p += sent;
		if (remainder == 0 || sent != len)
			goto out;
		base = 0;
	} else
		base -= xdr->page_len;

	if (base >= xdr->tail[0].iov_len)
		return 0;
	err = xs_send_kvec(sock, NULL, 0, &xdr->tail[0], base, 0);
out:
	if (err > 0) {
		*sent_p += err;
		err = 0;
	}
	return err;
}
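/*
 * Illustrative note: an xdr_buf is sent in up to three pieces -- the
 * head kvec, the page array, and the tail kvec.  For example, a request
 * with a 100-byte head, 4096 bytes of page data and a 16-byte tail that
 * has already transmitted 2000 bytes (base == 2000) resumes 1900 bytes
 * into the page section: the head is skipped (base -= 100) and
 * xs_send_pagedata() is entered with base == 1900.
 */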
static void xs_nospace_callback(struct rpc_task *task)
{
	struct sock_xprt *transport =
		container_of(task->tk_rqstp->rq_xprt, struct sock_xprt, xprt);

	transport->inet->sk_write_pending--;
}

/**
 * xs_nospace - place task on wait queue if transmit was incomplete
 * @task: task to put to sleep
 *
 */
static int xs_nospace(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;
	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
	struct sock *sk = transport->inet;
	int ret = -EAGAIN;

	dprintk("RPC: %5u xmit incomplete (%u left of %u)\n",
			task->tk_pid, req->rq_slen - req->rq_bytes_sent,
			req->rq_slen);

	/* Protect against races with write_space */
	spin_lock_bh(&xprt->transport_lock);

	/* Don't race with disconnect */
	if (xprt_connected(xprt)) {
		/* wait for more buffer space */
		sk->sk_write_pending++;
		xprt_wait_for_buffer_space(task, xs_nospace_callback);
	} else
		ret = -ENOTCONN;

	spin_unlock_bh(&xprt->transport_lock);

	/* Race breaker in case memory is freed before above code is called */
	sk->sk_write_space(sk);
	return ret;
}

/*
 * Construct a stream transport record marker in @buf.
 */
static inline void xs_encode_stream_record_marker(struct xdr_buf *buf)
{
	u32 reclen = buf->len - sizeof(rpc_fraghdr);
	rpc_fraghdr *base = buf->head[0].iov_base;

	*base = cpu_to_be32(RPC_LAST_STREAM_FRAGMENT | reclen);
}
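/*
 * Worked example (illustrative): for a 100-byte RPC message the buffer
 * also carries the 4-byte marker, so buf->len == 104 and reclen == 100.
 * The value written is 0x80000064: the top bit (RPC_LAST_STREAM_FRAGMENT)
 * marks this as the final fragment of the record, and the low 31 bits
 * carry the fragment length, per the RFC 5531 record marking scheme.
 */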
/**
 * xs_local_send_request - write an RPC request to an AF_LOCAL socket
 * @task: RPC task that manages the state of an RPC request
 *
 * Return values:
 *        0:	The request has been sent
 *   EAGAIN:	The socket was blocked, please call again later to
 *		complete the request
 * ENOTCONN:	Caller needs to invoke connect logic then call again
 *    other:	Some other error occurred, the request was not sent
 */
static int xs_local_send_request(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;
	struct sock_xprt *transport =
				container_of(xprt, struct sock_xprt, xprt);
	struct xdr_buf *xdr = &req->rq_snd_buf;
	int status;
	int sent = 0;

	xs_encode_stream_record_marker(&req->rq_snd_buf);

	xs_pktdump("packet data:",
			req->rq_svec->iov_base, req->rq_svec->iov_len);

	status = xs_sendpages(transport->sock, NULL, 0, xdr, req->rq_bytes_sent,
			      true, &sent);
	dprintk("RPC:       %s(%u) = %d\n",
			__func__, xdr->len - req->rq_bytes_sent, status);

	if (status == -EAGAIN && sock_writeable(transport->inet))
		status = -ENOBUFS;

	if (likely(sent > 0) || status == 0) {
		req->rq_bytes_sent += sent;
		req->rq_xmit_bytes_sent += sent;
		if (likely(req->rq_bytes_sent >= req->rq_slen)) {
			req->rq_bytes_sent = 0;
			return 0;
		}
		status = -EAGAIN;
	}

	switch (status) {
	case -ENOBUFS:
		break;
	case -EAGAIN:
		status = xs_nospace(task);
		break;
	default:
		dprintk("RPC:       sendmsg returned unrecognized error %d\n",
			-status);
		/* fall through */
	case -EPIPE:
		xs_close(xprt);
		status = -ENOTCONN;
	}

	return status;
}

/**
 * xs_udp_send_request - write an RPC request to a UDP socket
 * @task: address of RPC task that manages the state of an RPC request
 *
 * Return values:
 *        0:	The request has been sent
 *   EAGAIN:	The socket was blocked, please call again later to
 *		complete the request
 * ENOTCONN:	Caller needs to invoke connect logic then call again
 *    other:	Some other error occurred, the request was not sent
 */
static int xs_udp_send_request(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;
	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
	struct xdr_buf *xdr = &req->rq_snd_buf;
	int sent = 0;
	int status;

	xs_pktdump("packet data:",
				req->rq_svec->iov_base,
				req->rq_svec->iov_len);

	if (!xprt_bound(xprt))
		return -ENOTCONN;
	status = xs_sendpages(transport->sock, xs_addr(xprt), xprt->addrlen,
			      xdr, req->rq_bytes_sent, true, &sent);

	dprintk("RPC:       xs_udp_send_request(%u) = %d\n",
			xdr->len - req->rq_bytes_sent, status);

	/* firewall is blocking us, don't return -EAGAIN or we end up looping */
	if (status == -EPERM)
		goto process_status;

	if (status == -EAGAIN && sock_writeable(transport->inet))
		status = -ENOBUFS;

	if (sent > 0 || status == 0) {
		req->rq_xmit_bytes_sent += sent;
		if (sent >= req->rq_slen)
			return 0;
		/* Still some bytes left; set up for a retry later. */
		status = -EAGAIN;
	}

process_status:
	switch (status) {
	case -ENOTSOCK:
		status = -ENOTCONN;
		/* Should we call xs_close() here? */
		break;
	case -EAGAIN:
		status = xs_nospace(task);
		break;
	case -ENETUNREACH:
	case -ENOBUFS:
	case -EPIPE:
	case -ECONNREFUSED:
	case -EPERM:
		/* When the server has died, an ICMP port unreachable message
		 * prompts ECONNREFUSED. */
		break;
	default:
		dprintk("RPC:       sendmsg returned unrecognized error %d\n",
			-status);
	}

	return status;
}
/**
 * xs_tcp_send_request - write an RPC request to a TCP socket
 * @task: address of RPC task that manages the state of an RPC request
 *
 * Return values:
 *        0:	The request has been sent
 *   EAGAIN:	The socket was blocked, please call again later to
 *		complete the request
 * ENOTCONN:	Caller needs to invoke connect logic then call again
 *    other:	Some other error occurred, the request was not sent
 *
 * XXX: In the case of soft timeouts, should we eventually give up
 *	if sendmsg is not able to make progress?
 */
static int xs_tcp_send_request(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;
	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
	struct xdr_buf *xdr = &req->rq_snd_buf;
	bool zerocopy = true;
	int status;
	int sent;

	xs_encode_stream_record_marker(&req->rq_snd_buf);

	xs_pktdump("packet data:",
				req->rq_svec->iov_base,
				req->rq_svec->iov_len);
	/* Don't use zero copy if this is a resend. If the RPC call
	 * completes while the socket holds a reference to the pages,
	 * then we may end up resending corrupted data.
	 */
	if (task->tk_flags & RPC_TASK_SENT)
		zerocopy = false;

	/* Continue transmitting the packet/record. We must be careful
	 * to cope with writespace callbacks arriving _after_ we have
	 * called sendmsg(). */
	while (1) {
		sent = 0;
		status = xs_sendpages(transport->sock, NULL, 0, xdr,
				      req->rq_bytes_sent, zerocopy, &sent);

		dprintk("RPC:       xs_tcp_send_request(%u) = %d\n",
				xdr->len - req->rq_bytes_sent, status);

		/* If we've sent the entire packet, immediately
		 * reset the count of bytes sent. */
		req->rq_bytes_sent += sent;
		req->rq_xmit_bytes_sent += sent;
		if (likely(req->rq_bytes_sent >= req->rq_slen)) {
			req->rq_bytes_sent = 0;
			return 0;
		}

		if (status < 0)
			break;
		if (sent == 0) {
			status = -EAGAIN;
			break;
		}
	}
	if (status == -EAGAIN && sk_stream_is_writeable(transport->inet))
		status = -ENOBUFS;

	switch (status) {
	case -ENOTSOCK:
		status = -ENOTCONN;
		/* Should we call xs_close() here? */
		break;
	case -EAGAIN:
		status = xs_nospace(task);
		break;
	case -ECONNRESET:
	case -ECONNREFUSED:
	case -ENOTCONN:
	case -EADDRINUSE:
	case -ENOBUFS:
	case -EPIPE:
		break;
	default:
		dprintk("RPC:       sendmsg returned unrecognized error %d\n",
			-status);
	}

	return status;
}

/**
 * xs_tcp_release_xprt - clean up after a tcp transmission
 * @xprt: transport
 * @task: rpc task
 *
 * This cleans up if an error causes us to abort the transmission of a request.
 * In this case, the socket may need to be reset in order to avoid confusing
 * the server.
 */
static void xs_tcp_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req;

	if (task != xprt->snd_task)
		return;
	if (task == NULL)
		goto out_release;
	req = task->tk_rqstp;
	if (req == NULL)
		goto out_release;
	if (req->rq_bytes_sent == 0)
		goto out_release;
	if (req->rq_bytes_sent == req->rq_snd_buf.len)
		goto out_release;
	set_bit(XPRT_CLOSE_WAIT, &xprt->state);
out_release:
	xprt_release_xprt(xprt, task);
}

static void xs_save_old_callbacks(struct sock_xprt *transport, struct sock *sk)
{
	transport->old_data_ready = sk->sk_data_ready;
	transport->old_state_change = sk->sk_state_change;
	transport->old_write_space = sk->sk_write_space;
	transport->old_error_report = sk->sk_error_report;
}

static void xs_restore_old_callbacks(struct sock_xprt *transport, struct sock *sk)
{
	sk->sk_data_ready = transport->old_data_ready;
	sk->sk_state_change = transport->old_state_change;
	sk->sk_write_space = transport->old_write_space;
	sk->sk_error_report = transport->old_error_report;
}

static void xs_sock_reset_connection_flags(struct rpc_xprt *xprt)
{
	smp_mb__before_atomic();
	clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
	clear_bit(XPRT_CLOSING, &xprt->state);
	smp_mb__after_atomic();
}

static void xs_sock_mark_closed(struct rpc_xprt *xprt)
{
	xs_sock_reset_connection_flags(xprt);
	/* Mark transport as closed and wake up all pending tasks */
	xprt_disconnect_done(xprt);
}
/**
 * xs_error_report - callback to handle TCP socket state errors
 * @sk: socket
 *
 * Note: we don't call sock_error() since there may be a rpc_task
 * using the socket, and so we don't want to clear sk->sk_err.
 */
static void xs_error_report(struct sock *sk)
{
	struct rpc_xprt *xprt;
	int err;

	read_lock_bh(&sk->sk_callback_lock);
	if (!(xprt = xprt_from_sock(sk)))
		goto out;

	err = -sk->sk_err;
	if (err == 0)
		goto out;
	/* Is this a reset event? */
	if (sk->sk_state == TCP_CLOSE)
		xs_sock_mark_closed(xprt);
	dprintk("RPC:       xs_error_report client %p, error=%d...\n",
			xprt, -err);
	trace_rpc_socket_error(xprt, sk->sk_socket, err);
	xprt_wake_pending_tasks(xprt, err);
out:
	read_unlock_bh(&sk->sk_callback_lock);
}

static void xs_reset_transport(struct sock_xprt *transport)
{
	struct socket *sock = transport->sock;
	struct sock *sk = transport->inet;
	struct rpc_xprt *xprt = &transport->xprt;

	if (sk == NULL)
		return;

	if (atomic_read(&transport->xprt.swapper))
		sk_clear_memalloc(sk);

	kernel_sock_shutdown(sock, SHUT_RDWR);

	mutex_lock(&transport->recv_mutex);
	write_lock_bh(&sk->sk_callback_lock);
	transport->inet = NULL;
	transport->sock = NULL;

	sk->sk_user_data = NULL;

	xs_restore_old_callbacks(transport, sk);
	xprt_clear_connected(xprt);
	write_unlock_bh(&sk->sk_callback_lock);
	xs_sock_reset_connection_flags(xprt);
	mutex_unlock(&transport->recv_mutex);

	trace_rpc_socket_close(xprt, sock);
	sock_release(sock);
}

/**
 * xs_close - close a socket
 * @xprt: transport
 *
 * This is used when all requests are complete; i.e., no DRC state remains
 * on the server that we want to save.
 *
 * The caller _must_ be holding XPRT_LOCKED in order to avoid issues with
 * xs_reset_transport() zeroing the socket from underneath a writer.
 */
static void xs_close(struct rpc_xprt *xprt)
{
	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);

	dprintk("RPC:       xs_close xprt %p\n", xprt);

	xs_reset_transport(transport);
	xprt->reestablish_timeout = 0;

	xprt_disconnect_done(xprt);
}

static void xs_inject_disconnect(struct rpc_xprt *xprt)
{
	dprintk("RPC:       injecting transport disconnect on xprt=%p\n",
		xprt);
	xprt_disconnect_done(xprt);
}

static void xs_xprt_free(struct rpc_xprt *xprt)
{
	xs_free_peer_addresses(xprt);
	xprt_free(xprt);
}

/**
 * xs_destroy - prepare to shutdown a transport
 * @xprt: doomed transport
 *
 */
static void xs_destroy(struct rpc_xprt *xprt)
{
	struct sock_xprt *transport = container_of(xprt,
			struct sock_xprt, xprt);
	dprintk("RPC:       xs_destroy xprt %p\n", xprt);

	cancel_delayed_work_sync(&transport->connect_worker);
	xs_close(xprt);
	cancel_work_sync(&transport->recv_worker);
	xs_xprt_free(xprt);
	module_put(THIS_MODULE);
}

static int xs_local_copy_to_xdr(struct xdr_buf *xdr, struct sk_buff *skb)
{
	struct xdr_skb_reader desc = {
		.skb		= skb,
		.offset		= sizeof(rpc_fraghdr),
		.count		= skb->len - sizeof(rpc_fraghdr),
	};

	if (xdr_partial_copy_from_skb(xdr, 0, &desc, xdr_skb_read_bits) < 0)
		return -1;
	if (desc.count)
		return -1;
	return 0;
}
/**
 * xs_local_data_read_skb
 * @xprt: transport
 * @sk: socket
 * @skb: skbuff
 *
 * Currently this assumes we can read the whole reply in a single gulp.
 */
static void xs_local_data_read_skb(struct rpc_xprt *xprt,
		struct sock *sk,
		struct sk_buff *skb)
{
	struct rpc_task *task;
	struct rpc_rqst *rovr;
	int repsize, copied;
	u32 _xid;
	__be32 *xp;

	repsize = skb->len - sizeof(rpc_fraghdr);
	if (repsize < 4) {
		dprintk("RPC:       impossible RPC reply size %d\n", repsize);
		return;
	}

	/* Copy the XID from the skb... */
	xp = skb_header_pointer(skb, sizeof(rpc_fraghdr), sizeof(_xid), &_xid);
	if (xp == NULL)
		return;

	/* Look up and lock the request corresponding to the given XID */
	spin_lock_bh(&xprt->transport_lock);
	rovr = xprt_lookup_rqst(xprt, *xp);
	if (!rovr)
		goto out_unlock;
	task = rovr->rq_task;

	copied = rovr->rq_private_buf.buflen;
	if (copied > repsize)
		copied = repsize;

	if (xs_local_copy_to_xdr(&rovr->rq_private_buf, skb)) {
		dprintk("RPC:       sk_buff copy failed\n");
		goto out_unlock;
	}

	xprt_complete_rqst(task, copied);

out_unlock:
	spin_unlock_bh(&xprt->transport_lock);
}

static void xs_local_data_receive(struct sock_xprt *transport)
{
	struct sk_buff *skb;
	struct sock *sk;
	int err;

	mutex_lock(&transport->recv_mutex);
	sk = transport->inet;
	if (sk == NULL)
		goto out;
	for (;;) {
		skb = skb_recv_datagram(sk, 0, 1, &err);
		if (skb == NULL)
			break;
		xs_local_data_read_skb(&transport->xprt, sk, skb);
		skb_free_datagram(sk, skb);
	}
out:
	mutex_unlock(&transport->recv_mutex);
}

static void xs_local_data_receive_workfn(struct work_struct *work)
{
	struct sock_xprt *transport =
		container_of(work, struct sock_xprt, recv_worker);
	xs_local_data_receive(transport);
}
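/*
 * Illustrative note: the AF_LOCAL path above and the UDP path below
 * follow the same shape.  sk_data_ready only queues recv_worker; the
 * actual reads run in rpciod worker context, where each datagram is
 * drained in a loop, matched to its rpc_rqst by XID under
 * transport_lock, copied into the request's private buffer, and
 * completed with xprt_complete_rqst().
 */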
/**
 * xs_udp_data_read_skb - receive callback for UDP sockets
 * @xprt: transport
 * @sk: socket
 * @skb: skbuff
 *
 */
static void xs_udp_data_read_skb(struct rpc_xprt *xprt,
		struct sock *sk,
		struct sk_buff *skb)
{
	struct rpc_task *task;
	struct rpc_rqst *rovr;
	int repsize, copied;
	u32 _xid;
	__be32 *xp;

	repsize = skb->len;
	if (repsize < 4) {
		dprintk("RPC:       impossible RPC reply size %d!\n", repsize);
		return;
	}

	/* Copy the XID from the skb... */
	xp = skb_header_pointer(skb, 0, sizeof(_xid), &_xid);
	if (xp == NULL)
		return;

	/* Look up and lock the request corresponding to the given XID */
	spin_lock_bh(&xprt->transport_lock);
	rovr = xprt_lookup_rqst(xprt, *xp);
	if (!rovr)
		goto out_unlock;
	task = rovr->rq_task;

	if ((copied = rovr->rq_private_buf.buflen) > repsize)
		copied = repsize;

	/* Suck it into the iovec, verify checksum if not done by hw. */
	if (csum_partial_copy_to_xdr(&rovr->rq_private_buf, skb)) {
		__UDPX_INC_STATS(sk, UDP_MIB_INERRORS);
		goto out_unlock;
	}

	__UDPX_INC_STATS(sk, UDP_MIB_INDATAGRAMS);

	xprt_adjust_cwnd(xprt, task, copied);
	xprt_complete_rqst(task, copied);

out_unlock:
	spin_unlock_bh(&xprt->transport_lock);
}

static void xs_udp_data_receive(struct sock_xprt *transport)
{
	struct sk_buff *skb;
	struct sock *sk;
	int err;

	mutex_lock(&transport->recv_mutex);
	sk = transport->inet;
	if (sk == NULL)
		goto out;
	for (;;) {
		skb = skb_recv_datagram(sk, 0, 1, &err);
		if (skb == NULL)
			break;
		xs_udp_data_read_skb(&transport->xprt, sk, skb);
		skb_free_datagram(sk, skb);
	}
out:
	mutex_unlock(&transport->recv_mutex);
}

static void xs_udp_data_receive_workfn(struct work_struct *work)
{
	struct sock_xprt *transport =
		container_of(work, struct sock_xprt, recv_worker);
	xs_udp_data_receive(transport);
}

/**
 * xs_data_ready - "data ready" callback for UDP sockets
 * @sk: socket with data to read
 *
 */
static void xs_data_ready(struct sock *sk)
{
	struct rpc_xprt *xprt;

	read_lock_bh(&sk->sk_callback_lock);
	dprintk("RPC:       xs_data_ready...\n");
	xprt = xprt_from_sock(sk);
	if (xprt != NULL) {
		struct sock_xprt *transport = container_of(xprt,
				struct sock_xprt, xprt);
		queue_work(rpciod_workqueue, &transport->recv_worker);
	}
	read_unlock_bh(&sk->sk_callback_lock);
}

/*
 * Helper function to force a TCP close if the server is sending
 * junk and/or it has put us in CLOSE_WAIT
 */
static void xs_tcp_force_close(struct rpc_xprt *xprt)
{
	xprt_force_disconnect(xprt);
}

static inline void xs_tcp_read_fraghdr(struct rpc_xprt *xprt, struct xdr_skb_reader *desc)
{
	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
	size_t len, used;
	char *p;

	p = ((char *) &transport->tcp_fraghdr) + transport->tcp_offset;
	len = sizeof(transport->tcp_fraghdr) - transport->tcp_offset;
	used = xdr_skb_read_bits(desc, p, len);
	transport->tcp_offset += used;
	if (used != len)
		return;

	transport->tcp_reclen = ntohl(transport->tcp_fraghdr);
	if (transport->tcp_reclen & RPC_LAST_STREAM_FRAGMENT)
		transport->tcp_flags |= TCP_RCV_LAST_FRAG;
	else
		transport->tcp_flags &= ~TCP_RCV_LAST_FRAG;
	transport->tcp_reclen &= RPC_FRAGMENT_SIZE_MASK;

	transport->tcp_flags &= ~TCP_RCV_COPY_FRAGHDR;
	transport->tcp_offset = 0;

	/* Sanity check of the record length */
	if (unlikely(transport->tcp_reclen < 8)) {
		dprintk("RPC:       invalid TCP record fragment length\n");
		xs_tcp_force_close(xprt);
		return;
	}
	dprintk("RPC:       reading TCP record fragment of length %d\n",
			transport->tcp_reclen);
}

static void xs_tcp_check_fraghdr(struct sock_xprt *transport)
{
	if (transport->tcp_offset == transport->tcp_reclen) {
		transport->tcp_flags |= TCP_RCV_COPY_FRAGHDR;
		transport->tcp_offset = 0;
		if (transport->tcp_flags & TCP_RCV_LAST_FRAG) {
			transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
			transport->tcp_flags |= TCP_RCV_COPY_XID;
			transport->tcp_copied = 0;
		}
	}
}
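/*
 * Worked example (illustrative): a fragment header of 0x80000064 parses
 * as follows in xs_tcp_read_fraghdr() above --
 *
 *	0x80000064 & RPC_LAST_STREAM_FRAGMENT	-> last fragment, so
 *						   TCP_RCV_LAST_FRAG is set
 *	0x80000064 & RPC_FRAGMENT_SIZE_MASK	-> tcp_reclen == 100 bytes
 *
 * Anything below 8 bytes (XID plus call direction) fails the sanity
 * check and forces a disconnect.
 */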
static inline void xs_tcp_read_xid(struct sock_xprt *transport, struct xdr_skb_reader *desc)
{
	size_t len, used;
	char *p;

	len = sizeof(transport->tcp_xid) - transport->tcp_offset;
	dprintk("RPC:       reading XID (%zu bytes)\n", len);
	p = ((char *) &transport->tcp_xid) + transport->tcp_offset;
	used = xdr_skb_read_bits(desc, p, len);
	transport->tcp_offset += used;
	if (used != len)
		return;
	transport->tcp_flags &= ~TCP_RCV_COPY_XID;
	transport->tcp_flags |= TCP_RCV_READ_CALLDIR;
	transport->tcp_copied = 4;
	dprintk("RPC:       reading %s XID %08x\n",
			(transport->tcp_flags & TCP_RPC_REPLY) ? "reply for"
							      : "request with",
			ntohl(transport->tcp_xid));
	xs_tcp_check_fraghdr(transport);
}

static inline void xs_tcp_read_calldir(struct sock_xprt *transport,
				       struct xdr_skb_reader *desc)
{
	size_t len, used;
	u32 offset;
	char *p;

	/*
	 * We want transport->tcp_offset to be 8 at the end of this routine
	 * (4 bytes for the xid and 4 bytes for the call/reply flag).
	 * When this function is called for the first time,
	 * transport->tcp_offset is 4 (after having already read the xid).
	 */
	offset = transport->tcp_offset - sizeof(transport->tcp_xid);
	len = sizeof(transport->tcp_calldir) - offset;
	dprintk("RPC:       reading CALL/REPLY flag (%zu bytes)\n", len);
	p = ((char *) &transport->tcp_calldir) + offset;
	used = xdr_skb_read_bits(desc, p, len);
	transport->tcp_offset += used;
	if (used != len)
		return;
	transport->tcp_flags &= ~TCP_RCV_READ_CALLDIR;
	/*
	 * We don't yet have the XDR buffer, so we will write the calldir
	 * out after we get the buffer from the 'struct rpc_rqst'
	 */
	switch (ntohl(transport->tcp_calldir)) {
	case RPC_REPLY:
		transport->tcp_flags |= TCP_RCV_COPY_CALLDIR;
		transport->tcp_flags |= TCP_RCV_COPY_DATA;
		transport->tcp_flags |= TCP_RPC_REPLY;
		break;
	case RPC_CALL:
		transport->tcp_flags |= TCP_RCV_COPY_CALLDIR;
		transport->tcp_flags |= TCP_RCV_COPY_DATA;
		transport->tcp_flags &= ~TCP_RPC_REPLY;
		break;
	default:
		dprintk("RPC:       invalid request message type\n");
		xs_tcp_force_close(&transport->xprt);
	}
	xs_tcp_check_fraghdr(transport);
}
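/*
 * Illustrative layout note: the first 8 bytes of every record fragment
 * are consumed by the two helpers above --
 *
 *	bytes 0-3	XID (matches the request in the slot table)
 *	bytes 4-7	message direction: RPC_REPLY for an ordinary
 *			reply, RPC_CALL for a backchannel callback
 *
 * so tcp_offset is 4 after xs_tcp_read_xid() and 8 after
 * xs_tcp_read_calldir().
 */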
static inline void xs_tcp_read_common(struct rpc_xprt *xprt,
				     struct xdr_skb_reader *desc,
				     struct rpc_rqst *req)
{
	struct sock_xprt *transport =
				container_of(xprt, struct sock_xprt, xprt);
	struct xdr_buf *rcvbuf;
	size_t len;
	ssize_t r;

	rcvbuf = &req->rq_private_buf;

	if (transport->tcp_flags & TCP_RCV_COPY_CALLDIR) {
		/*
		 * Save the RPC direction in the XDR buffer
		 */
		memcpy(rcvbuf->head[0].iov_base + transport->tcp_copied,
			&transport->tcp_calldir,
			sizeof(transport->tcp_calldir));
		transport->tcp_copied += sizeof(transport->tcp_calldir);
		transport->tcp_flags &= ~TCP_RCV_COPY_CALLDIR;
	}

	len = desc->count;
	if (len > transport->tcp_reclen - transport->tcp_offset) {
		struct xdr_skb_reader my_desc;

		len = transport->tcp_reclen - transport->tcp_offset;
		memcpy(&my_desc, desc, sizeof(my_desc));
		my_desc.count = len;
		r = xdr_partial_copy_from_skb(rcvbuf, transport->tcp_copied,
					  &my_desc, xdr_skb_read_bits);
		desc->count -= r;
		desc->offset += r;
	} else
		r = xdr_partial_copy_from_skb(rcvbuf, transport->tcp_copied,
					  desc, xdr_skb_read_bits);

	if (r > 0) {
		transport->tcp_copied += r;
		transport->tcp_offset += r;
	}
	if (r != len) {
		/* Error when copying to the receive buffer,
		 * usually because we weren't able to allocate
		 * additional buffer pages. All we can do now
		 * is turn off TCP_RCV_COPY_DATA, so the request
		 * will not receive any additional updates,
		 * and time out.
		 * Any remaining data from this record will
		 * be discarded.
		 */
		transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
		dprintk("RPC:       XID %08x truncated request\n",
				ntohl(transport->tcp_xid));
		dprintk("RPC:       xprt = %p, tcp_copied = %lu, "
				"tcp_offset = %u, tcp_reclen = %u\n",
				xprt, transport->tcp_copied,
				transport->tcp_offset, transport->tcp_reclen);
		return;
	}

	dprintk("RPC:       XID %08x read %zd bytes\n",
			ntohl(transport->tcp_xid), r);
	dprintk("RPC:       xprt = %p, tcp_copied = %lu, tcp_offset = %u, "
			"tcp_reclen = %u\n", xprt, transport->tcp_copied,
			transport->tcp_offset, transport->tcp_reclen);

	if (transport->tcp_copied == req->rq_private_buf.buflen)
		transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
	else if (transport->tcp_offset == transport->tcp_reclen) {
		if (transport->tcp_flags & TCP_RCV_LAST_FRAG)
			transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
	}
}

/*
 * Finds the request corresponding to the RPC xid and invokes the common
 * tcp read code to read the data.
 */
static inline int xs_tcp_read_reply(struct rpc_xprt *xprt,
				    struct xdr_skb_reader *desc)
{
	struct sock_xprt *transport =
				container_of(xprt, struct sock_xprt, xprt);
	struct rpc_rqst *req;

	dprintk("RPC:       read reply XID %08x\n", ntohl(transport->tcp_xid));

	/* Find and lock the request corresponding to this xid */
	spin_lock_bh(&xprt->transport_lock);
	req = xprt_lookup_rqst(xprt, transport->tcp_xid);
	if (!req) {
		dprintk("RPC:       XID %08x request not found!\n",
				ntohl(transport->tcp_xid));
		spin_unlock_bh(&xprt->transport_lock);
		return -1;
	}

	xs_tcp_read_common(xprt, desc, req);

	if (!(transport->tcp_flags & TCP_RCV_COPY_DATA))
		xprt_complete_rqst(req->rq_task, transport->tcp_copied);

	spin_unlock_bh(&xprt->transport_lock);
	return 0;
}
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
/*
 * Obtains an rpc_rqst previously allocated and invokes the common
 * tcp read code to read the data.  The result is placed in the callback
 * queue.
 * If we're unable to obtain the rpc_rqst we schedule the closing of the
 * connection and return -1.
 */
static int xs_tcp_read_callback(struct rpc_xprt *xprt,
				struct xdr_skb_reader *desc)
{
	struct sock_xprt *transport =
				container_of(xprt, struct sock_xprt, xprt);
	struct rpc_rqst *req;

	/* Look up and lock the request corresponding to the given XID */
	spin_lock_bh(&xprt->transport_lock);
	req = xprt_lookup_bc_request(xprt, transport->tcp_xid);
	if (req == NULL) {
		spin_unlock_bh(&xprt->transport_lock);
		printk(KERN_WARNING "Callback slot table overflowed\n");
		xprt_force_disconnect(xprt);
		return -1;
	}

	dprintk("RPC:       read callback XID %08x\n", ntohl(req->rq_xid));
	xs_tcp_read_common(xprt, desc, req);

	if (!(transport->tcp_flags & TCP_RCV_COPY_DATA))
		xprt_complete_bc_request(req, transport->tcp_copied);
	spin_unlock_bh(&xprt->transport_lock);

	return 0;
}

static inline int _xs_tcp_read_data(struct rpc_xprt *xprt,
					struct xdr_skb_reader *desc)
{
	struct sock_xprt *transport =
				container_of(xprt, struct sock_xprt, xprt);

	return (transport->tcp_flags & TCP_RPC_REPLY) ?
		xs_tcp_read_reply(xprt, desc) :
		xs_tcp_read_callback(xprt, desc);
}

static int xs_tcp_bc_up(struct svc_serv *serv, struct net *net)
{
	int ret;

	ret = svc_create_xprt(serv, "tcp-bc", net, PF_INET, 0,
			      SVC_SOCK_ANONYMOUS);
	if (ret < 0)
		return ret;
	return 0;
}

static size_t xs_tcp_bc_maxpayload(struct rpc_xprt *xprt)
{
	return PAGE_SIZE;
}
#else
static inline int _xs_tcp_read_data(struct rpc_xprt *xprt,
					struct xdr_skb_reader *desc)
{
	return xs_tcp_read_reply(xprt, desc);
}
#endif /* CONFIG_SUNRPC_BACKCHANNEL */

/*
 * Read data off the transport.  This can be either an RPC_CALL or an
 * RPC_REPLY.  Relay the processing to helper functions.
 */
static void xs_tcp_read_data(struct rpc_xprt *xprt,
			     struct xdr_skb_reader *desc)
{
	struct sock_xprt *transport =
				container_of(xprt, struct sock_xprt, xprt);

	if (_xs_tcp_read_data(xprt, desc) == 0)
		xs_tcp_check_fraghdr(transport);
	else {
		/*
		 * The transport_lock protects the request handling.
		 * There's no need to hold it to update the tcp_flags.
		 */
		transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
	}
}

static inline void xs_tcp_read_discard(struct sock_xprt *transport, struct xdr_skb_reader *desc)
{
	size_t len;

	len = transport->tcp_reclen - transport->tcp_offset;
	if (len > desc->count)
		len = desc->count;
	desc->count -= len;
	desc->offset += len;
	transport->tcp_offset += len;
	dprintk("RPC:       discarded %zu bytes\n", len);
	xs_tcp_check_fraghdr(transport);
}
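/*
 * Illustrative summary of the receive state machine driven below: the
 * tcp_flags bits select the next parsing stage for each record --
 *
 *	TCP_RCV_COPY_FRAGHDR	-> xs_tcp_read_fraghdr()
 *	TCP_RCV_COPY_XID	-> xs_tcp_read_xid()
 *	TCP_RCV_READ_CALLDIR	-> xs_tcp_read_calldir()
 *	TCP_RCV_COPY_DATA	-> xs_tcp_read_data()
 *	(none of the above)	-> xs_tcp_read_discard()
 *
 * Each stage clears its own flag and sets the next, so a record can be
 * reassembled across many skbs.
 */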
static int xs_tcp_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb, unsigned int offset, size_t len)
{
	struct rpc_xprt *xprt = rd_desc->arg.data;
	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
	struct xdr_skb_reader desc = {
		.skb	= skb,
		.offset	= offset,
		.count	= len,
	};

	dprintk("RPC:       xs_tcp_data_recv started\n");
	do {
		trace_xs_tcp_data_recv(transport);
		/* Read in a new fragment marker if necessary */
		/* Can we ever really expect to get completely empty fragments? */
		if (transport->tcp_flags & TCP_RCV_COPY_FRAGHDR) {
			xs_tcp_read_fraghdr(xprt, &desc);
			continue;
		}
		/* Read in the xid if necessary */
		if (transport->tcp_flags & TCP_RCV_COPY_XID) {
			xs_tcp_read_xid(transport, &desc);
			continue;
		}
		/* Read in the call/reply flag */
		if (transport->tcp_flags & TCP_RCV_READ_CALLDIR) {
			xs_tcp_read_calldir(transport, &desc);
			continue;
		}
		/* Read in the request data */
		if (transport->tcp_flags & TCP_RCV_COPY_DATA) {
			xs_tcp_read_data(xprt, &desc);
			continue;
		}
		/* Skip over any trailing bytes on short reads */
		xs_tcp_read_discard(transport, &desc);
	} while (desc.count);
	trace_xs_tcp_data_recv(transport);
	dprintk("RPC:       xs_tcp_data_recv done\n");
	return len - desc.count;
}

static void xs_tcp_data_receive(struct sock_xprt *transport)
{
	struct rpc_xprt *xprt = &transport->xprt;
	struct sock *sk;
	read_descriptor_t rd_desc = {
		.count = 2*1024*1024,
		.arg.data = xprt,
	};
	unsigned long total = 0;
	int read = 0;

	mutex_lock(&transport->recv_mutex);
	sk = transport->inet;
	if (sk == NULL)
		goto out;

	/* We use rd_desc to pass struct xprt to xs_tcp_data_recv */
	for (;;) {
		lock_sock(sk);
		read = tcp_read_sock(sk, &rd_desc, xs_tcp_data_recv);
		release_sock(sk);
		if (read <= 0)
			break;
		total += read;
		rd_desc.count = 65536;
	}
out:
	mutex_unlock(&transport->recv_mutex);
	trace_xs_tcp_data_ready(xprt, read, total);
}

static void xs_tcp_data_receive_workfn(struct work_struct *work)
{
	struct sock_xprt *transport =
		container_of(work, struct sock_xprt, recv_worker);
	xs_tcp_data_receive(transport);
}

/**
 * xs_tcp_data_ready - "data ready" callback for TCP sockets
 * @sk: socket with data to read
 *
 */
static void xs_tcp_data_ready(struct sock *sk)
{
	struct sock_xprt *transport;
	struct rpc_xprt *xprt;

	dprintk("RPC:       xs_tcp_data_ready...\n");

	read_lock_bh(&sk->sk_callback_lock);
	if (!(xprt = xprt_from_sock(sk)))
		goto out;
	transport = container_of(xprt, struct sock_xprt, xprt);

	/* Any data means we had a useful conversation, so
	 * we don't need to delay the next reconnect
	 */
	if (xprt->reestablish_timeout)
		xprt->reestablish_timeout = 0;
	queue_work(rpciod_workqueue, &transport->recv_worker);

out:
	read_unlock_bh(&sk->sk_callback_lock);
}
/**
 * xs_tcp_state_change - callback to handle TCP socket state changes
 * @sk: socket whose state has changed
 *
 */
static void xs_tcp_state_change(struct sock *sk)
{
	struct rpc_xprt *xprt;
	struct sock_xprt *transport;

	read_lock_bh(&sk->sk_callback_lock);
	if (!(xprt = xprt_from_sock(sk)))
		goto out;
	dprintk("RPC:       xs_tcp_state_change client %p...\n", xprt);
	dprintk("RPC:       state %x conn %d dead %d zapped %d sk_shutdown %d\n",
			sk->sk_state, xprt_connected(xprt),
			sock_flag(sk, SOCK_DEAD),
			sock_flag(sk, SOCK_ZAPPED),
			sk->sk_shutdown);

	transport = container_of(xprt, struct sock_xprt, xprt);
	trace_rpc_socket_state_change(xprt, sk->sk_socket);
	switch (sk->sk_state) {
	case TCP_ESTABLISHED:
		spin_lock(&xprt->transport_lock);
		if (!xprt_test_and_set_connected(xprt)) {

			/* Reset TCP record info */
			transport->tcp_offset = 0;
			transport->tcp_reclen = 0;
			transport->tcp_copied = 0;
			transport->tcp_flags =
				TCP_RCV_COPY_FRAGHDR | TCP_RCV_COPY_XID;
			xprt->connect_cookie++;
			clear_bit(XPRT_SOCK_CONNECTING, &transport->sock_state);
			xprt_clear_connecting(xprt);

			xprt_wake_pending_tasks(xprt, -EAGAIN);
		}
		spin_unlock(&xprt->transport_lock);
		break;
	case TCP_FIN_WAIT1:
		/* The client initiated a shutdown of the socket */
		xprt->connect_cookie++;
		xprt->reestablish_timeout = 0;
		set_bit(XPRT_CLOSING, &xprt->state);
		smp_mb__before_atomic();
		clear_bit(XPRT_CONNECTED, &xprt->state);
		clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
		smp_mb__after_atomic();
		break;
	case TCP_CLOSE_WAIT:
		/* The server initiated a shutdown of the socket */
		xprt->connect_cookie++;
		clear_bit(XPRT_CONNECTED, &xprt->state);
		xs_tcp_force_close(xprt);
		/* fall through */
	case TCP_CLOSING:
		/*
		 * If the server closed down the connection, make sure that
		 * we back off before reconnecting
		 */
		if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO)
			xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
		break;
	case TCP_LAST_ACK:
		set_bit(XPRT_CLOSING, &xprt->state);
		smp_mb__before_atomic();
		clear_bit(XPRT_CONNECTED, &xprt->state);
		smp_mb__after_atomic();
		break;
	case TCP_CLOSE:
		if (test_and_clear_bit(XPRT_SOCK_CONNECTING,
					&transport->sock_state))
			xprt_clear_connecting(xprt);
		xs_sock_mark_closed(xprt);
	}
out:
	read_unlock_bh(&sk->sk_callback_lock);
}

static void xs_write_space(struct sock *sk)
{
	struct socket_wq *wq;
	struct rpc_xprt *xprt;

	if (!sk->sk_socket)
		return;
	clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);

	if (unlikely(!(xprt = xprt_from_sock(sk))))
		return;
	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (!wq || test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &wq->flags) == 0)
		goto out;

	xprt_write_space(xprt);
out:
	rcu_read_unlock();
}

/**
 * xs_udp_write_space - callback invoked when socket buffer space
 *                      becomes available
 * @sk: socket whose state has changed
 *
 * Called when more output buffer space is available for this socket.
 * We try not to wake our writers until they can make "significant"
 * progress, otherwise we'll waste resources thrashing kernel_sendmsg
 * with a bunch of small requests.
 */
static void xs_udp_write_space(struct sock *sk)
{
	read_lock_bh(&sk->sk_callback_lock);

	/* from net/core/sock.c:sock_def_write_space */
	if (sock_writeable(sk))
		xs_write_space(sk);

	read_unlock_bh(&sk->sk_callback_lock);
}
/**
 * xs_tcp_write_space - callback invoked when socket buffer space
 *                      becomes available
 * @sk: socket whose state has changed
 *
 * Called when more output buffer space is available for this socket.
 * We try not to wake our writers until they can make "significant"
 * progress, otherwise we'll waste resources thrashing kernel_sendmsg
 * with a bunch of small requests.
 */
static void xs_tcp_write_space(struct sock *sk)
{
	read_lock_bh(&sk->sk_callback_lock);

	/* from net/core/stream.c:sk_stream_write_space */
	if (sk_stream_is_writeable(sk))
		xs_write_space(sk);

	read_unlock_bh(&sk->sk_callback_lock);
}

static void xs_udp_do_set_buffer_size(struct rpc_xprt *xprt)
{
	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
	struct sock *sk = transport->inet;

	if (transport->rcvsize) {
		sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
		sk->sk_rcvbuf = transport->rcvsize * xprt->max_reqs * 2;
	}
	if (transport->sndsize) {
		sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
		sk->sk_sndbuf = transport->sndsize * xprt->max_reqs * 2;
		sk->sk_write_space(sk);
	}
}

/**
 * xs_udp_set_buffer_size - set send and receive limits
 * @xprt: generic transport
 * @sndsize: requested size of send buffer, in bytes
 * @rcvsize: requested size of receive buffer, in bytes
 *
 * Set socket send and receive buffer size limits.
 */
static void xs_udp_set_buffer_size(struct rpc_xprt *xprt, size_t sndsize, size_t rcvsize)
{
	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);

	transport->sndsize = 0;
	if (sndsize)
		transport->sndsize = sndsize + 1024;
	transport->rcvsize = 0;
	if (rcvsize)
		transport->rcvsize = rcvsize + 1024;

	xs_udp_do_set_buffer_size(xprt);
}

/**
 * xs_udp_timer - called when a retransmit timeout occurs on a UDP transport
 * @xprt: transport
 * @task: task that timed out
 *
 * Adjust the congestion window after a retransmit timeout has occurred.
 */
static void xs_udp_timer(struct rpc_xprt *xprt, struct rpc_task *task)
{
	xprt_adjust_cwnd(xprt, task, -ETIMEDOUT);
}

static unsigned short xs_get_random_port(void)
{
	/* +1 keeps the range inclusive of xprt_max_resvport (and non-zero
	 * when the min and max tunables are set to the same value) */
	unsigned short range = xprt_max_resvport - xprt_min_resvport + 1;
	unsigned short rand = (unsigned short) prandom_u32() % range;
	return rand + xprt_min_resvport;
}
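/*
 * Illustrative sketch of source-port selection (see xs_bind() below):
 * with the default reserved-port window (RPC_DEF_MIN_RESVPORT..
 * RPC_DEF_MAX_RESVPORT, i.e. 665..1023 at the time of writing),
 * xs_get_random_port() picks uniformly from the inclusive range.  If
 * that port is busy, xs_next_srcport() walks downward one port at a
 * time until xprt_min_resvport, then wraps to xprt_max_resvport (which
 * increments nloop in xs_bind(), since port > last); a second wrap
 * ends the search with -EADDRINUSE.
 */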
/**
 * xs_sock_set_reuseport - set the socket's port and address reuse options
 * @sock: socket
 *
 * Note that this function has to be called on all sockets that share the
 * same port, and it must be called before binding.
 */
static void xs_sock_set_reuseport(struct socket *sock)
{
	int opt = 1;

	kernel_setsockopt(sock, SOL_SOCKET, SO_REUSEPORT,
			(char *)&opt, sizeof(opt));
}

static unsigned short xs_sock_getport(struct socket *sock)
{
	struct sockaddr_storage buf;
	int buflen;
	unsigned short port = 0;

	if (kernel_getsockname(sock, (struct sockaddr *)&buf, &buflen) < 0)
		goto out;
	switch (buf.ss_family) {
	case AF_INET6:
		port = ntohs(((struct sockaddr_in6 *)&buf)->sin6_port);
		break;
	case AF_INET:
		port = ntohs(((struct sockaddr_in *)&buf)->sin_port);
	}
out:
	return port;
}

/**
 * xs_set_port - reset the port number in the remote endpoint address
 * @xprt: generic transport
 * @port: new port number
 *
 */
static void xs_set_port(struct rpc_xprt *xprt, unsigned short port)
{
	dprintk("RPC:       setting port for xprt %p to %u\n", xprt, port);

	rpc_set_port(xs_addr(xprt), port);
	xs_update_peer_port(xprt);
}

static void xs_set_srcport(struct sock_xprt *transport, struct socket *sock)
{
	if (transport->srcport == 0)
		transport->srcport = xs_sock_getport(sock);
}

static unsigned short xs_get_srcport(struct sock_xprt *transport)
{
	unsigned short port = transport->srcport;

	if (port == 0 && transport->xprt.resvport)
		port = xs_get_random_port();
	return port;
}

static unsigned short xs_next_srcport(struct sock_xprt *transport, unsigned short port)
{
	if (transport->srcport != 0)
		transport->srcport = 0;
	if (!transport->xprt.resvport)
		return 0;
	if (port <= xprt_min_resvport || port > xprt_max_resvport)
		return xprt_max_resvport;
	return --port;
}

static int xs_bind(struct sock_xprt *transport, struct socket *sock)
{
	struct sockaddr_storage myaddr;
	int err, nloop = 0;
	unsigned short port = xs_get_srcport(transport);
	unsigned short last;

	/*
	 * If we are asking for any ephemeral port (i.e. port == 0 &&
	 * transport->xprt.resvport == 0), don't bind.  Let the local
	 * port selection happen implicitly when the socket is used
	 * (for example at connect time).
	 *
	 * This ensures that we can continue to establish TCP
	 * connections even when all local ephemeral ports are already
	 * a part of some TCP connection.  This makes no difference
	 * for UDP sockets, but also doesn't harm them.
	 *
	 * If we're asking for any reserved port (i.e. port == 0 &&
	 * transport->xprt.resvport == 1) xs_get_srcport above will
	 * ensure that port is non-zero and we will bind as needed.
	 */
	if (port == 0)
		return 0;

	memcpy(&myaddr, &transport->srcaddr, transport->xprt.addrlen);
	do {
		rpc_set_port((struct sockaddr *)&myaddr, port);
		err = kernel_bind(sock, (struct sockaddr *)&myaddr,
				transport->xprt.addrlen);
		if (err == 0) {
			transport->srcport = port;
			break;
		}
		last = port;
		port = xs_next_srcport(transport, port);
		if (port > last)
			nloop++;
	} while (err == -EADDRINUSE && nloop != 2);

	if (myaddr.ss_family == AF_INET)
		dprintk("RPC:       %s %pI4:%u: %s (%d)\n", __func__,
				&((struct sockaddr_in *)&myaddr)->sin_addr,
				port, err ? "failed" : "ok", err);
	else
		dprintk("RPC:       %s %pI6:%u: %s (%d)\n", __func__,
				&((struct sockaddr_in6 *)&myaddr)->sin6_addr,
				port, err ? "failed" : "ok", err);
	return err;
}

/*
 * We don't support autobind on AF_LOCAL sockets
 */
static void xs_local_rpcbind(struct rpc_task *task)
{
	xprt_set_bound(task->tk_xprt);
}

static void xs_local_set_port(struct rpc_xprt *xprt, unsigned short port)
{
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key xs_key[2];
static struct lock_class_key xs_slock_key[2];

static inline void xs_reclassify_socketu(struct socket *sock)
{
	struct sock *sk = sock->sk;

	sock_lock_init_class_and_name(sk, "slock-AF_LOCAL-RPC",
		&xs_slock_key[1], "sk_lock-AF_LOCAL-RPC", &xs_key[1]);
}

static inline void xs_reclassify_socket4(struct socket *sock)
{
	struct sock *sk = sock->sk;

	sock_lock_init_class_and_name(sk, "slock-AF_INET-RPC",
		&xs_slock_key[0], "sk_lock-AF_INET-RPC", &xs_key[0]);
}

static inline void xs_reclassify_socket6(struct socket *sock)
{
	struct sock *sk = sock->sk;

	sock_lock_init_class_and_name(sk, "slock-AF_INET6-RPC",
		&xs_slock_key[1], "sk_lock-AF_INET6-RPC", &xs_key[1]);
}

static inline void xs_reclassify_socket(int family, struct socket *sock)
{
	if (WARN_ON_ONCE(!sock_allow_reclassification(sock->sk)))
		return;

	switch (family) {
	case AF_LOCAL:
		xs_reclassify_socketu(sock);
		break;
	case AF_INET:
		xs_reclassify_socket4(sock);
		break;
	case AF_INET6:
		xs_reclassify_socket6(sock);
		break;
	}
}
#else
static inline void xs_reclassify_socket(int family, struct socket *sock)
{
}
#endif

static void xs_dummy_setup_socket(struct work_struct *work)
{
}

static struct socket *xs_create_sock(struct rpc_xprt *xprt,
		struct sock_xprt *transport, int family, int type,
		int protocol, bool reuseport)
{
	struct socket *sock;
	int err;

	err = __sock_create(xprt->xprt_net, family, type, protocol, &sock, 1);
	if (err < 0) {
		dprintk("RPC:       can't create %d transport socket (%d).\n",
				protocol, -err);
		goto out;
	}
	xs_reclassify_socket(family, sock);

	if (reuseport)
		xs_sock_set_reuseport(sock);

	err = xs_bind(transport, sock);
	if (err) {
		sock_release(sock);
		goto out;
	}

	return sock;
out:
	return ERR_PTR(err);
}
*/ 1972 xprt->stat.connect_count++; 1973 xprt->stat.connect_start = jiffies; 1974 return kernel_connect(sock, xs_addr(xprt), xprt->addrlen, 0); 1975 } 1976 1977 /** 1978 * xs_local_setup_socket - create AF_LOCAL socket, connect to a local endpoint 1979 * @transport: socket transport to connect 1980 */ 1981 static int xs_local_setup_socket(struct sock_xprt *transport) 1982 { 1983 struct rpc_xprt *xprt = &transport->xprt; 1984 struct socket *sock; 1985 int status = -EIO; 1986 1987 status = __sock_create(xprt->xprt_net, AF_LOCAL, 1988 SOCK_STREAM, 0, &sock, 1); 1989 if (status < 0) { 1990 dprintk("RPC: can't create AF_LOCAL " 1991 "transport socket (%d).\n", -status); 1992 goto out; 1993 } 1994 xs_reclassify_socket(AF_LOCAL, sock); 1995 1996 dprintk("RPC: worker connecting xprt %p via AF_LOCAL to %s\n", 1997 xprt, xprt->address_strings[RPC_DISPLAY_ADDR]); 1998 1999 status = xs_local_finish_connecting(xprt, sock); 2000 trace_rpc_socket_connect(xprt, sock, status); 2001 switch (status) { 2002 case 0: 2003 dprintk("RPC: xprt %p connected to %s\n", 2004 xprt, xprt->address_strings[RPC_DISPLAY_ADDR]); 2005 xprt_set_connected(xprt); /* fall through */ 2006 case -ENOBUFS: 2007 break; 2008 case -ENOENT: 2009 dprintk("RPC: xprt %p: socket %s does not exist\n", 2010 xprt, xprt->address_strings[RPC_DISPLAY_ADDR]); 2011 break; 2012 case -ECONNREFUSED: 2013 dprintk("RPC: xprt %p: connection refused for %s\n", 2014 xprt, xprt->address_strings[RPC_DISPLAY_ADDR]); 2015 break; 2016 default: 2017 printk(KERN_ERR "%s: unhandled error (%d) connecting to %s\n", 2018 __func__, -status, 2019 xprt->address_strings[RPC_DISPLAY_ADDR]); 2020 } 2021 2022 out: 2023 xprt_clear_connecting(xprt); 2024 xprt_wake_pending_tasks(xprt, status); 2025 return status; 2026 } 2027 2028 static void xs_local_connect(struct rpc_xprt *xprt, struct rpc_task *task) 2029 { 2030 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); 2031 int ret; 2032 2033 if (RPC_IS_ASYNC(task)) { 2034 /* 2035 * We want the AF_LOCAL connect to be resolved in the 2036 * filesystem namespace of the process making the rpc 2037 * call. Thus we connect synchronously. 2038 * 2039 * If we want to support asynchronous AF_LOCAL calls, 2040 * we'll need to figure out how to pass a namespace to 2041 * connect. 2042 */ 2043 rpc_exit(task, -ENOTCONN); 2044 return; 2045 } 2046 ret = xs_local_setup_socket(transport); 2047 if (ret && !RPC_IS_SOFTCONN(task)) 2048 msleep_interruptible(15000); 2049 } 2050 2051 #if IS_ENABLED(CONFIG_SUNRPC_SWAP) 2052 /* 2053 * Note that this should be called with XPRT_LOCKED held (or when we otherwise 2054 * know that we have exclusive access to the socket), to guard against 2055 * races with xs_reset_transport. 2056 */ 2057 static void xs_set_memalloc(struct rpc_xprt *xprt) 2058 { 2059 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, 2060 xprt); 2061 2062 /* 2063 * If there's no sock, then we have nothing to set. The 2064 * reconnecting process will get it for us. 2065 */ 2066 if (!transport->inet) 2067 return; 2068 if (atomic_read(&xprt->swapper)) 2069 sk_set_memalloc(transport->inet); 2070 } 2071
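/*
 * Illustrative sketch only (compiled out): how the enable_swap and
 * disable_swap hooks below are typically driven through the
 * rpc_xprt_ops table. The helper name is hypothetical and not part of
 * this file's API.
 */
#if 0
static int example_swap_toggle(struct rpc_xprt *xprt)
{
	int err;

	/* Take a swapper reference; tags the socket via sk_set_memalloc() */
	err = xprt->ops->enable_swap(xprt);
	if (err)
		return err;

	/* ... swap I/O may now proceed over this transport ... */

	/* Drop the reference; the tag is cleared when the count hits zero */
	xprt->ops->disable_swap(xprt);
	return 0;
}
#endif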
2072 /** 2073 * xs_enable_swap - Tag this transport as being used for swap. 2074 * @xprt: transport to tag 2075 * 2076 * Take a reference to this transport on behalf of the rpc_clnt, and 2077 * optionally mark it for swapping if it wasn't already. 2078 */ 2079 static int 2080 xs_enable_swap(struct rpc_xprt *xprt) 2081 { 2082 struct sock_xprt *xs = container_of(xprt, struct sock_xprt, xprt); 2083 2084 if (atomic_inc_return(&xprt->swapper) != 1) 2085 return 0; 2086 if (wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_KILLABLE)) 2087 return -ERESTARTSYS; 2088 if (xs->inet) 2089 sk_set_memalloc(xs->inet); 2090 xprt_release_xprt(xprt, NULL); 2091 return 0; 2092 } 2093 2094 /** 2095 * xs_disable_swap - Untag this transport as being used for swap. 2096 * @xprt: transport to untag 2097 * 2098 * Drop a "swapper" reference to this xprt on behalf of the rpc_clnt. If the 2099 * swapper refcount goes to 0, untag the socket as a memalloc socket. 2100 */ 2101 static void 2102 xs_disable_swap(struct rpc_xprt *xprt) 2103 { 2104 struct sock_xprt *xs = container_of(xprt, struct sock_xprt, xprt); 2105 2106 if (!atomic_dec_and_test(&xprt->swapper)) 2107 return; 2108 if (wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_KILLABLE)) 2109 return; 2110 if (xs->inet) 2111 sk_clear_memalloc(xs->inet); 2112 xprt_release_xprt(xprt, NULL); 2113 } 2114 #else 2115 static void xs_set_memalloc(struct rpc_xprt *xprt) 2116 { 2117 } 2118 2119 static int 2120 xs_enable_swap(struct rpc_xprt *xprt) 2121 { 2122 return -EINVAL; 2123 } 2124 2125 static void 2126 xs_disable_swap(struct rpc_xprt *xprt) 2127 { 2128 } 2129 #endif 2130 2131 static void xs_udp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock) 2132 { 2133 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); 2134 2135 if (!transport->inet) { 2136 struct sock *sk = sock->sk; 2137 2138 write_lock_bh(&sk->sk_callback_lock); 2139 2140 xs_save_old_callbacks(transport, sk); 2141 2142 sk->sk_user_data = xprt; 2143 sk->sk_data_ready = xs_data_ready; 2144 sk->sk_write_space = xs_udp_write_space; 2145 sock_set_flag(sk, SOCK_FASYNC); 2146 sk->sk_allocation = GFP_NOIO; 2147 2148 xprt_set_connected(xprt); 2149 2150 /* Reset to new socket */ 2151 transport->sock = sock; 2152 transport->inet = sk; 2153 2154 xs_set_memalloc(xprt); 2155 2156 write_unlock_bh(&sk->sk_callback_lock); 2157 } 2158 xs_udp_do_set_buffer_size(xprt); 2159 } 2160 2161 static void xs_udp_setup_socket(struct work_struct *work) 2162 { 2163 struct sock_xprt *transport = 2164 container_of(work, struct sock_xprt, connect_worker.work); 2165 struct rpc_xprt *xprt = &transport->xprt; 2166 struct socket *sock = transport->sock; 2167 int status = -EIO; 2168 2169 sock = xs_create_sock(xprt, transport, 2170 xs_addr(xprt)->sa_family, SOCK_DGRAM, 2171 IPPROTO_UDP, false); 2172 if (IS_ERR(sock)) 2173 goto out; 2174 2175 dprintk("RPC: worker connecting xprt %p via %s to " 2176 "%s (port %s)\n", xprt, 2177 xprt->address_strings[RPC_DISPLAY_PROTO], 2178 xprt->address_strings[RPC_DISPLAY_ADDR], 2179 xprt->address_strings[RPC_DISPLAY_PORT]); 2180 2181 xs_udp_finish_connecting(xprt, sock); 2182 trace_rpc_socket_connect(xprt, sock, 0); 2183 status = 0; 2184 out: 2185 xprt_unlock_connect(xprt, transport); 2186 xprt_clear_connecting(xprt); 2187 xprt_wake_pending_tasks(xprt, status); 2188 } 2189 2190 /** 2191 * xs_tcp_shutdown - gracefully shut down a TCP socket 2192 * @xprt: transport 2193 * 2194 * Initiates a graceful shutdown of the TCP socket by calling the 2195 * equivalent of shutdown(SHUT_RDWR); 2196 */ 2197 static void xs_tcp_shutdown(struct rpc_xprt *xprt) 2198 { 2199 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); 2200 struct socket *sock = transport->sock; 2201 2202 if (sock == NULL) 2203 return; 2204 if
(xprt_connected(xprt)) { 2205 kernel_sock_shutdown(sock, SHUT_RDWR); 2206 trace_rpc_socket_shutdown(xprt, sock); 2207 } else 2208 xs_reset_transport(transport); 2209 } 2210 2211 static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock) 2212 { 2213 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); 2214 int ret = -ENOTCONN; 2215 2216 if (!transport->inet) { 2217 struct sock *sk = sock->sk; 2218 unsigned int keepidle = xprt->timeout->to_initval / HZ; 2219 unsigned int keepcnt = xprt->timeout->to_retries + 1; 2220 unsigned int opt_on = 1; 2221 unsigned int timeo; 2222 2223 /* TCP Keepalive options */ 2224 kernel_setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE, 2225 (char *)&opt_on, sizeof(opt_on)); 2226 kernel_setsockopt(sock, SOL_TCP, TCP_KEEPIDLE, 2227 (char *)&keepidle, sizeof(keepidle)); 2228 kernel_setsockopt(sock, SOL_TCP, TCP_KEEPINTVL, 2229 (char *)&keepidle, sizeof(keepidle)); 2230 kernel_setsockopt(sock, SOL_TCP, TCP_KEEPCNT, 2231 (char *)&keepcnt, sizeof(keepcnt)); 2232 2233 /* TCP user timeout (see RFC5482) */ 2234 timeo = jiffies_to_msecs(xprt->timeout->to_initval) * 2235 (xprt->timeout->to_retries + 1); 2236 kernel_setsockopt(sock, SOL_TCP, TCP_USER_TIMEOUT, 2237 (char *)&timeo, sizeof(timeo)); 2238 2239 write_lock_bh(&sk->sk_callback_lock); 2240 2241 xs_save_old_callbacks(transport, sk); 2242 2243 sk->sk_user_data = xprt; 2244 sk->sk_data_ready = xs_tcp_data_ready; 2245 sk->sk_state_change = xs_tcp_state_change; 2246 sk->sk_write_space = xs_tcp_write_space; 2247 sock_set_flag(sk, SOCK_FASYNC); 2248 sk->sk_error_report = xs_error_report; 2249 sk->sk_allocation = GFP_NOIO; 2250 2251 /* socket options */ 2252 sock_reset_flag(sk, SOCK_LINGER); 2253 tcp_sk(sk)->nonagle |= TCP_NAGLE_OFF; 2254 2255 xprt_clear_connected(xprt); 2256 2257 /* Reset to new socket */ 2258 transport->sock = sock; 2259 transport->inet = sk; 2260 2261 write_unlock_bh(&sk->sk_callback_lock); 2262 } 2263 2264 if (!xprt_bound(xprt)) 2265 goto out; 2266 2267 xs_set_memalloc(xprt); 2268 2269 /* Tell the socket layer to start connecting... */ 2270 xprt->stat.connect_count++; 2271 xprt->stat.connect_start = jiffies; 2272 set_bit(XPRT_SOCK_CONNECTING, &transport->sock_state); 2273 ret = kernel_connect(sock, xs_addr(xprt), xprt->addrlen, O_NONBLOCK); 2274 switch (ret) { 2275 case 0: 2276 xs_set_srcport(transport, sock); /* fall through */ 2277 case -EINPROGRESS: 2278 /* SYN_SENT! */ 2279 if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO) 2280 xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO; 2281 } 2282 out: 2283 return ret; 2284 } 2285
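/*
 * Worked example of the keepalive and user-timeout values computed in
 * xs_tcp_finish_connecting() above, assuming the defaults from
 * xs_tcp_default_timeout (to_initval = 60 * HZ, to_retries = 2):
 *
 *	keepidle = 60	seconds of idle time before the first probe
 *			(TCP_KEEPINTVL reuses the same value, so probes
 *			are also 60 seconds apart)
 *	keepcnt  = 3	unanswered probes before the link is dropped
 *	timeo    = 60000 ms * 3 = 180000 ms of unacknowledged data
 *			before TCP_USER_TIMEOUT (RFC 5482) aborts the
 *			connection
 */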
2286 /** 2287 * xs_tcp_setup_socket - create a TCP socket and connect to a remote endpoint 2288 * 2289 * Invoked from a work queue. 2290 */ 2291 static void xs_tcp_setup_socket(struct work_struct *work) 2292 { 2293 struct sock_xprt *transport = 2294 container_of(work, struct sock_xprt, connect_worker.work); 2295 struct socket *sock = transport->sock; 2296 struct rpc_xprt *xprt = &transport->xprt; 2297 int status = -EIO; 2298 2299 if (!sock) { 2300 sock = xs_create_sock(xprt, transport, 2301 xs_addr(xprt)->sa_family, SOCK_STREAM, 2302 IPPROTO_TCP, true); 2303 if (IS_ERR(sock)) { 2304 status = PTR_ERR(sock); 2305 goto out; 2306 } 2307 } 2308 2309 dprintk("RPC: worker connecting xprt %p via %s to " 2310 "%s (port %s)\n", xprt, 2311 xprt->address_strings[RPC_DISPLAY_PROTO], 2312 xprt->address_strings[RPC_DISPLAY_ADDR], 2313 xprt->address_strings[RPC_DISPLAY_PORT]); 2314 2315 status = xs_tcp_finish_connecting(xprt, sock); 2316 trace_rpc_socket_connect(xprt, sock, status); 2317 dprintk("RPC: %p connect status %d connected %d sock state %d\n", 2318 xprt, -status, xprt_connected(xprt), 2319 sock->sk->sk_state); 2320 switch (status) { 2321 default: 2322 printk("%s: connect returned unhandled error %d\n", 2323 __func__, status); /* fall through */ 2324 case -EADDRNOTAVAIL: 2325 /* We're probably in TIME_WAIT. Get rid of existing socket, 2326 * and retry 2327 */ 2328 xs_tcp_force_close(xprt); 2329 break; 2330 case 0: 2331 case -EINPROGRESS: 2332 case -EALREADY: 2333 xprt_unlock_connect(xprt, transport); 2334 return; 2335 case -EINVAL: 2336 /* Happens, for instance, if the user specified a link 2337 * local IPv6 address without a scope-id. 2338 */ 2339 case -ECONNREFUSED: 2340 case -ECONNRESET: 2341 case -ENETUNREACH: 2342 case -EADDRINUSE: 2343 case -ENOBUFS: 2344 /* retry with existing socket, after a delay */ 2345 xs_tcp_force_close(xprt); 2346 goto out; 2347 } 2348 status = -EAGAIN; 2349 out: 2350 xprt_unlock_connect(xprt, transport); 2351 xprt_clear_connecting(xprt); 2352 xprt_wake_pending_tasks(xprt, status); 2353 } 2354
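/*
 * The reconnect delay applied by xs_connect() below grows
 * exponentially: starting from XS_TCP_INIT_REEST_TO (3 seconds) and
 * doubling after each attempt, an unreachable server is retried after
 * roughly 3s, 6s, 12s, 24s, 48s, 96s and 192s, after which the delay
 * is capped at XS_TCP_MAX_REEST_TO (5 minutes).
 */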
2355 /** 2356 * xs_connect - connect a socket to a remote endpoint 2357 * @xprt: pointer to transport structure 2358 * @task: address of RPC task that manages state of connect request 2359 * 2360 * TCP: If the remote end dropped the connection, delay reconnecting. 2361 * 2362 * UDP socket connects are synchronous, but we use a work queue anyway 2363 * to guarantee that even unprivileged user processes can set up a 2364 * socket on a privileged port. 2365 * 2366 * If a UDP socket connect fails, the delay behavior here prevents 2367 * retry floods (hard mounts). 2368 */ 2369 static void xs_connect(struct rpc_xprt *xprt, struct rpc_task *task) 2370 { 2371 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); 2372 2373 WARN_ON_ONCE(!xprt_lock_connect(xprt, task, transport)); 2374 2375 if (transport->sock != NULL) { 2376 dprintk("RPC: xs_connect delayed xprt %p for %lu " 2377 "seconds\n", 2378 xprt, xprt->reestablish_timeout / HZ); 2379 2380 /* Start by resetting any existing state */ 2381 xs_reset_transport(transport); 2382 2383 queue_delayed_work(rpciod_workqueue, 2384 &transport->connect_worker, 2385 xprt->reestablish_timeout); 2386 xprt->reestablish_timeout <<= 1; 2387 if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO) 2388 xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO; 2389 if (xprt->reestablish_timeout > XS_TCP_MAX_REEST_TO) 2390 xprt->reestablish_timeout = XS_TCP_MAX_REEST_TO; 2391 } else { 2392 dprintk("RPC: xs_connect scheduled xprt %p\n", xprt); 2393 queue_delayed_work(rpciod_workqueue, 2394 &transport->connect_worker, 0); 2395 } 2396 } 2397 2398 /** 2399 * xs_local_print_stats - display AF_LOCAL socket-specific stats 2400 * @xprt: rpc_xprt struct containing statistics 2401 * @seq: output file 2402 * 2403 */ 2404 static void xs_local_print_stats(struct rpc_xprt *xprt, struct seq_file *seq) 2405 { 2406 long idle_time = 0; 2407 2408 if (xprt_connected(xprt)) 2409 idle_time = (long)(jiffies - xprt->last_used) / HZ; 2410 2411 seq_printf(seq, "\txprt:\tlocal %lu %lu %lu %ld %lu %lu %lu " 2412 "%llu %llu %lu %llu %llu\n", 2413 xprt->stat.bind_count, 2414 xprt->stat.connect_count, 2415 xprt->stat.connect_time, 2416 idle_time, 2417 xprt->stat.sends, 2418 xprt->stat.recvs, 2419 xprt->stat.bad_xids, 2420 xprt->stat.req_u, 2421 xprt->stat.bklog_u, 2422 xprt->stat.max_slots, 2423 xprt->stat.sending_u, 2424 xprt->stat.pending_u); 2425 } 2426 2427 /** 2428 * xs_udp_print_stats - display UDP socket-specific stats 2429 * @xprt: rpc_xprt struct containing statistics 2430 * @seq: output file 2431 * 2432 */ 2433 static void xs_udp_print_stats(struct rpc_xprt *xprt, struct seq_file *seq) 2434 { 2435 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); 2436 2437 seq_printf(seq, "\txprt:\tudp %u %lu %lu %lu %lu %llu %llu " 2438 "%lu %llu %llu\n", 2439 transport->srcport, 2440 xprt->stat.bind_count, 2441 xprt->stat.sends, 2442 xprt->stat.recvs, 2443 xprt->stat.bad_xids, 2444 xprt->stat.req_u, 2445 xprt->stat.bklog_u, 2446 xprt->stat.max_slots, 2447 xprt->stat.sending_u, 2448 xprt->stat.pending_u); 2449 } 2450 2451 /** 2452 * xs_tcp_print_stats - display TCP socket-specific stats 2453 * @xprt: rpc_xprt struct containing statistics 2454 * @seq: output file 2455 * 2456 */ 2457 static void xs_tcp_print_stats(struct rpc_xprt *xprt, struct seq_file *seq) 2458 { 2459 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); 2460 long idle_time = 0; 2461 2462 if (xprt_connected(xprt)) 2463 idle_time = (long)(jiffies - xprt->last_used) / HZ; 2464 2465 seq_printf(seq, "\txprt:\ttcp %u %lu %lu %lu %ld %lu %lu %lu " 2466 "%llu %llu %lu %llu %llu\n", 2467 transport->srcport, 2468 xprt->stat.bind_count, 2469 xprt->stat.connect_count, 2470 xprt->stat.connect_time, 2471 idle_time, 2472 xprt->stat.sends, 2473 xprt->stat.recvs, 2474 xprt->stat.bad_xids, 2475 xprt->stat.req_u, 2476 xprt->stat.bklog_u, 2477 xprt->stat.max_slots, 2478 xprt->stat.sending_u, 2479 xprt->stat.pending_u); 2480 } 2481
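/*
 * The print_stats methods above produce the per-transport "xprt:"
 * line of the RPC client iostats (visible, for example, in
 * /proc/self/mountstats for NFS mounts). For TCP the fields after the
 * source port are, in order: bind_count, connect_count, connect_time,
 * idle_time, sends, recvs, bad_xids, req_u, bklog_u, max_slots,
 * sending_u and pending_u, matching the seq_printf() arguments above.
 */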
2482 /* 2483 * Allocate a bunch of pages for a scratch buffer for the rpc code. The reason 2484 * we allocate pages instead of doing a kmalloc like rpc_malloc is because we want 2485 * to use the server side send routines. 2486 */ 2487 static void *bc_malloc(struct rpc_task *task, size_t size) 2488 { 2489 struct page *page; 2490 struct rpc_buffer *buf; 2491 2492 WARN_ON_ONCE(size > PAGE_SIZE - sizeof(struct rpc_buffer)); 2493 if (size > PAGE_SIZE - sizeof(struct rpc_buffer)) 2494 return NULL; 2495 2496 page = alloc_page(GFP_KERNEL); 2497 if (!page) 2498 return NULL; 2499 2500 buf = page_address(page); 2501 buf->len = PAGE_SIZE; 2502 2503 return buf->data; 2504 } 2505 2506 /* 2507 * Free the space allocated in the bc_alloc routine 2508 */ 2509 static void bc_free(void *buffer) 2510 { 2511 struct rpc_buffer *buf; 2512 2513 if (!buffer) 2514 return; 2515 2516 buf = container_of(buffer, struct rpc_buffer, data); 2517 free_page((unsigned long)buf); 2518 } 2519 2520 /* 2521 * Use the svc_sock to send the callback. Must be called with svsk->sk_mutex 2522 * held. Borrows heavily from svc_tcp_sendto and xs_tcp_send_request. 2523 */ 2524 static int bc_sendto(struct rpc_rqst *req) 2525 { 2526 int len; 2527 struct xdr_buf *xbufp = &req->rq_snd_buf; 2528 struct rpc_xprt *xprt = req->rq_xprt; 2529 struct sock_xprt *transport = 2530 container_of(xprt, struct sock_xprt, xprt); 2531 struct socket *sock = transport->sock; 2532 unsigned long headoff; 2533 unsigned long tailoff; 2534 2535 xs_encode_stream_record_marker(xbufp); 2536 2537 tailoff = (unsigned long)xbufp->tail[0].iov_base & ~PAGE_MASK; 2538 headoff = (unsigned long)xbufp->head[0].iov_base & ~PAGE_MASK; 2539 len = svc_send_common(sock, xbufp, 2540 virt_to_page(xbufp->head[0].iov_base), headoff, 2541 xbufp->tail[0].iov_base, tailoff); 2542 2543 if (len != xbufp->len) { 2544 printk(KERN_NOTICE "Error sending entire callback!\n"); 2545 len = -EAGAIN; 2546 } 2547 2548 return len; 2549 } 2550 2551 /* 2552 * The send routine. Borrows from svc_send 2553 */ 2554 static int bc_send_request(struct rpc_task *task) 2555 { 2556 struct rpc_rqst *req = task->tk_rqstp; 2557 struct svc_xprt *xprt; 2558 int len; 2559 2560 dprintk("sending request with xid: %08x\n", ntohl(req->rq_xid)); 2561 /* 2562 * Get the server socket associated with this callback xprt 2563 */ 2564 xprt = req->rq_xprt->bc_xprt; 2565 2566 /* 2567 * Grab the mutex to serialize data as the connection is shared 2568 * with the fore channel 2569 */ 2570 if (!mutex_trylock(&xprt->xpt_mutex)) { 2571 rpc_sleep_on(&xprt->xpt_bc_pending, task, NULL); 2572 if (!mutex_trylock(&xprt->xpt_mutex)) 2573 return -EAGAIN; 2574 rpc_wake_up_queued_task(&xprt->xpt_bc_pending, task); 2575 } 2576 if (test_bit(XPT_DEAD, &xprt->xpt_flags)) 2577 len = -ENOTCONN; 2578 else 2579 len = bc_sendto(req); 2580 mutex_unlock(&xprt->xpt_mutex); 2581 2582 if (len > 0) 2583 len = 0; 2584 2585 return len; 2586 } 2587 2588 /* 2589 * The close routine. Since this is client initiated, we do nothing 2590 */ 2591 2592 static void bc_close(struct rpc_xprt *xprt) 2593 { 2594 } 2595
2596 /* 2597 * The xprt destroy routine. Even though the connection is client 2598 * initiated, we must still free the xprt and drop our module reference. 2599 */ 2600 2601 static void bc_destroy(struct rpc_xprt *xprt) 2602 { 2603 dprintk("RPC: bc_destroy xprt %p\n", xprt); 2604 2605 xs_xprt_free(xprt); 2606 module_put(THIS_MODULE); 2607 } 2608 2609 static struct rpc_xprt_ops xs_local_ops = { 2610 .reserve_xprt = xprt_reserve_xprt, 2611 .release_xprt = xs_tcp_release_xprt, 2612 .alloc_slot = xprt_alloc_slot, 2613 .rpcbind = xs_local_rpcbind, 2614 .set_port = xs_local_set_port, 2615 .connect = xs_local_connect, 2616 .buf_alloc = rpc_malloc, 2617 .buf_free = rpc_free, 2618 .send_request = xs_local_send_request, 2619 .set_retrans_timeout = xprt_set_retrans_timeout_def, 2620 .close = xs_close, 2621 .destroy = xs_destroy, 2622 .print_stats = xs_local_print_stats, 2623 .enable_swap = xs_enable_swap, 2624 .disable_swap = xs_disable_swap, 2625 }; 2626 2627 static struct rpc_xprt_ops xs_udp_ops = { 2628 .set_buffer_size = xs_udp_set_buffer_size, 2629 .reserve_xprt = xprt_reserve_xprt_cong, 2630 .release_xprt = xprt_release_xprt_cong, 2631 .alloc_slot = xprt_alloc_slot, 2632 .rpcbind = rpcb_getport_async, 2633 .set_port = xs_set_port, 2634 .connect = xs_connect, 2635 .buf_alloc = rpc_malloc, 2636 .buf_free = rpc_free, 2637 .send_request = xs_udp_send_request, 2638 .set_retrans_timeout = xprt_set_retrans_timeout_rtt, 2639 .timer = xs_udp_timer, 2640 .release_request = xprt_release_rqst_cong, 2641 .close = xs_close, 2642 .destroy = xs_destroy, 2643 .print_stats = xs_udp_print_stats, 2644 .enable_swap = xs_enable_swap, 2645 .disable_swap = xs_disable_swap, 2646 .inject_disconnect = xs_inject_disconnect, 2647 }; 2648 2649 static struct rpc_xprt_ops xs_tcp_ops = { 2650 .reserve_xprt = xprt_reserve_xprt, 2651 .release_xprt = xs_tcp_release_xprt, 2652 .alloc_slot = xprt_lock_and_alloc_slot, 2653 .rpcbind = rpcb_getport_async, 2654 .set_port = xs_set_port, 2655 .connect = xs_connect, 2656 .buf_alloc = rpc_malloc, 2657 .buf_free = rpc_free, 2658 .send_request = xs_tcp_send_request, 2659 .set_retrans_timeout = xprt_set_retrans_timeout_def, 2660 .close = xs_tcp_shutdown, 2661 .destroy = xs_destroy, 2662 .print_stats = xs_tcp_print_stats, 2663 .enable_swap = xs_enable_swap, 2664 .disable_swap = xs_disable_swap, 2665 .inject_disconnect = xs_inject_disconnect, 2666 #ifdef CONFIG_SUNRPC_BACKCHANNEL 2667 .bc_setup = xprt_setup_bc, 2668 .bc_up = xs_tcp_bc_up, 2669 .bc_maxpayload = xs_tcp_bc_maxpayload, 2670 .bc_free_rqst = xprt_free_bc_rqst, 2671 .bc_destroy = xprt_destroy_bc, 2672 #endif 2673 }; 2674 2675 /* 2676 * The rpc_xprt_ops for the server backchannel 2677 */ 2678 2679 static struct rpc_xprt_ops bc_tcp_ops = { 2680 .reserve_xprt = xprt_reserve_xprt, 2681 .release_xprt = xprt_release_xprt, 2682 .alloc_slot = xprt_alloc_slot, 2683 .buf_alloc = bc_malloc, 2684 .buf_free = bc_free, 2685 .send_request = bc_send_request, 2686 .set_retrans_timeout = xprt_set_retrans_timeout_def, 2687 .close = bc_close, 2688 .destroy = bc_destroy, 2689 .print_stats = xs_tcp_print_stats, 2690 .enable_swap = xs_enable_swap, 2691 .disable_swap = xs_disable_swap, 2692 .inject_disconnect = xs_inject_disconnect, 2693 }; 2694 2695 static int xs_init_anyaddr(const int family, struct sockaddr *sap) 2696 { 2697 static const struct sockaddr_in sin = { 2698 .sin_family = AF_INET, 2699 .sin_addr.s_addr = htonl(INADDR_ANY), 2700 }; 2701 static const struct sockaddr_in6 sin6 = { 2702 .sin6_family = AF_INET6, 2703 .sin6_addr = IN6ADDR_ANY_INIT, 2704 }; 2705 2706 switch (family) { 2707 case AF_LOCAL: 2708 break; 2709 case
AF_INET: 2710 memcpy(sap, &sin, sizeof(sin)); 2711 break; 2712 case AF_INET6: 2713 memcpy(sap, &sin6, sizeof(sin6)); 2714 break; 2715 default: 2716 dprintk("RPC: %s: Bad address family\n", __func__); 2717 return -EAFNOSUPPORT; 2718 } 2719 return 0; 2720 } 2721 2722 static struct rpc_xprt *xs_setup_xprt(struct xprt_create *args, 2723 unsigned int slot_table_size, 2724 unsigned int max_slot_table_size) 2725 { 2726 struct rpc_xprt *xprt; 2727 struct sock_xprt *new; 2728 2729 if (args->addrlen > sizeof(xprt->addr)) { 2730 dprintk("RPC: xs_setup_xprt: address too large\n"); 2731 return ERR_PTR(-EBADF); 2732 } 2733 2734 xprt = xprt_alloc(args->net, sizeof(*new), slot_table_size, 2735 max_slot_table_size); 2736 if (xprt == NULL) { 2737 dprintk("RPC: xs_setup_xprt: couldn't allocate " 2738 "rpc_xprt\n"); 2739 return ERR_PTR(-ENOMEM); 2740 } 2741 2742 new = container_of(xprt, struct sock_xprt, xprt); 2743 mutex_init(&new->recv_mutex); 2744 memcpy(&xprt->addr, args->dstaddr, args->addrlen); 2745 xprt->addrlen = args->addrlen; 2746 if (args->srcaddr) 2747 memcpy(&new->srcaddr, args->srcaddr, args->addrlen); 2748 else { 2749 int err; 2750 err = xs_init_anyaddr(args->dstaddr->sa_family, 2751 (struct sockaddr *)&new->srcaddr); 2752 if (err != 0) { 2753 xprt_free(xprt); 2754 return ERR_PTR(err); 2755 } 2756 } 2757 2758 return xprt; 2759 } 2760 2761 static const struct rpc_timeout xs_local_default_timeout = { 2762 .to_initval = 10 * HZ, 2763 .to_maxval = 10 * HZ, 2764 .to_retries = 2, 2765 }; 2766 2767 /** 2768 * xs_setup_local - Set up transport to use an AF_LOCAL socket 2769 * @args: rpc transport creation arguments 2770 * 2771 * AF_LOCAL is a "tpi_cots_ord" transport, just like TCP 2772 */ 2773 static struct rpc_xprt *xs_setup_local(struct xprt_create *args) 2774 { 2775 struct sockaddr_un *sun = (struct sockaddr_un *)args->dstaddr; 2776 struct sock_xprt *transport; 2777 struct rpc_xprt *xprt; 2778 struct rpc_xprt *ret; 2779 2780 xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries, 2781 xprt_max_tcp_slot_table_entries); 2782 if (IS_ERR(xprt)) 2783 return xprt; 2784 transport = container_of(xprt, struct sock_xprt, xprt); 2785 2786 xprt->prot = 0; 2787 xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32); 2788 xprt->max_payload = RPC_MAX_FRAGMENT_SIZE; 2789 2790 xprt->bind_timeout = XS_BIND_TO; 2791 xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO; 2792 xprt->idle_timeout = XS_IDLE_DISC_TO; 2793 2794 xprt->ops = &xs_local_ops; 2795 xprt->timeout = &xs_local_default_timeout; 2796 2797 INIT_WORK(&transport->recv_worker, xs_local_data_receive_workfn); 2798 INIT_DELAYED_WORK(&transport->connect_worker, 2799 xs_dummy_setup_socket); 2800 2801 switch (sun->sun_family) { 2802 case AF_LOCAL: 2803 if (sun->sun_path[0] != '/') { 2804 dprintk("RPC: bad AF_LOCAL address: %s\n", 2805 sun->sun_path); 2806 ret = ERR_PTR(-EINVAL); 2807 goto out_err; 2808 } 2809 xprt_set_bound(xprt); 2810 xs_format_peer_addresses(xprt, "local", RPCBIND_NETID_LOCAL); 2811 ret = ERR_PTR(xs_local_setup_socket(transport)); 2812 if (ret) 2813 goto out_err; 2814 break; 2815 default: 2816 ret = ERR_PTR(-EAFNOSUPPORT); 2817 goto out_err; 2818 } 2819 2820 dprintk("RPC: set up xprt to %s via AF_LOCAL\n", 2821 xprt->address_strings[RPC_DISPLAY_ADDR]); 2822 2823 if (try_module_get(THIS_MODULE)) 2824 return xprt; 2825 ret = ERR_PTR(-EINVAL); 2826 out_err: 2827 xs_xprt_free(xprt); 2828 return ret; 2829 } 2830 2831 static const struct rpc_timeout xs_udp_default_timeout = { 2832 .to_initval = 5 * HZ, 2833 .to_maxval = 30 * HZ, 2834 .to_increment = 5 * 
HZ, 2835 .to_retries = 5, 2836 }; 2837 2838 /** 2839 * xs_setup_udp - Set up transport to use a UDP socket 2840 * @args: rpc transport creation arguments 2841 * 2842 */ 2843 static struct rpc_xprt *xs_setup_udp(struct xprt_create *args) 2844 { 2845 struct sockaddr *addr = args->dstaddr; 2846 struct rpc_xprt *xprt; 2847 struct sock_xprt *transport; 2848 struct rpc_xprt *ret; 2849 2850 xprt = xs_setup_xprt(args, xprt_udp_slot_table_entries, 2851 xprt_udp_slot_table_entries); 2852 if (IS_ERR(xprt)) 2853 return xprt; 2854 transport = container_of(xprt, struct sock_xprt, xprt); 2855 2856 xprt->prot = IPPROTO_UDP; 2857 xprt->tsh_size = 0; 2858 /* XXX: header size can vary due to auth type, IPv6, etc. */ 2859 xprt->max_payload = (1U << 16) - (MAX_HEADER << 3); 2860 2861 xprt->bind_timeout = XS_BIND_TO; 2862 xprt->reestablish_timeout = XS_UDP_REEST_TO; 2863 xprt->idle_timeout = XS_IDLE_DISC_TO; 2864 2865 xprt->ops = &xs_udp_ops; 2866 2867 xprt->timeout = &xs_udp_default_timeout; 2868 2869 INIT_WORK(&transport->recv_worker, xs_udp_data_receive_workfn); 2870 INIT_DELAYED_WORK(&transport->connect_worker, xs_udp_setup_socket); 2871 2872 switch (addr->sa_family) { 2873 case AF_INET: 2874 if (((struct sockaddr_in *)addr)->sin_port != htons(0)) 2875 xprt_set_bound(xprt); 2876 2877 xs_format_peer_addresses(xprt, "udp", RPCBIND_NETID_UDP); 2878 break; 2879 case AF_INET6: 2880 if (((struct sockaddr_in6 *)addr)->sin6_port != htons(0)) 2881 xprt_set_bound(xprt); 2882 2883 xs_format_peer_addresses(xprt, "udp", RPCBIND_NETID_UDP6); 2884 break; 2885 default: 2886 ret = ERR_PTR(-EAFNOSUPPORT); 2887 goto out_err; 2888 } 2889 2890 if (xprt_bound(xprt)) 2891 dprintk("RPC: set up xprt to %s (port %s) via %s\n", 2892 xprt->address_strings[RPC_DISPLAY_ADDR], 2893 xprt->address_strings[RPC_DISPLAY_PORT], 2894 xprt->address_strings[RPC_DISPLAY_PROTO]); 2895 else 2896 dprintk("RPC: set up xprt to %s (autobind) via %s\n", 2897 xprt->address_strings[RPC_DISPLAY_ADDR], 2898 xprt->address_strings[RPC_DISPLAY_PROTO]); 2899 2900 if (try_module_get(THIS_MODULE)) 2901 return xprt; 2902 ret = ERR_PTR(-EINVAL); 2903 out_err: 2904 xs_xprt_free(xprt); 2905 return ret; 2906 } 2907 2908 static const struct rpc_timeout xs_tcp_default_timeout = { 2909 .to_initval = 60 * HZ, 2910 .to_maxval = 60 * HZ, 2911 .to_retries = 2, 2912 }; 2913 2914 /** 2915 * xs_setup_tcp - Set up transport to use a TCP socket 2916 * @args: rpc transport creation arguments 2917 * 2918 */ 2919 static struct rpc_xprt *xs_setup_tcp(struct xprt_create *args) 2920 { 2921 struct sockaddr *addr = args->dstaddr; 2922 struct rpc_xprt *xprt; 2923 struct sock_xprt *transport; 2924 struct rpc_xprt *ret; 2925 unsigned int max_slot_table_size = xprt_max_tcp_slot_table_entries; 2926 2927 if (args->flags & XPRT_CREATE_INFINITE_SLOTS) 2928 max_slot_table_size = RPC_MAX_SLOT_TABLE_LIMIT; 2929 2930 xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries, 2931 max_slot_table_size); 2932 if (IS_ERR(xprt)) 2933 return xprt; 2934 transport = container_of(xprt, struct sock_xprt, xprt); 2935 2936 xprt->prot = IPPROTO_TCP; 2937 xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32); 2938 xprt->max_payload = RPC_MAX_FRAGMENT_SIZE; 2939 2940 xprt->bind_timeout = XS_BIND_TO; 2941 xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO; 2942 xprt->idle_timeout = XS_IDLE_DISC_TO; 2943 2944 xprt->ops = &xs_tcp_ops; 2945 xprt->timeout = &xs_tcp_default_timeout; 2946 2947 INIT_WORK(&transport->recv_worker, xs_tcp_data_receive_workfn); 2948 INIT_DELAYED_WORK(&transport->connect_worker, xs_tcp_setup_socket); 
2949 2950 switch (addr->sa_family) { 2951 case AF_INET: 2952 if (((struct sockaddr_in *)addr)->sin_port != htons(0)) 2953 xprt_set_bound(xprt); 2954 2955 xs_format_peer_addresses(xprt, "tcp", RPCBIND_NETID_TCP); 2956 break; 2957 case AF_INET6: 2958 if (((struct sockaddr_in6 *)addr)->sin6_port != htons(0)) 2959 xprt_set_bound(xprt); 2960 2961 xs_format_peer_addresses(xprt, "tcp", RPCBIND_NETID_TCP6); 2962 break; 2963 default: 2964 ret = ERR_PTR(-EAFNOSUPPORT); 2965 goto out_err; 2966 } 2967 2968 if (xprt_bound(xprt)) 2969 dprintk("RPC: set up xprt to %s (port %s) via %s\n", 2970 xprt->address_strings[RPC_DISPLAY_ADDR], 2971 xprt->address_strings[RPC_DISPLAY_PORT], 2972 xprt->address_strings[RPC_DISPLAY_PROTO]); 2973 else 2974 dprintk("RPC: set up xprt to %s (autobind) via %s\n", 2975 xprt->address_strings[RPC_DISPLAY_ADDR], 2976 xprt->address_strings[RPC_DISPLAY_PROTO]); 2977 2978 if (try_module_get(THIS_MODULE)) 2979 return xprt; 2980 ret = ERR_PTR(-EINVAL); 2981 out_err: 2982 xs_xprt_free(xprt); 2983 return ret; 2984 } 2985 2986 /** 2987 * xs_setup_bc_tcp - Set up transport to use a TCP backchannel socket 2988 * @args: rpc transport creation arguments 2989 * 2990 */ 2991 static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args) 2992 { 2993 struct sockaddr *addr = args->dstaddr; 2994 struct rpc_xprt *xprt; 2995 struct sock_xprt *transport; 2996 struct svc_sock *bc_sock; 2997 struct rpc_xprt *ret; 2998 2999 xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries, 3000 xprt_tcp_slot_table_entries); 3001 if (IS_ERR(xprt)) 3002 return xprt; 3003 transport = container_of(xprt, struct sock_xprt, xprt); 3004 3005 xprt->prot = IPPROTO_TCP; 3006 xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32); 3007 xprt->max_payload = RPC_MAX_FRAGMENT_SIZE; 3008 xprt->timeout = &xs_tcp_default_timeout; 3009 3010 /* backchannel */ 3011 xprt_set_bound(xprt); 3012 xprt->bind_timeout = 0; 3013 xprt->reestablish_timeout = 0; 3014 xprt->idle_timeout = 0; 3015 3016 xprt->ops = &bc_tcp_ops; 3017 3018 switch (addr->sa_family) { 3019 case AF_INET: 3020 xs_format_peer_addresses(xprt, "tcp", 3021 RPCBIND_NETID_TCP); 3022 break; 3023 case AF_INET6: 3024 xs_format_peer_addresses(xprt, "tcp", 3025 RPCBIND_NETID_TCP6); 3026 break; 3027 default: 3028 ret = ERR_PTR(-EAFNOSUPPORT); 3029 goto out_err; 3030 } 3031 3032 dprintk("RPC: set up xprt to %s (port %s) via %s\n", 3033 xprt->address_strings[RPC_DISPLAY_ADDR], 3034 xprt->address_strings[RPC_DISPLAY_PORT], 3035 xprt->address_strings[RPC_DISPLAY_PROTO]); 3036 3037 /* 3038 * Once we've associated a backchannel xprt with a connection, 3039 * we want to keep it around as long as the connection lasts, 3040 * in case we need to start using it for a backchannel again; 3041 * this reference won't be dropped until bc_xprt is destroyed. 
3042 */ 3043 xprt_get(xprt); 3044 args->bc_xprt->xpt_bc_xprt = xprt; 3045 xprt->bc_xprt = args->bc_xprt; 3046 bc_sock = container_of(args->bc_xprt, struct svc_sock, sk_xprt); 3047 transport->sock = bc_sock->sk_sock; 3048 transport->inet = bc_sock->sk_sk; 3049 3050 /* 3051 * Since we don't want connections for the backchannel, we set 3052 * the xprt status to connected 3053 */ 3054 xprt_set_connected(xprt); 3055 3056 if (try_module_get(THIS_MODULE)) 3057 return xprt; 3058 3059 args->bc_xprt->xpt_bc_xprt = NULL; 3060 xprt_put(xprt); 3061 ret = ERR_PTR(-EINVAL); 3062 out_err: 3063 xs_xprt_free(xprt); 3064 return ret; 3065 } 3066 3067 static struct xprt_class xs_local_transport = { 3068 .list = LIST_HEAD_INIT(xs_local_transport.list), 3069 .name = "named UNIX socket", 3070 .owner = THIS_MODULE, 3071 .ident = XPRT_TRANSPORT_LOCAL, 3072 .setup = xs_setup_local, 3073 }; 3074 3075 static struct xprt_class xs_udp_transport = { 3076 .list = LIST_HEAD_INIT(xs_udp_transport.list), 3077 .name = "udp", 3078 .owner = THIS_MODULE, 3079 .ident = XPRT_TRANSPORT_UDP, 3080 .setup = xs_setup_udp, 3081 }; 3082 3083 static struct xprt_class xs_tcp_transport = { 3084 .list = LIST_HEAD_INIT(xs_tcp_transport.list), 3085 .name = "tcp", 3086 .owner = THIS_MODULE, 3087 .ident = XPRT_TRANSPORT_TCP, 3088 .setup = xs_setup_tcp, 3089 }; 3090 3091 static struct xprt_class xs_bc_tcp_transport = { 3092 .list = LIST_HEAD_INIT(xs_bc_tcp_transport.list), 3093 .name = "tcp NFSv4.1 backchannel", 3094 .owner = THIS_MODULE, 3095 .ident = XPRT_TRANSPORT_BC_TCP, 3096 .setup = xs_setup_bc_tcp, 3097 }; 3098 3099 /** 3100 * init_socket_xprt - set up xprtsock's sysctls, register with RPC client 3101 * 3102 */ 3103 int init_socket_xprt(void) 3104 { 3105 #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) 3106 if (!sunrpc_table_header) 3107 sunrpc_table_header = register_sysctl_table(sunrpc_table); 3108 #endif 3109 3110 xprt_register_transport(&xs_local_transport); 3111 xprt_register_transport(&xs_udp_transport); 3112 xprt_register_transport(&xs_tcp_transport); 3113 xprt_register_transport(&xs_bc_tcp_transport); 3114 3115 return 0; 3116 } 3117 3118 /** 3119 * cleanup_socket_xprt - remove xprtsock's sysctls, unregister 3120 * 3121 */ 3122 void cleanup_socket_xprt(void) 3123 { 3124 #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) 3125 if (sunrpc_table_header) { 3126 unregister_sysctl_table(sunrpc_table_header); 3127 sunrpc_table_header = NULL; 3128 } 3129 #endif 3130 3131 xprt_unregister_transport(&xs_local_transport); 3132 xprt_unregister_transport(&xs_udp_transport); 3133 xprt_unregister_transport(&xs_tcp_transport); 3134 xprt_unregister_transport(&xs_bc_tcp_transport); 3135 } 3136 3137 static int param_set_uint_minmax(const char *val, 3138 const struct kernel_param *kp, 3139 unsigned int min, unsigned int max) 3140 { 3141 unsigned int num; 3142 int ret; 3143 3144 if (!val) 3145 return -EINVAL; 3146 ret = kstrtouint(val, 0, &num); 3147 if (ret == -EINVAL || num < min || num > max) 3148 return -EINVAL; 3149 *((unsigned int *)kp->arg) = num; 3150 return 0; 3151 } 3152 3153 static int param_set_portnr(const char *val, const struct kernel_param *kp) 3154 { 3155 return param_set_uint_minmax(val, kp, 3156 RPC_MIN_RESVPORT, 3157 RPC_MAX_RESVPORT); 3158 } 3159 3160 static const struct kernel_param_ops param_ops_portnr = { 3161 .set = param_set_portnr, 3162 .get = param_get_uint, 3163 }; 3164 3165 #define param_check_portnr(name, p) \ 3166 __param_check(name, p, unsigned int); 3167 3168 module_param_named(min_resvport, xprt_min_resvport, portnr, 0644); 3169 
module_param_named(max_resvport, xprt_max_resvport, portnr, 0644); 3170 3171 static int param_set_slot_table_size(const char *val, 3172 const struct kernel_param *kp) 3173 { 3174 return param_set_uint_minmax(val, kp, 3175 RPC_MIN_SLOT_TABLE, 3176 RPC_MAX_SLOT_TABLE); 3177 } 3178 3179 static const struct kernel_param_ops param_ops_slot_table_size = { 3180 .set = param_set_slot_table_size, 3181 .get = param_get_uint, 3182 }; 3183 3184 #define param_check_slot_table_size(name, p) \ 3185 __param_check(name, p, unsigned int); 3186 3187 static int param_set_max_slot_table_size(const char *val, 3188 const struct kernel_param *kp) 3189 { 3190 return param_set_uint_minmax(val, kp, 3191 RPC_MIN_SLOT_TABLE, 3192 RPC_MAX_SLOT_TABLE_LIMIT); 3193 } 3194 3195 static const struct kernel_param_ops param_ops_max_slot_table_size = { 3196 .set = param_set_max_slot_table_size, 3197 .get = param_get_uint, 3198 }; 3199 3200 #define param_check_max_slot_table_size(name, p) \ 3201 __param_check(name, p, unsigned int); 3202 3203 module_param_named(tcp_slot_table_entries, xprt_tcp_slot_table_entries, 3204 slot_table_size, 0644); 3205 module_param_named(tcp_max_slot_table_entries, xprt_max_tcp_slot_table_entries, 3206 max_slot_table_size, 0644); 3207 module_param_named(udp_slot_table_entries, xprt_udp_slot_table_entries, 3208 slot_table_size, 0644); 3209 3210
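/*
 * The tunables above can be set at load time where sunrpc is built as
 * a module, or adjusted at runtime through the module parameters and
 * the sysctl files registered in init_socket_xprt(), for example:
 *
 *	# modprobe sunrpc tcp_slot_table_entries=16
 *	# echo 128 > /proc/sys/sunrpc/tcp_max_slot_table_entries
 *	# echo 665 > /sys/module/sunrpc/parameters/min_resvport
 *
 * Out-of-range values are rejected by param_set_uint_minmax() and by
 * the matching bounds on the sysctl table entries.
 */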