/*
 * linux/net/sunrpc/svcsock.c
 *
 * These are the RPC server socket internals.
 *
 * The server scheduling algorithm does not always distribute the load
 * evenly when servicing a single client. May need to modify the
 * svc_sock_enqueue procedure...
 *
 * TCP support is largely untested and may be a little slow. The problem
 * is that we currently do two separate recvfrom's, one for the 4-byte
 * record length, and the second for the actual record. This could possibly
 * be improved by always reading a minimum size of around 100 bytes and
 * tucking any superfluous bytes away in a temporary store. Still, that
 * leaves write requests out in the rain. An alternative may be to peek at
 * the first skb in the queue, and if it matches the next TCP sequence
 * number, to extract the record marker. Yuck.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/net.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <net/sock.h>
#include <net/checksum.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/tcp_states.h>
#include <asm/uaccess.h>
#include <asm/ioctls.h>

#include <linux/sunrpc/types.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/stats.h>

/* SMP locking strategy:
 *
 *	svc_pool->sp_lock protects most of the fields of that pool.
 *	svc_serv->sv_lock protects sv_tempsocks, sv_permsocks, sv_tmpcnt.
 *	when both need to be taken (rare), svc_serv->sv_lock is first.
 *	BKL protects svc_serv->sv_nrthread.
 *	svc_sock->sk_defer_lock protects the svc_sock->sk_deferred list
 *	svc_sock->sk_flags.SK_BUSY prevents a svc_sock being enqueued multiply.
 *
 *	Some flags can be set to certain values at any time
 *	providing that certain rules are followed:
 *
 *	SK_CONN, SK_DATA, can be set or cleared at any time.
 *		after a set, svc_sock_enqueue must be called.
 *		after a clear, the socket must be read/accepted
 *		 if this succeeds, it must be set again.
 *	SK_CLOSE can be set at any time. It is never cleared.
 *	sk_inuse contains a bias of '1' until SK_DEAD is set.
 *		so when sk_inuse hits zero, we know the socket is dead
 *		and no-one is using it.
 *	SK_DEAD can only be set while SK_BUSY is held which ensures
 *		no other thread will be using the socket or will try to
 *		set SK_DEAD.
 *
 */
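/*
 * For orientation, a sketch of the usual life of a data-ready event,
 * pieced together from the callbacks and helpers below (this is not a
 * separate code path of its own):
 *
 *	sk_data_ready callback:	set_bit(SK_DATA); svc_sock_enqueue()
 *	svc_sock_enqueue:	set SK_BUSY, then hand the svc_sock to an
 *				idle thread (or queue it on sp_sockets)
 *	nfsd thread:		svsk->sk_recvfrom(rqstp)
 *	svc_sock_received:	clear SK_BUSY and call svc_sock_enqueue()
 *				again in case more data is already pending
 */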

#define RPCDBG_FACILITY	RPCDBG_SVCSOCK


static struct svc_sock *svc_setup_socket(struct svc_serv *, struct socket *,
					 int *errp, int flags);
static void		svc_delete_socket(struct svc_sock *svsk);
static void		svc_udp_data_ready(struct sock *, int);
static int		svc_udp_recvfrom(struct svc_rqst *);
static int		svc_udp_sendto(struct svc_rqst *);

static struct svc_deferred_req *svc_deferred_dequeue(struct svc_sock *svsk);
static int svc_deferred_recv(struct svc_rqst *rqstp);
static struct cache_deferred_req *svc_defer(struct cache_req *req);

/* apparently the "standard" is that clients close
 * idle connections after 5 minutes, servers after
 * 6 minutes
 *   http://www.connectathon.org/talks96/nfstcp.pdf
 */
static int svc_conn_age_period = 6*60;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key svc_key[2];
static struct lock_class_key svc_slock_key[2];

static inline void svc_reclassify_socket(struct socket *sock)
{
	struct sock *sk = sock->sk;
	BUG_ON(sk->sk_lock.owner != NULL);
	switch (sk->sk_family) {
	case AF_INET:
		sock_lock_init_class_and_name(sk, "slock-AF_INET-NFSD",
		    &svc_slock_key[0], "sk_lock-AF_INET-NFSD", &svc_key[0]);
		break;

	case AF_INET6:
		sock_lock_init_class_and_name(sk, "slock-AF_INET6-NFSD",
		    &svc_slock_key[1], "sk_lock-AF_INET6-NFSD", &svc_key[1]);
		break;

	default:
		BUG();
	}
}
#else
static inline void svc_reclassify_socket(struct socket *sock)
{
}
#endif

static char *__svc_print_addr(struct sockaddr *addr, char *buf, size_t len)
{
	switch (addr->sa_family) {
	case AF_INET:
		snprintf(buf, len, "%u.%u.%u.%u, port=%u",
			NIPQUAD(((struct sockaddr_in *) addr)->sin_addr),
			htons(((struct sockaddr_in *) addr)->sin_port));
		break;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	case AF_INET6:
		snprintf(buf, len, "%x:%x:%x:%x:%x:%x:%x:%x, port=%u",
			NIP6(((struct sockaddr_in6 *) addr)->sin6_addr),
			htons(((struct sockaddr_in6 *) addr)->sin6_port));
		break;
#endif
	default:
		snprintf(buf, len, "unknown address type: %d", addr->sa_family);
		break;
	}
	return buf;
}

/**
 * svc_print_addr - Format rq_addr field for printing
 * @rqstp: svc_rqst struct containing address to print
 * @buf: target buffer for formatted address
 * @len: length of target buffer
 *
 */
char *svc_print_addr(struct svc_rqst *rqstp, char *buf, size_t len)
{
	return __svc_print_addr(svc_addr(rqstp), buf, len);
}
EXPORT_SYMBOL_GPL(svc_print_addr);

/*
 * Queue up an idle server thread.  Must have pool->sp_lock held.
 * Note: this is really a stack rather than a queue, so that we only
 * use as many different threads as we need, and the rest don't pollute
 * the cache.
 */
static inline void
svc_thread_enqueue(struct svc_pool *pool, struct svc_rqst *rqstp)
{
	list_add(&rqstp->rq_list, &pool->sp_threads);
}

/*
 * Dequeue an nfsd thread.  Must have pool->sp_lock held.
 */
static inline void
svc_thread_dequeue(struct svc_pool *pool, struct svc_rqst *rqstp)
{
	list_del(&rqstp->rq_list);
}

/*
 * Release an skbuff after use
 */
static inline void
svc_release_skb(struct svc_rqst *rqstp)
{
	struct sk_buff *skb = rqstp->rq_skbuff;
	struct svc_deferred_req *dr = rqstp->rq_deferred;

	if (skb) {
		rqstp->rq_skbuff = NULL;

		dprintk("svc: service %p, releasing skb %p\n", rqstp, skb);
		skb_free_datagram(rqstp->rq_sock->sk_sk, skb);
	}
	if (dr) {
		rqstp->rq_deferred = NULL;
		kfree(dr);
	}
}

/*
 * Any space to write?
 */
static inline unsigned long
svc_sock_wspace(struct svc_sock *svsk)
{
	int wspace;

	if (svsk->sk_sock->type == SOCK_STREAM)
		wspace = sk_stream_wspace(svsk->sk_sk);
	else
		wspace = sock_wspace(svsk->sk_sk);

	return wspace;
}

/*
 * Queue up a socket with data pending. If there are idle nfsd
 * processes, wake 'em up.
 *
 */
static void
svc_sock_enqueue(struct svc_sock *svsk)
{
	struct svc_serv	*serv = svsk->sk_server;
	struct svc_pool *pool;
	struct svc_rqst	*rqstp;
	int cpu;

	if (!(svsk->sk_flags &
	      ( (1<<SK_CONN)|(1<<SK_DATA)|(1<<SK_CLOSE)|(1<<SK_DEFERRED)) ))
		return;
	if (test_bit(SK_DEAD, &svsk->sk_flags))
		return;

	cpu = get_cpu();
	pool = svc_pool_for_cpu(svsk->sk_server, cpu);
	put_cpu();

	spin_lock_bh(&pool->sp_lock);

	if (!list_empty(&pool->sp_threads) &&
	    !list_empty(&pool->sp_sockets))
		printk(KERN_ERR
			"svc_sock_enqueue: threads and sockets both waiting??\n");

	if (test_bit(SK_DEAD, &svsk->sk_flags)) {
		/* Don't enqueue dead sockets */
		dprintk("svc: socket %p is dead, not enqueued\n", svsk->sk_sk);
		goto out_unlock;
	}

	/* Mark socket as busy. It will remain in this state until the
	 * server has processed all pending data and put the socket back
	 * on the idle list.  We update SK_BUSY atomically because
	 * it also guards against trying to enqueue the svc_sock twice.
	 */
	if (test_and_set_bit(SK_BUSY, &svsk->sk_flags)) {
		/* Don't enqueue socket while already enqueued */
		dprintk("svc: socket %p busy, not enqueued\n", svsk->sk_sk);
		goto out_unlock;
	}
	BUG_ON(svsk->sk_pool != NULL);
	svsk->sk_pool = pool;

	set_bit(SOCK_NOSPACE, &svsk->sk_sock->flags);
	if (((atomic_read(&svsk->sk_reserved) + serv->sv_max_mesg)*2
	     > svc_sock_wspace(svsk))
	    && !test_bit(SK_CLOSE, &svsk->sk_flags)
	    && !test_bit(SK_CONN, &svsk->sk_flags)) {
		/* Don't enqueue while not enough space for reply */
		dprintk("svc: socket %p no space, %d*2 > %ld, not enqueued\n",
			svsk->sk_sk, atomic_read(&svsk->sk_reserved)+serv->sv_max_mesg,
			svc_sock_wspace(svsk));
		svsk->sk_pool = NULL;
		clear_bit(SK_BUSY, &svsk->sk_flags);
		goto out_unlock;
	}
	clear_bit(SOCK_NOSPACE, &svsk->sk_sock->flags);


	if (!list_empty(&pool->sp_threads)) {
		rqstp = list_entry(pool->sp_threads.next,
				   struct svc_rqst,
				   rq_list);
		dprintk("svc: socket %p served by daemon %p\n",
			svsk->sk_sk, rqstp);
		svc_thread_dequeue(pool, rqstp);
		if (rqstp->rq_sock)
			printk(KERN_ERR
				"svc_sock_enqueue: server %p, rq_sock=%p!\n",
				rqstp, rqstp->rq_sock);
		rqstp->rq_sock = svsk;
		atomic_inc(&svsk->sk_inuse);
		rqstp->rq_reserved = serv->sv_max_mesg;
		atomic_add(rqstp->rq_reserved, &svsk->sk_reserved);
		BUG_ON(svsk->sk_pool != pool);
		wake_up(&rqstp->rq_wait);
	} else {
		dprintk("svc: socket %p put into queue\n", svsk->sk_sk);
		list_add_tail(&svsk->sk_ready, &pool->sp_sockets);
		BUG_ON(svsk->sk_pool != pool);
	}

out_unlock:
	spin_unlock_bh(&pool->sp_lock);
}

/*
 * Dequeue the first socket.  Must be called with the pool->sp_lock held.
 */
static inline struct svc_sock *
svc_sock_dequeue(struct svc_pool *pool)
{
	struct svc_sock	*svsk;

	if (list_empty(&pool->sp_sockets))
		return NULL;

	svsk = list_entry(pool->sp_sockets.next,
			  struct svc_sock, sk_ready);
	list_del_init(&svsk->sk_ready);

	dprintk("svc: socket %p dequeued, inuse=%d\n",
		svsk->sk_sk, atomic_read(&svsk->sk_inuse));

	return svsk;
}

/*
 * Having read something from a socket, check whether it
 * needs to be re-enqueued.
 * Note: SK_DATA only gets cleared when a read-attempt finds
 * no (or insufficient) data.
 */
static inline void
svc_sock_received(struct svc_sock *svsk)
{
	svsk->sk_pool = NULL;
	clear_bit(SK_BUSY, &svsk->sk_flags);
	svc_sock_enqueue(svsk);
}


/**
 * svc_reserve - change the space reserved for the reply to a request.
 * @rqstp:  The request in question
 * @space: new max space to reserve
 *
 * Each request reserves some space on the output queue of the socket
 * to make sure the reply fits.  This function reduces that reserved
 * space to be the amount of space used already, plus @space.
 *
 */
void svc_reserve(struct svc_rqst *rqstp, int space)
{
	space += rqstp->rq_res.head[0].iov_len;

	if (space < rqstp->rq_reserved) {
		struct svc_sock *svsk = rqstp->rq_sock;
		atomic_sub((rqstp->rq_reserved - space), &svsk->sk_reserved);
		rqstp->rq_reserved = space;

		svc_sock_enqueue(svsk);
	}
}

/*
 * Release a socket after use.
 */
static inline void
svc_sock_put(struct svc_sock *svsk)
{
	if (atomic_dec_and_test(&svsk->sk_inuse)) {
		BUG_ON(!test_bit(SK_DEAD, &svsk->sk_flags));

		dprintk("svc: releasing dead socket\n");
		if (svsk->sk_sock->file)
			sockfd_put(svsk->sk_sock);
		else
			sock_release(svsk->sk_sock);
		if (svsk->sk_info_authunix != NULL)
			svcauth_unix_info_release(svsk->sk_info_authunix);
		kfree(svsk);
	}
}

static void
svc_sock_release(struct svc_rqst *rqstp)
{
	struct svc_sock	*svsk = rqstp->rq_sock;

	svc_release_skb(rqstp);

	svc_free_res_pages(rqstp);
	rqstp->rq_res.page_len = 0;
	rqstp->rq_res.page_base = 0;


	/* Reset response buffer and release
	 * the reservation.
	 * But first, check that enough space was reserved
	 * for the reply, otherwise we have a bug!
	 */
	if ((rqstp->rq_res.len) > rqstp->rq_reserved)
		printk(KERN_ERR "RPC request reserved %d but used %d\n",
		       rqstp->rq_reserved,
		       rqstp->rq_res.len);

	rqstp->rq_res.head[0].iov_len = 0;
	svc_reserve(rqstp, 0);
	rqstp->rq_sock = NULL;

	svc_sock_put(svsk);
}

/*
 * External function to wake up a server waiting for data
 * This really only makes sense for services like lockd
 * which have exactly one thread anyway.
 */
void
svc_wake_up(struct svc_serv *serv)
{
	struct svc_rqst	*rqstp;
	unsigned int i;
	struct svc_pool *pool;

	for (i = 0; i < serv->sv_nrpools; i++) {
		pool = &serv->sv_pools[i];

		spin_lock_bh(&pool->sp_lock);
		if (!list_empty(&pool->sp_threads)) {
			rqstp = list_entry(pool->sp_threads.next,
					   struct svc_rqst,
					   rq_list);
			dprintk("svc: daemon %p woken up.\n", rqstp);
			/*
			svc_thread_dequeue(pool, rqstp);
			rqstp->rq_sock = NULL;
			 */
			wake_up(&rqstp->rq_wait);
		}
		spin_unlock_bh(&pool->sp_lock);
	}
}

union svc_pktinfo_u {
	struct in_pktinfo pkti;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	struct in6_pktinfo pkti6;
#endif
};

static void svc_set_cmsg_data(struct svc_rqst *rqstp, struct cmsghdr *cmh)
{
	switch (rqstp->rq_sock->sk_sk->sk_family) {
	case AF_INET: {
			struct in_pktinfo *pki = CMSG_DATA(cmh);

			cmh->cmsg_level = SOL_IP;
			cmh->cmsg_type = IP_PKTINFO;
			pki->ipi_ifindex = 0;
			pki->ipi_spec_dst.s_addr = rqstp->rq_daddr.addr.s_addr;
			cmh->cmsg_len = CMSG_LEN(sizeof(*pki));
		}
		break;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	case AF_INET6: {
			struct in6_pktinfo *pki = CMSG_DATA(cmh);

			cmh->cmsg_level = SOL_IPV6;
			cmh->cmsg_type = IPV6_PKTINFO;
			pki->ipi6_ifindex = 0;
			ipv6_addr_copy(&pki->ipi6_addr,
					&rqstp->rq_daddr.addr6);
			cmh->cmsg_len = CMSG_LEN(sizeof(*pki));
		}
		break;
#endif
	}
	return;
}

/*
 * Generic sendto routine
 */
static int
svc_sendto(struct svc_rqst *rqstp, struct xdr_buf *xdr)
{
	struct svc_sock	*svsk = rqstp->rq_sock;
	struct socket	*sock = svsk->sk_sock;
	int		slen;
	char		buffer[CMSG_SPACE(sizeof(union svc_pktinfo_u))];
	struct cmsghdr *cmh = (struct cmsghdr *)buffer;
	int		len = 0;
	int		result;
	int		size;
	struct page	**ppage = xdr->pages;
	size_t		base = xdr->page_base;
	unsigned int	pglen = xdr->page_len;
	unsigned int	flags = MSG_MORE;
	char		buf[RPC_MAX_ADDRBUFLEN];

	slen = xdr->len;

	if (rqstp->rq_prot == IPPROTO_UDP) {
		struct msghdr msg = {
			.msg_name	= &rqstp->rq_addr,
			.msg_namelen	= rqstp->rq_addrlen,
			.msg_control	= cmh,
			.msg_controllen	= sizeof(buffer),
			.msg_flags	= MSG_MORE,
		};

		svc_set_cmsg_data(rqstp, cmh);

		if (sock_sendmsg(sock, &msg, 0) < 0)
			goto out;
	}

	/* send head */
	if (slen == xdr->head[0].iov_len)
		flags = 0;
	len = kernel_sendpage(sock, rqstp->rq_respages[0], 0,
				  xdr->head[0].iov_len, flags);
	if (len != xdr->head[0].iov_len)
		goto out;
	slen -= xdr->head[0].iov_len;
	if (slen == 0)
		goto out;

	/* send page data */
	size = PAGE_SIZE - base < pglen ? PAGE_SIZE - base : pglen;
	while (pglen > 0) {
		if (slen == size)
			flags = 0;
		result = kernel_sendpage(sock, *ppage, base, size, flags);
		if (result > 0)
			len += result;
		if (result != size)
			goto out;
		slen -= size;
		pglen -= size;
		size = PAGE_SIZE < pglen ? PAGE_SIZE : pglen;
		base = 0;
		ppage++;
	}
	/* send tail */
	if (xdr->tail[0].iov_len) {
		result = kernel_sendpage(sock, rqstp->rq_respages[0],
					     ((unsigned long)xdr->tail[0].iov_base)
						& (PAGE_SIZE-1),
					     xdr->tail[0].iov_len, 0);

		if (result > 0)
			len += result;
	}
out:
	dprintk("svc: socket %p sendto([%p %Zu... ], %d) = %d (addr %s)\n",
		rqstp->rq_sock, xdr->head[0].iov_base, xdr->head[0].iov_len,
		xdr->len, len, svc_print_addr(rqstp, buf, sizeof(buf)));

	return len;
}

/*
 * Report socket names for nfsdfs
 */
static int one_sock_name(char *buf, struct svc_sock *svsk)
{
	int len;

	switch(svsk->sk_sk->sk_family) {
	case AF_INET:
		len = sprintf(buf, "ipv4 %s %u.%u.%u.%u %d\n",
			      svsk->sk_sk->sk_protocol==IPPROTO_UDP?
			      "udp" : "tcp",
			      NIPQUAD(inet_sk(svsk->sk_sk)->rcv_saddr),
			      inet_sk(svsk->sk_sk)->num);
		break;
	default:
		len = sprintf(buf, "*unknown-%d*\n",
			       svsk->sk_sk->sk_family);
	}
	return len;
}

int
svc_sock_names(char *buf, struct svc_serv *serv, char *toclose)
{
	struct svc_sock *svsk, *closesk = NULL;
	int len = 0;

	if (!serv)
		return 0;
	spin_lock_bh(&serv->sv_lock);
	list_for_each_entry(svsk, &serv->sv_permsocks, sk_list) {
		int onelen = one_sock_name(buf+len, svsk);
		if (toclose && strcmp(toclose, buf+len) == 0)
			closesk = svsk;
		else
			len += onelen;
	}
	spin_unlock_bh(&serv->sv_lock);
	if (closesk)
		/* Should unregister with portmap, but you cannot
		 * unregister just one protocol...
		 */
		svc_close_socket(closesk);
	else if (toclose)
		return -ENOENT;
	return len;
}
EXPORT_SYMBOL(svc_sock_names);

/*
 * Check input queue length
 */
static int
svc_recv_available(struct svc_sock *svsk)
{
	struct socket	*sock = svsk->sk_sock;
	int		avail, err;

	err = kernel_sock_ioctl(sock, TIOCINQ, (unsigned long) &avail);

	return (err >= 0)? avail : err;
}

/*
 * Generic recvfrom routine.
 */
static int
svc_recvfrom(struct svc_rqst *rqstp, struct kvec *iov, int nr, int buflen)
{
	struct svc_sock *svsk = rqstp->rq_sock;
	struct msghdr msg = {
		.msg_flags	= MSG_DONTWAIT,
	};
	int len;

	len = kernel_recvmsg(svsk->sk_sock, &msg, iov, nr, buflen,
				msg.msg_flags);

	/* sock_recvmsg doesn't fill in the name/namelen, so we must..
649 */ 650 memcpy(&rqstp->rq_addr, &svsk->sk_remote, svsk->sk_remotelen); 651 rqstp->rq_addrlen = svsk->sk_remotelen; 652 653 dprintk("svc: socket %p recvfrom(%p, %Zu) = %d\n", 654 svsk, iov[0].iov_base, iov[0].iov_len, len); 655 656 return len; 657 } 658 659 /* 660 * Set socket snd and rcv buffer lengths 661 */ 662 static inline void 663 svc_sock_setbufsize(struct socket *sock, unsigned int snd, unsigned int rcv) 664 { 665 #if 0 666 mm_segment_t oldfs; 667 oldfs = get_fs(); set_fs(KERNEL_DS); 668 sock_setsockopt(sock, SOL_SOCKET, SO_SNDBUF, 669 (char*)&snd, sizeof(snd)); 670 sock_setsockopt(sock, SOL_SOCKET, SO_RCVBUF, 671 (char*)&rcv, sizeof(rcv)); 672 #else 673 /* sock_setsockopt limits use to sysctl_?mem_max, 674 * which isn't acceptable. Until that is made conditional 675 * on not having CAP_SYS_RESOURCE or similar, we go direct... 676 * DaveM said I could! 677 */ 678 lock_sock(sock->sk); 679 sock->sk->sk_sndbuf = snd * 2; 680 sock->sk->sk_rcvbuf = rcv * 2; 681 sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK|SOCK_RCVBUF_LOCK; 682 release_sock(sock->sk); 683 #endif 684 } 685 /* 686 * INET callback when data has been received on the socket. 687 */ 688 static void 689 svc_udp_data_ready(struct sock *sk, int count) 690 { 691 struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data; 692 693 if (svsk) { 694 dprintk("svc: socket %p(inet %p), count=%d, busy=%d\n", 695 svsk, sk, count, test_bit(SK_BUSY, &svsk->sk_flags)); 696 set_bit(SK_DATA, &svsk->sk_flags); 697 svc_sock_enqueue(svsk); 698 } 699 if (sk->sk_sleep && waitqueue_active(sk->sk_sleep)) 700 wake_up_interruptible(sk->sk_sleep); 701 } 702 703 /* 704 * INET callback when space is newly available on the socket. 705 */ 706 static void 707 svc_write_space(struct sock *sk) 708 { 709 struct svc_sock *svsk = (struct svc_sock *)(sk->sk_user_data); 710 711 if (svsk) { 712 dprintk("svc: socket %p(inet %p), write_space busy=%d\n", 713 svsk, sk, test_bit(SK_BUSY, &svsk->sk_flags)); 714 svc_sock_enqueue(svsk); 715 } 716 717 if (sk->sk_sleep && waitqueue_active(sk->sk_sleep)) { 718 dprintk("RPC svc_write_space: someone sleeping on %p\n", 719 svsk); 720 wake_up_interruptible(sk->sk_sleep); 721 } 722 } 723 724 static void svc_udp_get_sender_address(struct svc_rqst *rqstp, 725 struct sk_buff *skb) 726 { 727 switch (rqstp->rq_sock->sk_sk->sk_family) { 728 case AF_INET: { 729 /* this seems to come from net/ipv4/udp.c:udp_recvmsg */ 730 struct sockaddr_in *sin = svc_addr_in(rqstp); 731 732 sin->sin_family = AF_INET; 733 sin->sin_port = skb->h.uh->source; 734 sin->sin_addr.s_addr = skb->nh.iph->saddr; 735 rqstp->rq_addrlen = sizeof(struct sockaddr_in); 736 /* Remember which interface received this request */ 737 rqstp->rq_daddr.addr.s_addr = skb->nh.iph->daddr; 738 } 739 break; 740 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 741 case AF_INET6: { 742 /* this is derived from net/ipv6/udp.c:udpv6_recvmesg */ 743 struct sockaddr_in6 *sin6 = svc_addr_in6(rqstp); 744 745 sin6->sin6_family = AF_INET6; 746 sin6->sin6_port = skb->h.uh->source; 747 sin6->sin6_flowinfo = 0; 748 sin6->sin6_scope_id = 0; 749 if (ipv6_addr_type(&sin6->sin6_addr) & 750 IPV6_ADDR_LINKLOCAL) 751 sin6->sin6_scope_id = IP6CB(skb)->iif; 752 ipv6_addr_copy(&sin6->sin6_addr, 753 &skb->nh.ipv6h->saddr); 754 rqstp->rq_addrlen = sizeof(struct sockaddr_in); 755 /* Remember which interface received this request */ 756 ipv6_addr_copy(&rqstp->rq_daddr.addr6, 757 &skb->nh.ipv6h->saddr); 758 } 759 break; 760 #endif 761 } 762 return; 763 } 764 765 /* 766 * Receive a datagram from a UDP 
/*
 * Receive a datagram from a UDP socket.
 */
static int
svc_udp_recvfrom(struct svc_rqst *rqstp)
{
	struct svc_sock	*svsk = rqstp->rq_sock;
	struct svc_serv	*serv = svsk->sk_server;
	struct sk_buff	*skb;
	int		err, len;

	if (test_and_clear_bit(SK_CHNGBUF, &svsk->sk_flags))
	    /* udp sockets need large rcvbuf as all pending
	     * requests are still in that buffer.  sndbuf must
	     * also be large enough that there is enough space
	     * for one reply per thread.  We count all threads
	     * rather than threads in a particular pool, which
	     * provides an upper bound on the number of threads
	     * which will access the socket.
	     */
	    svc_sock_setbufsize(svsk->sk_sock,
				(serv->sv_nrthreads+3) * serv->sv_max_mesg,
				(serv->sv_nrthreads+3) * serv->sv_max_mesg);

	if ((rqstp->rq_deferred = svc_deferred_dequeue(svsk))) {
		svc_sock_received(svsk);
		return svc_deferred_recv(rqstp);
	}

	if (test_bit(SK_CLOSE, &svsk->sk_flags)) {
		svc_delete_socket(svsk);
		return 0;
	}

	clear_bit(SK_DATA, &svsk->sk_flags);
	while ((skb = skb_recv_datagram(svsk->sk_sk, 0, 1, &err)) == NULL) {
		if (err == -EAGAIN) {
			svc_sock_received(svsk);
			return err;
		}
		/* possibly an icmp error */
		dprintk("svc: recvfrom returned error %d\n", -err);
	}
	if (skb->tstamp.off_sec == 0) {
		struct timeval tv;

		tv.tv_sec = xtime.tv_sec;
		tv.tv_usec = xtime.tv_nsec / NSEC_PER_USEC;
		skb_set_timestamp(skb, &tv);
		/* Don't enable netstamp, sunrpc doesn't
		   need that much accuracy */
	}
	skb_get_timestamp(skb, &svsk->sk_sk->sk_stamp);
	set_bit(SK_DATA, &svsk->sk_flags); /* there may be more data... */

	/*
	 * Maybe more packets - kick another thread ASAP.
	 */
	svc_sock_received(svsk);

	len = skb->len - sizeof(struct udphdr);
	rqstp->rq_arg.len = len;

	rqstp->rq_prot = IPPROTO_UDP;

	svc_udp_get_sender_address(rqstp, skb);

	if (skb_is_nonlinear(skb)) {
		/* we have to copy */
		local_bh_disable();
		if (csum_partial_copy_to_xdr(&rqstp->rq_arg, skb)) {
			local_bh_enable();
			/* checksum error */
			skb_free_datagram(svsk->sk_sk, skb);
			return 0;
		}
		local_bh_enable();
		skb_free_datagram(svsk->sk_sk, skb);
	} else {
		/* we can use it in-place */
		rqstp->rq_arg.head[0].iov_base = skb->data + sizeof(struct udphdr);
		rqstp->rq_arg.head[0].iov_len = len;
		if (skb_checksum_complete(skb)) {
			skb_free_datagram(svsk->sk_sk, skb);
			return 0;
		}
		rqstp->rq_skbuff = skb;
	}

	rqstp->rq_arg.page_base = 0;
	if (len <= rqstp->rq_arg.head[0].iov_len) {
		rqstp->rq_arg.head[0].iov_len = len;
		rqstp->rq_arg.page_len = 0;
		rqstp->rq_respages = rqstp->rq_pages+1;
	} else {
		rqstp->rq_arg.page_len = len - rqstp->rq_arg.head[0].iov_len;
		rqstp->rq_respages = rqstp->rq_pages + 1 +
			(rqstp->rq_arg.page_len + PAGE_SIZE - 1)/ PAGE_SIZE;
	}

	if (serv->sv_stats)
		serv->sv_stats->netudpcnt++;

	return len;
}

static int
svc_udp_sendto(struct svc_rqst *rqstp)
{
	int		error;

	error = svc_sendto(rqstp, &rqstp->rq_res);
	if (error == -ECONNREFUSED)
		/* ICMP error on earlier request. */
		error = svc_sendto(rqstp, &rqstp->rq_res);

	return error;
}

static void
svc_udp_init(struct svc_sock *svsk)
{
	svsk->sk_sk->sk_data_ready = svc_udp_data_ready;
	svsk->sk_sk->sk_write_space = svc_write_space;
	svsk->sk_recvfrom = svc_udp_recvfrom;
	svsk->sk_sendto = svc_udp_sendto;

	/* initial settings must provide enough space to
	 * receive and respond to one request.
	 * svc_udp_recvfrom will re-adjust if necessary
	 */
	svc_sock_setbufsize(svsk->sk_sock,
			    3 * svsk->sk_server->sv_max_mesg,
			    3 * svsk->sk_server->sv_max_mesg);

	set_bit(SK_DATA, &svsk->sk_flags); /* might have come in before data_ready set up */
	set_bit(SK_CHNGBUF, &svsk->sk_flags);
}

/*
 * A data_ready event on a listening socket means there's a connection
 * pending. Do not use state_change as a substitute for it.
 */
static void
svc_tcp_listen_data_ready(struct sock *sk, int count_unused)
{
	struct svc_sock	*svsk = (struct svc_sock *)sk->sk_user_data;

	dprintk("svc: socket %p TCP (listen) state change %d\n",
		sk, sk->sk_state);

	/*
	 * This callback may be called twice when a new connection
	 * is established as a child socket inherits everything
	 * from a parent LISTEN socket.
	 * 1) data_ready method of the parent socket will be called
	 *    when one of the child sockets becomes ESTABLISHED.
	 * 2) data_ready method of the child socket may be called
	 *    when it receives data before the socket is accepted.
	 * In case of 2, we should ignore it silently.
	 */
	if (sk->sk_state == TCP_LISTEN) {
		if (svsk) {
			set_bit(SK_CONN, &svsk->sk_flags);
			svc_sock_enqueue(svsk);
		} else
			printk("svc: socket %p: no user data\n", sk);
	}

	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
		wake_up_interruptible_all(sk->sk_sleep);
}

/*
 * A state change on a connected socket means it's dying or dead.
 */
static void
svc_tcp_state_change(struct sock *sk)
{
	struct svc_sock	*svsk = (struct svc_sock *)sk->sk_user_data;

	dprintk("svc: socket %p TCP (connected) state change %d (svsk %p)\n",
		sk, sk->sk_state, sk->sk_user_data);

	if (!svsk)
		printk("svc: socket %p: no user data\n", sk);
	else {
		set_bit(SK_CLOSE, &svsk->sk_flags);
		svc_sock_enqueue(svsk);
	}
	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
		wake_up_interruptible_all(sk->sk_sleep);
}

static void
svc_tcp_data_ready(struct sock *sk, int count)
{
	struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data;

	dprintk("svc: socket %p TCP data ready (svsk %p)\n",
		sk, sk->sk_user_data);
	if (svsk) {
		set_bit(SK_DATA, &svsk->sk_flags);
		svc_sock_enqueue(svsk);
	}
	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
		wake_up_interruptible(sk->sk_sleep);
}

static inline int svc_port_is_privileged(struct sockaddr *sin)
{
	switch (sin->sa_family) {
	case AF_INET:
		return ntohs(((struct sockaddr_in *)sin)->sin_port)
			< PROT_SOCK;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	case AF_INET6:
		return ntohs(((struct sockaddr_in6 *)sin)->sin6_port)
			< PROT_SOCK;
#endif
	default:
		return 0;
	}
}

/*
 * Accept a TCP connection
 */
static void
svc_tcp_accept(struct svc_sock *svsk)
{
	struct sockaddr_storage addr;
	struct sockaddr	*sin = (struct sockaddr *) &addr;
	struct svc_serv	*serv = svsk->sk_server;
	struct socket	*sock = svsk->sk_sock;
	struct socket	*newsock;
	struct svc_sock	*newsvsk;
	int		err, slen;
	char		buf[RPC_MAX_ADDRBUFLEN];

	dprintk("svc: tcp_accept %p sock %p\n", svsk, sock);
	if (!sock)
		return;

	clear_bit(SK_CONN, &svsk->sk_flags);
	err = kernel_accept(sock, &newsock, O_NONBLOCK);
	if (err < 0) {
		if (err == -ENOMEM)
			printk(KERN_WARNING "%s: no more sockets!\n",
			       serv->sv_name);
		else if (err != -EAGAIN && net_ratelimit())
			printk(KERN_WARNING "%s: accept failed (err %d)!\n",
				   serv->sv_name, -err);
		return;
	}

	set_bit(SK_CONN, &svsk->sk_flags);
	svc_sock_enqueue(svsk);

	err = kernel_getpeername(newsock, sin, &slen);
	if (err < 0) {
		if (net_ratelimit())
			printk(KERN_WARNING "%s: peername failed (err %d)!\n",
				   serv->sv_name, -err);
		goto failed;		/* aborted connection or whatever */
	}

	/* Ideally, we would want to reject connections from unauthorized
	 * hosts here, but when we get encryption, the IP of the host won't
	 * tell us anything.  For now just warn about unpriv connections.
	 */
	if (!svc_port_is_privileged(sin)) {
		dprintk(KERN_WARNING
			"%s: connect from unprivileged port: %s\n",
			serv->sv_name,
			__svc_print_addr(sin, buf, sizeof(buf)));
	}
	dprintk("%s: connect from %s\n", serv->sv_name,
		__svc_print_addr(sin, buf, sizeof(buf)));

	/* make sure that a write doesn't block forever when
	 * low on memory
	 */
	newsock->sk->sk_sndtimeo = HZ*30;

	if (!(newsvsk = svc_setup_socket(serv, newsock, &err,
				 (SVC_SOCK_ANONYMOUS | SVC_SOCK_TEMPORARY))))
		goto failed;
	memcpy(&newsvsk->sk_remote, sin, slen);
	newsvsk->sk_remotelen = slen;

	svc_sock_received(newsvsk);

	/* make sure that we don't have too many active connections.
	 * If we have, something must be dropped.
	 *
	 * There's no point in trying to do random drop here for
	 * DoS prevention. The NFS client does one reconnect every
	 * 15 seconds. An attacker can easily beat that.
	 *
	 * The only somewhat efficient mechanism would be to drop
	 * old connections from the same IP first. But right now
	 * we don't even record the client IP in svc_sock.
	 */
	if (serv->sv_tmpcnt > (serv->sv_nrthreads+3)*20) {
		struct svc_sock *svsk = NULL;
		spin_lock_bh(&serv->sv_lock);
		if (!list_empty(&serv->sv_tempsocks)) {
			if (net_ratelimit()) {
				/* Try to help the admin */
				printk(KERN_NOTICE "%s: too many open TCP "
					"sockets, consider increasing the "
					"number of nfsd threads\n",
						   serv->sv_name);
				printk(KERN_NOTICE
				       "%s: last TCP connect from %s\n",
				       serv->sv_name, buf);
			}
			/*
			 * Always select the oldest socket. It's not fair,
			 * but so is life
			 */
			svsk = list_entry(serv->sv_tempsocks.prev,
					  struct svc_sock,
					  sk_list);
			set_bit(SK_CLOSE, &svsk->sk_flags);
			atomic_inc(&svsk->sk_inuse);
		}
		spin_unlock_bh(&serv->sv_lock);

		if (svsk) {
			svc_sock_enqueue(svsk);
			svc_sock_put(svsk);
		}

	}

	if (serv->sv_stats)
		serv->sv_stats->nettcpconn++;

	return;

failed:
	sock_release(newsock);
	return;
}

/*
 * Receive data from a TCP socket.
 */
static int
svc_tcp_recvfrom(struct svc_rqst *rqstp)
{
	struct svc_sock	*svsk = rqstp->rq_sock;
	struct svc_serv	*serv = svsk->sk_server;
	int		len;
	struct kvec *vec;
	int pnum, vlen;

	dprintk("svc: tcp_recv %p data %d conn %d close %d\n",
		svsk, test_bit(SK_DATA, &svsk->sk_flags),
		test_bit(SK_CONN, &svsk->sk_flags),
		test_bit(SK_CLOSE, &svsk->sk_flags));

	if ((rqstp->rq_deferred = svc_deferred_dequeue(svsk))) {
		svc_sock_received(svsk);
		return svc_deferred_recv(rqstp);
	}

	if (test_bit(SK_CLOSE, &svsk->sk_flags)) {
		svc_delete_socket(svsk);
		return 0;
	}

	if (svsk->sk_sk->sk_state == TCP_LISTEN) {
		svc_tcp_accept(svsk);
		svc_sock_received(svsk);
		return 0;
	}

	if (test_and_clear_bit(SK_CHNGBUF, &svsk->sk_flags))
		/* sndbuf needs to have room for one request
		 * per thread, otherwise we can stall even when the
		 * network isn't a bottleneck.
		 *
		 * We count all threads rather than threads in a
		 * particular pool, which provides an upper bound
		 * on the number of threads which will access the socket.
		 *
		 * rcvbuf just needs to be able to hold a few requests.
		 * Normally they will be removed from the queue
		 * as soon as a complete request arrives.
		 */
		svc_sock_setbufsize(svsk->sk_sock,
				    (serv->sv_nrthreads+3) * serv->sv_max_mesg,
				    3 * serv->sv_max_mesg);

	clear_bit(SK_DATA, &svsk->sk_flags);

	/* Receive data. If we haven't got the record length yet, get
	 * the next four bytes. Otherwise try to gobble up as much as
	 * possible up to the complete record length.
	 */
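	/*
	 * A note on the framing (a sketch of RFC 1831 record marking):
	 * each RPC-over-TCP record is preceded by a 4-byte header in
	 * network byte order; the top bit marks the last fragment of a
	 * record and the low 31 bits give the fragment length.  The code
	 * below reads that header into sk_reclen and, as noted further
	 * down, only handles the single-fragment case.
	 */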
1167 */ 1168 if (svsk->sk_tcplen < 4) { 1169 unsigned long want = 4 - svsk->sk_tcplen; 1170 struct kvec iov; 1171 1172 iov.iov_base = ((char *) &svsk->sk_reclen) + svsk->sk_tcplen; 1173 iov.iov_len = want; 1174 if ((len = svc_recvfrom(rqstp, &iov, 1, want)) < 0) 1175 goto error; 1176 svsk->sk_tcplen += len; 1177 1178 if (len < want) { 1179 dprintk("svc: short recvfrom while reading record length (%d of %lu)\n", 1180 len, want); 1181 svc_sock_received(svsk); 1182 return -EAGAIN; /* record header not complete */ 1183 } 1184 1185 svsk->sk_reclen = ntohl(svsk->sk_reclen); 1186 if (!(svsk->sk_reclen & 0x80000000)) { 1187 /* FIXME: technically, a record can be fragmented, 1188 * and non-terminal fragments will not have the top 1189 * bit set in the fragment length header. 1190 * But apparently no known nfs clients send fragmented 1191 * records. */ 1192 if (net_ratelimit()) 1193 printk(KERN_NOTICE "RPC: bad TCP reclen 0x%08lx" 1194 " (non-terminal)\n", 1195 (unsigned long) svsk->sk_reclen); 1196 goto err_delete; 1197 } 1198 svsk->sk_reclen &= 0x7fffffff; 1199 dprintk("svc: TCP record, %d bytes\n", svsk->sk_reclen); 1200 if (svsk->sk_reclen > serv->sv_max_mesg) { 1201 if (net_ratelimit()) 1202 printk(KERN_NOTICE "RPC: bad TCP reclen 0x%08lx" 1203 " (large)\n", 1204 (unsigned long) svsk->sk_reclen); 1205 goto err_delete; 1206 } 1207 } 1208 1209 /* Check whether enough data is available */ 1210 len = svc_recv_available(svsk); 1211 if (len < 0) 1212 goto error; 1213 1214 if (len < svsk->sk_reclen) { 1215 dprintk("svc: incomplete TCP record (%d of %d)\n", 1216 len, svsk->sk_reclen); 1217 svc_sock_received(svsk); 1218 return -EAGAIN; /* record not complete */ 1219 } 1220 len = svsk->sk_reclen; 1221 set_bit(SK_DATA, &svsk->sk_flags); 1222 1223 vec = rqstp->rq_vec; 1224 vec[0] = rqstp->rq_arg.head[0]; 1225 vlen = PAGE_SIZE; 1226 pnum = 1; 1227 while (vlen < len) { 1228 vec[pnum].iov_base = page_address(rqstp->rq_pages[pnum]); 1229 vec[pnum].iov_len = PAGE_SIZE; 1230 pnum++; 1231 vlen += PAGE_SIZE; 1232 } 1233 rqstp->rq_respages = &rqstp->rq_pages[pnum]; 1234 1235 /* Now receive data */ 1236 len = svc_recvfrom(rqstp, vec, pnum, len); 1237 if (len < 0) 1238 goto error; 1239 1240 dprintk("svc: TCP complete record (%d bytes)\n", len); 1241 rqstp->rq_arg.len = len; 1242 rqstp->rq_arg.page_base = 0; 1243 if (len <= rqstp->rq_arg.head[0].iov_len) { 1244 rqstp->rq_arg.head[0].iov_len = len; 1245 rqstp->rq_arg.page_len = 0; 1246 } else { 1247 rqstp->rq_arg.page_len = len - rqstp->rq_arg.head[0].iov_len; 1248 } 1249 1250 rqstp->rq_skbuff = NULL; 1251 rqstp->rq_prot = IPPROTO_TCP; 1252 1253 /* Reset TCP read info */ 1254 svsk->sk_reclen = 0; 1255 svsk->sk_tcplen = 0; 1256 1257 svc_sock_received(svsk); 1258 if (serv->sv_stats) 1259 serv->sv_stats->nettcpcnt++; 1260 1261 return len; 1262 1263 err_delete: 1264 svc_delete_socket(svsk); 1265 return -EAGAIN; 1266 1267 error: 1268 if (len == -EAGAIN) { 1269 dprintk("RPC: TCP recvfrom got EAGAIN\n"); 1270 svc_sock_received(svsk); 1271 } else { 1272 printk(KERN_NOTICE "%s: recvfrom returned errno %d\n", 1273 svsk->sk_server->sv_name, -len); 1274 goto err_delete; 1275 } 1276 1277 return len; 1278 } 1279 1280 /* 1281 * Send out data on TCP socket. 1282 */ 1283 static int 1284 svc_tcp_sendto(struct svc_rqst *rqstp) 1285 { 1286 struct xdr_buf *xbufp = &rqstp->rq_res; 1287 int sent; 1288 __be32 reclen; 1289 1290 /* Set up the first element of the reply kvec. 1291 * Any other kvecs that may be in use have been taken 1292 * care of by the server implementation itself. 
1293 */ 1294 reclen = htonl(0x80000000|((xbufp->len ) - 4)); 1295 memcpy(xbufp->head[0].iov_base, &reclen, 4); 1296 1297 if (test_bit(SK_DEAD, &rqstp->rq_sock->sk_flags)) 1298 return -ENOTCONN; 1299 1300 sent = svc_sendto(rqstp, &rqstp->rq_res); 1301 if (sent != xbufp->len) { 1302 printk(KERN_NOTICE "rpc-srv/tcp: %s: %s %d when sending %d bytes - shutting down socket\n", 1303 rqstp->rq_sock->sk_server->sv_name, 1304 (sent<0)?"got error":"sent only", 1305 sent, xbufp->len); 1306 set_bit(SK_CLOSE, &rqstp->rq_sock->sk_flags); 1307 svc_sock_enqueue(rqstp->rq_sock); 1308 sent = -EAGAIN; 1309 } 1310 return sent; 1311 } 1312 1313 static void 1314 svc_tcp_init(struct svc_sock *svsk) 1315 { 1316 struct sock *sk = svsk->sk_sk; 1317 struct tcp_sock *tp = tcp_sk(sk); 1318 1319 svsk->sk_recvfrom = svc_tcp_recvfrom; 1320 svsk->sk_sendto = svc_tcp_sendto; 1321 1322 if (sk->sk_state == TCP_LISTEN) { 1323 dprintk("setting up TCP socket for listening\n"); 1324 sk->sk_data_ready = svc_tcp_listen_data_ready; 1325 set_bit(SK_CONN, &svsk->sk_flags); 1326 } else { 1327 dprintk("setting up TCP socket for reading\n"); 1328 sk->sk_state_change = svc_tcp_state_change; 1329 sk->sk_data_ready = svc_tcp_data_ready; 1330 sk->sk_write_space = svc_write_space; 1331 1332 svsk->sk_reclen = 0; 1333 svsk->sk_tcplen = 0; 1334 1335 tp->nonagle = 1; /* disable Nagle's algorithm */ 1336 1337 /* initialise setting must have enough space to 1338 * receive and respond to one request. 1339 * svc_tcp_recvfrom will re-adjust if necessary 1340 */ 1341 svc_sock_setbufsize(svsk->sk_sock, 1342 3 * svsk->sk_server->sv_max_mesg, 1343 3 * svsk->sk_server->sv_max_mesg); 1344 1345 set_bit(SK_CHNGBUF, &svsk->sk_flags); 1346 set_bit(SK_DATA, &svsk->sk_flags); 1347 if (sk->sk_state != TCP_ESTABLISHED) 1348 set_bit(SK_CLOSE, &svsk->sk_flags); 1349 } 1350 } 1351 1352 void 1353 svc_sock_update_bufs(struct svc_serv *serv) 1354 { 1355 /* 1356 * The number of server threads has changed. Update 1357 * rcvbuf and sndbuf accordingly on all sockets 1358 */ 1359 struct list_head *le; 1360 1361 spin_lock_bh(&serv->sv_lock); 1362 list_for_each(le, &serv->sv_permsocks) { 1363 struct svc_sock *svsk = 1364 list_entry(le, struct svc_sock, sk_list); 1365 set_bit(SK_CHNGBUF, &svsk->sk_flags); 1366 } 1367 list_for_each(le, &serv->sv_tempsocks) { 1368 struct svc_sock *svsk = 1369 list_entry(le, struct svc_sock, sk_list); 1370 set_bit(SK_CHNGBUF, &svsk->sk_flags); 1371 } 1372 spin_unlock_bh(&serv->sv_lock); 1373 } 1374 1375 /* 1376 * Receive the next request on any socket. This code is carefully 1377 * organised not to touch any cachelines in the shared svc_serv 1378 * structure, only cachelines in the local svc_pool. 1379 */ 1380 int 1381 svc_recv(struct svc_rqst *rqstp, long timeout) 1382 { 1383 struct svc_sock *svsk = NULL; 1384 struct svc_serv *serv = rqstp->rq_server; 1385 struct svc_pool *pool = rqstp->rq_pool; 1386 int len, i; 1387 int pages; 1388 struct xdr_buf *arg; 1389 DECLARE_WAITQUEUE(wait, current); 1390 1391 dprintk("svc: server %p waiting for data (to = %ld)\n", 1392 rqstp, timeout); 1393 1394 if (rqstp->rq_sock) 1395 printk(KERN_ERR 1396 "svc_recv: service %p, socket not NULL!\n", 1397 rqstp); 1398 if (waitqueue_active(&rqstp->rq_wait)) 1399 printk(KERN_ERR 1400 "svc_recv: service %p, wait queue active!\n", 1401 rqstp); 1402 1403 1404 /* now allocate needed pages. 
	pages = (serv->sv_max_mesg + PAGE_SIZE) / PAGE_SIZE;
	for (i=0; i < pages ; i++)
		while (rqstp->rq_pages[i] == NULL) {
			struct page *p = alloc_page(GFP_KERNEL);
			if (!p)
				schedule_timeout_uninterruptible(msecs_to_jiffies(500));
			rqstp->rq_pages[i] = p;
		}
	rqstp->rq_pages[i++] = NULL; /* this might be seen in nfs_read_actor */
	BUG_ON(pages >= RPCSVC_MAXPAGES);

	/* Make arg->head point to first page and arg->pages point to rest */
	arg = &rqstp->rq_arg;
	arg->head[0].iov_base = page_address(rqstp->rq_pages[0]);
	arg->head[0].iov_len = PAGE_SIZE;
	arg->pages = rqstp->rq_pages + 1;
	arg->page_base = 0;
	/* save at least one page for response */
	arg->page_len = (pages-2)*PAGE_SIZE;
	arg->len = (pages-1)*PAGE_SIZE;
	arg->tail[0].iov_len = 0;

	try_to_freeze();
	cond_resched();
	if (signalled())
		return -EINTR;

	spin_lock_bh(&pool->sp_lock);
	if ((svsk = svc_sock_dequeue(pool)) != NULL) {
		rqstp->rq_sock = svsk;
		atomic_inc(&svsk->sk_inuse);
		rqstp->rq_reserved = serv->sv_max_mesg;
		atomic_add(rqstp->rq_reserved, &svsk->sk_reserved);
	} else {
		/* No data pending. Go to sleep */
		svc_thread_enqueue(pool, rqstp);

		/*
		 * We have to be able to interrupt this wait
		 * to bring down the daemons ...
		 */
		set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&rqstp->rq_wait, &wait);
		spin_unlock_bh(&pool->sp_lock);

		schedule_timeout(timeout);

		try_to_freeze();

		spin_lock_bh(&pool->sp_lock);
		remove_wait_queue(&rqstp->rq_wait, &wait);

		if (!(svsk = rqstp->rq_sock)) {
			svc_thread_dequeue(pool, rqstp);
			spin_unlock_bh(&pool->sp_lock);
			dprintk("svc: server %p, no data yet\n", rqstp);
			return signalled()? -EINTR : -EAGAIN;
		}
	}
	spin_unlock_bh(&pool->sp_lock);

	dprintk("svc: server %p, pool %u, socket %p, inuse=%d\n",
		 rqstp, pool->sp_id, svsk, atomic_read(&svsk->sk_inuse));
	len = svsk->sk_recvfrom(rqstp);
	dprintk("svc: got len=%d\n", len);

	/* No data, incomplete (TCP) read, or accept() */
	if (len == 0 || len == -EAGAIN) {
		rqstp->rq_res.len = 0;
		svc_sock_release(rqstp);
		return -EAGAIN;
	}
	svsk->sk_lastrecv = get_seconds();
	clear_bit(SK_OLD, &svsk->sk_flags);

	rqstp->rq_secure = svc_port_is_privileged(svc_addr(rqstp));
	rqstp->rq_chandle.defer = svc_defer;

	if (serv->sv_stats)
		serv->sv_stats->netcnt++;
	return len;
}

/*
 * Drop request
 */
void
svc_drop(struct svc_rqst *rqstp)
{
	dprintk("svc: socket %p dropped request\n", rqstp->rq_sock);
	svc_sock_release(rqstp);
}

/*
 * Return reply to client.
 */
int
svc_send(struct svc_rqst *rqstp)
{
	struct svc_sock	*svsk;
	int		len;
	struct xdr_buf	*xb;

	if ((svsk = rqstp->rq_sock) == NULL) {
		printk(KERN_WARNING "NULL socket pointer in %s:%d\n",
				__FILE__, __LINE__);
		return -EFAULT;
	}

	/* release the receive skb before sending the reply */
	svc_release_skb(rqstp);

	/* calculate over-all length */
	xb = & rqstp->rq_res;
	xb->len = xb->head[0].iov_len +
		xb->page_len +
		xb->tail[0].iov_len;

	/* Grab svsk->sk_mutex to serialize outgoing data. */
	mutex_lock(&svsk->sk_mutex);
	if (test_bit(SK_DEAD, &svsk->sk_flags))
		len = -ENOTCONN;
	else
		len = svsk->sk_sendto(rqstp);
	mutex_unlock(&svsk->sk_mutex);
	svc_sock_release(rqstp);

	if (len == -ECONNREFUSED || len == -ENOTCONN || len == -EAGAIN)
		return 0;
	return len;
}

/*
 * Timer function to close old temporary sockets, using
 * a mark-and-sweep algorithm.
 */
static void
svc_age_temp_sockets(unsigned long closure)
{
	struct svc_serv *serv = (struct svc_serv *)closure;
	struct svc_sock *svsk;
	struct list_head *le, *next;
	LIST_HEAD(to_be_aged);

	dprintk("svc_age_temp_sockets\n");

	if (!spin_trylock_bh(&serv->sv_lock)) {
		/* busy, try again 1 sec later */
		dprintk("svc_age_temp_sockets: busy\n");
		mod_timer(&serv->sv_temptimer, jiffies + HZ);
		return;
	}

	list_for_each_safe(le, next, &serv->sv_tempsocks) {
		svsk = list_entry(le, struct svc_sock, sk_list);

		if (!test_and_set_bit(SK_OLD, &svsk->sk_flags))
			continue;
		if (atomic_read(&svsk->sk_inuse) || test_bit(SK_BUSY, &svsk->sk_flags))
			continue;
		atomic_inc(&svsk->sk_inuse);
		list_move(le, &to_be_aged);
		set_bit(SK_CLOSE, &svsk->sk_flags);
		set_bit(SK_DETACHED, &svsk->sk_flags);
	}
	spin_unlock_bh(&serv->sv_lock);

	while (!list_empty(&to_be_aged)) {
		le = to_be_aged.next;
		/* fiddling the sk_list node is safe 'cos we're SK_DETACHED */
		list_del_init(le);
		svsk = list_entry(le, struct svc_sock, sk_list);

		dprintk("queuing svsk %p for closing, %lu seconds old\n",
			svsk, get_seconds() - svsk->sk_lastrecv);

		/* a thread will dequeue and close it soon */
		svc_sock_enqueue(svsk);
		svc_sock_put(svsk);
	}

	mod_timer(&serv->sv_temptimer, jiffies + svc_conn_age_period * HZ);
}

/*
 * Initialize socket for RPC use and create svc_sock struct
 * XXX: May want to setsockopt SO_SNDBUF and SO_RCVBUF.
 */
1592 */ 1593 static struct svc_sock *svc_setup_socket(struct svc_serv *serv, 1594 struct socket *sock, 1595 int *errp, int flags) 1596 { 1597 struct svc_sock *svsk; 1598 struct sock *inet; 1599 int pmap_register = !(flags & SVC_SOCK_ANONYMOUS); 1600 int is_temporary = flags & SVC_SOCK_TEMPORARY; 1601 1602 dprintk("svc: svc_setup_socket %p\n", sock); 1603 if (!(svsk = kzalloc(sizeof(*svsk), GFP_KERNEL))) { 1604 *errp = -ENOMEM; 1605 return NULL; 1606 } 1607 1608 inet = sock->sk; 1609 1610 /* Register socket with portmapper */ 1611 if (*errp >= 0 && pmap_register) 1612 *errp = svc_register(serv, inet->sk_protocol, 1613 ntohs(inet_sk(inet)->sport)); 1614 1615 if (*errp < 0) { 1616 kfree(svsk); 1617 return NULL; 1618 } 1619 1620 set_bit(SK_BUSY, &svsk->sk_flags); 1621 inet->sk_user_data = svsk; 1622 svsk->sk_sock = sock; 1623 svsk->sk_sk = inet; 1624 svsk->sk_ostate = inet->sk_state_change; 1625 svsk->sk_odata = inet->sk_data_ready; 1626 svsk->sk_owspace = inet->sk_write_space; 1627 svsk->sk_server = serv; 1628 atomic_set(&svsk->sk_inuse, 1); 1629 svsk->sk_lastrecv = get_seconds(); 1630 spin_lock_init(&svsk->sk_defer_lock); 1631 INIT_LIST_HEAD(&svsk->sk_deferred); 1632 INIT_LIST_HEAD(&svsk->sk_ready); 1633 mutex_init(&svsk->sk_mutex); 1634 1635 /* Initialize the socket */ 1636 if (sock->type == SOCK_DGRAM) 1637 svc_udp_init(svsk); 1638 else 1639 svc_tcp_init(svsk); 1640 1641 spin_lock_bh(&serv->sv_lock); 1642 if (is_temporary) { 1643 set_bit(SK_TEMP, &svsk->sk_flags); 1644 list_add(&svsk->sk_list, &serv->sv_tempsocks); 1645 serv->sv_tmpcnt++; 1646 if (serv->sv_temptimer.function == NULL) { 1647 /* setup timer to age temp sockets */ 1648 setup_timer(&serv->sv_temptimer, svc_age_temp_sockets, 1649 (unsigned long)serv); 1650 mod_timer(&serv->sv_temptimer, 1651 jiffies + svc_conn_age_period * HZ); 1652 } 1653 } else { 1654 clear_bit(SK_TEMP, &svsk->sk_flags); 1655 list_add(&svsk->sk_list, &serv->sv_permsocks); 1656 } 1657 spin_unlock_bh(&serv->sv_lock); 1658 1659 dprintk("svc: svc_setup_socket created %p (inet %p)\n", 1660 svsk, svsk->sk_sk); 1661 1662 return svsk; 1663 } 1664 1665 int svc_addsock(struct svc_serv *serv, 1666 int fd, 1667 char *name_return, 1668 int *proto) 1669 { 1670 int err = 0; 1671 struct socket *so = sockfd_lookup(fd, &err); 1672 struct svc_sock *svsk = NULL; 1673 1674 if (!so) 1675 return err; 1676 if (so->sk->sk_family != AF_INET) 1677 err = -EAFNOSUPPORT; 1678 else if (so->sk->sk_protocol != IPPROTO_TCP && 1679 so->sk->sk_protocol != IPPROTO_UDP) 1680 err = -EPROTONOSUPPORT; 1681 else if (so->state > SS_UNCONNECTED) 1682 err = -EISCONN; 1683 else { 1684 svsk = svc_setup_socket(serv, so, &err, SVC_SOCK_DEFAULTS); 1685 if (svsk) { 1686 svc_sock_received(svsk); 1687 err = 0; 1688 } 1689 } 1690 if (err) { 1691 sockfd_put(so); 1692 return err; 1693 } 1694 if (proto) *proto = so->sk->sk_protocol; 1695 return one_sock_name(name_return, svsk); 1696 } 1697 EXPORT_SYMBOL_GPL(svc_addsock); 1698 1699 /* 1700 * Create socket for RPC service. 
1701 */ 1702 static int svc_create_socket(struct svc_serv *serv, int protocol, 1703 struct sockaddr *sin, int len, int flags) 1704 { 1705 struct svc_sock *svsk; 1706 struct socket *sock; 1707 int error; 1708 int type; 1709 char buf[RPC_MAX_ADDRBUFLEN]; 1710 1711 dprintk("svc: svc_create_socket(%s, %d, %s)\n", 1712 serv->sv_program->pg_name, protocol, 1713 __svc_print_addr(sin, buf, sizeof(buf))); 1714 1715 if (protocol != IPPROTO_UDP && protocol != IPPROTO_TCP) { 1716 printk(KERN_WARNING "svc: only UDP and TCP " 1717 "sockets supported\n"); 1718 return -EINVAL; 1719 } 1720 type = (protocol == IPPROTO_UDP)? SOCK_DGRAM : SOCK_STREAM; 1721 1722 error = sock_create_kern(sin->sa_family, type, protocol, &sock); 1723 if (error < 0) 1724 return error; 1725 1726 svc_reclassify_socket(sock); 1727 1728 if (type == SOCK_STREAM) 1729 sock->sk->sk_reuse = 1; /* allow address reuse */ 1730 error = kernel_bind(sock, sin, len); 1731 if (error < 0) 1732 goto bummer; 1733 1734 if (protocol == IPPROTO_TCP) { 1735 if ((error = kernel_listen(sock, 64)) < 0) 1736 goto bummer; 1737 } 1738 1739 if ((svsk = svc_setup_socket(serv, sock, &error, flags)) != NULL) { 1740 svc_sock_received(svsk); 1741 return ntohs(inet_sk(svsk->sk_sk)->sport); 1742 } 1743 1744 bummer: 1745 dprintk("svc: svc_create_socket error = %d\n", -error); 1746 sock_release(sock); 1747 return error; 1748 } 1749 1750 /* 1751 * Remove a dead socket 1752 */ 1753 static void 1754 svc_delete_socket(struct svc_sock *svsk) 1755 { 1756 struct svc_serv *serv; 1757 struct sock *sk; 1758 1759 dprintk("svc: svc_delete_socket(%p)\n", svsk); 1760 1761 serv = svsk->sk_server; 1762 sk = svsk->sk_sk; 1763 1764 sk->sk_state_change = svsk->sk_ostate; 1765 sk->sk_data_ready = svsk->sk_odata; 1766 sk->sk_write_space = svsk->sk_owspace; 1767 1768 spin_lock_bh(&serv->sv_lock); 1769 1770 if (!test_and_set_bit(SK_DETACHED, &svsk->sk_flags)) 1771 list_del_init(&svsk->sk_list); 1772 /* 1773 * We used to delete the svc_sock from whichever list 1774 * it's sk_ready node was on, but we don't actually 1775 * need to. This is because the only time we're called 1776 * while still attached to a queue, the queue itself 1777 * is about to be destroyed (in svc_destroy). 
1778 */ 1779 if (!test_and_set_bit(SK_DEAD, &svsk->sk_flags)) { 1780 BUG_ON(atomic_read(&svsk->sk_inuse)<2); 1781 atomic_dec(&svsk->sk_inuse); 1782 if (test_bit(SK_TEMP, &svsk->sk_flags)) 1783 serv->sv_tmpcnt--; 1784 } 1785 1786 spin_unlock_bh(&serv->sv_lock); 1787 } 1788 1789 void svc_close_socket(struct svc_sock *svsk) 1790 { 1791 set_bit(SK_CLOSE, &svsk->sk_flags); 1792 if (test_and_set_bit(SK_BUSY, &svsk->sk_flags)) 1793 /* someone else will have to effect the close */ 1794 return; 1795 1796 atomic_inc(&svsk->sk_inuse); 1797 svc_delete_socket(svsk); 1798 clear_bit(SK_BUSY, &svsk->sk_flags); 1799 svc_sock_put(svsk); 1800 } 1801 1802 /** 1803 * svc_makesock - Make a socket for nfsd and lockd 1804 * @serv: RPC server structure 1805 * @protocol: transport protocol to use 1806 * @port: port to use 1807 * @flags: requested socket characteristics 1808 * 1809 */ 1810 int svc_makesock(struct svc_serv *serv, int protocol, unsigned short port, 1811 int flags) 1812 { 1813 struct sockaddr_in sin = { 1814 .sin_family = AF_INET, 1815 .sin_addr.s_addr = INADDR_ANY, 1816 .sin_port = htons(port), 1817 }; 1818 1819 dprintk("svc: creating socket proto = %d\n", protocol); 1820 return svc_create_socket(serv, protocol, (struct sockaddr *) &sin, 1821 sizeof(sin), flags); 1822 } 1823 1824 /* 1825 * Handle defer and revisit of requests 1826 */ 1827 1828 static void svc_revisit(struct cache_deferred_req *dreq, int too_many) 1829 { 1830 struct svc_deferred_req *dr = container_of(dreq, struct svc_deferred_req, handle); 1831 struct svc_sock *svsk; 1832 1833 if (too_many) { 1834 svc_sock_put(dr->svsk); 1835 kfree(dr); 1836 return; 1837 } 1838 dprintk("revisit queued\n"); 1839 svsk = dr->svsk; 1840 dr->svsk = NULL; 1841 spin_lock_bh(&svsk->sk_defer_lock); 1842 list_add(&dr->handle.recent, &svsk->sk_deferred); 1843 spin_unlock_bh(&svsk->sk_defer_lock); 1844 set_bit(SK_DEFERRED, &svsk->sk_flags); 1845 svc_sock_enqueue(svsk); 1846 svc_sock_put(svsk); 1847 } 1848 1849 static struct cache_deferred_req * 1850 svc_defer(struct cache_req *req) 1851 { 1852 struct svc_rqst *rqstp = container_of(req, struct svc_rqst, rq_chandle); 1853 int size = sizeof(struct svc_deferred_req) + (rqstp->rq_arg.len); 1854 struct svc_deferred_req *dr; 1855 1856 if (rqstp->rq_arg.page_len) 1857 return NULL; /* if more than a page, give up FIXME */ 1858 if (rqstp->rq_deferred) { 1859 dr = rqstp->rq_deferred; 1860 rqstp->rq_deferred = NULL; 1861 } else { 1862 int skip = rqstp->rq_arg.len - rqstp->rq_arg.head[0].iov_len; 1863 /* FIXME maybe discard if size too large */ 1864 dr = kmalloc(size, GFP_KERNEL); 1865 if (dr == NULL) 1866 return NULL; 1867 1868 dr->handle.owner = rqstp->rq_server; 1869 dr->prot = rqstp->rq_prot; 1870 memcpy(&dr->addr, &rqstp->rq_addr, rqstp->rq_addrlen); 1871 dr->addrlen = rqstp->rq_addrlen; 1872 dr->daddr = rqstp->rq_daddr; 1873 dr->argslen = rqstp->rq_arg.len >> 2; 1874 memcpy(dr->args, rqstp->rq_arg.head[0].iov_base-skip, dr->argslen<<2); 1875 } 1876 atomic_inc(&rqstp->rq_sock->sk_inuse); 1877 dr->svsk = rqstp->rq_sock; 1878 1879 dr->handle.revisit = svc_revisit; 1880 return &dr->handle; 1881 } 1882 1883 /* 1884 * recv data from a deferred request into an active one 1885 */ 1886 static int svc_deferred_recv(struct svc_rqst *rqstp) 1887 { 1888 struct svc_deferred_req *dr = rqstp->rq_deferred; 1889 1890 rqstp->rq_arg.head[0].iov_base = dr->args; 1891 rqstp->rq_arg.head[0].iov_len = dr->argslen<<2; 1892 rqstp->rq_arg.page_len = 0; 1893 rqstp->rq_arg.len = dr->argslen<<2; 1894 rqstp->rq_prot = dr->prot; 1895 

/*
 * recv data from a deferred request into an active one
 */
static int svc_deferred_recv(struct svc_rqst *rqstp)
{
	struct svc_deferred_req *dr = rqstp->rq_deferred;

	rqstp->rq_arg.head[0].iov_base = dr->args;
	rqstp->rq_arg.head[0].iov_len = dr->argslen<<2;
	rqstp->rq_arg.page_len = 0;
	rqstp->rq_arg.len = dr->argslen<<2;
	rqstp->rq_prot        = dr->prot;
	memcpy(&rqstp->rq_addr, &dr->addr, dr->addrlen);
	rqstp->rq_addrlen     = dr->addrlen;
	rqstp->rq_daddr       = dr->daddr;
	rqstp->rq_respages    = rqstp->rq_pages;
	return dr->argslen<<2;
}


static struct svc_deferred_req *svc_deferred_dequeue(struct svc_sock *svsk)
{
	struct svc_deferred_req *dr = NULL;

	if (!test_bit(SK_DEFERRED, &svsk->sk_flags))
		return NULL;
	spin_lock_bh(&svsk->sk_defer_lock);
	clear_bit(SK_DEFERRED, &svsk->sk_flags);
	if (!list_empty(&svsk->sk_deferred)) {
		dr = list_entry(svsk->sk_deferred.next,
				struct svc_deferred_req,
				handle.recent);
		list_del_init(&dr->handle.recent);
		set_bit(SK_DEFERRED, &svsk->sk_flags);
	}
	spin_unlock_bh(&svsk->sk_defer_lock);
	return dr;
}