/*
 * linux/net/sunrpc/svcsock.c
 *
 * These are the RPC server socket internals.
 *
 * The server scheduling algorithm does not always distribute the load
 * evenly when servicing a single client. May need to modify the
 * svc_sock_enqueue procedure...
 *
 * TCP support is largely untested and may be a little slow. The problem
 * is that we currently do two separate recvfrom's, one for the 4-byte
 * record length, and the second for the actual record. This could possibly
 * be improved by always reading a minimum size of around 100 bytes and
 * tucking any superfluous bytes away in a temporary store. Still, that
 * leaves write requests out in the rain. An alternative may be to peek at
 * the first skb in the queue, and if it matches the next TCP sequence
 * number, to extract the record marker. Yuck.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/net.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <net/sock.h>
#include <net/checksum.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/tcp_states.h>
#include <asm/uaccess.h>
#include <asm/ioctls.h>

#include <linux/sunrpc/types.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/stats.h>

/* SMP locking strategy:
 *
 *	svc_pool->sp_lock protects most of the fields of that pool.
 *	svc_serv->sv_lock protects sv_tempsocks, sv_permsocks, sv_tmpcnt.
 *	When both need to be taken (rare), svc_serv->sv_lock is first.
 *	BKL protects svc_serv->sv_nrthread.
 *	svc_sock->sk_defer_lock protects the svc_sock->sk_deferred list.
 *	svc_sock->sk_flags.SK_BUSY prevents a svc_sock being enqueued multiply.
 *
 *	Some flags can be set to certain values at any time
 *	provided that certain rules are followed:
 *
 *	SK_CONN, SK_DATA, can be set or cleared at any time.
 *		After a set, svc_sock_enqueue must be called.
 *		After a clear, the socket must be read/accepted;
 *		 if this succeeds, it must be set again.
 *	SK_CLOSE can be set at any time. It is never cleared.
 *	sk_inuse contains a bias of '1' until SK_DEAD is set.
 *		 So when sk_inuse hits zero, we know the socket is dead
 *		 and no-one is using it.
 *	SK_DEAD can only be set while SK_BUSY is held, which ensures
 *		 no other thread will be using the socket or will try to
 *		 set SK_DEAD.
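 *
 *	As an illustration of the SK_CONN/SK_DATA rule above (a sketch
 *	only; try_to_receive() is a stand-in for the real recvfrom path,
 *	not a function in this file):
 *
 *		clear_bit(SK_DATA, &svsk->sk_flags);
 *		len = try_to_receive(svsk);
 *		if (len > 0) {
 *			set_bit(SK_DATA, &svsk->sk_flags);
 *			svc_sock_enqueue(svsk);
 *		}
 *
 *	The bit is cleared before the read attempt and set again (with
 *	another enqueue) whenever the attempt actually produced data,
 *	so anything queued behind what was just read is noticed on the
 *	next pass rather than being forgotten.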
73 * 74 */ 75 76 #define RPCDBG_FACILITY RPCDBG_SVCSOCK 77 78 79 static struct svc_sock *svc_setup_socket(struct svc_serv *, struct socket *, 80 int *errp, int flags); 81 static void svc_delete_socket(struct svc_sock *svsk); 82 static void svc_udp_data_ready(struct sock *, int); 83 static int svc_udp_recvfrom(struct svc_rqst *); 84 static int svc_udp_sendto(struct svc_rqst *); 85 static void svc_close_socket(struct svc_sock *svsk); 86 87 static struct svc_deferred_req *svc_deferred_dequeue(struct svc_sock *svsk); 88 static int svc_deferred_recv(struct svc_rqst *rqstp); 89 static struct cache_deferred_req *svc_defer(struct cache_req *req); 90 91 /* apparently the "standard" is that clients close 92 * idle connections after 5 minutes, servers after 93 * 6 minutes 94 * http://www.connectathon.org/talks96/nfstcp.pdf 95 */ 96 static int svc_conn_age_period = 6*60; 97 98 #ifdef CONFIG_DEBUG_LOCK_ALLOC 99 static struct lock_class_key svc_key[2]; 100 static struct lock_class_key svc_slock_key[2]; 101 102 static inline void svc_reclassify_socket(struct socket *sock) 103 { 104 struct sock *sk = sock->sk; 105 BUG_ON(sk->sk_lock.owner != NULL); 106 switch (sk->sk_family) { 107 case AF_INET: 108 sock_lock_init_class_and_name(sk, "slock-AF_INET-NFSD", 109 &svc_slock_key[0], "sk_lock-AF_INET-NFSD", &svc_key[0]); 110 break; 111 112 case AF_INET6: 113 sock_lock_init_class_and_name(sk, "slock-AF_INET6-NFSD", 114 &svc_slock_key[1], "sk_lock-AF_INET6-NFSD", &svc_key[1]); 115 break; 116 117 default: 118 BUG(); 119 } 120 } 121 #else 122 static inline void svc_reclassify_socket(struct socket *sock) 123 { 124 } 125 #endif 126 127 static char *__svc_print_addr(struct sockaddr *addr, char *buf, size_t len) 128 { 129 switch (addr->sa_family) { 130 case AF_INET: 131 snprintf(buf, len, "%u.%u.%u.%u, port=%u", 132 NIPQUAD(((struct sockaddr_in *) addr)->sin_addr), 133 htons(((struct sockaddr_in *) addr)->sin_port)); 134 break; 135 136 case AF_INET6: 137 snprintf(buf, len, "%x:%x:%x:%x:%x:%x:%x:%x, port=%u", 138 NIP6(((struct sockaddr_in6 *) addr)->sin6_addr), 139 htons(((struct sockaddr_in6 *) addr)->sin6_port)); 140 break; 141 142 default: 143 snprintf(buf, len, "unknown address type: %d", addr->sa_family); 144 break; 145 } 146 return buf; 147 } 148 149 /** 150 * svc_print_addr - Format rq_addr field for printing 151 * @rqstp: svc_rqst struct containing address to print 152 * @buf: target buffer for formatted address 153 * @len: length of target buffer 154 * 155 */ 156 char *svc_print_addr(struct svc_rqst *rqstp, char *buf, size_t len) 157 { 158 return __svc_print_addr(svc_addr(rqstp), buf, len); 159 } 160 EXPORT_SYMBOL_GPL(svc_print_addr); 161 162 /* 163 * Queue up an idle server thread. Must have pool->sp_lock held. 164 * Note: this is really a stack rather than a queue, so that we only 165 * use as many different threads as we need, and the rest don't pollute 166 * the cache. 167 */ 168 static inline void 169 svc_thread_enqueue(struct svc_pool *pool, struct svc_rqst *rqstp) 170 { 171 list_add(&rqstp->rq_list, &pool->sp_threads); 172 } 173 174 /* 175 * Dequeue an nfsd thread. Must have pool->sp_lock held. 
176 */ 177 static inline void 178 svc_thread_dequeue(struct svc_pool *pool, struct svc_rqst *rqstp) 179 { 180 list_del(&rqstp->rq_list); 181 } 182 183 /* 184 * Release an skbuff after use 185 */ 186 static inline void 187 svc_release_skb(struct svc_rqst *rqstp) 188 { 189 struct sk_buff *skb = rqstp->rq_skbuff; 190 struct svc_deferred_req *dr = rqstp->rq_deferred; 191 192 if (skb) { 193 rqstp->rq_skbuff = NULL; 194 195 dprintk("svc: service %p, releasing skb %p\n", rqstp, skb); 196 skb_free_datagram(rqstp->rq_sock->sk_sk, skb); 197 } 198 if (dr) { 199 rqstp->rq_deferred = NULL; 200 kfree(dr); 201 } 202 } 203 204 /* 205 * Any space to write? 206 */ 207 static inline unsigned long 208 svc_sock_wspace(struct svc_sock *svsk) 209 { 210 int wspace; 211 212 if (svsk->sk_sock->type == SOCK_STREAM) 213 wspace = sk_stream_wspace(svsk->sk_sk); 214 else 215 wspace = sock_wspace(svsk->sk_sk); 216 217 return wspace; 218 } 219 220 /* 221 * Queue up a socket with data pending. If there are idle nfsd 222 * processes, wake 'em up. 223 * 224 */ 225 static void 226 svc_sock_enqueue(struct svc_sock *svsk) 227 { 228 struct svc_serv *serv = svsk->sk_server; 229 struct svc_pool *pool; 230 struct svc_rqst *rqstp; 231 int cpu; 232 233 if (!(svsk->sk_flags & 234 ( (1<<SK_CONN)|(1<<SK_DATA)|(1<<SK_CLOSE)|(1<<SK_DEFERRED)) )) 235 return; 236 if (test_bit(SK_DEAD, &svsk->sk_flags)) 237 return; 238 239 cpu = get_cpu(); 240 pool = svc_pool_for_cpu(svsk->sk_server, cpu); 241 put_cpu(); 242 243 spin_lock_bh(&pool->sp_lock); 244 245 if (!list_empty(&pool->sp_threads) && 246 !list_empty(&pool->sp_sockets)) 247 printk(KERN_ERR 248 "svc_sock_enqueue: threads and sockets both waiting??\n"); 249 250 if (test_bit(SK_DEAD, &svsk->sk_flags)) { 251 /* Don't enqueue dead sockets */ 252 dprintk("svc: socket %p is dead, not enqueued\n", svsk->sk_sk); 253 goto out_unlock; 254 } 255 256 /* Mark socket as busy. It will remain in this state until the 257 * server has processed all pending data and put the socket back 258 * on the idle list. We update SK_BUSY atomically because 259 * it also guards against trying to enqueue the svc_sock twice. 
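	 * test_and_set_bit() below is what makes that guard atomic: the
	 * first caller to flip SK_BUSY on owns the enqueue, and every
	 * later caller sees the bit already set and backs out, so a
	 * svc_sock can never sit on a pool's sp_sockets list twice.
	 * (Descriptive note only; this is exactly the behaviour of the
	 * code that follows.)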
260 */ 261 if (test_and_set_bit(SK_BUSY, &svsk->sk_flags)) { 262 /* Don't enqueue socket while already enqueued */ 263 dprintk("svc: socket %p busy, not enqueued\n", svsk->sk_sk); 264 goto out_unlock; 265 } 266 BUG_ON(svsk->sk_pool != NULL); 267 svsk->sk_pool = pool; 268 269 set_bit(SOCK_NOSPACE, &svsk->sk_sock->flags); 270 if (((atomic_read(&svsk->sk_reserved) + serv->sv_max_mesg)*2 271 > svc_sock_wspace(svsk)) 272 && !test_bit(SK_CLOSE, &svsk->sk_flags) 273 && !test_bit(SK_CONN, &svsk->sk_flags)) { 274 /* Don't enqueue while not enough space for reply */ 275 dprintk("svc: socket %p no space, %d*2 > %ld, not enqueued\n", 276 svsk->sk_sk, atomic_read(&svsk->sk_reserved)+serv->sv_max_mesg, 277 svc_sock_wspace(svsk)); 278 svsk->sk_pool = NULL; 279 clear_bit(SK_BUSY, &svsk->sk_flags); 280 goto out_unlock; 281 } 282 clear_bit(SOCK_NOSPACE, &svsk->sk_sock->flags); 283 284 285 if (!list_empty(&pool->sp_threads)) { 286 rqstp = list_entry(pool->sp_threads.next, 287 struct svc_rqst, 288 rq_list); 289 dprintk("svc: socket %p served by daemon %p\n", 290 svsk->sk_sk, rqstp); 291 svc_thread_dequeue(pool, rqstp); 292 if (rqstp->rq_sock) 293 printk(KERN_ERR 294 "svc_sock_enqueue: server %p, rq_sock=%p!\n", 295 rqstp, rqstp->rq_sock); 296 rqstp->rq_sock = svsk; 297 atomic_inc(&svsk->sk_inuse); 298 rqstp->rq_reserved = serv->sv_max_mesg; 299 atomic_add(rqstp->rq_reserved, &svsk->sk_reserved); 300 BUG_ON(svsk->sk_pool != pool); 301 wake_up(&rqstp->rq_wait); 302 } else { 303 dprintk("svc: socket %p put into queue\n", svsk->sk_sk); 304 list_add_tail(&svsk->sk_ready, &pool->sp_sockets); 305 BUG_ON(svsk->sk_pool != pool); 306 } 307 308 out_unlock: 309 spin_unlock_bh(&pool->sp_lock); 310 } 311 312 /* 313 * Dequeue the first socket. Must be called with the pool->sp_lock held. 314 */ 315 static inline struct svc_sock * 316 svc_sock_dequeue(struct svc_pool *pool) 317 { 318 struct svc_sock *svsk; 319 320 if (list_empty(&pool->sp_sockets)) 321 return NULL; 322 323 svsk = list_entry(pool->sp_sockets.next, 324 struct svc_sock, sk_ready); 325 list_del_init(&svsk->sk_ready); 326 327 dprintk("svc: socket %p dequeued, inuse=%d\n", 328 svsk->sk_sk, atomic_read(&svsk->sk_inuse)); 329 330 return svsk; 331 } 332 333 /* 334 * Having read something from a socket, check whether it 335 * needs to be re-enqueued. 336 * Note: SK_DATA only gets cleared when a read-attempt finds 337 * no (or insufficient) data. 338 */ 339 static inline void 340 svc_sock_received(struct svc_sock *svsk) 341 { 342 svsk->sk_pool = NULL; 343 clear_bit(SK_BUSY, &svsk->sk_flags); 344 svc_sock_enqueue(svsk); 345 } 346 347 348 /** 349 * svc_reserve - change the space reserved for the reply to a request. 350 * @rqstp: The request in question 351 * @space: new max space to reserve 352 * 353 * Each request reserves some space on the output queue of the socket 354 * to make sure the reply fits. This function reduces that reserved 355 * space to be the amount of space used already, plus @space. 356 * 357 */ 358 void svc_reserve(struct svc_rqst *rqstp, int space) 359 { 360 space += rqstp->rq_res.head[0].iov_len; 361 362 if (space < rqstp->rq_reserved) { 363 struct svc_sock *svsk = rqstp->rq_sock; 364 atomic_sub((rqstp->rq_reserved - space), &svsk->sk_reserved); 365 rqstp->rq_reserved = space; 366 367 svc_sock_enqueue(svsk); 368 } 369 } 370 371 /* 372 * Release a socket after use. 373 */ 374 static inline void 375 svc_sock_put(struct svc_sock *svsk) 376 { 377 if (atomic_dec_and_test(&svsk->sk_inuse)) { 378 BUG_ON(! 
test_bit(SK_DEAD, &svsk->sk_flags)); 379 380 dprintk("svc: releasing dead socket\n"); 381 if (svsk->sk_sock->file) 382 sockfd_put(svsk->sk_sock); 383 else 384 sock_release(svsk->sk_sock); 385 if (svsk->sk_info_authunix != NULL) 386 svcauth_unix_info_release(svsk->sk_info_authunix); 387 kfree(svsk); 388 } 389 } 390 391 static void 392 svc_sock_release(struct svc_rqst *rqstp) 393 { 394 struct svc_sock *svsk = rqstp->rq_sock; 395 396 svc_release_skb(rqstp); 397 398 svc_free_res_pages(rqstp); 399 rqstp->rq_res.page_len = 0; 400 rqstp->rq_res.page_base = 0; 401 402 403 /* Reset response buffer and release 404 * the reservation. 405 * But first, check that enough space was reserved 406 * for the reply, otherwise we have a bug! 407 */ 408 if ((rqstp->rq_res.len) > rqstp->rq_reserved) 409 printk(KERN_ERR "RPC request reserved %d but used %d\n", 410 rqstp->rq_reserved, 411 rqstp->rq_res.len); 412 413 rqstp->rq_res.head[0].iov_len = 0; 414 svc_reserve(rqstp, 0); 415 rqstp->rq_sock = NULL; 416 417 svc_sock_put(svsk); 418 } 419 420 /* 421 * External function to wake up a server waiting for data 422 * This really only makes sense for services like lockd 423 * which have exactly one thread anyway. 424 */ 425 void 426 svc_wake_up(struct svc_serv *serv) 427 { 428 struct svc_rqst *rqstp; 429 unsigned int i; 430 struct svc_pool *pool; 431 432 for (i = 0; i < serv->sv_nrpools; i++) { 433 pool = &serv->sv_pools[i]; 434 435 spin_lock_bh(&pool->sp_lock); 436 if (!list_empty(&pool->sp_threads)) { 437 rqstp = list_entry(pool->sp_threads.next, 438 struct svc_rqst, 439 rq_list); 440 dprintk("svc: daemon %p woken up.\n", rqstp); 441 /* 442 svc_thread_dequeue(pool, rqstp); 443 rqstp->rq_sock = NULL; 444 */ 445 wake_up(&rqstp->rq_wait); 446 } 447 spin_unlock_bh(&pool->sp_lock); 448 } 449 } 450 451 union svc_pktinfo_u { 452 struct in_pktinfo pkti; 453 struct in6_pktinfo pkti6; 454 }; 455 456 static void svc_set_cmsg_data(struct svc_rqst *rqstp, struct cmsghdr *cmh) 457 { 458 switch (rqstp->rq_sock->sk_sk->sk_family) { 459 case AF_INET: { 460 struct in_pktinfo *pki = CMSG_DATA(cmh); 461 462 cmh->cmsg_level = SOL_IP; 463 cmh->cmsg_type = IP_PKTINFO; 464 pki->ipi_ifindex = 0; 465 pki->ipi_spec_dst.s_addr = rqstp->rq_daddr.addr.s_addr; 466 cmh->cmsg_len = CMSG_LEN(sizeof(*pki)); 467 } 468 break; 469 470 case AF_INET6: { 471 struct in6_pktinfo *pki = CMSG_DATA(cmh); 472 473 cmh->cmsg_level = SOL_IPV6; 474 cmh->cmsg_type = IPV6_PKTINFO; 475 pki->ipi6_ifindex = 0; 476 ipv6_addr_copy(&pki->ipi6_addr, 477 &rqstp->rq_daddr.addr6); 478 cmh->cmsg_len = CMSG_LEN(sizeof(*pki)); 479 } 480 break; 481 } 482 return; 483 } 484 485 /* 486 * Generic sendto routine 487 */ 488 static int 489 svc_sendto(struct svc_rqst *rqstp, struct xdr_buf *xdr) 490 { 491 struct svc_sock *svsk = rqstp->rq_sock; 492 struct socket *sock = svsk->sk_sock; 493 int slen; 494 char buffer[CMSG_SPACE(sizeof(union svc_pktinfo_u))]; 495 struct cmsghdr *cmh = (struct cmsghdr *)buffer; 496 int len = 0; 497 int result; 498 int size; 499 struct page **ppage = xdr->pages; 500 size_t base = xdr->page_base; 501 unsigned int pglen = xdr->page_len; 502 unsigned int flags = MSG_MORE; 503 char buf[RPC_MAX_ADDRBUFLEN]; 504 505 slen = xdr->len; 506 507 if (rqstp->rq_prot == IPPROTO_UDP) { 508 struct msghdr msg = { 509 .msg_name = &rqstp->rq_addr, 510 .msg_namelen = rqstp->rq_addrlen, 511 .msg_control = cmh, 512 .msg_controllen = sizeof(buffer), 513 .msg_flags = MSG_MORE, 514 }; 515 516 svc_set_cmsg_data(rqstp, cmh); 517 518 if (sock_sendmsg(sock, &msg, 0) < 0) 519 goto out; 520 } 
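	/*
	 * Descriptive note: the rest of this routine walks the xdr_buf in
	 * its three parts -- head[0] first, then the page array, then
	 * tail[0].  MSG_MORE stays set on each kernel_sendpage() call
	 * until the chunk that carries the final byte of the slen-byte
	 * reply, so the transport may coalesce the pieces into fewer
	 * segments on the wire.
	 */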
521 522 /* send head */ 523 if (slen == xdr->head[0].iov_len) 524 flags = 0; 525 len = kernel_sendpage(sock, rqstp->rq_respages[0], 0, 526 xdr->head[0].iov_len, flags); 527 if (len != xdr->head[0].iov_len) 528 goto out; 529 slen -= xdr->head[0].iov_len; 530 if (slen == 0) 531 goto out; 532 533 /* send page data */ 534 size = PAGE_SIZE - base < pglen ? PAGE_SIZE - base : pglen; 535 while (pglen > 0) { 536 if (slen == size) 537 flags = 0; 538 result = kernel_sendpage(sock, *ppage, base, size, flags); 539 if (result > 0) 540 len += result; 541 if (result != size) 542 goto out; 543 slen -= size; 544 pglen -= size; 545 size = PAGE_SIZE < pglen ? PAGE_SIZE : pglen; 546 base = 0; 547 ppage++; 548 } 549 /* send tail */ 550 if (xdr->tail[0].iov_len) { 551 result = kernel_sendpage(sock, rqstp->rq_respages[0], 552 ((unsigned long)xdr->tail[0].iov_base) 553 & (PAGE_SIZE-1), 554 xdr->tail[0].iov_len, 0); 555 556 if (result > 0) 557 len += result; 558 } 559 out: 560 dprintk("svc: socket %p sendto([%p %Zu... ], %d) = %d (addr %s)\n", 561 rqstp->rq_sock, xdr->head[0].iov_base, xdr->head[0].iov_len, 562 xdr->len, len, svc_print_addr(rqstp, buf, sizeof(buf))); 563 564 return len; 565 } 566 567 /* 568 * Report socket names for nfsdfs 569 */ 570 static int one_sock_name(char *buf, struct svc_sock *svsk) 571 { 572 int len; 573 574 switch(svsk->sk_sk->sk_family) { 575 case AF_INET: 576 len = sprintf(buf, "ipv4 %s %u.%u.%u.%u %d\n", 577 svsk->sk_sk->sk_protocol==IPPROTO_UDP? 578 "udp" : "tcp", 579 NIPQUAD(inet_sk(svsk->sk_sk)->rcv_saddr), 580 inet_sk(svsk->sk_sk)->num); 581 break; 582 default: 583 len = sprintf(buf, "*unknown-%d*\n", 584 svsk->sk_sk->sk_family); 585 } 586 return len; 587 } 588 589 int 590 svc_sock_names(char *buf, struct svc_serv *serv, char *toclose) 591 { 592 struct svc_sock *svsk, *closesk = NULL; 593 int len = 0; 594 595 if (!serv) 596 return 0; 597 spin_lock_bh(&serv->sv_lock); 598 list_for_each_entry(svsk, &serv->sv_permsocks, sk_list) { 599 int onelen = one_sock_name(buf+len, svsk); 600 if (toclose && strcmp(toclose, buf+len) == 0) 601 closesk = svsk; 602 else 603 len += onelen; 604 } 605 spin_unlock_bh(&serv->sv_lock); 606 if (closesk) 607 /* Should unregister with portmap, but you cannot 608 * unregister just one protocol... 609 */ 610 svc_close_socket(closesk); 611 else if (toclose) 612 return -ENOENT; 613 return len; 614 } 615 EXPORT_SYMBOL(svc_sock_names); 616 617 /* 618 * Check input queue length 619 */ 620 static int 621 svc_recv_available(struct svc_sock *svsk) 622 { 623 struct socket *sock = svsk->sk_sock; 624 int avail, err; 625 626 err = kernel_sock_ioctl(sock, TIOCINQ, (unsigned long) &avail); 627 628 return (err >= 0)? avail : err; 629 } 630 631 /* 632 * Generic recvfrom routine. 633 */ 634 static int 635 svc_recvfrom(struct svc_rqst *rqstp, struct kvec *iov, int nr, int buflen) 636 { 637 struct svc_sock *svsk = rqstp->rq_sock; 638 struct msghdr msg = { 639 .msg_flags = MSG_DONTWAIT, 640 }; 641 int len; 642 643 len = kernel_recvmsg(svsk->sk_sock, &msg, iov, nr, buflen, 644 msg.msg_flags); 645 646 /* sock_recvmsg doesn't fill in the name/namelen, so we must.. 
647 */ 648 memcpy(&rqstp->rq_addr, &svsk->sk_remote, svsk->sk_remotelen); 649 rqstp->rq_addrlen = svsk->sk_remotelen; 650 651 dprintk("svc: socket %p recvfrom(%p, %Zu) = %d\n", 652 svsk, iov[0].iov_base, iov[0].iov_len, len); 653 654 return len; 655 } 656 657 /* 658 * Set socket snd and rcv buffer lengths 659 */ 660 static inline void 661 svc_sock_setbufsize(struct socket *sock, unsigned int snd, unsigned int rcv) 662 { 663 #if 0 664 mm_segment_t oldfs; 665 oldfs = get_fs(); set_fs(KERNEL_DS); 666 sock_setsockopt(sock, SOL_SOCKET, SO_SNDBUF, 667 (char*)&snd, sizeof(snd)); 668 sock_setsockopt(sock, SOL_SOCKET, SO_RCVBUF, 669 (char*)&rcv, sizeof(rcv)); 670 #else 671 /* sock_setsockopt limits use to sysctl_?mem_max, 672 * which isn't acceptable. Until that is made conditional 673 * on not having CAP_SYS_RESOURCE or similar, we go direct... 674 * DaveM said I could! 675 */ 676 lock_sock(sock->sk); 677 sock->sk->sk_sndbuf = snd * 2; 678 sock->sk->sk_rcvbuf = rcv * 2; 679 sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK|SOCK_RCVBUF_LOCK; 680 release_sock(sock->sk); 681 #endif 682 } 683 /* 684 * INET callback when data has been received on the socket. 685 */ 686 static void 687 svc_udp_data_ready(struct sock *sk, int count) 688 { 689 struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data; 690 691 if (svsk) { 692 dprintk("svc: socket %p(inet %p), count=%d, busy=%d\n", 693 svsk, sk, count, test_bit(SK_BUSY, &svsk->sk_flags)); 694 set_bit(SK_DATA, &svsk->sk_flags); 695 svc_sock_enqueue(svsk); 696 } 697 if (sk->sk_sleep && waitqueue_active(sk->sk_sleep)) 698 wake_up_interruptible(sk->sk_sleep); 699 } 700 701 /* 702 * INET callback when space is newly available on the socket. 703 */ 704 static void 705 svc_write_space(struct sock *sk) 706 { 707 struct svc_sock *svsk = (struct svc_sock *)(sk->sk_user_data); 708 709 if (svsk) { 710 dprintk("svc: socket %p(inet %p), write_space busy=%d\n", 711 svsk, sk, test_bit(SK_BUSY, &svsk->sk_flags)); 712 svc_sock_enqueue(svsk); 713 } 714 715 if (sk->sk_sleep && waitqueue_active(sk->sk_sleep)) { 716 dprintk("RPC svc_write_space: someone sleeping on %p\n", 717 svsk); 718 wake_up_interruptible(sk->sk_sleep); 719 } 720 } 721 722 static inline void svc_udp_get_dest_address(struct svc_rqst *rqstp, 723 struct cmsghdr *cmh) 724 { 725 switch (rqstp->rq_sock->sk_sk->sk_family) { 726 case AF_INET: { 727 struct in_pktinfo *pki = CMSG_DATA(cmh); 728 rqstp->rq_daddr.addr.s_addr = pki->ipi_spec_dst.s_addr; 729 break; 730 } 731 case AF_INET6: { 732 struct in6_pktinfo *pki = CMSG_DATA(cmh); 733 ipv6_addr_copy(&rqstp->rq_daddr.addr6, &pki->ipi6_addr); 734 break; 735 } 736 } 737 } 738 739 /* 740 * Receive a datagram from a UDP socket. 741 */ 742 static int 743 svc_udp_recvfrom(struct svc_rqst *rqstp) 744 { 745 struct svc_sock *svsk = rqstp->rq_sock; 746 struct svc_serv *serv = svsk->sk_server; 747 struct sk_buff *skb; 748 char buffer[CMSG_SPACE(sizeof(union svc_pktinfo_u))]; 749 struct cmsghdr *cmh = (struct cmsghdr *)buffer; 750 int err, len; 751 struct msghdr msg = { 752 .msg_name = svc_addr(rqstp), 753 .msg_control = cmh, 754 .msg_controllen = sizeof(buffer), 755 .msg_flags = MSG_DONTWAIT, 756 }; 757 758 if (test_and_clear_bit(SK_CHNGBUF, &svsk->sk_flags)) 759 /* udp sockets need large rcvbuf as all pending 760 * requests are still in that buffer. sndbuf must 761 * also be large enough that there is enough space 762 * for one reply per thread. 
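		 * (For example, with 8 nfsd threads and a sv_max_mesg of
		 * 32KB, the call below asks for (8+3) * 32KB = 352KB of
		 * socket buffer space in each direction, and
		 * svc_sock_setbufsize() then doubles that when it sets
		 * sk_sndbuf/sk_rcvbuf.  The figures are illustrative
		 * only.)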
		 * We count all threads
		 * rather than threads in a particular pool, which
		 * provides an upper bound on the number of threads
		 * which will access the socket.
		 */
		svc_sock_setbufsize(svsk->sk_sock,
				    (serv->sv_nrthreads+3) * serv->sv_max_mesg,
				    (serv->sv_nrthreads+3) * serv->sv_max_mesg);

	if ((rqstp->rq_deferred = svc_deferred_dequeue(svsk))) {
		svc_sock_received(svsk);
		return svc_deferred_recv(rqstp);
	}

	if (test_bit(SK_CLOSE, &svsk->sk_flags)) {
		svc_delete_socket(svsk);
		return 0;
	}

	clear_bit(SK_DATA, &svsk->sk_flags);
	while ((err = kernel_recvmsg(svsk->sk_sock, &msg, NULL,
				     0, 0, MSG_PEEK | MSG_DONTWAIT)) < 0 ||
	       (skb = skb_recv_datagram(svsk->sk_sk, 0, 1, &err)) == NULL) {
		if (err == -EAGAIN) {
			svc_sock_received(svsk);
			return err;
		}
		/* possibly an icmp error */
		dprintk("svc: recvfrom returned error %d\n", -err);
	}
	rqstp->rq_addrlen = sizeof(rqstp->rq_addr);
	if (skb->tstamp.off_sec == 0) {
		struct timeval tv;

		tv.tv_sec = xtime.tv_sec;
		tv.tv_usec = xtime.tv_nsec / NSEC_PER_USEC;
		skb_set_timestamp(skb, &tv);
		/* Don't enable netstamp, sunrpc doesn't
		   need that much accuracy */
	}
	skb_get_timestamp(skb, &svsk->sk_sk->sk_stamp);
	set_bit(SK_DATA, &svsk->sk_flags); /* there may be more data... */

	/*
	 * Maybe more packets - kick another thread ASAP.
	 */
	svc_sock_received(svsk);

	len = skb->len - sizeof(struct udphdr);
	rqstp->rq_arg.len = len;

	rqstp->rq_prot = IPPROTO_UDP;

	if (cmh->cmsg_level != IPPROTO_IP ||
	    cmh->cmsg_type != IP_PKTINFO) {
		if (net_ratelimit())
			printk("rpcsvc: received unknown control message:"
			       "%d/%d\n",
			       cmh->cmsg_level, cmh->cmsg_type);
		skb_free_datagram(svsk->sk_sk, skb);
		return 0;
	}
	svc_udp_get_dest_address(rqstp, cmh);

	if (skb_is_nonlinear(skb)) {
		/* we have to copy */
		local_bh_disable();
		if (csum_partial_copy_to_xdr(&rqstp->rq_arg, skb)) {
			local_bh_enable();
			/* checksum error */
			skb_free_datagram(svsk->sk_sk, skb);
			return 0;
		}
		local_bh_enable();
		skb_free_datagram(svsk->sk_sk, skb);
	} else {
		/* we can use it in-place */
		rqstp->rq_arg.head[0].iov_base = skb->data + sizeof(struct udphdr);
		rqstp->rq_arg.head[0].iov_len = len;
		if (skb_checksum_complete(skb)) {
			skb_free_datagram(svsk->sk_sk, skb);
			return 0;
		}
		rqstp->rq_skbuff = skb;
	}

	rqstp->rq_arg.page_base = 0;
	if (len <= rqstp->rq_arg.head[0].iov_len) {
		rqstp->rq_arg.head[0].iov_len = len;
		rqstp->rq_arg.page_len = 0;
		rqstp->rq_respages = rqstp->rq_pages+1;
	} else {
		rqstp->rq_arg.page_len = len - rqstp->rq_arg.head[0].iov_len;
		rqstp->rq_respages = rqstp->rq_pages + 1 +
			(rqstp->rq_arg.page_len + PAGE_SIZE - 1)/ PAGE_SIZE;
	}

	if (serv->sv_stats)
		serv->sv_stats->netudpcnt++;

	return len;
}

static int
svc_udp_sendto(struct svc_rqst *rqstp)
{
	int		error;

	error = svc_sendto(rqstp, &rqstp->rq_res);
	if (error == -ECONNREFUSED)
		/* ICMP error on earlier request.
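		 * Presumably a cached ICMP error (e.g. port unreachable
		 * provoked by an earlier reply) is returned by the first
		 * sendmsg; retrying once flushes it so the real datagram
		 * can go out.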
		 */
		error = svc_sendto(rqstp, &rqstp->rq_res);

	return error;
}

static void
svc_udp_init(struct svc_sock *svsk)
{
	int	one = 1;
	mm_segment_t oldfs;

	svsk->sk_sk->sk_data_ready = svc_udp_data_ready;
	svsk->sk_sk->sk_write_space = svc_write_space;
	svsk->sk_recvfrom = svc_udp_recvfrom;
	svsk->sk_sendto = svc_udp_sendto;

	/* initialise setting must have enough space to
	 * receive and respond to one request.
	 * svc_udp_recvfrom will re-adjust if necessary
	 */
	svc_sock_setbufsize(svsk->sk_sock,
			    3 * svsk->sk_server->sv_max_mesg,
			    3 * svsk->sk_server->sv_max_mesg);

	set_bit(SK_DATA, &svsk->sk_flags); /* might have come in before data_ready set up */
	set_bit(SK_CHNGBUF, &svsk->sk_flags);

	oldfs = get_fs();
	set_fs(KERNEL_DS);
	/* make sure we get destination address info */
	svsk->sk_sock->ops->setsockopt(svsk->sk_sock, IPPROTO_IP, IP_PKTINFO,
				       (char __user *)&one, sizeof(one));
	set_fs(oldfs);
}

/*
 * A data_ready event on a listening socket means there's a connection
 * pending. Do not use state_change as a substitute for it.
 */
static void
svc_tcp_listen_data_ready(struct sock *sk, int count_unused)
{
	struct svc_sock	*svsk = (struct svc_sock *)sk->sk_user_data;

	dprintk("svc: socket %p TCP (listen) state change %d\n",
		sk, sk->sk_state);

	/*
	 * This callback may be called twice when a new connection
	 * is established as a child socket inherits everything
	 * from a parent LISTEN socket.
	 * 1) the data_ready method of the parent socket will be called
	 *    when one of the child sockets becomes ESTABLISHED.
	 * 2) the data_ready method of the child socket may be called
	 *    when it receives data before the socket is accepted.
	 * In case 2, we should ignore it silently.
	 */
	if (sk->sk_state == TCP_LISTEN) {
		if (svsk) {
			set_bit(SK_CONN, &svsk->sk_flags);
			svc_sock_enqueue(svsk);
		} else
			printk("svc: socket %p: no user data\n", sk);
	}

	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
		wake_up_interruptible_all(sk->sk_sleep);
}

/*
 * A state change on a connected socket means it's dying or dead.
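 * (For instance, the peer closing its end moves the socket towards
 * TCP_CLOSE_WAIT/TCP_CLOSE.)  The handler below tears nothing down
 * itself; it only marks the svc_sock with SK_CLOSE and enqueues it,
 * and a server thread later notices the flag in svc_tcp_recvfrom()
 * and calls svc_delete_socket().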
944 */ 945 static void 946 svc_tcp_state_change(struct sock *sk) 947 { 948 struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data; 949 950 dprintk("svc: socket %p TCP (connected) state change %d (svsk %p)\n", 951 sk, sk->sk_state, sk->sk_user_data); 952 953 if (!svsk) 954 printk("svc: socket %p: no user data\n", sk); 955 else { 956 set_bit(SK_CLOSE, &svsk->sk_flags); 957 svc_sock_enqueue(svsk); 958 } 959 if (sk->sk_sleep && waitqueue_active(sk->sk_sleep)) 960 wake_up_interruptible_all(sk->sk_sleep); 961 } 962 963 static void 964 svc_tcp_data_ready(struct sock *sk, int count) 965 { 966 struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data; 967 968 dprintk("svc: socket %p TCP data ready (svsk %p)\n", 969 sk, sk->sk_user_data); 970 if (svsk) { 971 set_bit(SK_DATA, &svsk->sk_flags); 972 svc_sock_enqueue(svsk); 973 } 974 if (sk->sk_sleep && waitqueue_active(sk->sk_sleep)) 975 wake_up_interruptible(sk->sk_sleep); 976 } 977 978 static inline int svc_port_is_privileged(struct sockaddr *sin) 979 { 980 switch (sin->sa_family) { 981 case AF_INET: 982 return ntohs(((struct sockaddr_in *)sin)->sin_port) 983 < PROT_SOCK; 984 case AF_INET6: 985 return ntohs(((struct sockaddr_in6 *)sin)->sin6_port) 986 < PROT_SOCK; 987 default: 988 return 0; 989 } 990 } 991 992 /* 993 * Accept a TCP connection 994 */ 995 static void 996 svc_tcp_accept(struct svc_sock *svsk) 997 { 998 struct sockaddr_storage addr; 999 struct sockaddr *sin = (struct sockaddr *) &addr; 1000 struct svc_serv *serv = svsk->sk_server; 1001 struct socket *sock = svsk->sk_sock; 1002 struct socket *newsock; 1003 struct svc_sock *newsvsk; 1004 int err, slen; 1005 char buf[RPC_MAX_ADDRBUFLEN]; 1006 1007 dprintk("svc: tcp_accept %p sock %p\n", svsk, sock); 1008 if (!sock) 1009 return; 1010 1011 clear_bit(SK_CONN, &svsk->sk_flags); 1012 err = kernel_accept(sock, &newsock, O_NONBLOCK); 1013 if (err < 0) { 1014 if (err == -ENOMEM) 1015 printk(KERN_WARNING "%s: no more sockets!\n", 1016 serv->sv_name); 1017 else if (err != -EAGAIN && net_ratelimit()) 1018 printk(KERN_WARNING "%s: accept failed (err %d)!\n", 1019 serv->sv_name, -err); 1020 return; 1021 } 1022 1023 set_bit(SK_CONN, &svsk->sk_flags); 1024 svc_sock_enqueue(svsk); 1025 1026 err = kernel_getpeername(newsock, sin, &slen); 1027 if (err < 0) { 1028 if (net_ratelimit()) 1029 printk(KERN_WARNING "%s: peername failed (err %d)!\n", 1030 serv->sv_name, -err); 1031 goto failed; /* aborted connection or whatever */ 1032 } 1033 1034 /* Ideally, we would want to reject connections from unauthorized 1035 * hosts here, but when we get encryption, the IP of the host won't 1036 * tell us anything. For now just warn about unpriv connections. 1037 */ 1038 if (!svc_port_is_privileged(sin)) { 1039 dprintk(KERN_WARNING 1040 "%s: connect from unprivileged port: %s\n", 1041 serv->sv_name, 1042 __svc_print_addr(sin, buf, sizeof(buf))); 1043 } 1044 dprintk("%s: connect from %s\n", serv->sv_name, 1045 __svc_print_addr(sin, buf, sizeof(buf))); 1046 1047 /* make sure that a write doesn't block forever when 1048 * low on memory 1049 */ 1050 newsock->sk->sk_sndtimeo = HZ*30; 1051 1052 if (!(newsvsk = svc_setup_socket(serv, newsock, &err, 1053 (SVC_SOCK_ANONYMOUS | SVC_SOCK_TEMPORARY)))) 1054 goto failed; 1055 memcpy(&newsvsk->sk_remote, sin, slen); 1056 newsvsk->sk_remotelen = slen; 1057 1058 svc_sock_received(newsvsk); 1059 1060 /* make sure that we don't have too many active connections. 1061 * If we have, something must be dropped. 
	 *
	 * There's no point in trying to do random drop here for
	 * DoS prevention. NFS clients do one reconnect every 15
	 * seconds; an attacker can easily beat that.
	 *
	 * The only somewhat efficient mechanism would be to drop
	 * old connections from the same IP first. But right now
	 * we don't even record the client IP in svc_sock.
	 */
	if (serv->sv_tmpcnt > (serv->sv_nrthreads+3)*20) {
		struct svc_sock *svsk = NULL;
		spin_lock_bh(&serv->sv_lock);
		if (!list_empty(&serv->sv_tempsocks)) {
			if (net_ratelimit()) {
				/* Try to help the admin */
				printk(KERN_NOTICE "%s: too many open TCP "
				       "sockets, consider increasing the "
				       "number of nfsd threads\n",
				       serv->sv_name);
				printk(KERN_NOTICE
				       "%s: last TCP connect from %s\n",
				       serv->sv_name, buf);
			}
			/*
			 * Always select the oldest socket. It's not fair,
			 * but so is life
			 */
			svsk = list_entry(serv->sv_tempsocks.prev,
					  struct svc_sock,
					  sk_list);
			set_bit(SK_CLOSE, &svsk->sk_flags);
			atomic_inc(&svsk->sk_inuse);
		}
		spin_unlock_bh(&serv->sv_lock);

		if (svsk) {
			svc_sock_enqueue(svsk);
			svc_sock_put(svsk);
		}

	}

	if (serv->sv_stats)
		serv->sv_stats->nettcpconn++;

	return;

failed:
	sock_release(newsock);
	return;
}

/*
 * Receive data from a TCP socket.
 */
static int
svc_tcp_recvfrom(struct svc_rqst *rqstp)
{
	struct svc_sock	*svsk = rqstp->rq_sock;
	struct svc_serv	*serv = svsk->sk_server;
	int		len;
	struct kvec *vec;
	int pnum, vlen;

	dprintk("svc: tcp_recv %p data %d conn %d close %d\n",
		svsk, test_bit(SK_DATA, &svsk->sk_flags),
		test_bit(SK_CONN, &svsk->sk_flags),
		test_bit(SK_CLOSE, &svsk->sk_flags));

	if ((rqstp->rq_deferred = svc_deferred_dequeue(svsk))) {
		svc_sock_received(svsk);
		return svc_deferred_recv(rqstp);
	}

	if (test_bit(SK_CLOSE, &svsk->sk_flags)) {
		svc_delete_socket(svsk);
		return 0;
	}

	if (svsk->sk_sk->sk_state == TCP_LISTEN) {
		svc_tcp_accept(svsk);
		svc_sock_received(svsk);
		return 0;
	}

	if (test_and_clear_bit(SK_CHNGBUF, &svsk->sk_flags))
		/* sndbuf needs to have room for one request
		 * per thread, otherwise we can stall even when the
		 * network isn't a bottleneck.
		 *
		 * We count all threads rather than threads in a
		 * particular pool, which provides an upper bound
		 * on the number of threads which will access the socket.
		 *
		 * rcvbuf just needs to be able to hold a few requests.
		 * Normally they will be removed from the queue
		 * as soon as a complete request arrives.
		 */
		svc_sock_setbufsize(svsk->sk_sock,
				    (serv->sv_nrthreads+3) * serv->sv_max_mesg,
				    3 * serv->sv_max_mesg);

	clear_bit(SK_DATA, &svsk->sk_flags);

	/* Receive data. If we haven't got the record length yet, get
	 * the next four bytes. Otherwise try to gobble up as much as
	 * possible up to the complete record length.
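	 *
	 * The four length bytes are the standard RPC-over-TCP record
	 * mark (RFC 1831): the top bit flags the last fragment of a
	 * record and the low 31 bits give the fragment length.  As an
	 * illustration, a marker read off the wire as 0x80000064 would
	 * announce a terminal fragment of 0x64 = 100 bytes, which is
	 * exactly what the ntohl()/0x80000000/0x7fffffff handling
	 * below recovers.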
1169 */ 1170 if (svsk->sk_tcplen < 4) { 1171 unsigned long want = 4 - svsk->sk_tcplen; 1172 struct kvec iov; 1173 1174 iov.iov_base = ((char *) &svsk->sk_reclen) + svsk->sk_tcplen; 1175 iov.iov_len = want; 1176 if ((len = svc_recvfrom(rqstp, &iov, 1, want)) < 0) 1177 goto error; 1178 svsk->sk_tcplen += len; 1179 1180 if (len < want) { 1181 dprintk("svc: short recvfrom while reading record length (%d of %lu)\n", 1182 len, want); 1183 svc_sock_received(svsk); 1184 return -EAGAIN; /* record header not complete */ 1185 } 1186 1187 svsk->sk_reclen = ntohl(svsk->sk_reclen); 1188 if (!(svsk->sk_reclen & 0x80000000)) { 1189 /* FIXME: technically, a record can be fragmented, 1190 * and non-terminal fragments will not have the top 1191 * bit set in the fragment length header. 1192 * But apparently no known nfs clients send fragmented 1193 * records. */ 1194 if (net_ratelimit()) 1195 printk(KERN_NOTICE "RPC: bad TCP reclen 0x%08lx" 1196 " (non-terminal)\n", 1197 (unsigned long) svsk->sk_reclen); 1198 goto err_delete; 1199 } 1200 svsk->sk_reclen &= 0x7fffffff; 1201 dprintk("svc: TCP record, %d bytes\n", svsk->sk_reclen); 1202 if (svsk->sk_reclen > serv->sv_max_mesg) { 1203 if (net_ratelimit()) 1204 printk(KERN_NOTICE "RPC: bad TCP reclen 0x%08lx" 1205 " (large)\n", 1206 (unsigned long) svsk->sk_reclen); 1207 goto err_delete; 1208 } 1209 } 1210 1211 /* Check whether enough data is available */ 1212 len = svc_recv_available(svsk); 1213 if (len < 0) 1214 goto error; 1215 1216 if (len < svsk->sk_reclen) { 1217 dprintk("svc: incomplete TCP record (%d of %d)\n", 1218 len, svsk->sk_reclen); 1219 svc_sock_received(svsk); 1220 return -EAGAIN; /* record not complete */ 1221 } 1222 len = svsk->sk_reclen; 1223 set_bit(SK_DATA, &svsk->sk_flags); 1224 1225 vec = rqstp->rq_vec; 1226 vec[0] = rqstp->rq_arg.head[0]; 1227 vlen = PAGE_SIZE; 1228 pnum = 1; 1229 while (vlen < len) { 1230 vec[pnum].iov_base = page_address(rqstp->rq_pages[pnum]); 1231 vec[pnum].iov_len = PAGE_SIZE; 1232 pnum++; 1233 vlen += PAGE_SIZE; 1234 } 1235 rqstp->rq_respages = &rqstp->rq_pages[pnum]; 1236 1237 /* Now receive data */ 1238 len = svc_recvfrom(rqstp, vec, pnum, len); 1239 if (len < 0) 1240 goto error; 1241 1242 dprintk("svc: TCP complete record (%d bytes)\n", len); 1243 rqstp->rq_arg.len = len; 1244 rqstp->rq_arg.page_base = 0; 1245 if (len <= rqstp->rq_arg.head[0].iov_len) { 1246 rqstp->rq_arg.head[0].iov_len = len; 1247 rqstp->rq_arg.page_len = 0; 1248 } else { 1249 rqstp->rq_arg.page_len = len - rqstp->rq_arg.head[0].iov_len; 1250 } 1251 1252 rqstp->rq_skbuff = NULL; 1253 rqstp->rq_prot = IPPROTO_TCP; 1254 1255 /* Reset TCP read info */ 1256 svsk->sk_reclen = 0; 1257 svsk->sk_tcplen = 0; 1258 1259 svc_sock_received(svsk); 1260 if (serv->sv_stats) 1261 serv->sv_stats->nettcpcnt++; 1262 1263 return len; 1264 1265 err_delete: 1266 svc_delete_socket(svsk); 1267 return -EAGAIN; 1268 1269 error: 1270 if (len == -EAGAIN) { 1271 dprintk("RPC: TCP recvfrom got EAGAIN\n"); 1272 svc_sock_received(svsk); 1273 } else { 1274 printk(KERN_NOTICE "%s: recvfrom returned errno %d\n", 1275 svsk->sk_server->sv_name, -len); 1276 goto err_delete; 1277 } 1278 1279 return len; 1280 } 1281 1282 /* 1283 * Send out data on TCP socket. 1284 */ 1285 static int 1286 svc_tcp_sendto(struct svc_rqst *rqstp) 1287 { 1288 struct xdr_buf *xbufp = &rqstp->rq_res; 1289 int sent; 1290 __be32 reclen; 1291 1292 /* Set up the first element of the reply kvec. 1293 * Any other kvecs that may be in use have been taken 1294 * care of by the server implementation itself. 
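	 *
	 * As a worked example (numbers illustrative only): for a reply
	 * where xbufp->len is 132 bytes including the marker, reclen
	 * below becomes htonl(0x80000000 | 128), and those four bytes
	 * overwrite the space already set aside for them at the start
	 * of head[0].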
1295 */ 1296 reclen = htonl(0x80000000|((xbufp->len ) - 4)); 1297 memcpy(xbufp->head[0].iov_base, &reclen, 4); 1298 1299 if (test_bit(SK_DEAD, &rqstp->rq_sock->sk_flags)) 1300 return -ENOTCONN; 1301 1302 sent = svc_sendto(rqstp, &rqstp->rq_res); 1303 if (sent != xbufp->len) { 1304 printk(KERN_NOTICE "rpc-srv/tcp: %s: %s %d when sending %d bytes - shutting down socket\n", 1305 rqstp->rq_sock->sk_server->sv_name, 1306 (sent<0)?"got error":"sent only", 1307 sent, xbufp->len); 1308 set_bit(SK_CLOSE, &rqstp->rq_sock->sk_flags); 1309 svc_sock_enqueue(rqstp->rq_sock); 1310 sent = -EAGAIN; 1311 } 1312 return sent; 1313 } 1314 1315 static void 1316 svc_tcp_init(struct svc_sock *svsk) 1317 { 1318 struct sock *sk = svsk->sk_sk; 1319 struct tcp_sock *tp = tcp_sk(sk); 1320 1321 svsk->sk_recvfrom = svc_tcp_recvfrom; 1322 svsk->sk_sendto = svc_tcp_sendto; 1323 1324 if (sk->sk_state == TCP_LISTEN) { 1325 dprintk("setting up TCP socket for listening\n"); 1326 sk->sk_data_ready = svc_tcp_listen_data_ready; 1327 set_bit(SK_CONN, &svsk->sk_flags); 1328 } else { 1329 dprintk("setting up TCP socket for reading\n"); 1330 sk->sk_state_change = svc_tcp_state_change; 1331 sk->sk_data_ready = svc_tcp_data_ready; 1332 sk->sk_write_space = svc_write_space; 1333 1334 svsk->sk_reclen = 0; 1335 svsk->sk_tcplen = 0; 1336 1337 tp->nonagle = 1; /* disable Nagle's algorithm */ 1338 1339 /* initialise setting must have enough space to 1340 * receive and respond to one request. 1341 * svc_tcp_recvfrom will re-adjust if necessary 1342 */ 1343 svc_sock_setbufsize(svsk->sk_sock, 1344 3 * svsk->sk_server->sv_max_mesg, 1345 3 * svsk->sk_server->sv_max_mesg); 1346 1347 set_bit(SK_CHNGBUF, &svsk->sk_flags); 1348 set_bit(SK_DATA, &svsk->sk_flags); 1349 if (sk->sk_state != TCP_ESTABLISHED) 1350 set_bit(SK_CLOSE, &svsk->sk_flags); 1351 } 1352 } 1353 1354 void 1355 svc_sock_update_bufs(struct svc_serv *serv) 1356 { 1357 /* 1358 * The number of server threads has changed. Update 1359 * rcvbuf and sndbuf accordingly on all sockets 1360 */ 1361 struct list_head *le; 1362 1363 spin_lock_bh(&serv->sv_lock); 1364 list_for_each(le, &serv->sv_permsocks) { 1365 struct svc_sock *svsk = 1366 list_entry(le, struct svc_sock, sk_list); 1367 set_bit(SK_CHNGBUF, &svsk->sk_flags); 1368 } 1369 list_for_each(le, &serv->sv_tempsocks) { 1370 struct svc_sock *svsk = 1371 list_entry(le, struct svc_sock, sk_list); 1372 set_bit(SK_CHNGBUF, &svsk->sk_flags); 1373 } 1374 spin_unlock_bh(&serv->sv_lock); 1375 } 1376 1377 /* 1378 * Receive the next request on any socket. This code is carefully 1379 * organised not to touch any cachelines in the shared svc_serv 1380 * structure, only cachelines in the local svc_pool. 1381 */ 1382 int 1383 svc_recv(struct svc_rqst *rqstp, long timeout) 1384 { 1385 struct svc_sock *svsk = NULL; 1386 struct svc_serv *serv = rqstp->rq_server; 1387 struct svc_pool *pool = rqstp->rq_pool; 1388 int len, i; 1389 int pages; 1390 struct xdr_buf *arg; 1391 DECLARE_WAITQUEUE(wait, current); 1392 1393 dprintk("svc: server %p waiting for data (to = %ld)\n", 1394 rqstp, timeout); 1395 1396 if (rqstp->rq_sock) 1397 printk(KERN_ERR 1398 "svc_recv: service %p, socket not NULL!\n", 1399 rqstp); 1400 if (waitqueue_active(&rqstp->rq_wait)) 1401 printk(KERN_ERR 1402 "svc_recv: service %p, wait queue active!\n", 1403 rqstp); 1404 1405 1406 /* now allocate needed pages. 
If we get a failure, sleep briefly */ 1407 pages = (serv->sv_max_mesg + PAGE_SIZE) / PAGE_SIZE; 1408 for (i=0; i < pages ; i++) 1409 while (rqstp->rq_pages[i] == NULL) { 1410 struct page *p = alloc_page(GFP_KERNEL); 1411 if (!p) 1412 schedule_timeout_uninterruptible(msecs_to_jiffies(500)); 1413 rqstp->rq_pages[i] = p; 1414 } 1415 rqstp->rq_pages[i++] = NULL; /* this might be seen in nfs_read_actor */ 1416 BUG_ON(pages >= RPCSVC_MAXPAGES); 1417 1418 /* Make arg->head point to first page and arg->pages point to rest */ 1419 arg = &rqstp->rq_arg; 1420 arg->head[0].iov_base = page_address(rqstp->rq_pages[0]); 1421 arg->head[0].iov_len = PAGE_SIZE; 1422 arg->pages = rqstp->rq_pages + 1; 1423 arg->page_base = 0; 1424 /* save at least one page for response */ 1425 arg->page_len = (pages-2)*PAGE_SIZE; 1426 arg->len = (pages-1)*PAGE_SIZE; 1427 arg->tail[0].iov_len = 0; 1428 1429 try_to_freeze(); 1430 cond_resched(); 1431 if (signalled()) 1432 return -EINTR; 1433 1434 spin_lock_bh(&pool->sp_lock); 1435 if ((svsk = svc_sock_dequeue(pool)) != NULL) { 1436 rqstp->rq_sock = svsk; 1437 atomic_inc(&svsk->sk_inuse); 1438 rqstp->rq_reserved = serv->sv_max_mesg; 1439 atomic_add(rqstp->rq_reserved, &svsk->sk_reserved); 1440 } else { 1441 /* No data pending. Go to sleep */ 1442 svc_thread_enqueue(pool, rqstp); 1443 1444 /* 1445 * We have to be able to interrupt this wait 1446 * to bring down the daemons ... 1447 */ 1448 set_current_state(TASK_INTERRUPTIBLE); 1449 add_wait_queue(&rqstp->rq_wait, &wait); 1450 spin_unlock_bh(&pool->sp_lock); 1451 1452 schedule_timeout(timeout); 1453 1454 try_to_freeze(); 1455 1456 spin_lock_bh(&pool->sp_lock); 1457 remove_wait_queue(&rqstp->rq_wait, &wait); 1458 1459 if (!(svsk = rqstp->rq_sock)) { 1460 svc_thread_dequeue(pool, rqstp); 1461 spin_unlock_bh(&pool->sp_lock); 1462 dprintk("svc: server %p, no data yet\n", rqstp); 1463 return signalled()? -EINTR : -EAGAIN; 1464 } 1465 } 1466 spin_unlock_bh(&pool->sp_lock); 1467 1468 dprintk("svc: server %p, pool %u, socket %p, inuse=%d\n", 1469 rqstp, pool->sp_id, svsk, atomic_read(&svsk->sk_inuse)); 1470 len = svsk->sk_recvfrom(rqstp); 1471 dprintk("svc: got len=%d\n", len); 1472 1473 /* No data, incomplete (TCP) read, or accept() */ 1474 if (len == 0 || len == -EAGAIN) { 1475 rqstp->rq_res.len = 0; 1476 svc_sock_release(rqstp); 1477 return -EAGAIN; 1478 } 1479 svsk->sk_lastrecv = get_seconds(); 1480 clear_bit(SK_OLD, &svsk->sk_flags); 1481 1482 rqstp->rq_secure = svc_port_is_privileged(svc_addr(rqstp)); 1483 rqstp->rq_chandle.defer = svc_defer; 1484 1485 if (serv->sv_stats) 1486 serv->sv_stats->netcnt++; 1487 return len; 1488 } 1489 1490 /* 1491 * Drop request 1492 */ 1493 void 1494 svc_drop(struct svc_rqst *rqstp) 1495 { 1496 dprintk("svc: socket %p dropped request\n", rqstp->rq_sock); 1497 svc_sock_release(rqstp); 1498 } 1499 1500 /* 1501 * Return reply to client. 1502 */ 1503 int 1504 svc_send(struct svc_rqst *rqstp) 1505 { 1506 struct svc_sock *svsk; 1507 int len; 1508 struct xdr_buf *xb; 1509 1510 if ((svsk = rqstp->rq_sock) == NULL) { 1511 printk(KERN_WARNING "NULL socket pointer in %s:%d\n", 1512 __FILE__, __LINE__); 1513 return -EFAULT; 1514 } 1515 1516 /* release the receive skb before sending the reply */ 1517 svc_release_skb(rqstp); 1518 1519 /* calculate over-all length */ 1520 xb = & rqstp->rq_res; 1521 xb->len = xb->head[0].iov_len + 1522 xb->page_len + 1523 xb->tail[0].iov_len; 1524 1525 /* Grab svsk->sk_mutex to serialize outgoing data. 
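	 * If two threads wrote replies to the same connected (TCP)
	 * socket at once, the byte streams of their records could
	 * interleave and the connection would be garbage from the
	 * client's point of view, so each reply is sent with the
	 * mutex held.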
*/ 1526 mutex_lock(&svsk->sk_mutex); 1527 if (test_bit(SK_DEAD, &svsk->sk_flags)) 1528 len = -ENOTCONN; 1529 else 1530 len = svsk->sk_sendto(rqstp); 1531 mutex_unlock(&svsk->sk_mutex); 1532 svc_sock_release(rqstp); 1533 1534 if (len == -ECONNREFUSED || len == -ENOTCONN || len == -EAGAIN) 1535 return 0; 1536 return len; 1537 } 1538 1539 /* 1540 * Timer function to close old temporary sockets, using 1541 * a mark-and-sweep algorithm. 1542 */ 1543 static void 1544 svc_age_temp_sockets(unsigned long closure) 1545 { 1546 struct svc_serv *serv = (struct svc_serv *)closure; 1547 struct svc_sock *svsk; 1548 struct list_head *le, *next; 1549 LIST_HEAD(to_be_aged); 1550 1551 dprintk("svc_age_temp_sockets\n"); 1552 1553 if (!spin_trylock_bh(&serv->sv_lock)) { 1554 /* busy, try again 1 sec later */ 1555 dprintk("svc_age_temp_sockets: busy\n"); 1556 mod_timer(&serv->sv_temptimer, jiffies + HZ); 1557 return; 1558 } 1559 1560 list_for_each_safe(le, next, &serv->sv_tempsocks) { 1561 svsk = list_entry(le, struct svc_sock, sk_list); 1562 1563 if (!test_and_set_bit(SK_OLD, &svsk->sk_flags)) 1564 continue; 1565 if (atomic_read(&svsk->sk_inuse) || test_bit(SK_BUSY, &svsk->sk_flags)) 1566 continue; 1567 atomic_inc(&svsk->sk_inuse); 1568 list_move(le, &to_be_aged); 1569 set_bit(SK_CLOSE, &svsk->sk_flags); 1570 set_bit(SK_DETACHED, &svsk->sk_flags); 1571 } 1572 spin_unlock_bh(&serv->sv_lock); 1573 1574 while (!list_empty(&to_be_aged)) { 1575 le = to_be_aged.next; 1576 /* fiddling the sk_list node is safe 'cos we're SK_DETACHED */ 1577 list_del_init(le); 1578 svsk = list_entry(le, struct svc_sock, sk_list); 1579 1580 dprintk("queuing svsk %p for closing, %lu seconds old\n", 1581 svsk, get_seconds() - svsk->sk_lastrecv); 1582 1583 /* a thread will dequeue and close it soon */ 1584 svc_sock_enqueue(svsk); 1585 svc_sock_put(svsk); 1586 } 1587 1588 mod_timer(&serv->sv_temptimer, jiffies + svc_conn_age_period * HZ); 1589 } 1590 1591 /* 1592 * Initialize socket for RPC use and create svc_sock struct 1593 * XXX: May want to setsockopt SO_SNDBUF and SO_RCVBUF. 
1594 */ 1595 static struct svc_sock *svc_setup_socket(struct svc_serv *serv, 1596 struct socket *sock, 1597 int *errp, int flags) 1598 { 1599 struct svc_sock *svsk; 1600 struct sock *inet; 1601 int pmap_register = !(flags & SVC_SOCK_ANONYMOUS); 1602 int is_temporary = flags & SVC_SOCK_TEMPORARY; 1603 1604 dprintk("svc: svc_setup_socket %p\n", sock); 1605 if (!(svsk = kzalloc(sizeof(*svsk), GFP_KERNEL))) { 1606 *errp = -ENOMEM; 1607 return NULL; 1608 } 1609 1610 inet = sock->sk; 1611 1612 /* Register socket with portmapper */ 1613 if (*errp >= 0 && pmap_register) 1614 *errp = svc_register(serv, inet->sk_protocol, 1615 ntohs(inet_sk(inet)->sport)); 1616 1617 if (*errp < 0) { 1618 kfree(svsk); 1619 return NULL; 1620 } 1621 1622 set_bit(SK_BUSY, &svsk->sk_flags); 1623 inet->sk_user_data = svsk; 1624 svsk->sk_sock = sock; 1625 svsk->sk_sk = inet; 1626 svsk->sk_ostate = inet->sk_state_change; 1627 svsk->sk_odata = inet->sk_data_ready; 1628 svsk->sk_owspace = inet->sk_write_space; 1629 svsk->sk_server = serv; 1630 atomic_set(&svsk->sk_inuse, 1); 1631 svsk->sk_lastrecv = get_seconds(); 1632 spin_lock_init(&svsk->sk_defer_lock); 1633 INIT_LIST_HEAD(&svsk->sk_deferred); 1634 INIT_LIST_HEAD(&svsk->sk_ready); 1635 mutex_init(&svsk->sk_mutex); 1636 1637 /* Initialize the socket */ 1638 if (sock->type == SOCK_DGRAM) 1639 svc_udp_init(svsk); 1640 else 1641 svc_tcp_init(svsk); 1642 1643 spin_lock_bh(&serv->sv_lock); 1644 if (is_temporary) { 1645 set_bit(SK_TEMP, &svsk->sk_flags); 1646 list_add(&svsk->sk_list, &serv->sv_tempsocks); 1647 serv->sv_tmpcnt++; 1648 if (serv->sv_temptimer.function == NULL) { 1649 /* setup timer to age temp sockets */ 1650 setup_timer(&serv->sv_temptimer, svc_age_temp_sockets, 1651 (unsigned long)serv); 1652 mod_timer(&serv->sv_temptimer, 1653 jiffies + svc_conn_age_period * HZ); 1654 } 1655 } else { 1656 clear_bit(SK_TEMP, &svsk->sk_flags); 1657 list_add(&svsk->sk_list, &serv->sv_permsocks); 1658 } 1659 spin_unlock_bh(&serv->sv_lock); 1660 1661 dprintk("svc: svc_setup_socket created %p (inet %p)\n", 1662 svsk, svsk->sk_sk); 1663 1664 return svsk; 1665 } 1666 1667 int svc_addsock(struct svc_serv *serv, 1668 int fd, 1669 char *name_return, 1670 int *proto) 1671 { 1672 int err = 0; 1673 struct socket *so = sockfd_lookup(fd, &err); 1674 struct svc_sock *svsk = NULL; 1675 1676 if (!so) 1677 return err; 1678 if (so->sk->sk_family != AF_INET) 1679 err = -EAFNOSUPPORT; 1680 else if (so->sk->sk_protocol != IPPROTO_TCP && 1681 so->sk->sk_protocol != IPPROTO_UDP) 1682 err = -EPROTONOSUPPORT; 1683 else if (so->state > SS_UNCONNECTED) 1684 err = -EISCONN; 1685 else { 1686 svsk = svc_setup_socket(serv, so, &err, SVC_SOCK_DEFAULTS); 1687 if (svsk) { 1688 svc_sock_received(svsk); 1689 err = 0; 1690 } 1691 } 1692 if (err) { 1693 sockfd_put(so); 1694 return err; 1695 } 1696 if (proto) *proto = so->sk->sk_protocol; 1697 return one_sock_name(name_return, svsk); 1698 } 1699 EXPORT_SYMBOL_GPL(svc_addsock); 1700 1701 /* 1702 * Create socket for RPC service. 
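 * Typically reached from svc_makesock() below -- e.g. (illustrative
 * call only, 2049 being the usual NFS port):
 *
 *	svc_makesock(serv, IPPROTO_UDP, 2049, SVC_SOCK_DEFAULTS);
 *
 * which wraps the port in a wildcard sockaddr_in and lands here with
 * protocol, sin and len filled in.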
1703 */ 1704 static int svc_create_socket(struct svc_serv *serv, int protocol, 1705 struct sockaddr *sin, int len, int flags) 1706 { 1707 struct svc_sock *svsk; 1708 struct socket *sock; 1709 int error; 1710 int type; 1711 char buf[RPC_MAX_ADDRBUFLEN]; 1712 1713 dprintk("svc: svc_create_socket(%s, %d, %s)\n", 1714 serv->sv_program->pg_name, protocol, 1715 __svc_print_addr(sin, buf, sizeof(buf))); 1716 1717 if (protocol != IPPROTO_UDP && protocol != IPPROTO_TCP) { 1718 printk(KERN_WARNING "svc: only UDP and TCP " 1719 "sockets supported\n"); 1720 return -EINVAL; 1721 } 1722 type = (protocol == IPPROTO_UDP)? SOCK_DGRAM : SOCK_STREAM; 1723 1724 error = sock_create_kern(sin->sa_family, type, protocol, &sock); 1725 if (error < 0) 1726 return error; 1727 1728 svc_reclassify_socket(sock); 1729 1730 if (type == SOCK_STREAM) 1731 sock->sk->sk_reuse = 1; /* allow address reuse */ 1732 error = kernel_bind(sock, sin, len); 1733 if (error < 0) 1734 goto bummer; 1735 1736 if (protocol == IPPROTO_TCP) { 1737 if ((error = kernel_listen(sock, 64)) < 0) 1738 goto bummer; 1739 } 1740 1741 if ((svsk = svc_setup_socket(serv, sock, &error, flags)) != NULL) { 1742 svc_sock_received(svsk); 1743 return ntohs(inet_sk(svsk->sk_sk)->sport); 1744 } 1745 1746 bummer: 1747 dprintk("svc: svc_create_socket error = %d\n", -error); 1748 sock_release(sock); 1749 return error; 1750 } 1751 1752 /* 1753 * Remove a dead socket 1754 */ 1755 static void 1756 svc_delete_socket(struct svc_sock *svsk) 1757 { 1758 struct svc_serv *serv; 1759 struct sock *sk; 1760 1761 dprintk("svc: svc_delete_socket(%p)\n", svsk); 1762 1763 serv = svsk->sk_server; 1764 sk = svsk->sk_sk; 1765 1766 sk->sk_state_change = svsk->sk_ostate; 1767 sk->sk_data_ready = svsk->sk_odata; 1768 sk->sk_write_space = svsk->sk_owspace; 1769 1770 spin_lock_bh(&serv->sv_lock); 1771 1772 if (!test_and_set_bit(SK_DETACHED, &svsk->sk_flags)) 1773 list_del_init(&svsk->sk_list); 1774 /* 1775 * We used to delete the svc_sock from whichever list 1776 * it's sk_ready node was on, but we don't actually 1777 * need to. This is because the only time we're called 1778 * while still attached to a queue, the queue itself 1779 * is about to be destroyed (in svc_destroy). 
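	 *
	 * The atomic_dec() below drops the sk_inuse bias of 1 that
	 * svc_setup_socket() established (see the locking notes at the
	 * top of this file), so once every outstanding reference does
	 * its svc_sock_put() the count reaches zero and the socket is
	 * freed.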
1780 */ 1781 if (!test_and_set_bit(SK_DEAD, &svsk->sk_flags)) { 1782 BUG_ON(atomic_read(&svsk->sk_inuse)<2); 1783 atomic_dec(&svsk->sk_inuse); 1784 if (test_bit(SK_TEMP, &svsk->sk_flags)) 1785 serv->sv_tmpcnt--; 1786 } 1787 1788 spin_unlock_bh(&serv->sv_lock); 1789 } 1790 1791 static void svc_close_socket(struct svc_sock *svsk) 1792 { 1793 set_bit(SK_CLOSE, &svsk->sk_flags); 1794 if (test_and_set_bit(SK_BUSY, &svsk->sk_flags)) 1795 /* someone else will have to effect the close */ 1796 return; 1797 1798 atomic_inc(&svsk->sk_inuse); 1799 svc_delete_socket(svsk); 1800 clear_bit(SK_BUSY, &svsk->sk_flags); 1801 svc_sock_put(svsk); 1802 } 1803 1804 void svc_force_close_socket(struct svc_sock *svsk) 1805 { 1806 set_bit(SK_CLOSE, &svsk->sk_flags); 1807 if (test_bit(SK_BUSY, &svsk->sk_flags)) { 1808 /* Waiting to be processed, but no threads left, 1809 * So just remove it from the waiting list 1810 */ 1811 list_del_init(&svsk->sk_ready); 1812 clear_bit(SK_BUSY, &svsk->sk_flags); 1813 } 1814 svc_close_socket(svsk); 1815 } 1816 1817 /** 1818 * svc_makesock - Make a socket for nfsd and lockd 1819 * @serv: RPC server structure 1820 * @protocol: transport protocol to use 1821 * @port: port to use 1822 * @flags: requested socket characteristics 1823 * 1824 */ 1825 int svc_makesock(struct svc_serv *serv, int protocol, unsigned short port, 1826 int flags) 1827 { 1828 struct sockaddr_in sin = { 1829 .sin_family = AF_INET, 1830 .sin_addr.s_addr = INADDR_ANY, 1831 .sin_port = htons(port), 1832 }; 1833 1834 dprintk("svc: creating socket proto = %d\n", protocol); 1835 return svc_create_socket(serv, protocol, (struct sockaddr *) &sin, 1836 sizeof(sin), flags); 1837 } 1838 1839 /* 1840 * Handle defer and revisit of requests 1841 */ 1842 1843 static void svc_revisit(struct cache_deferred_req *dreq, int too_many) 1844 { 1845 struct svc_deferred_req *dr = container_of(dreq, struct svc_deferred_req, handle); 1846 struct svc_sock *svsk; 1847 1848 if (too_many) { 1849 svc_sock_put(dr->svsk); 1850 kfree(dr); 1851 return; 1852 } 1853 dprintk("revisit queued\n"); 1854 svsk = dr->svsk; 1855 dr->svsk = NULL; 1856 spin_lock_bh(&svsk->sk_defer_lock); 1857 list_add(&dr->handle.recent, &svsk->sk_deferred); 1858 spin_unlock_bh(&svsk->sk_defer_lock); 1859 set_bit(SK_DEFERRED, &svsk->sk_flags); 1860 svc_sock_enqueue(svsk); 1861 svc_sock_put(svsk); 1862 } 1863 1864 static struct cache_deferred_req * 1865 svc_defer(struct cache_req *req) 1866 { 1867 struct svc_rqst *rqstp = container_of(req, struct svc_rqst, rq_chandle); 1868 int size = sizeof(struct svc_deferred_req) + (rqstp->rq_arg.len); 1869 struct svc_deferred_req *dr; 1870 1871 if (rqstp->rq_arg.page_len) 1872 return NULL; /* if more than a page, give up FIXME */ 1873 if (rqstp->rq_deferred) { 1874 dr = rqstp->rq_deferred; 1875 rqstp->rq_deferred = NULL; 1876 } else { 1877 int skip = rqstp->rq_arg.len - rqstp->rq_arg.head[0].iov_len; 1878 /* FIXME maybe discard if size too large */ 1879 dr = kmalloc(size, GFP_KERNEL); 1880 if (dr == NULL) 1881 return NULL; 1882 1883 dr->handle.owner = rqstp->rq_server; 1884 dr->prot = rqstp->rq_prot; 1885 memcpy(&dr->addr, &rqstp->rq_addr, rqstp->rq_addrlen); 1886 dr->addrlen = rqstp->rq_addrlen; 1887 dr->daddr = rqstp->rq_daddr; 1888 dr->argslen = rqstp->rq_arg.len >> 2; 1889 memcpy(dr->args, rqstp->rq_arg.head[0].iov_base-skip, dr->argslen<<2); 1890 } 1891 atomic_inc(&rqstp->rq_sock->sk_inuse); 1892 dr->svsk = rqstp->rq_sock; 1893 1894 dr->handle.revisit = svc_revisit; 1895 return &dr->handle; 1896 } 1897 1898 /* 1899 * recv data from a 
deferred request into an active one 1900 */ 1901 static int svc_deferred_recv(struct svc_rqst *rqstp) 1902 { 1903 struct svc_deferred_req *dr = rqstp->rq_deferred; 1904 1905 rqstp->rq_arg.head[0].iov_base = dr->args; 1906 rqstp->rq_arg.head[0].iov_len = dr->argslen<<2; 1907 rqstp->rq_arg.page_len = 0; 1908 rqstp->rq_arg.len = dr->argslen<<2; 1909 rqstp->rq_prot = dr->prot; 1910 memcpy(&rqstp->rq_addr, &dr->addr, dr->addrlen); 1911 rqstp->rq_addrlen = dr->addrlen; 1912 rqstp->rq_daddr = dr->daddr; 1913 rqstp->rq_respages = rqstp->rq_pages; 1914 return dr->argslen<<2; 1915 } 1916 1917 1918 static struct svc_deferred_req *svc_deferred_dequeue(struct svc_sock *svsk) 1919 { 1920 struct svc_deferred_req *dr = NULL; 1921 1922 if (!test_bit(SK_DEFERRED, &svsk->sk_flags)) 1923 return NULL; 1924 spin_lock_bh(&svsk->sk_defer_lock); 1925 clear_bit(SK_DEFERRED, &svsk->sk_flags); 1926 if (!list_empty(&svsk->sk_deferred)) { 1927 dr = list_entry(svsk->sk_deferred.next, 1928 struct svc_deferred_req, 1929 handle.recent); 1930 list_del_init(&dr->handle.recent); 1931 set_bit(SK_DEFERRED, &svsk->sk_flags); 1932 } 1933 spin_unlock_bh(&svsk->sk_defer_lock); 1934 return dr; 1935 } 1936