/*
 * linux/net/sunrpc/svc_xprt.c
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */

#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <linux/sunrpc/addr.h>
#include <linux/sunrpc/stats.h>
#include <linux/sunrpc/svc_xprt.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/xprt.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <trace/events/sunrpc.h>

#define RPCDBG_FACILITY	RPCDBG_SVCXPRT

static struct svc_deferred_req *svc_deferred_dequeue(struct svc_xprt *xprt);
static int svc_deferred_recv(struct svc_rqst *rqstp);
static struct cache_deferred_req *svc_defer(struct cache_req *req);
static void svc_age_temp_xprts(unsigned long closure);
static void svc_delete_xprt(struct svc_xprt *xprt);

/* apparently the "standard" is that clients close
 * idle connections after 5 minutes, servers after
 * 6 minutes
 *   http://www.connectathon.org/talks96/nfstcp.pdf
 */
static int svc_conn_age_period = 6*60;

/* List of registered transport classes */
static DEFINE_SPINLOCK(svc_xprt_class_lock);
static LIST_HEAD(svc_xprt_class_list);

/* SMP locking strategy:
 *
 *	svc_pool->sp_lock protects most of the fields of that pool.
 *	svc_serv->sv_lock protects sv_tempsocks, sv_permsocks, sv_tmpcnt.
 *	when both need to be taken (rare), svc_serv->sv_lock is first.
 *	The "service mutex" protects svc_serv->sv_nrthreads.
 *	svc_sock->sk_lock protects the svc_sock->sk_deferred list
 *	and the ->sk_info_authunix cache.
 *
 *	The XPT_BUSY bit in xprt->xpt_flags prevents a transport being
 *	enqueued multiple times. During normal transport processing this
 *	bit is set by svc_xprt_enqueue and cleared by svc_xprt_received.
 *	Providers should not manipulate this bit directly.
 *
 *	Some flags can be set to certain values at any time
 *	providing that certain rules are followed:
 *
 *	XPT_CONN, XPT_DATA:
 *		- Can be set or cleared at any time.
 *		- After a set, svc_xprt_enqueue must be called to enqueue
 *		  the transport for processing.
 *		- After a clear, the transport must be read/accepted.
 *		  If this succeeds, it must be set again.
 *	XPT_CLOSE:
 *		- Can be set at any time. It is never cleared.
 *	XPT_DEAD:
 *		- Can only be set while XPT_BUSY is held which ensures
 *		  that no other thread will be using the transport or will
 *		  try to set XPT_DEAD.
 */

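/**
 * svc_reg_xprt_class - register an RPC server transport class
 * @xcl: transport class to register
 *
 * Returns 0 on success, or -EEXIST if a class with the same name is
 * already registered.
 */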
int svc_reg_xprt_class(struct svc_xprt_class *xcl)
{
	struct svc_xprt_class *cl;
	int res = -EEXIST;

	dprintk("svc: Adding svc transport class '%s'\n", xcl->xcl_name);

	INIT_LIST_HEAD(&xcl->xcl_list);
	spin_lock(&svc_xprt_class_lock);
	/* Make sure there isn't already a class with the same name */
	list_for_each_entry(cl, &svc_xprt_class_list, xcl_list) {
		if (strcmp(xcl->xcl_name, cl->xcl_name) == 0)
			goto out;
	}
	list_add_tail(&xcl->xcl_list, &svc_xprt_class_list);
	res = 0;
out:
	spin_unlock(&svc_xprt_class_lock);
	return res;
}
EXPORT_SYMBOL_GPL(svc_reg_xprt_class);

void svc_unreg_xprt_class(struct svc_xprt_class *xcl)
{
	dprintk("svc: Removing svc transport class '%s'\n", xcl->xcl_name);
	spin_lock(&svc_xprt_class_lock);
	list_del_init(&xcl->xcl_list);
	spin_unlock(&svc_xprt_class_lock);
}
EXPORT_SYMBOL_GPL(svc_unreg_xprt_class);

/*
 * Format the transport list for printing
 */
int svc_print_xprts(char *buf, int maxlen)
{
	struct svc_xprt_class *xcl;
	char tmpstr[80];
	int len = 0;
	buf[0] = '\0';

	spin_lock(&svc_xprt_class_lock);
	list_for_each_entry(xcl, &svc_xprt_class_list, xcl_list) {
		int slen;

		sprintf(tmpstr, "%s %d\n", xcl->xcl_name, xcl->xcl_max_payload);
		slen = strlen(tmpstr);
		if (len + slen > maxlen)
			break;
		len += slen;
		strcat(buf, tmpstr);
	}
	spin_unlock(&svc_xprt_class_lock);

	return len;
}

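/*
 * svc_xprt_free - release callback for the transport's kref
 *
 * Runs when the last reference is dropped: releases the authunix cache,
 * the network namespace reference and any backchannel rpc_xprt, then
 * asks the provider to free the transport itself via xpo_free and drops
 * the module reference.
 */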
static void svc_xprt_free(struct kref *kref)
{
	struct svc_xprt *xprt =
		container_of(kref, struct svc_xprt, xpt_ref);
	struct module *owner = xprt->xpt_class->xcl_owner;
	if (test_bit(XPT_CACHE_AUTH, &xprt->xpt_flags))
		svcauth_unix_info_release(xprt);
	put_net(xprt->xpt_net);
	/* See comment on corresponding get in xs_setup_bc_tcp(): */
	if (xprt->xpt_bc_xprt)
		xprt_put(xprt->xpt_bc_xprt);
	xprt->xpt_ops->xpo_free(xprt);
	module_put(owner);
}

void svc_xprt_put(struct svc_xprt *xprt)
{
	kref_put(&xprt->xpt_ref, svc_xprt_free);
}
EXPORT_SYMBOL_GPL(svc_xprt_put);

/*
 * Called by transport drivers to initialize the transport independent
 * portion of the transport instance.
 */
void svc_xprt_init(struct net *net, struct svc_xprt_class *xcl,
		   struct svc_xprt *xprt, struct svc_serv *serv)
{
	memset(xprt, 0, sizeof(*xprt));
	xprt->xpt_class = xcl;
	xprt->xpt_ops = xcl->xcl_ops;
	kref_init(&xprt->xpt_ref);
	xprt->xpt_server = serv;
	INIT_LIST_HEAD(&xprt->xpt_list);
	INIT_LIST_HEAD(&xprt->xpt_ready);
	INIT_LIST_HEAD(&xprt->xpt_deferred);
	INIT_LIST_HEAD(&xprt->xpt_users);
	mutex_init(&xprt->xpt_mutex);
	spin_lock_init(&xprt->xpt_lock);
	set_bit(XPT_BUSY, &xprt->xpt_flags);
	rpc_init_wait_queue(&xprt->xpt_bc_pending, "xpt_bc_pending");
	xprt->xpt_net = get_net(net);
}
EXPORT_SYMBOL_GPL(svc_xprt_init);

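/*
 * __svc_xpo_create - helper for svc_create_xprt()
 *
 * Build a wildcard (ANY) socket address for the requested address
 * family and ask the transport class to create a listener endpoint
 * bound to the given port.
 */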
static struct svc_xprt *__svc_xpo_create(struct svc_xprt_class *xcl,
					 struct svc_serv *serv,
					 struct net *net,
					 const int family,
					 const unsigned short port,
					 int flags)
{
	struct sockaddr_in sin = {
		.sin_family		= AF_INET,
		.sin_addr.s_addr	= htonl(INADDR_ANY),
		.sin_port		= htons(port),
	};
#if IS_ENABLED(CONFIG_IPV6)
	struct sockaddr_in6 sin6 = {
		.sin6_family		= AF_INET6,
		.sin6_addr		= IN6ADDR_ANY_INIT,
		.sin6_port		= htons(port),
	};
#endif
	struct sockaddr *sap;
	size_t len;

	switch (family) {
	case PF_INET:
		sap = (struct sockaddr *)&sin;
		len = sizeof(sin);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case PF_INET6:
		sap = (struct sockaddr *)&sin6;
		len = sizeof(sin6);
		break;
#endif
	default:
		return ERR_PTR(-EAFNOSUPPORT);
	}

	return xcl->xcl_ops->xpo_create(serv, net, sap, len, flags);
}

/*
 * svc_xprt_received conditionally queues the transport for processing
 * by another thread. The caller must hold the XPT_BUSY bit and must
 * not thereafter touch transport data.
 *
 * Note: XPT_DATA only gets cleared when a read-attempt finds no (or
 * insufficient) data.
 */
static void svc_xprt_received(struct svc_xprt *xprt)
{
	if (!test_bit(XPT_BUSY, &xprt->xpt_flags)) {
		WARN_ONCE(1, "xprt=0x%p already busy!", xprt);
		return;
	}

	/* As soon as we clear busy, the xprt could be closed and
	 * 'put', so we need a reference to call svc_enqueue_xprt with:
	 */
	svc_xprt_get(xprt);
	smp_mb__before_atomic();
	clear_bit(XPT_BUSY, &xprt->xpt_flags);
	xprt->xpt_server->sv_ops->svo_enqueue_xprt(xprt);
	svc_xprt_put(xprt);
}

void svc_add_new_perm_xprt(struct svc_serv *serv, struct svc_xprt *new)
{
	clear_bit(XPT_TEMP, &new->xpt_flags);
	spin_lock_bh(&serv->sv_lock);
	list_add(&new->xpt_list, &serv->sv_permsocks);
	spin_unlock_bh(&serv->sv_lock);
	svc_xprt_received(new);
}

int _svc_create_xprt(struct svc_serv *serv, const char *xprt_name,
		     struct net *net, const int family,
		     const unsigned short port, int flags)
{
	struct svc_xprt_class *xcl;

	spin_lock(&svc_xprt_class_lock);
	list_for_each_entry(xcl, &svc_xprt_class_list, xcl_list) {
		struct svc_xprt *newxprt;
		unsigned short newport;

		if (strcmp(xprt_name, xcl->xcl_name))
			continue;

		if (!try_module_get(xcl->xcl_owner))
			goto err;

		spin_unlock(&svc_xprt_class_lock);
		newxprt = __svc_xpo_create(xcl, serv, net, family, port, flags);
		if (IS_ERR(newxprt)) {
			module_put(xcl->xcl_owner);
			return PTR_ERR(newxprt);
		}
		svc_add_new_perm_xprt(serv, newxprt);
		newport = svc_xprt_local_port(newxprt);
		return newport;
	}
err:
	spin_unlock(&svc_xprt_class_lock);
	/* This errno is exposed to user space.  Provide a reasonable
	 * perror msg for a bad transport. */
	return -EPROTONOSUPPORT;
}

int svc_create_xprt(struct svc_serv *serv, const char *xprt_name,
		    struct net *net, const int family,
		    const unsigned short port, int flags)
{
	int err;

	dprintk("svc: creating transport %s[%d]\n", xprt_name, port);
	err = _svc_create_xprt(serv, xprt_name, net, family, port, flags);
	if (err == -EPROTONOSUPPORT) {
		request_module("svc%s", xprt_name);
		err = _svc_create_xprt(serv, xprt_name, net, family, port, flags);
	}
	if (err)
		dprintk("svc: transport %s not found, err %d\n",
			xprt_name, err);
	return err;
}
EXPORT_SYMBOL_GPL(svc_create_xprt);

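/*
 * Typical use (illustrative sketch, not code from this file): a service
 * such as nfsd creates its listeners at startup with roughly
 *
 *	err = svc_create_xprt(serv, "tcp", net, PF_INET, port,
 *			      SVC_SOCK_DEFAULTS);
 *
 * If no class with that name is registered yet, svc_create_xprt()
 * retries once after asking for the "svc<name>" module, as seen above.
 */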
/*
 * Copy the local and remote xprt addresses to the rqstp structure
 */
void svc_xprt_copy_addrs(struct svc_rqst *rqstp, struct svc_xprt *xprt)
{
	memcpy(&rqstp->rq_addr, &xprt->xpt_remote, xprt->xpt_remotelen);
	rqstp->rq_addrlen = xprt->xpt_remotelen;

	/*
	 * Destination address in request is needed for binding the
	 * source address in RPC replies/callbacks later.
	 */
	memcpy(&rqstp->rq_daddr, &xprt->xpt_local, xprt->xpt_locallen);
	rqstp->rq_daddrlen = xprt->xpt_locallen;
}
EXPORT_SYMBOL_GPL(svc_xprt_copy_addrs);

/**
 * svc_print_addr - Format rq_addr field for printing
 * @rqstp: svc_rqst struct containing address to print
 * @buf: target buffer for formatted address
 * @len: length of target buffer
 *
 */
char *svc_print_addr(struct svc_rqst *rqstp, char *buf, size_t len)
{
	return __svc_print_addr(svc_addr(rqstp), buf, len);
}
EXPORT_SYMBOL_GPL(svc_print_addr);

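/*
 * svc_xprt_has_something_to_do - does this transport need a thread?
 *
 * True if there is a connection to accept or a close pending, or if
 * data or deferred requests are queued and the provider reports enough
 * write space to send a reply.
 */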
static bool svc_xprt_has_something_to_do(struct svc_xprt *xprt)
{
	if (xprt->xpt_flags & ((1<<XPT_CONN)|(1<<XPT_CLOSE)))
		return true;
	if (xprt->xpt_flags & ((1<<XPT_DATA)|(1<<XPT_DEFERRED)))
		return xprt->xpt_ops->xpo_has_wspace(xprt);
	return false;
}

void svc_xprt_do_enqueue(struct svc_xprt *xprt)
{
	struct svc_pool *pool;
	struct svc_rqst	*rqstp = NULL;
	int cpu;
	bool queued = false;

	if (!svc_xprt_has_something_to_do(xprt))
		goto out;

	/* Mark transport as busy. It will remain in this state until
	 * the provider calls svc_xprt_received. We update XPT_BUSY
	 * atomically because it also guards against trying to enqueue
	 * the transport twice.
	 */
	if (test_and_set_bit(XPT_BUSY, &xprt->xpt_flags)) {
		/* Don't enqueue transport while already enqueued */
		dprintk("svc: transport %p busy, not enqueued\n", xprt);
		goto out;
	}

	cpu = get_cpu();
	pool = svc_pool_for_cpu(xprt->xpt_server, cpu);

	atomic_long_inc(&pool->sp_stats.packets);

redo_search:
	/* find a thread for this xprt */
	rcu_read_lock();
	list_for_each_entry_rcu(rqstp, &pool->sp_all_threads, rq_all) {
		/* Do a lockless check first */
		if (test_bit(RQ_BUSY, &rqstp->rq_flags))
			continue;

		/*
		 * Once the xprt has been queued, it can only be dequeued by
		 * the task that intends to service it. All we can do at that
		 * point is to try to wake this thread back up so that it can
		 * do so.
		 */
		if (!queued) {
			spin_lock_bh(&rqstp->rq_lock);
			if (test_and_set_bit(RQ_BUSY, &rqstp->rq_flags)) {
				/* already busy, move on... */
				spin_unlock_bh(&rqstp->rq_lock);
				continue;
			}

			/* this one will do */
			rqstp->rq_xprt = xprt;
			svc_xprt_get(xprt);
			spin_unlock_bh(&rqstp->rq_lock);
		}
		rcu_read_unlock();

		atomic_long_inc(&pool->sp_stats.threads_woken);
		wake_up_process(rqstp->rq_task);
		put_cpu();
		goto out;
	}
	rcu_read_unlock();

	/*
	 * We didn't find an idle thread to use, so we need to queue the xprt.
	 * Do so and then search again. If we find one, we can't hook this one
	 * up to it directly but we can wake the thread up in the hopes that it
	 * will pick it up once it searches for a xprt to service.
	 */
	if (!queued) {
		queued = true;
		dprintk("svc: transport %p put into queue\n", xprt);
		spin_lock_bh(&pool->sp_lock);
		list_add_tail(&xprt->xpt_ready, &pool->sp_sockets);
		pool->sp_stats.sockets_queued++;
		spin_unlock_bh(&pool->sp_lock);
		goto redo_search;
	}
	rqstp = NULL;
	put_cpu();
out:
	trace_svc_xprt_do_enqueue(xprt, rqstp);
}
EXPORT_SYMBOL_GPL(svc_xprt_do_enqueue);

/*
 * Queue up a transport with data pending. If there are idle nfsd
 * processes, wake 'em up.
 *
 */
void svc_xprt_enqueue(struct svc_xprt *xprt)
{
	if (test_bit(XPT_BUSY, &xprt->xpt_flags))
		return;
	xprt->xpt_server->sv_ops->svo_enqueue_xprt(xprt);
}
EXPORT_SYMBOL_GPL(svc_xprt_enqueue);

/*
 * Dequeue the first transport, if there is one.
 */
static struct svc_xprt *svc_xprt_dequeue(struct svc_pool *pool)
{
	struct svc_xprt	*xprt = NULL;

	if (list_empty(&pool->sp_sockets))
		goto out;

	spin_lock_bh(&pool->sp_lock);
	if (likely(!list_empty(&pool->sp_sockets))) {
		xprt = list_first_entry(&pool->sp_sockets,
					struct svc_xprt, xpt_ready);
		list_del_init(&xprt->xpt_ready);
		svc_xprt_get(xprt);

		dprintk("svc: transport %p dequeued, inuse=%d\n",
			xprt, atomic_read(&xprt->xpt_ref.refcount));
	}
	spin_unlock_bh(&pool->sp_lock);
out:
	trace_svc_xprt_dequeue(xprt);
	return xprt;
}

/**
 * svc_reserve - change the space reserved for the reply to a request.
 * @rqstp:  The request in question
 * @space: new max space to reserve
 *
 * Each request reserves some space on the output queue of the transport
 * to make sure the reply fits.  This function reduces that reserved
 * space to be the amount of space used already, plus @space.
 *
 */
void svc_reserve(struct svc_rqst *rqstp, int space)
{
	space += rqstp->rq_res.head[0].iov_len;

	if (space < rqstp->rq_reserved) {
		struct svc_xprt *xprt = rqstp->rq_xprt;
		atomic_sub((rqstp->rq_reserved - space), &xprt->xpt_reserved);
		rqstp->rq_reserved = space;

		if (xprt->xpt_ops->xpo_adjust_wspace)
			xprt->xpt_ops->xpo_adjust_wspace(xprt);
		svc_xprt_enqueue(xprt);
	}
}
EXPORT_SYMBOL_GPL(svc_reserve);

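/*
 * svc_xprt_release - release the resources a request holds on its transport
 *
 * Drops the provider's per-request receive state, any deferred request,
 * the response pages and the write-space reservation, then puts the
 * reference the request held on the transport.
 */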
static void svc_xprt_release(struct svc_rqst *rqstp)
{
	struct svc_xprt	*xprt = rqstp->rq_xprt;

	rqstp->rq_xprt->xpt_ops->xpo_release_rqst(rqstp);

	kfree(rqstp->rq_deferred);
	rqstp->rq_deferred = NULL;

	svc_free_res_pages(rqstp);
	rqstp->rq_res.page_len = 0;
	rqstp->rq_res.page_base = 0;

	/* Reset response buffer and release
	 * the reservation.
	 * But first, check that enough space was reserved
	 * for the reply, otherwise we have a bug!
	 */
	if ((rqstp->rq_res.len) > rqstp->rq_reserved)
		printk(KERN_ERR "RPC request reserved %d but used %d\n",
		       rqstp->rq_reserved,
		       rqstp->rq_res.len);

	rqstp->rq_res.head[0].iov_len = 0;
	svc_reserve(rqstp, 0);
	rqstp->rq_xprt = NULL;

	svc_xprt_put(xprt);
}

/*
 * Some svc_serv's will have occasional work to do, even when a xprt is not
 * waiting to be serviced. This function is there to "kick" a task in one of
 * those services so that it can wake up and do that work. Note that we only
 * bother with pool 0 as we don't need to wake up more than one thread for
 * this purpose.
 */
void svc_wake_up(struct svc_serv *serv)
{
	struct svc_rqst	*rqstp;
	struct svc_pool *pool;

	pool = &serv->sv_pools[0];

	rcu_read_lock();
	list_for_each_entry_rcu(rqstp, &pool->sp_all_threads, rq_all) {
		/* skip any that aren't queued */
		if (test_bit(RQ_BUSY, &rqstp->rq_flags))
			continue;
		rcu_read_unlock();
		dprintk("svc: daemon %p woken up.\n", rqstp);
		wake_up_process(rqstp->rq_task);
		trace_svc_wake_up(rqstp->rq_task->pid);
		return;
	}
	rcu_read_unlock();

	/* No free entries available */
	set_bit(SP_TASK_PENDING, &pool->sp_flags);
	smp_wmb();
	trace_svc_wake_up(0);
}
EXPORT_SYMBOL_GPL(svc_wake_up);

int svc_port_is_privileged(struct sockaddr *sin)
{
	switch (sin->sa_family) {
	case AF_INET:
		return ntohs(((struct sockaddr_in *)sin)->sin_port)
			< PROT_SOCK;
	case AF_INET6:
		return ntohs(((struct sockaddr_in6 *)sin)->sin6_port)
			< PROT_SOCK;
	default:
		return 0;
	}
}

/*
 * Make sure that we don't have too many active connections. If we have,
 * something must be dropped. It's not clear what will happen if we allow
 * "too many" connections, but when dealing with network-facing software,
 * we have to code defensively. Here we do that by imposing hard limits.
 *
 * There's no point in trying to do random drop here for DoS
 * prevention. The NFS client does 1 reconnect in 15 seconds. An
 * attacker can easily beat that.
 *
 * The only somewhat efficient mechanism would be to drop old
 * connections from the same IP first. But right now we don't even
 * record the client IP in svc_sock.
 *
 * Single-threaded services that expect a lot of clients will probably
 * need to set sv_maxconn to override the default value which is based
 * on the number of threads.
 */
static void svc_check_conn_limits(struct svc_serv *serv)
{
	unsigned int limit = serv->sv_maxconn ? serv->sv_maxconn :
				(serv->sv_nrthreads+3) * 20;

	if (serv->sv_tmpcnt > limit) {
		struct svc_xprt *xprt = NULL;
		spin_lock_bh(&serv->sv_lock);
		if (!list_empty(&serv->sv_tempsocks)) {
			/* Try to help the admin */
			net_notice_ratelimited("%s: too many open connections, consider increasing the %s\n",
					       serv->sv_name, serv->sv_maxconn ?
					       "max number of connections" :
					       "number of threads");
			/*
			 * Always select the oldest connection. It's not fair,
			 * but so is life
			 */
			xprt = list_entry(serv->sv_tempsocks.prev,
					  struct svc_xprt,
					  xpt_list);
			set_bit(XPT_CLOSE, &xprt->xpt_flags);
			svc_xprt_get(xprt);
		}
		spin_unlock_bh(&serv->sv_lock);

		if (xprt) {
			svc_xprt_enqueue(xprt);
			svc_xprt_put(xprt);
		}
	}
}

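/*
 * svc_alloc_arg - prepare the receive buffer for the next request
 *
 * Make sure rq_pages[] holds enough pages for the largest message the
 * service accepts, then point rq_arg at them (head in the first page,
 * the remainder as the page list). If an allocation fails, sleep
 * briefly and retry unless the thread is being shut down.
 */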
static int svc_alloc_arg(struct svc_rqst *rqstp)
{
	struct svc_serv *serv = rqstp->rq_server;
	struct xdr_buf *arg;
	int pages;
	int i;

	/* now allocate needed pages.  If we get a failure, sleep briefly */
	pages = (serv->sv_max_mesg + PAGE_SIZE) / PAGE_SIZE;
	WARN_ON_ONCE(pages >= RPCSVC_MAXPAGES);
	if (pages >= RPCSVC_MAXPAGES)
		/* use as many pages as possible */
		pages = RPCSVC_MAXPAGES - 1;
	for (i = 0; i < pages ; i++)
		while (rqstp->rq_pages[i] == NULL) {
			struct page *p = alloc_page(GFP_KERNEL);
			if (!p) {
				set_current_state(TASK_INTERRUPTIBLE);
				if (signalled() || kthread_should_stop()) {
					set_current_state(TASK_RUNNING);
					return -EINTR;
				}
				schedule_timeout(msecs_to_jiffies(500));
			}
			rqstp->rq_pages[i] = p;
		}
	rqstp->rq_page_end = &rqstp->rq_pages[i];
	rqstp->rq_pages[i++] = NULL; /* this might be seen in nfs_read_actor */

	/* Make arg->head point to first page and arg->pages point to rest */
	arg = &rqstp->rq_arg;
	arg->head[0].iov_base = page_address(rqstp->rq_pages[0]);
	arg->head[0].iov_len = PAGE_SIZE;
	arg->pages = rqstp->rq_pages + 1;
	arg->page_base = 0;
	/* save at least one page for response */
	arg->page_len = (pages-2)*PAGE_SIZE;
	arg->len = (pages-1)*PAGE_SIZE;
	arg->tail[0].iov_len = 0;
	return 0;
}

static bool
rqst_should_sleep(struct svc_rqst *rqstp)
{
	struct svc_pool *pool = rqstp->rq_pool;

	/* did someone call svc_wake_up? */
	if (test_and_clear_bit(SP_TASK_PENDING, &pool->sp_flags))
		return false;

	/* was a socket queued? */
	if (!list_empty(&pool->sp_sockets))
		return false;

	/* are we shutting down? */
	if (signalled() || kthread_should_stop())
		return false;

	/* are we freezing? */
	if (freezing(current))
		return false;

	return true;
}

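/*
 * svc_get_next_xprt - find or wait for the next transport to service
 *
 * Return a queued transport if one is ready. Otherwise mark this thread
 * idle and sleep for up to @timeout jiffies, waiting for
 * svc_xprt_do_enqueue() to hand us one. Returns ERR_PTR(-EINTR) if the
 * thread is being shut down, or ERR_PTR(-EAGAIN) if it woke up without
 * work to do.
 */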
static struct svc_xprt *svc_get_next_xprt(struct svc_rqst *rqstp, long timeout)
{
	struct svc_xprt *xprt;
	struct svc_pool	*pool = rqstp->rq_pool;
	long time_left = 0;

	/* rq_xprt should be clear on entry */
	WARN_ON_ONCE(rqstp->rq_xprt);

	/* Normally we will wait up to 5 seconds for any required
	 * cache information to be provided.
	 */
	rqstp->rq_chandle.thread_wait = 5*HZ;

	xprt = svc_xprt_dequeue(pool);
	if (xprt) {
		rqstp->rq_xprt = xprt;

		/* As there is a shortage of threads and this request
		 * had to be queued, don't allow the thread to wait so
		 * long for cache updates.
		 */
		rqstp->rq_chandle.thread_wait = 1*HZ;
		clear_bit(SP_TASK_PENDING, &pool->sp_flags);
		return xprt;
	}

	/*
	 * We have to be able to interrupt this wait
	 * to bring down the daemons ...
	 */
	set_current_state(TASK_INTERRUPTIBLE);
	clear_bit(RQ_BUSY, &rqstp->rq_flags);
	smp_mb();

	if (likely(rqst_should_sleep(rqstp)))
		time_left = schedule_timeout(timeout);
	else
		__set_current_state(TASK_RUNNING);

	try_to_freeze();

	spin_lock_bh(&rqstp->rq_lock);
	set_bit(RQ_BUSY, &rqstp->rq_flags);
	spin_unlock_bh(&rqstp->rq_lock);

	xprt = rqstp->rq_xprt;
	if (xprt != NULL)
		return xprt;

	if (!time_left)
		atomic_long_inc(&pool->sp_stats.threads_timedout);

	if (signalled() || kthread_should_stop())
		return ERR_PTR(-EINTR);
	return ERR_PTR(-EAGAIN);
}

static void svc_add_new_temp_xprt(struct svc_serv *serv, struct svc_xprt *newxpt)
{
	spin_lock_bh(&serv->sv_lock);
	set_bit(XPT_TEMP, &newxpt->xpt_flags);
	list_add(&newxpt->xpt_list, &serv->sv_tempsocks);
	serv->sv_tmpcnt++;
	if (serv->sv_temptimer.function == NULL) {
		/* setup timer to age temp transports */
		setup_timer(&serv->sv_temptimer, svc_age_temp_xprts,
			    (unsigned long)serv);
		mod_timer(&serv->sv_temptimer,
			  jiffies + svc_conn_age_period * HZ);
	}
	spin_unlock_bh(&serv->sv_lock);
	svc_xprt_received(newxpt);
}

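/*
 * svc_handle_xprt - do the next piece of work for a transport
 *
 * Delete the transport if it is marked for close, accept a new
 * connection if it is a listener, or receive a request (possibly one
 * that was previously deferred). Returns the number of request bytes
 * received; anything <= 0 means there is nothing for the caller to
 * process.
 */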
static int svc_handle_xprt(struct svc_rqst *rqstp, struct svc_xprt *xprt)
{
	struct svc_serv *serv = rqstp->rq_server;
	int len = 0;

	if (test_bit(XPT_CLOSE, &xprt->xpt_flags)) {
		dprintk("svc_recv: found XPT_CLOSE\n");
		svc_delete_xprt(xprt);
		/* Leave XPT_BUSY set on the dead xprt: */
		goto out;
	}
	if (test_bit(XPT_LISTENER, &xprt->xpt_flags)) {
		struct svc_xprt *newxpt;
		/*
		 * We know this module_get will succeed because the
		 * listener holds a reference too
		 */
		__module_get(xprt->xpt_class->xcl_owner);
		svc_check_conn_limits(xprt->xpt_server);
		newxpt = xprt->xpt_ops->xpo_accept(xprt);
		if (newxpt)
			svc_add_new_temp_xprt(serv, newxpt);
		else
			module_put(xprt->xpt_class->xcl_owner);
	} else {
		/* XPT_DATA|XPT_DEFERRED case: */
		dprintk("svc: server %p, pool %u, transport %p, inuse=%d\n",
			rqstp, rqstp->rq_pool->sp_id, xprt,
			atomic_read(&xprt->xpt_ref.refcount));
		rqstp->rq_deferred = svc_deferred_dequeue(xprt);
		if (rqstp->rq_deferred)
			len = svc_deferred_recv(rqstp);
		else
			len = xprt->xpt_ops->xpo_recvfrom(rqstp);
		dprintk("svc: got len=%d\n", len);
		rqstp->rq_reserved = serv->sv_max_mesg;
		atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved);
	}
	/* clear XPT_BUSY: */
	svc_xprt_received(xprt);
out:
	trace_svc_handle_xprt(xprt, len);
	return len;
}

/*
 * Receive the next request on any transport.  This code is carefully
 * organised not to touch any cachelines in the shared svc_serv
 * structure, only cachelines in the local svc_pool.
 */
int svc_recv(struct svc_rqst *rqstp, long timeout)
{
	struct svc_xprt	*xprt = NULL;
	struct svc_serv	*serv = rqstp->rq_server;
	int len, err;

	dprintk("svc: server %p waiting for data (to = %ld)\n",
		rqstp, timeout);

	if (rqstp->rq_xprt)
		printk(KERN_ERR
			"svc_recv: service %p, transport not NULL!\n",
			rqstp);

	err = svc_alloc_arg(rqstp);
	if (err)
		goto out;

	try_to_freeze();
	cond_resched();
	err = -EINTR;
	if (signalled() || kthread_should_stop())
		goto out;

	xprt = svc_get_next_xprt(rqstp, timeout);
	if (IS_ERR(xprt)) {
		err = PTR_ERR(xprt);
		goto out;
	}

	len = svc_handle_xprt(rqstp, xprt);

	/* No data, incomplete (TCP) read, or accept() */
	err = -EAGAIN;
	if (len <= 0)
		goto out_release;

	clear_bit(XPT_OLD, &xprt->xpt_flags);

	if (xprt->xpt_ops->xpo_secure_port(rqstp))
		set_bit(RQ_SECURE, &rqstp->rq_flags);
	else
		clear_bit(RQ_SECURE, &rqstp->rq_flags);
	rqstp->rq_chandle.defer = svc_defer;
	rqstp->rq_xid = svc_getu32(&rqstp->rq_arg.head[0]);

	if (serv->sv_stats)
		serv->sv_stats->netcnt++;
	trace_svc_recv(rqstp, len);
	return len;
out_release:
	rqstp->rq_res.len = 0;
	svc_xprt_release(rqstp);
out:
	trace_svc_recv(rqstp, err);
	return err;
}
EXPORT_SYMBOL_GPL(svc_recv);

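/*
 * How the pieces fit together (illustrative sketch only, not code from
 * this file): a service thread such as nfsd() or lockd() spends its
 * life in a loop of roughly
 *
 *	while (!kthread_should_stop()) {
 *		err = svc_recv(rqstp, timeout);
 *		if (err == -EINTR)
 *			break;
 *		if (err == -EAGAIN)
 *			continue;
 *		svc_process(rqstp);	// sends the reply via svc_send()
 *	}
 *
 * svc_drop() below is used when a request must be discarded instead of
 * answered.
 */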
/*
 * Drop request
 */
void svc_drop(struct svc_rqst *rqstp)
{
	dprintk("svc: xprt %p dropped request\n", rqstp->rq_xprt);
	svc_xprt_release(rqstp);
}
EXPORT_SYMBOL_GPL(svc_drop);

/*
 * Return reply to client.
 */
int svc_send(struct svc_rqst *rqstp)
{
	struct svc_xprt	*xprt;
	int		len = -EFAULT;
	struct xdr_buf	*xb;

	xprt = rqstp->rq_xprt;
	if (!xprt)
		goto out;

	/* release the receive skb before sending the reply */
	rqstp->rq_xprt->xpt_ops->xpo_release_rqst(rqstp);

	/* calculate over-all length */
	xb = &rqstp->rq_res;
	xb->len = xb->head[0].iov_len +
		xb->page_len +
		xb->tail[0].iov_len;

	/* Grab mutex to serialize outgoing data. */
	mutex_lock(&xprt->xpt_mutex);
	if (test_bit(XPT_DEAD, &xprt->xpt_flags)
			|| test_bit(XPT_CLOSE, &xprt->xpt_flags))
		len = -ENOTCONN;
	else
		len = xprt->xpt_ops->xpo_sendto(rqstp);
	mutex_unlock(&xprt->xpt_mutex);
	rpc_wake_up(&xprt->xpt_bc_pending);
	svc_xprt_release(rqstp);

	if (len == -ECONNREFUSED || len == -ENOTCONN || len == -EAGAIN)
		len = 0;
out:
	trace_svc_send(rqstp, len);
	return len;
}

/*
 * Timer function to close old temporary transports, using
 * a mark-and-sweep algorithm.
 */
static void svc_age_temp_xprts(unsigned long closure)
{
	struct svc_serv *serv = (struct svc_serv *)closure;
	struct svc_xprt *xprt;
	struct list_head *le, *next;

	dprintk("svc_age_temp_xprts\n");

	if (!spin_trylock_bh(&serv->sv_lock)) {
		/* busy, try again 1 sec later */
		dprintk("svc_age_temp_xprts: busy\n");
		mod_timer(&serv->sv_temptimer, jiffies + HZ);
		return;
	}

	list_for_each_safe(le, next, &serv->sv_tempsocks) {
		xprt = list_entry(le, struct svc_xprt, xpt_list);

		/* First time through, just mark it OLD. Second time
		 * through, close it. */
		if (!test_and_set_bit(XPT_OLD, &xprt->xpt_flags))
			continue;
		if (atomic_read(&xprt->xpt_ref.refcount) > 1 ||
		    test_bit(XPT_BUSY, &xprt->xpt_flags))
			continue;
		list_del_init(le);
		set_bit(XPT_CLOSE, &xprt->xpt_flags);
		dprintk("queuing xprt %p for closing\n", xprt);

		/* a thread will dequeue and close it soon */
		svc_xprt_enqueue(xprt);
	}
	spin_unlock_bh(&serv->sv_lock);

	mod_timer(&serv->sv_temptimer, jiffies + svc_conn_age_period * HZ);
}

/* Close temporary transports whose xpt_local matches server_addr immediately
 * instead of waiting for them to be picked up by the timer.
 *
 * This is meant to be called from a notifier_block that runs when an ip
 * address is deleted.
 */
void svc_age_temp_xprts_now(struct svc_serv *serv, struct sockaddr *server_addr)
{
	struct svc_xprt *xprt;
	struct svc_sock *svsk;
	struct socket *sock;
	struct list_head *le, *next;
	LIST_HEAD(to_be_closed);
	struct linger no_linger = {
		.l_onoff = 1,
		.l_linger = 0,
	};

	spin_lock_bh(&serv->sv_lock);
	list_for_each_safe(le, next, &serv->sv_tempsocks) {
		xprt = list_entry(le, struct svc_xprt, xpt_list);
		if (rpc_cmp_addr(server_addr, (struct sockaddr *)
				 &xprt->xpt_local)) {
			dprintk("svc_age_temp_xprts_now: found %p\n", xprt);
			list_move(le, &to_be_closed);
		}
	}
	spin_unlock_bh(&serv->sv_lock);

	while (!list_empty(&to_be_closed)) {
		le = to_be_closed.next;
		list_del_init(le);
		xprt = list_entry(le, struct svc_xprt, xpt_list);
		dprintk("svc_age_temp_xprts_now: closing %p\n", xprt);
		svsk = container_of(xprt, struct svc_sock, sk_xprt);
		sock = svsk->sk_sock;
		kernel_setsockopt(sock, SOL_SOCKET, SO_LINGER,
				  (char *)&no_linger, sizeof(no_linger));
		svc_close_xprt(xprt);
	}
}
EXPORT_SYMBOL_GPL(svc_age_temp_xprts_now);

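/*
 * call_xpt_users - run the transport's registered cleanup callbacks
 *
 * Invoke and unlink every svc_xpt_user registered on the transport.
 * Called from svc_delete_xprt() so users holding a pointer to the
 * transport can react to it going away.
 */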
static void call_xpt_users(struct svc_xprt *xprt)
{
	struct svc_xpt_user *u;

	spin_lock(&xprt->xpt_lock);
	while (!list_empty(&xprt->xpt_users)) {
		u = list_first_entry(&xprt->xpt_users, struct svc_xpt_user, list);
		list_del(&u->list);
		u->callback(u);
	}
	spin_unlock(&xprt->xpt_lock);
}

/*
 * Remove a dead transport
 */
static void svc_delete_xprt(struct svc_xprt *xprt)
{
	struct svc_serv	*serv = xprt->xpt_server;
	struct svc_deferred_req *dr;

	/* Only do this once */
	if (test_and_set_bit(XPT_DEAD, &xprt->xpt_flags))
		BUG();

	dprintk("svc: svc_delete_xprt(%p)\n", xprt);
	xprt->xpt_ops->xpo_detach(xprt);

	spin_lock_bh(&serv->sv_lock);
	list_del_init(&xprt->xpt_list);
	WARN_ON_ONCE(!list_empty(&xprt->xpt_ready));
	if (test_bit(XPT_TEMP, &xprt->xpt_flags))
		serv->sv_tmpcnt--;
	spin_unlock_bh(&serv->sv_lock);

	while ((dr = svc_deferred_dequeue(xprt)) != NULL)
		kfree(dr);

	call_xpt_users(xprt);
	svc_xprt_put(xprt);
}

void svc_close_xprt(struct svc_xprt *xprt)
{
	set_bit(XPT_CLOSE, &xprt->xpt_flags);
	if (test_and_set_bit(XPT_BUSY, &xprt->xpt_flags))
		/* someone else will have to effect the close */
		return;
	/*
	 * We expect svc_close_xprt() to work even when no threads are
	 * running (e.g., while configuring the server before starting
	 * any threads), so if the transport isn't busy, we delete
	 * it ourself:
	 */
	svc_delete_xprt(xprt);
}
EXPORT_SYMBOL_GPL(svc_close_xprt);

static int svc_close_list(struct svc_serv *serv, struct list_head *xprt_list, struct net *net)
{
	struct svc_xprt *xprt;
	int ret = 0;

	spin_lock(&serv->sv_lock);
	list_for_each_entry(xprt, xprt_list, xpt_list) {
		if (xprt->xpt_net != net)
			continue;
		ret++;
		set_bit(XPT_CLOSE, &xprt->xpt_flags);
		svc_xprt_enqueue(xprt);
	}
	spin_unlock(&serv->sv_lock);
	return ret;
}

static struct svc_xprt *svc_dequeue_net(struct svc_serv *serv, struct net *net)
{
	struct svc_pool *pool;
	struct svc_xprt *xprt;
	struct svc_xprt *tmp;
	int i;

	for (i = 0; i < serv->sv_nrpools; i++) {
		pool = &serv->sv_pools[i];

		spin_lock_bh(&pool->sp_lock);
		list_for_each_entry_safe(xprt, tmp, &pool->sp_sockets, xpt_ready) {
			if (xprt->xpt_net != net)
				continue;
			list_del_init(&xprt->xpt_ready);
			spin_unlock_bh(&pool->sp_lock);
			return xprt;
		}
		spin_unlock_bh(&pool->sp_lock);
	}
	return NULL;
}

static void svc_clean_up_xprts(struct svc_serv *serv, struct net *net)
{
	struct svc_xprt *xprt;

	while ((xprt = svc_dequeue_net(serv, net))) {
		set_bit(XPT_CLOSE, &xprt->xpt_flags);
		svc_delete_xprt(xprt);
	}
}

/*
 * Server threads may still be running (especially in the case where the
 * service is still running in other network namespaces).
 *
 * So we shut down sockets the same way we would on a running server, by
 * setting XPT_CLOSE, enqueuing, and letting a thread pick it up to do
 * the close.  In the case there are no such other threads,
 * svc_clean_up_xprts() does a simple version of a server's main event
 * loop, and in the case where there are other threads, we may need to
 * wait a little while and then check again to see if they're done.
 */
void svc_close_net(struct svc_serv *serv, struct net *net)
{
	int delay = 0;

	while (svc_close_list(serv, &serv->sv_permsocks, net) +
	       svc_close_list(serv, &serv->sv_tempsocks, net)) {

		svc_clean_up_xprts(serv, net);
		msleep(delay++);
	}
}

/*
 * Handle defer and revisit of requests
 */

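/*
 * svc_revisit - cache callback for a previously deferred request
 *
 * Called when the cache entry the request was waiting for becomes
 * usable (or when the deferral must be abandoned). Requeue the deferred
 * request on its transport, unless the transport is already dead or the
 * cache layer asks us to drop it (@too_many), in which case the
 * deferred request is freed.
 */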
static void svc_revisit(struct cache_deferred_req *dreq, int too_many)
{
	struct svc_deferred_req *dr =
		container_of(dreq, struct svc_deferred_req, handle);
	struct svc_xprt *xprt = dr->xprt;

	spin_lock(&xprt->xpt_lock);
	set_bit(XPT_DEFERRED, &xprt->xpt_flags);
	if (too_many || test_bit(XPT_DEAD, &xprt->xpt_flags)) {
		spin_unlock(&xprt->xpt_lock);
		dprintk("revisit canceled\n");
		svc_xprt_put(xprt);
		kfree(dr);
		return;
	}
	dprintk("revisit queued\n");
	dr->xprt = NULL;
	list_add(&dr->handle.recent, &xprt->xpt_deferred);
	spin_unlock(&xprt->xpt_lock);
	svc_xprt_enqueue(xprt);
	svc_xprt_put(xprt);
}

/*
 * Save the request off for later processing. The request buffer looks
 * like this:
 *
 * <xprt-header><rpc-header><rpc-pagelist><rpc-tail>
 *
 * This code can only handle requests that consist of an xprt-header
 * and rpc-header.
 */
static struct cache_deferred_req *svc_defer(struct cache_req *req)
{
	struct svc_rqst *rqstp = container_of(req, struct svc_rqst, rq_chandle);
	struct svc_deferred_req *dr;

	if (rqstp->rq_arg.page_len || !test_bit(RQ_USEDEFERRAL, &rqstp->rq_flags))
		return NULL; /* if more than a page, give up FIXME */
	if (rqstp->rq_deferred) {
		dr = rqstp->rq_deferred;
		rqstp->rq_deferred = NULL;
	} else {
		size_t skip;
		size_t size;
		/* FIXME maybe discard if size too large */
		size = sizeof(struct svc_deferred_req) + rqstp->rq_arg.len;
		dr = kmalloc(size, GFP_KERNEL);
		if (dr == NULL)
			return NULL;

		dr->handle.owner = rqstp->rq_server;
		dr->prot = rqstp->rq_prot;
		memcpy(&dr->addr, &rqstp->rq_addr, rqstp->rq_addrlen);
		dr->addrlen = rqstp->rq_addrlen;
		dr->daddr = rqstp->rq_daddr;
		dr->argslen = rqstp->rq_arg.len >> 2;
		dr->xprt_hlen = rqstp->rq_xprt_hlen;

		/* back up head to the start of the buffer and copy */
		skip = rqstp->rq_arg.len - rqstp->rq_arg.head[0].iov_len;
		memcpy(dr->args, rqstp->rq_arg.head[0].iov_base - skip,
		       dr->argslen << 2);
	}
	svc_xprt_get(rqstp->rq_xprt);
	dr->xprt = rqstp->rq_xprt;
	set_bit(RQ_DROPME, &rqstp->rq_flags);

	dr->handle.revisit = svc_revisit;
	return &dr->handle;
}

/*
 * recv data from a deferred request into an active one
 */
static int svc_deferred_recv(struct svc_rqst *rqstp)
{
	struct svc_deferred_req *dr = rqstp->rq_deferred;

	/* setup iov_base past transport header */
	rqstp->rq_arg.head[0].iov_base = dr->args + (dr->xprt_hlen>>2);
	/* The iov_len does not include the transport header bytes */
	rqstp->rq_arg.head[0].iov_len = (dr->argslen<<2) - dr->xprt_hlen;
	rqstp->rq_arg.page_len = 0;
	/* The rq_arg.len includes the transport header bytes */
	rqstp->rq_arg.len = dr->argslen<<2;
	rqstp->rq_prot = dr->prot;
	memcpy(&rqstp->rq_addr, &dr->addr, dr->addrlen);
	rqstp->rq_addrlen = dr->addrlen;
	/* Save off transport header len in case we get deferred again */
	rqstp->rq_xprt_hlen = dr->xprt_hlen;
	rqstp->rq_daddr = dr->daddr;
	rqstp->rq_respages = rqstp->rq_pages;
	return (dr->argslen<<2) - dr->xprt_hlen;
}

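/*
 * svc_deferred_dequeue - take a deferred request off a transport's queue
 *
 * Returns a revisited deferred request queued on @xprt, or NULL if the
 * list is empty (in which case XPT_DEFERRED is cleared).
 */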
static struct svc_deferred_req *svc_deferred_dequeue(struct svc_xprt *xprt)
{
	struct svc_deferred_req *dr = NULL;

	if (!test_bit(XPT_DEFERRED, &xprt->xpt_flags))
		return NULL;
	spin_lock(&xprt->xpt_lock);
	if (!list_empty(&xprt->xpt_deferred)) {
		dr = list_entry(xprt->xpt_deferred.next,
				struct svc_deferred_req,
				handle.recent);
		list_del_init(&dr->handle.recent);
	} else
		clear_bit(XPT_DEFERRED, &xprt->xpt_flags);
	spin_unlock(&xprt->xpt_lock);
	return dr;
}

/**
 * svc_find_xprt - find an RPC transport instance
 * @serv: pointer to svc_serv to search
 * @xcl_name: C string containing transport's class name
 * @net: owner net pointer
 * @af: Address family of transport's local address
 * @port: transport's IP port number
 *
 * Return the transport instance pointer for the endpoint accepting
 * connections/peer traffic from the specified transport class,
 * address family and port.
 *
 * Specifying 0 for the address family or port is effectively a
 * wild-card, and will result in matching the first transport in the
 * service's list that has a matching class name.
 */
struct svc_xprt *svc_find_xprt(struct svc_serv *serv, const char *xcl_name,
			       struct net *net, const sa_family_t af,
			       const unsigned short port)
{
	struct svc_xprt *xprt;
	struct svc_xprt *found = NULL;

	/* Sanity check the args */
	if (serv == NULL || xcl_name == NULL)
		return found;

	spin_lock_bh(&serv->sv_lock);
	list_for_each_entry(xprt, &serv->sv_permsocks, xpt_list) {
		if (xprt->xpt_net != net)
			continue;
		if (strcmp(xprt->xpt_class->xcl_name, xcl_name))
			continue;
		if (af != AF_UNSPEC && af != xprt->xpt_local.ss_family)
			continue;
		if (port != 0 && port != svc_xprt_local_port(xprt))
			continue;
		found = xprt;
		svc_xprt_get(xprt);
		break;
	}
	spin_unlock_bh(&serv->sv_lock);
	return found;
}
EXPORT_SYMBOL_GPL(svc_find_xprt);

static int svc_one_xprt_name(const struct svc_xprt *xprt,
			     char *pos, int remaining)
{
	int len;

	len = snprintf(pos, remaining, "%s %u\n",
		       xprt->xpt_class->xcl_name,
		       svc_xprt_local_port(xprt));
	if (len >= remaining)
		return -ENAMETOOLONG;
	return len;
}

/**
 * svc_xprt_names - format a buffer with a list of transport names
 * @serv: pointer to an RPC service
 * @buf: pointer to a buffer to be filled in
 * @buflen: length of buffer to be filled in
 *
 * Fills in @buf with a string containing a list of transport names,
 * each name terminated with '\n'.
 *
 * Returns positive length of the filled-in string on success; otherwise
 * a negative errno value is returned if an error occurs.
 */
int svc_xprt_names(struct svc_serv *serv, char *buf, const int buflen)
{
	struct svc_xprt *xprt;
	int len, totlen;
	char *pos;

	/* Sanity check args */
	if (!serv)
		return 0;

	spin_lock_bh(&serv->sv_lock);

	pos = buf;
	totlen = 0;
	list_for_each_entry(xprt, &serv->sv_permsocks, xpt_list) {
		len = svc_one_xprt_name(xprt, pos, buflen - totlen);
		if (len < 0) {
			*buf = '\0';
			totlen = len;
		}
		if (len <= 0)
			break;

		pos += len;
		totlen += len;
	}

	spin_unlock_bh(&serv->sv_lock);
	return totlen;
}
EXPORT_SYMBOL_GPL(svc_xprt_names);


/*----------------------------------------------------------------------------*/

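/*
 * seq_file plumbing for per-pool statistics. A service passes its
 * svc_serv to svc_pool_stats_open() from the open method of a stats
 * file (nfsd, for example, exposes this as /proc/fs/nfsd/pool_stats);
 * the iterator below then emits a header line followed by one line of
 * counters per pool.
 */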
static void *svc_pool_stats_start(struct seq_file *m, loff_t *pos)
{
	unsigned int pidx = (unsigned int)*pos;
	struct svc_serv *serv = m->private;

	dprintk("svc_pool_stats_start, *pidx=%u\n", pidx);

	if (!pidx)
		return SEQ_START_TOKEN;
	return (pidx > serv->sv_nrpools ? NULL : &serv->sv_pools[pidx-1]);
}

static void *svc_pool_stats_next(struct seq_file *m, void *p, loff_t *pos)
{
	struct svc_pool *pool = p;
	struct svc_serv *serv = m->private;

	dprintk("svc_pool_stats_next, *pos=%llu\n", *pos);

	if (p == SEQ_START_TOKEN) {
		pool = &serv->sv_pools[0];
	} else {
		unsigned int pidx = (pool - &serv->sv_pools[0]);
		if (pidx < serv->sv_nrpools-1)
			pool = &serv->sv_pools[pidx+1];
		else
			pool = NULL;
	}
	++*pos;
	return pool;
}

static void svc_pool_stats_stop(struct seq_file *m, void *p)
{
}

static int svc_pool_stats_show(struct seq_file *m, void *p)
{
	struct svc_pool *pool = p;

	if (p == SEQ_START_TOKEN) {
		seq_puts(m, "# pool packets-arrived sockets-enqueued threads-woken threads-timedout\n");
		return 0;
	}

	seq_printf(m, "%u %lu %lu %lu %lu\n",
		pool->sp_id,
		(unsigned long)atomic_long_read(&pool->sp_stats.packets),
		pool->sp_stats.sockets_queued,
		(unsigned long)atomic_long_read(&pool->sp_stats.threads_woken),
		(unsigned long)atomic_long_read(&pool->sp_stats.threads_timedout));

	return 0;
}

static const struct seq_operations svc_pool_stats_seq_ops = {
	.start	= svc_pool_stats_start,
	.next	= svc_pool_stats_next,
	.stop	= svc_pool_stats_stop,
	.show	= svc_pool_stats_show,
};

int svc_pool_stats_open(struct svc_serv *serv, struct file *file)
{
	int err;

	err = seq_open(file, &svc_pool_stats_seq_ops);
	if (!err)
		((struct seq_file *) file->private_data)->private = serv;
	return err;
}
EXPORT_SYMBOL(svc_pool_stats_open);

/*----------------------------------------------------------------------------*/