/*
 * linux/net/sunrpc/svc_xprt.c
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */

#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <linux/sunrpc/addr.h>
#include <linux/sunrpc/stats.h>
#include <linux/sunrpc/svc_xprt.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/xprt.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <trace/events/sunrpc.h>

#define RPCDBG_FACILITY	RPCDBG_SVCXPRT

static unsigned int svc_rpc_per_connection_limit __read_mostly;
module_param(svc_rpc_per_connection_limit, uint, 0644);


static struct svc_deferred_req *svc_deferred_dequeue(struct svc_xprt *xprt);
static int svc_deferred_recv(struct svc_rqst *rqstp);
static struct cache_deferred_req *svc_defer(struct cache_req *req);
static void svc_age_temp_xprts(struct timer_list *t);
static void svc_delete_xprt(struct svc_xprt *xprt);

/* apparently the "standard" is that clients close
 * idle connections after 5 minutes, servers after
 * 6 minutes
 *   http://www.connectathon.org/talks96/nfstcp.pdf
 */
static int svc_conn_age_period = 6*60;

/* List of registered transport classes */
static DEFINE_SPINLOCK(svc_xprt_class_lock);
static LIST_HEAD(svc_xprt_class_list);

/* SMP locking strategy:
 *
 *	svc_pool->sp_lock protects most of the fields of that pool.
 *	svc_serv->sv_lock protects sv_tempsocks, sv_permsocks, sv_tmpcnt.
 *	when both need to be taken (rare), svc_serv->sv_lock is first.
 *	The "service mutex" protects svc_serv->sv_nrthreads.
 *	svc_sock->sk_lock protects the svc_sock->sk_deferred list
 *	and the ->sk_info_authunix cache.
 *
 *	The XPT_BUSY bit in xprt->xpt_flags prevents a transport from being
 *	enqueued multiple times. During normal transport processing this bit
 *	is set by svc_xprt_enqueue and cleared by svc_xprt_received.
 *	Providers should not manipulate this bit directly.
 *
 *	Some flags can be set to certain values at any time
 *	providing that certain rules are followed:
 *
 *	XPT_CONN, XPT_DATA:
 *		- Can be set or cleared at any time.
 *		- After a set, svc_xprt_enqueue must be called to enqueue
 *		  the transport for processing.
 *		- After a clear, the transport must be read/accepted.
 *		  If this succeeds, it must be set again.
 *	XPT_CLOSE:
 *		- Can be set at any time. It is never cleared.
 *	XPT_DEAD:
 *		- Can only be set while XPT_BUSY is held, which ensures
 *		  that no other thread will be using the transport or will
 *		  try to set XPT_DEAD.
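 *
 *	As an illustration only (the callback name below is hypothetical,
 *	but the calls mirror what svcsock.c does when data arrives), a
 *	provider that notices incoming data marks the transport and queues
 *	it; an nfsd thread later re-arms it with svc_xprt_received():
 *
 *		static void my_data_ready(struct svc_xprt *xprt)
 *		{
 *			set_bit(XPT_DATA, &xprt->xpt_flags);
 *			svc_xprt_enqueue(xprt);
 *		}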
 */
int svc_reg_xprt_class(struct svc_xprt_class *xcl)
{
	struct svc_xprt_class *cl;
	int res = -EEXIST;

	dprintk("svc: Adding svc transport class '%s'\n", xcl->xcl_name);

	INIT_LIST_HEAD(&xcl->xcl_list);
	spin_lock(&svc_xprt_class_lock);
	/* Make sure there isn't already a class with the same name */
	list_for_each_entry(cl, &svc_xprt_class_list, xcl_list) {
		if (strcmp(xcl->xcl_name, cl->xcl_name) == 0)
			goto out;
	}
	list_add_tail(&xcl->xcl_list, &svc_xprt_class_list);
	res = 0;
out:
	spin_unlock(&svc_xprt_class_lock);
	return res;
}
EXPORT_SYMBOL_GPL(svc_reg_xprt_class);

void svc_unreg_xprt_class(struct svc_xprt_class *xcl)
{
	dprintk("svc: Removing svc transport class '%s'\n", xcl->xcl_name);
	spin_lock(&svc_xprt_class_lock);
	list_del_init(&xcl->xcl_list);
	spin_unlock(&svc_xprt_class_lock);
}
EXPORT_SYMBOL_GPL(svc_unreg_xprt_class);

/*
 * Format the transport list for printing
 */
int svc_print_xprts(char *buf, int maxlen)
{
	struct svc_xprt_class *xcl;
	char tmpstr[80];
	int len = 0;
	buf[0] = '\0';

	spin_lock(&svc_xprt_class_lock);
	list_for_each_entry(xcl, &svc_xprt_class_list, xcl_list) {
		int slen;

		sprintf(tmpstr, "%s %d\n", xcl->xcl_name, xcl->xcl_max_payload);
		slen = strlen(tmpstr);
		if (len + slen > maxlen)
			break;
		len += slen;
		strcat(buf, tmpstr);
	}
	spin_unlock(&svc_xprt_class_lock);

	return len;
}

static void svc_xprt_free(struct kref *kref)
{
	struct svc_xprt *xprt =
		container_of(kref, struct svc_xprt, xpt_ref);
	struct module *owner = xprt->xpt_class->xcl_owner;
	if (test_bit(XPT_CACHE_AUTH, &xprt->xpt_flags))
		svcauth_unix_info_release(xprt);
	put_net(xprt->xpt_net);
	/* See comment on corresponding get in xs_setup_bc_tcp(): */
	if (xprt->xpt_bc_xprt)
		xprt_put(xprt->xpt_bc_xprt);
	if (xprt->xpt_bc_xps)
		xprt_switch_put(xprt->xpt_bc_xps);
	xprt->xpt_ops->xpo_free(xprt);
	module_put(owner);
}

void svc_xprt_put(struct svc_xprt *xprt)
{
	kref_put(&xprt->xpt_ref, svc_xprt_free);
}
EXPORT_SYMBOL_GPL(svc_xprt_put);

/*
 * Called by transport drivers to initialize the transport independent
 * portion of the transport instance.
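 *
 * A minimal sketch of the expected call, assuming a hypothetical
 * transport class "my_class" that embeds a struct svc_xprt as sx_xprt
 * (real transports do the equivalent when constructing a new instance):
 *
 *	svc_xprt_init(net, &my_class, &my_xprt->sx_xprt, serv);
 *
 * Note that XPT_BUSY is set here; the new transport remains busy until
 * svc_xprt_received() is called on it.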
 */
void svc_xprt_init(struct net *net, struct svc_xprt_class *xcl,
		   struct svc_xprt *xprt, struct svc_serv *serv)
{
	memset(xprt, 0, sizeof(*xprt));
	xprt->xpt_class = xcl;
	xprt->xpt_ops = xcl->xcl_ops;
	kref_init(&xprt->xpt_ref);
	xprt->xpt_server = serv;
	INIT_LIST_HEAD(&xprt->xpt_list);
	INIT_LIST_HEAD(&xprt->xpt_ready);
	INIT_LIST_HEAD(&xprt->xpt_deferred);
	INIT_LIST_HEAD(&xprt->xpt_users);
	mutex_init(&xprt->xpt_mutex);
	spin_lock_init(&xprt->xpt_lock);
	set_bit(XPT_BUSY, &xprt->xpt_flags);
	rpc_init_wait_queue(&xprt->xpt_bc_pending, "xpt_bc_pending");
	xprt->xpt_net = get_net(net);
	strcpy(xprt->xpt_remotebuf, "uninitialized");
}
EXPORT_SYMBOL_GPL(svc_xprt_init);

static struct svc_xprt *__svc_xpo_create(struct svc_xprt_class *xcl,
					 struct svc_serv *serv,
					 struct net *net,
					 const int family,
					 const unsigned short port,
					 int flags)
{
	struct sockaddr_in sin = {
		.sin_family		= AF_INET,
		.sin_addr.s_addr	= htonl(INADDR_ANY),
		.sin_port		= htons(port),
	};
#if IS_ENABLED(CONFIG_IPV6)
	struct sockaddr_in6 sin6 = {
		.sin6_family		= AF_INET6,
		.sin6_addr		= IN6ADDR_ANY_INIT,
		.sin6_port		= htons(port),
	};
#endif
	struct sockaddr *sap;
	size_t len;

	switch (family) {
	case PF_INET:
		sap = (struct sockaddr *)&sin;
		len = sizeof(sin);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case PF_INET6:
		sap = (struct sockaddr *)&sin6;
		len = sizeof(sin6);
		break;
#endif
	default:
		return ERR_PTR(-EAFNOSUPPORT);
	}

	return xcl->xcl_ops->xpo_create(serv, net, sap, len, flags);
}

/*
 * svc_xprt_received conditionally queues the transport for processing
 * by another thread. The caller must hold the XPT_BUSY bit and must
 * not thereafter touch transport data.
 *
 * Note: XPT_DATA only gets cleared when a read-attempt finds no (or
 * insufficient) data.
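 *
 * Illustrative note: the smp_mb__before_atomic() below orders flag
 * updates made while the transport was busy before the clearing of
 * XPT_BUSY, so a thread that subsequently re-enqueues the transport
 * also sees the work that was left pending for it.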
 */
static void svc_xprt_received(struct svc_xprt *xprt)
{
	if (!test_bit(XPT_BUSY, &xprt->xpt_flags)) {
		WARN_ONCE(1, "xprt=0x%p already busy!", xprt);
		return;
	}

	/* As soon as we clear busy, the xprt could be closed and
	 * 'put', so we need a reference to call svc_enqueue_xprt with:
	 */
	svc_xprt_get(xprt);
	smp_mb__before_atomic();
	clear_bit(XPT_BUSY, &xprt->xpt_flags);
	xprt->xpt_server->sv_ops->svo_enqueue_xprt(xprt);
	svc_xprt_put(xprt);
}

void svc_add_new_perm_xprt(struct svc_serv *serv, struct svc_xprt *new)
{
	clear_bit(XPT_TEMP, &new->xpt_flags);
	spin_lock_bh(&serv->sv_lock);
	list_add(&new->xpt_list, &serv->sv_permsocks);
	spin_unlock_bh(&serv->sv_lock);
	svc_xprt_received(new);
}

static int _svc_create_xprt(struct svc_serv *serv, const char *xprt_name,
			    struct net *net, const int family,
			    const unsigned short port, int flags)
{
	struct svc_xprt_class *xcl;

	spin_lock(&svc_xprt_class_lock);
	list_for_each_entry(xcl, &svc_xprt_class_list, xcl_list) {
		struct svc_xprt *newxprt;
		unsigned short newport;

		if (strcmp(xprt_name, xcl->xcl_name))
			continue;

		if (!try_module_get(xcl->xcl_owner))
			goto err;

		spin_unlock(&svc_xprt_class_lock);
		newxprt = __svc_xpo_create(xcl, serv, net, family, port, flags);
		if (IS_ERR(newxprt)) {
			module_put(xcl->xcl_owner);
			return PTR_ERR(newxprt);
		}
		svc_add_new_perm_xprt(serv, newxprt);
		newport = svc_xprt_local_port(newxprt);
		return newport;
	}
err:
	spin_unlock(&svc_xprt_class_lock);
	/* This errno is exposed to user space.  Provide a reasonable
	 * perror msg for a bad transport. */
	return -EPROTONOSUPPORT;
}

int svc_create_xprt(struct svc_serv *serv, const char *xprt_name,
		    struct net *net, const int family,
		    const unsigned short port, int flags)
{
	int err;

	dprintk("svc: creating transport %s[%d]\n", xprt_name, port);
	err = _svc_create_xprt(serv, xprt_name, net, family, port, flags);
	if (err == -EPROTONOSUPPORT) {
		request_module("svc%s", xprt_name);
		err = _svc_create_xprt(serv, xprt_name, net, family, port, flags);
	}
	/* On success _svc_create_xprt() returns the (positive) port number,
	 * so only report genuine failures here: */
	if (err < 0)
		dprintk("svc: transport %s not found, err %d\n",
			xprt_name, err);
	return err;
}
EXPORT_SYMBOL_GPL(svc_create_xprt);

/*
 * Copy the local and remote xprt addresses to the rqstp structure
 */
void svc_xprt_copy_addrs(struct svc_rqst *rqstp, struct svc_xprt *xprt)
{
	memcpy(&rqstp->rq_addr, &xprt->xpt_remote, xprt->xpt_remotelen);
	rqstp->rq_addrlen = xprt->xpt_remotelen;

	/*
	 * Destination address in request is needed for binding the
	 * source address in RPC replies/callbacks later.
	 */
	memcpy(&rqstp->rq_daddr, &xprt->xpt_local, xprt->xpt_locallen);
	rqstp->rq_daddrlen = xprt->xpt_locallen;
}
EXPORT_SYMBOL_GPL(svc_xprt_copy_addrs);

/**
 * svc_print_addr - Format rq_addr field for printing
 * @rqstp: svc_rqst struct containing address to print
 * @buf: target buffer for formatted address
 * @len: length of target buffer
 *
 */
char *svc_print_addr(struct svc_rqst *rqstp, char *buf, size_t len)
{
	return __svc_print_addr(svc_addr(rqstp), buf, len);
}
EXPORT_SYMBOL_GPL(svc_print_addr);

static bool svc_xprt_slots_in_range(struct svc_xprt *xprt)
{
	unsigned int limit = svc_rpc_per_connection_limit;
	int nrqsts = atomic_read(&xprt->xpt_nr_rqsts);

	return limit == 0 || (nrqsts >= 0 && nrqsts < limit);
}

static bool svc_xprt_reserve_slot(struct svc_rqst *rqstp, struct svc_xprt *xprt)
{
	if (!test_bit(RQ_DATA, &rqstp->rq_flags)) {
		if (!svc_xprt_slots_in_range(xprt))
			return false;
		atomic_inc(&xprt->xpt_nr_rqsts);
		set_bit(RQ_DATA, &rqstp->rq_flags);
	}
	return true;
}

static void svc_xprt_release_slot(struct svc_rqst *rqstp)
{
	struct svc_xprt *xprt = rqstp->rq_xprt;
	if (test_and_clear_bit(RQ_DATA, &rqstp->rq_flags)) {
		atomic_dec(&xprt->xpt_nr_rqsts);
		svc_xprt_enqueue(xprt);
	}
}

static bool svc_xprt_has_something_to_do(struct svc_xprt *xprt)
{
	if (xprt->xpt_flags & ((1<<XPT_CONN)|(1<<XPT_CLOSE)))
		return true;
	if (xprt->xpt_flags & ((1<<XPT_DATA)|(1<<XPT_DEFERRED))) {
		if (xprt->xpt_ops->xpo_has_wspace(xprt) &&
		    svc_xprt_slots_in_range(xprt))
			return true;
		trace_svc_xprt_no_write_space(xprt);
		return false;
	}
	return false;
}

void svc_xprt_do_enqueue(struct svc_xprt *xprt)
{
	struct svc_pool *pool;
	struct svc_rqst	*rqstp = NULL;
	int cpu;

	if (!svc_xprt_has_something_to_do(xprt))
		return;

	/* Mark transport as busy. It will remain in this state until
	 * the provider calls svc_xprt_received. We update XPT_BUSY
	 * atomically because it also guards against trying to enqueue
	 * the transport twice.
	 */
	if (test_and_set_bit(XPT_BUSY, &xprt->xpt_flags))
		return;

	cpu = get_cpu();
	pool = svc_pool_for_cpu(xprt->xpt_server, cpu);

	atomic_long_inc(&pool->sp_stats.packets);

	spin_lock_bh(&pool->sp_lock);
	list_add_tail(&xprt->xpt_ready, &pool->sp_sockets);
	pool->sp_stats.sockets_queued++;
	spin_unlock_bh(&pool->sp_lock);

	/* find a thread for this xprt */
	rcu_read_lock();
	list_for_each_entry_rcu(rqstp, &pool->sp_all_threads, rq_all) {
		if (test_and_set_bit(RQ_BUSY, &rqstp->rq_flags))
			continue;
		atomic_long_inc(&pool->sp_stats.threads_woken);
		rqstp->rq_qtime = ktime_get();
		wake_up_process(rqstp->rq_task);
		goto out_unlock;
	}
	set_bit(SP_CONGESTED, &pool->sp_flags);
	rqstp = NULL;
out_unlock:
	rcu_read_unlock();
	put_cpu();
	trace_svc_xprt_do_enqueue(xprt, rqstp);
}
EXPORT_SYMBOL_GPL(svc_xprt_do_enqueue);

/*
 * Queue up a transport with data pending. If there are idle nfsd
 * processes, wake 'em up.
 *
 */
void svc_xprt_enqueue(struct svc_xprt *xprt)
{
	if (test_bit(XPT_BUSY, &xprt->xpt_flags))
		return;
	xprt->xpt_server->sv_ops->svo_enqueue_xprt(xprt);
}
EXPORT_SYMBOL_GPL(svc_xprt_enqueue);

/*
 * Dequeue the first transport, if there is one.
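 *
 * Illustrative note: a reference is taken on the transport returned
 * here, so the caller (ultimately svc_recv() via svc_xprt_release())
 * is responsible for the matching svc_xprt_put().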
 */
static struct svc_xprt *svc_xprt_dequeue(struct svc_pool *pool)
{
	struct svc_xprt	*xprt = NULL;

	if (list_empty(&pool->sp_sockets))
		goto out;

	spin_lock_bh(&pool->sp_lock);
	if (likely(!list_empty(&pool->sp_sockets))) {
		xprt = list_first_entry(&pool->sp_sockets,
					struct svc_xprt, xpt_ready);
		list_del_init(&xprt->xpt_ready);
		svc_xprt_get(xprt);
	}
	spin_unlock_bh(&pool->sp_lock);
out:
	return xprt;
}

/**
 * svc_reserve - change the space reserved for the reply to a request.
 * @rqstp:  The request in question
 * @space: new max space to reserve
 *
 * Each request reserves some space on the output queue of the transport
 * to make sure the reply fits.  This function reduces that reserved
 * space to be the amount of space used already, plus @space.
 *
 */
void svc_reserve(struct svc_rqst *rqstp, int space)
{
	space += rqstp->rq_res.head[0].iov_len;

	if (space < rqstp->rq_reserved) {
		struct svc_xprt *xprt = rqstp->rq_xprt;
		atomic_sub((rqstp->rq_reserved - space), &xprt->xpt_reserved);
		rqstp->rq_reserved = space;

		svc_xprt_enqueue(xprt);
	}
}
EXPORT_SYMBOL_GPL(svc_reserve);

static void svc_xprt_release(struct svc_rqst *rqstp)
{
	struct svc_xprt	*xprt = rqstp->rq_xprt;

	xprt->xpt_ops->xpo_release_rqst(rqstp);

	kfree(rqstp->rq_deferred);
	rqstp->rq_deferred = NULL;

	svc_free_res_pages(rqstp);
	rqstp->rq_res.page_len = 0;
	rqstp->rq_res.page_base = 0;

	/* Reset response buffer and release
	 * the reservation.
	 * But first, check that enough space was reserved
	 * for the reply, otherwise we have a bug!
	 */
	if ((rqstp->rq_res.len) > rqstp->rq_reserved)
		printk(KERN_ERR "RPC request reserved %d but used %d\n",
		       rqstp->rq_reserved,
		       rqstp->rq_res.len);

	rqstp->rq_res.head[0].iov_len = 0;
	svc_reserve(rqstp, 0);
	svc_xprt_release_slot(rqstp);
	rqstp->rq_xprt = NULL;
	svc_xprt_put(xprt);
}

/*
 * Some svc_serv's will have occasional work to do, even when an xprt is not
 * waiting to be serviced. This function is there to "kick" a task in one of
 * those services so that it can wake up and do that work. Note that we only
 * bother with pool 0 as we don't need to wake up more than one thread for
 * this purpose.
 */
void svc_wake_up(struct svc_serv *serv)
{
	struct svc_rqst	*rqstp;
	struct svc_pool *pool;

	pool = &serv->sv_pools[0];

	rcu_read_lock();
	list_for_each_entry_rcu(rqstp, &pool->sp_all_threads, rq_all) {
		/* skip any that aren't queued */
		if (test_bit(RQ_BUSY, &rqstp->rq_flags))
			continue;
		rcu_read_unlock();
		wake_up_process(rqstp->rq_task);
		trace_svc_wake_up(rqstp->rq_task->pid);
		return;
	}
	rcu_read_unlock();

	/* No free entries available */
	set_bit(SP_TASK_PENDING, &pool->sp_flags);
	smp_wmb();
	trace_svc_wake_up(0);
}
EXPORT_SYMBOL_GPL(svc_wake_up);

int svc_port_is_privileged(struct sockaddr *sin)
{
	switch (sin->sa_family) {
	case AF_INET:
		return ntohs(((struct sockaddr_in *)sin)->sin_port)
			< PROT_SOCK;
	case AF_INET6:
		return ntohs(((struct sockaddr_in6 *)sin)->sin6_port)
			< PROT_SOCK;
	default:
		return 0;
	}
}

/*
 * Make sure that we don't have too many active connections. If we have,
 * something must be dropped.
 * It's not clear what will happen if we allow "too many" connections, but
 * when dealing with network-facing software, we have to code defensively.
 * Here we do that by imposing hard limits.
 *
 * There's no point in trying to do random drop here for DoS
 * prevention. The NFS client does one reconnect in 15 seconds. An
 * attacker can easily beat that.
 *
 * The only somewhat efficient mechanism would be to drop old
 * connections from the same IP first. But right now we don't even
 * record the client IP in svc_sock.
 *
 * Single-threaded services that expect a lot of clients will probably
 * need to set sv_maxconn to override the default value, which is based
 * on the number of threads.
 */
static void svc_check_conn_limits(struct svc_serv *serv)
{
	unsigned int limit = serv->sv_maxconn ? serv->sv_maxconn :
				(serv->sv_nrthreads+3) * 20;

	if (serv->sv_tmpcnt > limit) {
		struct svc_xprt *xprt = NULL;
		spin_lock_bh(&serv->sv_lock);
		if (!list_empty(&serv->sv_tempsocks)) {
			/* Try to help the admin */
			net_notice_ratelimited("%s: too many open connections, consider increasing the %s\n",
					       serv->sv_name, serv->sv_maxconn ?
					       "max number of connections" :
					       "number of threads");
			/*
			 * Always select the oldest connection. It's not fair,
			 * but so is life
			 */
			xprt = list_entry(serv->sv_tempsocks.prev,
					  struct svc_xprt,
					  xpt_list);
			set_bit(XPT_CLOSE, &xprt->xpt_flags);
			svc_xprt_get(xprt);
		}
		spin_unlock_bh(&serv->sv_lock);

		if (xprt) {
			svc_xprt_enqueue(xprt);
			svc_xprt_put(xprt);
		}
	}
}

static int svc_alloc_arg(struct svc_rqst *rqstp)
{
	struct svc_serv *serv = rqstp->rq_server;
	struct xdr_buf *arg;
	int pages;
	int i;

	/* now allocate needed pages.  If we get a failure, sleep briefly */
	pages = (serv->sv_max_mesg + 2 * PAGE_SIZE) >> PAGE_SHIFT;
	if (pages > RPCSVC_MAXPAGES) {
		pr_warn_once("svc: warning: pages=%u > RPCSVC_MAXPAGES=%lu\n",
			     pages, RPCSVC_MAXPAGES);
		/* use as many pages as possible */
		pages = RPCSVC_MAXPAGES;
	}
	for (i = 0; i < pages ; i++)
		while (rqstp->rq_pages[i] == NULL) {
			struct page *p = alloc_page(GFP_KERNEL);
			if (!p) {
				set_current_state(TASK_INTERRUPTIBLE);
				if (signalled() || kthread_should_stop()) {
					set_current_state(TASK_RUNNING);
					return -EINTR;
				}
				schedule_timeout(msecs_to_jiffies(500));
			}
			rqstp->rq_pages[i] = p;
		}
	rqstp->rq_page_end = &rqstp->rq_pages[i];
	rqstp->rq_pages[i++] = NULL; /* this might be seen in nfs_read_actor */

	/* Make arg->head point to first page and arg->pages point to rest */
	arg = &rqstp->rq_arg;
	arg->head[0].iov_base = page_address(rqstp->rq_pages[0]);
	arg->head[0].iov_len = PAGE_SIZE;
	arg->pages = rqstp->rq_pages + 1;
	arg->page_base = 0;
	/* save at least one page for response */
	arg->page_len = (pages-2)*PAGE_SIZE;
	arg->len = (pages-1)*PAGE_SIZE;
	arg->tail[0].iov_len = 0;
	return 0;
}

static bool
rqst_should_sleep(struct svc_rqst *rqstp)
{
	struct svc_pool *pool = rqstp->rq_pool;

	/* did someone call svc_wake_up? */
	if (test_and_clear_bit(SP_TASK_PENDING, &pool->sp_flags))
		return false;

	/* was a socket queued? */
	if (!list_empty(&pool->sp_sockets))
		return false;

	/* are we shutting down? */
	if (signalled() || kthread_should_stop())
		return false;

	/* are we freezing? */
	if (freezing(current))
		return false;

	return true;
}

static struct svc_xprt *svc_get_next_xprt(struct svc_rqst *rqstp, long timeout)
{
	struct svc_pool *pool = rqstp->rq_pool;
	long time_left = 0;

	/* rq_xprt should be clear on entry */
	WARN_ON_ONCE(rqstp->rq_xprt);

	rqstp->rq_xprt = svc_xprt_dequeue(pool);
	if (rqstp->rq_xprt)
		goto out_found;

	/*
	 * We have to be able to interrupt this wait
	 * to bring down the daemons ...
	 */
	set_current_state(TASK_INTERRUPTIBLE);
	smp_mb__before_atomic();
	clear_bit(SP_CONGESTED, &pool->sp_flags);
	clear_bit(RQ_BUSY, &rqstp->rq_flags);
	smp_mb__after_atomic();

	if (likely(rqst_should_sleep(rqstp)))
		time_left = schedule_timeout(timeout);
	else
		__set_current_state(TASK_RUNNING);

	try_to_freeze();

	set_bit(RQ_BUSY, &rqstp->rq_flags);
	smp_mb__after_atomic();
	rqstp->rq_xprt = svc_xprt_dequeue(pool);
	if (rqstp->rq_xprt)
		goto out_found;

	if (!time_left)
		atomic_long_inc(&pool->sp_stats.threads_timedout);

	if (signalled() || kthread_should_stop())
		return ERR_PTR(-EINTR);
	return ERR_PTR(-EAGAIN);
out_found:
	/* Normally we will wait up to 5 seconds for any required
	 * cache information to be provided.
	 */
	if (!test_bit(SP_CONGESTED, &pool->sp_flags))
		rqstp->rq_chandle.thread_wait = 5*HZ;
	else
		rqstp->rq_chandle.thread_wait = 1*HZ;
	trace_svc_xprt_dequeue(rqstp);
	return rqstp->rq_xprt;
}

static void svc_add_new_temp_xprt(struct svc_serv *serv, struct svc_xprt *newxpt)
{
	spin_lock_bh(&serv->sv_lock);
	set_bit(XPT_TEMP, &newxpt->xpt_flags);
	list_add(&newxpt->xpt_list, &serv->sv_tempsocks);
	serv->sv_tmpcnt++;
	if (serv->sv_temptimer.function == NULL) {
		/* setup timer to age temp transports */
		serv->sv_temptimer.function = svc_age_temp_xprts;
		mod_timer(&serv->sv_temptimer,
			  jiffies + svc_conn_age_period * HZ);
	}
	spin_unlock_bh(&serv->sv_lock);
	svc_xprt_received(newxpt);
}

static int svc_handle_xprt(struct svc_rqst *rqstp, struct svc_xprt *xprt)
{
	struct svc_serv *serv = rqstp->rq_server;
	int len = 0;

	if (test_bit(XPT_CLOSE, &xprt->xpt_flags)) {
		dprintk("svc_recv: found XPT_CLOSE\n");
		if (test_and_clear_bit(XPT_KILL_TEMP, &xprt->xpt_flags))
			xprt->xpt_ops->xpo_kill_temp_xprt(xprt);
		svc_delete_xprt(xprt);
		/* Leave XPT_BUSY set on the dead xprt: */
		goto out;
	}
	if (test_bit(XPT_LISTENER, &xprt->xpt_flags)) {
		struct svc_xprt *newxpt;
		/*
		 * We know this module_get will succeed because the
		 * listener holds a reference too
		 */
		__module_get(xprt->xpt_class->xcl_owner);
		svc_check_conn_limits(xprt->xpt_server);
		newxpt = xprt->xpt_ops->xpo_accept(xprt);
		if (newxpt)
			svc_add_new_temp_xprt(serv, newxpt);
		else
			module_put(xprt->xpt_class->xcl_owner);
	} else if (svc_xprt_reserve_slot(rqstp, xprt)) {
		/* XPT_DATA|XPT_DEFERRED case: */
		dprintk("svc: server %p, pool %u, transport %p, inuse=%d\n",
			rqstp, rqstp->rq_pool->sp_id, xprt,
			kref_read(&xprt->xpt_ref));
		rqstp->rq_deferred = svc_deferred_dequeue(xprt);
		if (rqstp->rq_deferred)
			len = svc_deferred_recv(rqstp);
		else
			len = xprt->xpt_ops->xpo_recvfrom(rqstp);
		rqstp->rq_stime = ktime_get();
		rqstp->rq_reserved = serv->sv_max_mesg;
		atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved);
	}
	/* clear XPT_BUSY: */
	svc_xprt_received(xprt);
out:
	trace_svc_handle_xprt(xprt, len);
	return len;
}

/*
 * Receive the next request on any transport.  This code is carefully
 * organised not to touch any cachelines in the shared svc_serv
 * structure, only cachelines in the local svc_pool.
 */
int svc_recv(struct svc_rqst *rqstp, long timeout)
{
	struct svc_xprt *xprt = NULL;
	struct svc_serv *serv = rqstp->rq_server;
	int len, err;

	dprintk("svc: server %p waiting for data (to = %ld)\n",
		rqstp, timeout);

	if (rqstp->rq_xprt)
		printk(KERN_ERR
			"svc_recv: service %p, transport not NULL!\n",
			rqstp);

	err = svc_alloc_arg(rqstp);
	if (err)
		goto out;

	try_to_freeze();
	cond_resched();
	err = -EINTR;
	if (signalled() || kthread_should_stop())
		goto out;

	xprt = svc_get_next_xprt(rqstp, timeout);
	if (IS_ERR(xprt)) {
		err = PTR_ERR(xprt);
		goto out;
	}

	len = svc_handle_xprt(rqstp, xprt);

	/* No data, incomplete (TCP) read, or accept() */
	err = -EAGAIN;
	if (len <= 0)
		goto out_release;

	clear_bit(XPT_OLD, &xprt->xpt_flags);

	xprt->xpt_ops->xpo_secure_port(rqstp);
	rqstp->rq_chandle.defer = svc_defer;
	rqstp->rq_xid = svc_getu32(&rqstp->rq_arg.head[0]);

	if (serv->sv_stats)
		serv->sv_stats->netcnt++;
	trace_svc_recv(rqstp, len);
	return len;
out_release:
	rqstp->rq_res.len = 0;
	svc_xprt_release(rqstp);
out:
	return err;
}
EXPORT_SYMBOL_GPL(svc_recv);

/*
 * Drop request
 */
void svc_drop(struct svc_rqst *rqstp)
{
	trace_svc_drop(rqstp);
	dprintk("svc: xprt %p dropped request\n", rqstp->rq_xprt);
	svc_xprt_release(rqstp);
}
EXPORT_SYMBOL_GPL(svc_drop);

/*
 * Return reply to client.
 */
int svc_send(struct svc_rqst *rqstp)
{
	struct svc_xprt	*xprt;
	int len = -EFAULT;
	struct xdr_buf *xb;

	xprt = rqstp->rq_xprt;
	if (!xprt)
		goto out;

	/* release the receive skb before sending the reply */
	xprt->xpt_ops->xpo_release_rqst(rqstp);

	/* calculate over-all length */
	xb = &rqstp->rq_res;
	xb->len = xb->head[0].iov_len +
		xb->page_len +
		xb->tail[0].iov_len;

	/* Grab mutex to serialize outgoing data. */
	mutex_lock(&xprt->xpt_mutex);
	trace_svc_stats_latency(rqstp);
	if (test_bit(XPT_DEAD, &xprt->xpt_flags)
			|| test_bit(XPT_CLOSE, &xprt->xpt_flags))
		len = -ENOTCONN;
	else
		len = xprt->xpt_ops->xpo_sendto(rqstp);
	mutex_unlock(&xprt->xpt_mutex);
	rpc_wake_up(&xprt->xpt_bc_pending);
	trace_svc_send(rqstp, len);
	svc_xprt_release(rqstp);

	if (len == -ECONNREFUSED || len == -ENOTCONN || len == -EAGAIN)
		len = 0;
out:
	return len;
}

/*
 * Timer function to close old temporary transports, using
 * a mark-and-sweep algorithm.
 */
static void svc_age_temp_xprts(struct timer_list *t)
{
	struct svc_serv *serv = from_timer(serv, t, sv_temptimer);
	struct svc_xprt *xprt;
	struct list_head *le, *next;

	dprintk("svc_age_temp_xprts\n");

	if (!spin_trylock_bh(&serv->sv_lock)) {
		/* busy, try again 1 sec later */
		dprintk("svc_age_temp_xprts: busy\n");
		mod_timer(&serv->sv_temptimer, jiffies + HZ);
		return;
	}

	list_for_each_safe(le, next, &serv->sv_tempsocks) {
		xprt = list_entry(le, struct svc_xprt, xpt_list);

		/* First time through, just mark it OLD.
		 * Second time through, close it. */
		if (!test_and_set_bit(XPT_OLD, &xprt->xpt_flags))
			continue;
		if (kref_read(&xprt->xpt_ref) > 1 ||
		    test_bit(XPT_BUSY, &xprt->xpt_flags))
			continue;
		list_del_init(le);
		set_bit(XPT_CLOSE, &xprt->xpt_flags);
		dprintk("queuing xprt %p for closing\n", xprt);

		/* a thread will dequeue and close it soon */
		svc_xprt_enqueue(xprt);
	}
	spin_unlock_bh(&serv->sv_lock);

	mod_timer(&serv->sv_temptimer, jiffies + svc_conn_age_period * HZ);
}

/* Close temporary transports whose xpt_local matches server_addr immediately
 * instead of waiting for them to be picked up by the timer.
 *
 * This is meant to be called from a notifier_block that runs when an ip
 * address is deleted.
 */
void svc_age_temp_xprts_now(struct svc_serv *serv, struct sockaddr *server_addr)
{
	struct svc_xprt *xprt;
	struct list_head *le, *next;
	LIST_HEAD(to_be_closed);

	spin_lock_bh(&serv->sv_lock);
	list_for_each_safe(le, next, &serv->sv_tempsocks) {
		xprt = list_entry(le, struct svc_xprt, xpt_list);
		if (rpc_cmp_addr(server_addr, (struct sockaddr *)
				 &xprt->xpt_local)) {
			dprintk("svc_age_temp_xprts_now: found %p\n", xprt);
			list_move(le, &to_be_closed);
		}
	}
	spin_unlock_bh(&serv->sv_lock);

	while (!list_empty(&to_be_closed)) {
		le = to_be_closed.next;
		list_del_init(le);
		xprt = list_entry(le, struct svc_xprt, xpt_list);
		set_bit(XPT_CLOSE, &xprt->xpt_flags);
		set_bit(XPT_KILL_TEMP, &xprt->xpt_flags);
		dprintk("svc_age_temp_xprts_now: queuing xprt %p for closing\n",
			xprt);
		svc_xprt_enqueue(xprt);
	}
}
EXPORT_SYMBOL_GPL(svc_age_temp_xprts_now);

static void call_xpt_users(struct svc_xprt *xprt)
{
	struct svc_xpt_user *u;

	spin_lock(&xprt->xpt_lock);
	while (!list_empty(&xprt->xpt_users)) {
		u = list_first_entry(&xprt->xpt_users, struct svc_xpt_user, list);
		list_del(&u->list);
		u->callback(u);
	}
	spin_unlock(&xprt->xpt_lock);
}

/*
 * Remove a dead transport
 */
static void svc_delete_xprt(struct svc_xprt *xprt)
{
	struct svc_serv	*serv = xprt->xpt_server;
	struct svc_deferred_req *dr;

	/* Only do this once */
	if (test_and_set_bit(XPT_DEAD, &xprt->xpt_flags))
		BUG();

	dprintk("svc: svc_delete_xprt(%p)\n", xprt);
	xprt->xpt_ops->xpo_detach(xprt);

	spin_lock_bh(&serv->sv_lock);
	list_del_init(&xprt->xpt_list);
	WARN_ON_ONCE(!list_empty(&xprt->xpt_ready));
	if (test_bit(XPT_TEMP, &xprt->xpt_flags))
		serv->sv_tmpcnt--;
	spin_unlock_bh(&serv->sv_lock);

	while ((dr = svc_deferred_dequeue(xprt)) != NULL)
		kfree(dr);

	call_xpt_users(xprt);
	svc_xprt_put(xprt);
}

void svc_close_xprt(struct svc_xprt *xprt)
{
	set_bit(XPT_CLOSE, &xprt->xpt_flags);
	if (test_and_set_bit(XPT_BUSY, &xprt->xpt_flags))
		/* someone else will have to effect the close */
		return;
	/*
	 * We expect svc_close_xprt() to work even when no threads are
	 * running (e.g., while configuring the server before starting
	 * any threads), so if the transport isn't busy, we delete
	 * it ourself:
	 */
	svc_delete_xprt(xprt);
}
EXPORT_SYMBOL_GPL(svc_close_xprt);

static int svc_close_list(struct svc_serv *serv, struct list_head *xprt_list, struct net *net)
{
	struct svc_xprt *xprt;
	int ret = 0;

	spin_lock(&serv->sv_lock);
	list_for_each_entry(xprt, xprt_list, xpt_list) {
		if (xprt->xpt_net != net)
			continue;
		ret++;
		set_bit(XPT_CLOSE, &xprt->xpt_flags);
		svc_xprt_enqueue(xprt);
	}
	spin_unlock(&serv->sv_lock);
	return ret;
}

static struct svc_xprt *svc_dequeue_net(struct svc_serv *serv, struct net *net)
{
	struct svc_pool *pool;
	struct svc_xprt *xprt;
	struct svc_xprt *tmp;
	int i;

	for (i = 0; i < serv->sv_nrpools; i++) {
		pool = &serv->sv_pools[i];

		spin_lock_bh(&pool->sp_lock);
		list_for_each_entry_safe(xprt, tmp, &pool->sp_sockets, xpt_ready) {
			if (xprt->xpt_net != net)
				continue;
			list_del_init(&xprt->xpt_ready);
			spin_unlock_bh(&pool->sp_lock);
			return xprt;
		}
		spin_unlock_bh(&pool->sp_lock);
	}
	return NULL;
}

static void svc_clean_up_xprts(struct svc_serv *serv, struct net *net)
{
	struct svc_xprt *xprt;

	while ((xprt = svc_dequeue_net(serv, net))) {
		set_bit(XPT_CLOSE, &xprt->xpt_flags);
		svc_delete_xprt(xprt);
	}
}

/*
 * Server threads may still be running (especially in the case where the
 * service is still running in other network namespaces).
 *
 * So we shut down sockets the same way we would on a running server, by
 * setting XPT_CLOSE, enqueuing, and letting a thread pick it up to do
 * the close.  If there are no such other threads running,
 * svc_clean_up_xprts() does a simple version of a server's main event
 * loop; if there are other threads, we may need to wait a little while
 * and then check again to see if they're done.
 */
void svc_close_net(struct svc_serv *serv, struct net *net)
{
	int delay = 0;

	while (svc_close_list(serv, &serv->sv_permsocks, net) +
	       svc_close_list(serv, &serv->sv_tempsocks, net)) {

		svc_clean_up_xprts(serv, net);
		msleep(delay++);
	}
}

/*
 * Handle defer and revisit of requests
 */

static void svc_revisit(struct cache_deferred_req *dreq, int too_many)
{
	struct svc_deferred_req *dr =
		container_of(dreq, struct svc_deferred_req, handle);
	struct svc_xprt *xprt = dr->xprt;

	spin_lock(&xprt->xpt_lock);
	set_bit(XPT_DEFERRED, &xprt->xpt_flags);
	if (too_many || test_bit(XPT_DEAD, &xprt->xpt_flags)) {
		spin_unlock(&xprt->xpt_lock);
		dprintk("revisit canceled\n");
		svc_xprt_put(xprt);
		trace_svc_drop_deferred(dr);
		kfree(dr);
		return;
	}
	dprintk("revisit queued\n");
	dr->xprt = NULL;
	list_add(&dr->handle.recent, &xprt->xpt_deferred);
	spin_unlock(&xprt->xpt_lock);
	svc_xprt_enqueue(xprt);
	svc_xprt_put(xprt);
}

/*
 * Save the request off for later processing. The request buffer looks
 * like this:
 *
 * <xprt-header><rpc-header><rpc-pagelist><rpc-tail>
 *
 * This code can only handle requests that consist of an xprt-header
 * and rpc-header.
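 *
 * A sketch of the sizing used below (illustrative only): the deferred
 * copy holds the fixed struct plus the full rq_arg length, with argslen
 * recorded in 32-bit XDR words:
 *
 *	size    = sizeof(struct svc_deferred_req) + rq_arg.len;
 *	argslen = rq_arg.len >> 2;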
 */
static struct cache_deferred_req *svc_defer(struct cache_req *req)
{
	struct svc_rqst *rqstp = container_of(req, struct svc_rqst, rq_chandle);
	struct svc_deferred_req *dr;

	if (rqstp->rq_arg.page_len || !test_bit(RQ_USEDEFERRAL, &rqstp->rq_flags))
		return NULL; /* if more than a page, give up FIXME */
	if (rqstp->rq_deferred) {
		dr = rqstp->rq_deferred;
		rqstp->rq_deferred = NULL;
	} else {
		size_t skip;
		size_t size;
		/* FIXME maybe discard if size too large */
		size = sizeof(struct svc_deferred_req) + rqstp->rq_arg.len;
		dr = kmalloc(size, GFP_KERNEL);
		if (dr == NULL)
			return NULL;

		dr->handle.owner = rqstp->rq_server;
		dr->prot = rqstp->rq_prot;
		memcpy(&dr->addr, &rqstp->rq_addr, rqstp->rq_addrlen);
		dr->addrlen = rqstp->rq_addrlen;
		dr->daddr = rqstp->rq_daddr;
		dr->argslen = rqstp->rq_arg.len >> 2;
		dr->xprt_hlen = rqstp->rq_xprt_hlen;

		/* back up head to the start of the buffer and copy */
		skip = rqstp->rq_arg.len - rqstp->rq_arg.head[0].iov_len;
		memcpy(dr->args, rqstp->rq_arg.head[0].iov_base - skip,
		       dr->argslen << 2);
	}
	svc_xprt_get(rqstp->rq_xprt);
	dr->xprt = rqstp->rq_xprt;
	set_bit(RQ_DROPME, &rqstp->rq_flags);

	dr->handle.revisit = svc_revisit;
	trace_svc_defer(rqstp);
	return &dr->handle;
}

/*
 * recv data from a deferred request into an active one
 */
static int svc_deferred_recv(struct svc_rqst *rqstp)
{
	struct svc_deferred_req *dr = rqstp->rq_deferred;

	/* setup iov_base past transport header */
	rqstp->rq_arg.head[0].iov_base = dr->args + (dr->xprt_hlen>>2);
	/* The iov_len does not include the transport header bytes */
	rqstp->rq_arg.head[0].iov_len = (dr->argslen<<2) - dr->xprt_hlen;
	rqstp->rq_arg.page_len = 0;
	/* The rq_arg.len includes the transport header bytes */
	rqstp->rq_arg.len = dr->argslen<<2;
	rqstp->rq_prot = dr->prot;
	memcpy(&rqstp->rq_addr, &dr->addr, dr->addrlen);
	rqstp->rq_addrlen = dr->addrlen;
	/* Save off transport header len in case we get deferred again */
	rqstp->rq_xprt_hlen = dr->xprt_hlen;
	rqstp->rq_daddr = dr->daddr;
	rqstp->rq_respages = rqstp->rq_pages;
	return (dr->argslen<<2) - dr->xprt_hlen;
}


static struct svc_deferred_req *svc_deferred_dequeue(struct svc_xprt *xprt)
{
	struct svc_deferred_req *dr = NULL;

	if (!test_bit(XPT_DEFERRED, &xprt->xpt_flags))
		return NULL;
	spin_lock(&xprt->xpt_lock);
	if (!list_empty(&xprt->xpt_deferred)) {
		dr = list_entry(xprt->xpt_deferred.next,
				struct svc_deferred_req,
				handle.recent);
		list_del_init(&dr->handle.recent);
		trace_svc_revisit_deferred(dr);
	} else
		clear_bit(XPT_DEFERRED, &xprt->xpt_flags);
	spin_unlock(&xprt->xpt_lock);
	return dr;
}

/**
 * svc_find_xprt - find an RPC transport instance
 * @serv: pointer to svc_serv to search
 * @xcl_name: C string containing transport's class name
 * @net: owner net pointer
 * @af: Address family of transport's local address
 * @port: transport's IP port number
 *
 * Return the transport instance pointer for the endpoint accepting
 * connections/peer traffic from the specified transport class,
 * address family and port.
 *
 * Specifying 0 for the address family or port is effectively a
 * wild-card, and will result in matching the first transport in the
 * service's list that has a matching class name.
 */
struct svc_xprt *svc_find_xprt(struct svc_serv *serv, const char *xcl_name,
			       struct net *net, const sa_family_t af,
			       const unsigned short port)
{
	struct svc_xprt *xprt;
	struct svc_xprt *found = NULL;

	/* Sanity check the args */
	if (serv == NULL || xcl_name == NULL)
		return found;

	spin_lock_bh(&serv->sv_lock);
	list_for_each_entry(xprt, &serv->sv_permsocks, xpt_list) {
		if (xprt->xpt_net != net)
			continue;
		if (strcmp(xprt->xpt_class->xcl_name, xcl_name))
			continue;
		if (af != AF_UNSPEC && af != xprt->xpt_local.ss_family)
			continue;
		if (port != 0 && port != svc_xprt_local_port(xprt))
			continue;
		found = xprt;
		svc_xprt_get(xprt);
		break;
	}
	spin_unlock_bh(&serv->sv_lock);
	return found;
}
EXPORT_SYMBOL_GPL(svc_find_xprt);

static int svc_one_xprt_name(const struct svc_xprt *xprt,
			     char *pos, int remaining)
{
	int len;

	len = snprintf(pos, remaining, "%s %u\n",
		       xprt->xpt_class->xcl_name,
		       svc_xprt_local_port(xprt));
	if (len >= remaining)
		return -ENAMETOOLONG;
	return len;
}

/**
 * svc_xprt_names - format a buffer with a list of transport names
 * @serv: pointer to an RPC service
 * @buf: pointer to a buffer to be filled in
 * @buflen: length of buffer to be filled in
 *
 * Fills in @buf with a string containing a list of transport names,
 * each name terminated with '\n'.
 *
 * Returns positive length of the filled-in string on success; otherwise
 * a negative errno value is returned if an error occurs.
 */
int svc_xprt_names(struct svc_serv *serv, char *buf, const int buflen)
{
	struct svc_xprt *xprt;
	int len, totlen;
	char *pos;

	/* Sanity check args */
	if (!serv)
		return 0;

	spin_lock_bh(&serv->sv_lock);

	pos = buf;
	totlen = 0;
	list_for_each_entry(xprt, &serv->sv_permsocks, xpt_list) {
		len = svc_one_xprt_name(xprt, pos, buflen - totlen);
		if (len < 0) {
			*buf = '\0';
			totlen = len;
		}
		if (len <= 0)
			break;

		pos += len;
		totlen += len;
	}

	spin_unlock_bh(&serv->sv_lock);
	return totlen;
}
EXPORT_SYMBOL_GPL(svc_xprt_names);


/*----------------------------------------------------------------------------*/

static void *svc_pool_stats_start(struct seq_file *m, loff_t *pos)
{
	unsigned int pidx = (unsigned int)*pos;
	struct svc_serv *serv = m->private;

	dprintk("svc_pool_stats_start, *pidx=%u\n", pidx);

	if (!pidx)
		return SEQ_START_TOKEN;
	return (pidx > serv->sv_nrpools ? NULL : &serv->sv_pools[pidx-1]);
}

static void *svc_pool_stats_next(struct seq_file *m, void *p, loff_t *pos)
{
	struct svc_pool *pool = p;
	struct svc_serv *serv = m->private;

	dprintk("svc_pool_stats_next, *pos=%llu\n", *pos);

	if (p == SEQ_START_TOKEN) {
		pool = &serv->sv_pools[0];
	} else {
		unsigned int pidx = (pool - &serv->sv_pools[0]);
		if (pidx < serv->sv_nrpools-1)
			pool = &serv->sv_pools[pidx+1];
		else
			pool = NULL;
	}
	++*pos;
	return pool;
}

static void svc_pool_stats_stop(struct seq_file *m, void *p)
{
}

static int svc_pool_stats_show(struct seq_file *m, void *p)
{
	struct svc_pool *pool = p;

	if (p == SEQ_START_TOKEN) {
		seq_puts(m, "# pool packets-arrived sockets-enqueued threads-woken threads-timedout\n");
		return 0;
	}

	seq_printf(m, "%u %lu %lu %lu %lu\n",
		pool->sp_id,
		(unsigned long)atomic_long_read(&pool->sp_stats.packets),
		pool->sp_stats.sockets_queued,
		(unsigned long)atomic_long_read(&pool->sp_stats.threads_woken),
		(unsigned long)atomic_long_read(&pool->sp_stats.threads_timedout));

	return 0;
}

static const struct seq_operations svc_pool_stats_seq_ops = {
	.start	= svc_pool_stats_start,
	.next	= svc_pool_stats_next,
	.stop	= svc_pool_stats_stop,
	.show	= svc_pool_stats_show,
};

int svc_pool_stats_open(struct svc_serv *serv, struct file *file)
{
	int err;

	err = seq_open(file, &svc_pool_stats_seq_ops);
	if (!err)
		((struct seq_file *) file->private_data)->private = serv;
	return err;
}
EXPORT_SYMBOL(svc_pool_stats_open);

/*----------------------------------------------------------------------------*/