/*
 *  linux/net/sunrpc/sched.c
 *
 * Scheduling for synchronous and asynchronous RPC requests.
 *
 * Copyright (C) 1996 Olaf Kirch, <okir@monad.swb.de>
 *
 * TCP NFS related read + write fixes
 * (C) 1999 Dave Airlie, University of Limerick, Ireland <airlied@linux.ie>
 */

#include <linux/module.h>

#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/mempool.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/freezer.h>

#include <linux/sunrpc/clnt.h>

#include "sunrpc.h"

#ifdef RPC_DEBUG
#define RPCDBG_FACILITY		RPCDBG_SCHED
#endif

#define CREATE_TRACE_POINTS
#include <trace/events/sunrpc.h>

/*
 * RPC slabs and memory pools
 */
#define RPC_BUFFER_MAXSIZE	(2048)
#define RPC_BUFFER_POOLSIZE	(8)
#define RPC_TASK_POOLSIZE	(8)
static struct kmem_cache	*rpc_task_slabp __read_mostly;
static struct kmem_cache	*rpc_buffer_slabp __read_mostly;
static mempool_t	*rpc_task_mempool __read_mostly;
static mempool_t	*rpc_buffer_mempool __read_mostly;

static void			rpc_async_schedule(struct work_struct *);
static void			rpc_release_task(struct rpc_task *task);
static void __rpc_queue_timer_fn(unsigned long ptr);

/*
 * RPC tasks sit here while waiting for conditions to improve.
 */
static struct rpc_wait_queue delay_queue;

/*
 * rpciod-related stuff
 */
struct workqueue_struct *rpciod_workqueue;

/*
 * Disable the timer for a given RPC task. Should be called with
 * queue->lock held and bh disabled in order to avoid races within
 * __rpc_queue_timer_fn().
 */
static void
__rpc_disable_timer(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	if (task->tk_timeout == 0)
		return;
	dprintk("RPC: %5u disabling timer\n", task->tk_pid);
	task->tk_timeout = 0;
	list_del(&task->u.tk_wait.timer_list);
	if (list_empty(&queue->timer_list.list))
		del_timer(&queue->timer_list.timer);
}

static void
rpc_set_queue_timer(struct rpc_wait_queue *queue, unsigned long expires)
{
	queue->timer_list.expires = expires;
	mod_timer(&queue->timer_list.timer, expires);
}
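/*
 * A worked example of the timer coalescing above (sketch derived only
 * from this file): each queue runs a single timer that always holds the
 * earliest pending expiry.
 *
 *	task A sleeps with tk_timeout = 2 * HZ;
 *					// list was empty: timer armed for A
 *	task B sleeps with tk_timeout = HZ;
 *					// B expires sooner: timer re-armed for B
 *
 * When the timer fires, __rpc_queue_timer_fn() wakes B with -ETIMEDOUT
 * and re-arms the timer for A, the earliest survivor on timer_list.list.
 */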
/*
 * Set up a timer for the current task.
 */
static void
__rpc_add_timer(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	if (!task->tk_timeout)
		return;

	dprintk("RPC: %5u setting alarm for %lu ms\n",
			task->tk_pid, task->tk_timeout * 1000 / HZ);

	task->u.tk_wait.expires = jiffies + task->tk_timeout;
	if (list_empty(&queue->timer_list.list) || time_before(task->u.tk_wait.expires, queue->timer_list.expires))
		rpc_set_queue_timer(queue, task->u.tk_wait.expires);
	list_add(&task->u.tk_wait.timer_list, &queue->timer_list.list);
}

static void rpc_rotate_queue_owner(struct rpc_wait_queue *queue)
{
	struct list_head *q = &queue->tasks[queue->priority];
	struct rpc_task *task;

	if (!list_empty(q)) {
		task = list_first_entry(q, struct rpc_task, u.tk_wait.list);
		if (task->tk_owner == queue->owner)
			list_move_tail(&task->u.tk_wait.list, q);
	}
}

static void rpc_set_waitqueue_priority(struct rpc_wait_queue *queue, int priority)
{
	if (queue->priority != priority) {
		/* Fairness: rotate the list when changing priority */
		rpc_rotate_queue_owner(queue);
		queue->priority = priority;
	}
}

static void rpc_set_waitqueue_owner(struct rpc_wait_queue *queue, pid_t pid)
{
	queue->owner = pid;
	queue->nr = RPC_BATCH_COUNT;
}

static void rpc_reset_waitqueue_priority(struct rpc_wait_queue *queue)
{
	rpc_set_waitqueue_priority(queue, queue->maxpriority);
	rpc_set_waitqueue_owner(queue, 0);
}

/*
 * Add new request to a priority queue.
 */
static void __rpc_add_wait_queue_priority(struct rpc_wait_queue *queue,
		struct rpc_task *task,
		unsigned char queue_priority)
{
	struct list_head *q;
	struct rpc_task *t;

	INIT_LIST_HEAD(&task->u.tk_wait.links);
	if (unlikely(queue_priority > queue->maxpriority))
		queue_priority = queue->maxpriority;
	if (queue_priority > queue->priority)
		rpc_set_waitqueue_priority(queue, queue_priority);
	q = &queue->tasks[queue_priority];
	list_for_each_entry(t, q, u.tk_wait.list) {
		if (t->tk_owner == task->tk_owner) {
			list_add_tail(&task->u.tk_wait.list, &t->u.tk_wait.links);
			return;
		}
	}
	list_add_tail(&task->u.tk_wait.list, q);
}

/*
 * Add new request to wait queue.
 *
 * Swapper tasks always get inserted at the head of the queue.
 * This should avoid many nasty memory deadlocks and hopefully
 * improve overall performance.
 * Everyone else gets appended to the queue to ensure proper FIFO behavior.
 */
static void __rpc_add_wait_queue(struct rpc_wait_queue *queue,
		struct rpc_task *task,
		unsigned char queue_priority)
{
	WARN_ON_ONCE(RPC_IS_QUEUED(task));
	if (RPC_IS_QUEUED(task))
		return;

	if (RPC_IS_PRIORITY(queue))
		__rpc_add_wait_queue_priority(queue, task, queue_priority);
	else if (RPC_IS_SWAPPER(task))
		list_add(&task->u.tk_wait.list, &queue->tasks[0]);
	else
		list_add_tail(&task->u.tk_wait.list, &queue->tasks[0]);
	task->tk_waitqueue = queue;
	queue->qlen++;
	/* barrier matches the read in rpc_wake_up_task_queue_locked() */
	smp_wmb();
	rpc_set_queued(task);

	dprintk("RPC: %5u added to queue %p \"%s\"\n",
			task->tk_pid, queue, rpc_qname(queue));
}
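/*
 * Resulting topology of a priority queue (derived from the code above):
 * each entry on tasks[prio] is the first task of its owner, linked via
 * u.tk_wait.list; later tasks from the same owner hang off that leader's
 * u.tk_wait.links instead, e.g.:
 *
 *	tasks[prio] -> T1 (owner A) -> T3 (owner B) -> ...
 *	                |
 *	                u.tk_wait.links: T2, T4 (also owner A)
 *
 * __rpc_remove_wait_queue_priority() below promotes T2 to leader and
 * splices the remaining links across when T1 is removed.
 */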
/*
 * Remove request from a priority queue.
 */
static void __rpc_remove_wait_queue_priority(struct rpc_task *task)
{
	struct rpc_task *t;

	if (!list_empty(&task->u.tk_wait.links)) {
		t = list_entry(task->u.tk_wait.links.next, struct rpc_task, u.tk_wait.list);
		list_move(&t->u.tk_wait.list, &task->u.tk_wait.list);
		list_splice_init(&task->u.tk_wait.links, &t->u.tk_wait.links);
	}
}

/*
 * Remove request from queue.
 * Note: must be called with spin lock held.
 */
static void __rpc_remove_wait_queue(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	__rpc_disable_timer(queue, task);
	if (RPC_IS_PRIORITY(queue))
		__rpc_remove_wait_queue_priority(task);
	list_del(&task->u.tk_wait.list);
	queue->qlen--;
	dprintk("RPC: %5u removed from queue %p \"%s\"\n",
			task->tk_pid, queue, rpc_qname(queue));
}

static void __rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname, unsigned char nr_queues)
{
	int i;

	spin_lock_init(&queue->lock);
	for (i = 0; i < ARRAY_SIZE(queue->tasks); i++)
		INIT_LIST_HEAD(&queue->tasks[i]);
	queue->maxpriority = nr_queues - 1;
	rpc_reset_waitqueue_priority(queue);
	queue->qlen = 0;
	setup_timer(&queue->timer_list.timer, __rpc_queue_timer_fn, (unsigned long)queue);
	INIT_LIST_HEAD(&queue->timer_list.list);
	rpc_assign_waitqueue_name(queue, qname);
}

void rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname)
{
	__rpc_init_priority_wait_queue(queue, qname, RPC_NR_PRIORITY);
}
EXPORT_SYMBOL_GPL(rpc_init_priority_wait_queue);

void rpc_init_wait_queue(struct rpc_wait_queue *queue, const char *qname)
{
	__rpc_init_priority_wait_queue(queue, qname, 1);
}
EXPORT_SYMBOL_GPL(rpc_init_wait_queue);

void rpc_destroy_wait_queue(struct rpc_wait_queue *queue)
{
	del_timer_sync(&queue->timer_list.timer);
}
EXPORT_SYMBOL_GPL(rpc_destroy_wait_queue);

static int rpc_wait_bit_killable(void *word)
{
	if (fatal_signal_pending(current))
		return -ERESTARTSYS;
	freezable_schedule();
	return 0;
}

#ifdef RPC_DEBUG
static void rpc_task_set_debuginfo(struct rpc_task *task)
{
	static atomic_t rpc_pid;

	task->tk_pid = atomic_inc_return(&rpc_pid);
}
#else
static inline void rpc_task_set_debuginfo(struct rpc_task *task)
{
}
#endif

static void rpc_set_active(struct rpc_task *task)
{
	trace_rpc_task_begin(task->tk_client, task, NULL);

	rpc_task_set_debuginfo(task);
	set_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
}

/*
 * Mark an RPC call as having completed by clearing the 'active' bit
 * and then waking up all tasks that were sleeping.
 */
static int rpc_complete_task(struct rpc_task *task)
{
	void *m = &task->tk_runstate;
	wait_queue_head_t *wq = bit_waitqueue(m, RPC_TASK_ACTIVE);
	struct wait_bit_key k = __WAIT_BIT_KEY_INITIALIZER(m, RPC_TASK_ACTIVE);
	unsigned long flags;
	int ret;

	trace_rpc_task_complete(task->tk_client, task, NULL);

	spin_lock_irqsave(&wq->lock, flags);
	clear_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
	ret = atomic_dec_and_test(&task->tk_count);
	if (waitqueue_active(wq))
		__wake_up_locked_key(wq, TASK_NORMAL, &k);
	spin_unlock_irqrestore(&wq->lock, flags);
	return ret;
}

/*
 * Allow callers to wait for completion of an RPC call
 *
 * Note the use of out_of_line_wait_on_bit() rather than wait_on_bit()
 * to enforce taking of the wq->lock and hence avoid races with
 * rpc_complete_task().
 */
int __rpc_wait_for_completion_task(struct rpc_task *task, int (*action)(void *))
{
	if (action == NULL)
		action = rpc_wait_bit_killable;
	return out_of_line_wait_on_bit(&task->tk_runstate, RPC_TASK_ACTIVE,
			action, TASK_KILLABLE);
}
EXPORT_SYMBOL_GPL(__rpc_wait_for_completion_task);
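/*
 * Illustrative sketch (not in the original source): a caller that holds
 * an extra reference can run an RPC_TASK_ASYNC task and wait for it, in
 * the style of rpc_run_task()/rpc_wait_for_completion_task() elsewhere
 * in sunrpc ("status" is a hypothetical local):
 *
 *	atomic_inc(&task->tk_count);	// reference for the waiter
 *	rpc_execute(task);
 *	if (__rpc_wait_for_completion_task(task, NULL) == 0)
 *		status = task->tk_status;
 *	rpc_put_task(task);
 */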
/*
 * Make an RPC task runnable.
 *
 * Note: If the task is ASYNC, and is being made runnable after sitting on an
 * rpc_wait_queue, this must be called with the queue spinlock held to protect
 * the wait queue operation.
 */
static void rpc_make_runnable(struct rpc_task *task)
{
	rpc_clear_queued(task);
	if (rpc_test_and_set_running(task))
		return;
	if (RPC_IS_ASYNC(task)) {
		INIT_WORK(&task->u.tk_work, rpc_async_schedule);
		queue_work(rpciod_workqueue, &task->u.tk_work);
	} else
		wake_up_bit(&task->tk_runstate, RPC_TASK_QUEUED);
}

/*
 * Prepare for sleeping on a wait queue.
 * By always appending tasks to the list we ensure FIFO behavior.
 * NB: An RPC task will only receive interrupt-driven events as long
 * as it's on a wait queue.
 */
static void __rpc_sleep_on_priority(struct rpc_wait_queue *q,
		struct rpc_task *task,
		rpc_action action,
		unsigned char queue_priority)
{
	dprintk("RPC: %5u sleep_on(queue \"%s\" time %lu)\n",
			task->tk_pid, rpc_qname(q), jiffies);

	trace_rpc_task_sleep(task->tk_client, task, q);

	__rpc_add_wait_queue(q, task, queue_priority);

	WARN_ON_ONCE(task->tk_callback != NULL);
	task->tk_callback = action;
	__rpc_add_timer(q, task);
}

void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
				rpc_action action)
{
	/* We shouldn't ever put an inactive task to sleep */
	WARN_ON_ONCE(!RPC_IS_ACTIVATED(task));
	if (!RPC_IS_ACTIVATED(task)) {
		task->tk_status = -EIO;
		rpc_put_task_async(task);
		return;
	}

	/*
	 * Protect the queue operations.
	 */
	spin_lock_bh(&q->lock);
	__rpc_sleep_on_priority(q, task, action, task->tk_priority);
	spin_unlock_bh(&q->lock);
}
EXPORT_SYMBOL_GPL(rpc_sleep_on);

void rpc_sleep_on_priority(struct rpc_wait_queue *q, struct rpc_task *task,
		rpc_action action, int priority)
{
	/* We shouldn't ever put an inactive task to sleep */
	WARN_ON_ONCE(!RPC_IS_ACTIVATED(task));
	if (!RPC_IS_ACTIVATED(task)) {
		task->tk_status = -EIO;
		rpc_put_task_async(task);
		return;
	}

	/*
	 * Protect the queue operations.
	 */
	spin_lock_bh(&q->lock);
	__rpc_sleep_on_priority(q, task, action, priority - RPC_PRIORITY_LOW);
	spin_unlock_bh(&q->lock);
}
EXPORT_SYMBOL_GPL(rpc_sleep_on_priority);
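/*
 * Illustrative sketch (hypothetical names, not part of this file): a
 * state function typically pairs rpc_sleep_on() with a later
 * rpc_wake_up_queued_task() from the code path that clears the blockage:
 *
 *	static struct rpc_wait_queue frob_waitq;  // rpc_init_wait_queue()'d
 *
 *	static void call_frob(struct rpc_task *task)
 *	{
 *		task->tk_action = call_frob_done;  // next FSM step
 *		task->tk_timeout = 5 * HZ;         // 0 would mean "no timer"
 *		rpc_sleep_on(&frob_waitq, task, NULL);
 *	}
 *
 *	// elsewhere, once the awaited event arrives:
 *	rpc_wake_up_queued_task(&frob_waitq, task);
 */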
/**
 * __rpc_do_wake_up_task - wake up a single rpc_task
 * @queue: wait queue
 * @task: task to be woken up
 *
 * Caller must hold queue->lock, and have cleared the task queued flag.
 */
static void __rpc_do_wake_up_task(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	dprintk("RPC: %5u __rpc_wake_up_task (now %lu)\n",
			task->tk_pid, jiffies);

	/* Has the task been executed yet? If not, we cannot wake it up! */
	if (!RPC_IS_ACTIVATED(task)) {
		printk(KERN_ERR "RPC: Inactive task (%p) being woken up!\n", task);
		return;
	}

	trace_rpc_task_wakeup(task->tk_client, task, queue);

	__rpc_remove_wait_queue(queue, task);

	rpc_make_runnable(task);

	dprintk("RPC:       __rpc_wake_up_task done\n");
}

/*
 * Wake up a queued task while the queue lock is being held
 */
static void rpc_wake_up_task_queue_locked(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	if (RPC_IS_QUEUED(task)) {
		smp_rmb();
		if (task->tk_waitqueue == queue)
			__rpc_do_wake_up_task(queue, task);
	}
}

/*
 * Tests whether rpc queue is empty
 */
int rpc_queue_empty(struct rpc_wait_queue *queue)
{
	int res;

	spin_lock_bh(&queue->lock);
	res = queue->qlen;
	spin_unlock_bh(&queue->lock);
	return res == 0;
}
EXPORT_SYMBOL_GPL(rpc_queue_empty);

/*
 * Wake up a task on a specific queue
 */
void rpc_wake_up_queued_task(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	spin_lock_bh(&queue->lock);
	rpc_wake_up_task_queue_locked(queue, task);
	spin_unlock_bh(&queue->lock);
}
EXPORT_SYMBOL_GPL(rpc_wake_up_queued_task);

/*
 * Wake up the next task on a priority queue.
 */
static struct rpc_task *__rpc_find_next_queued_priority(struct rpc_wait_queue *queue)
{
	struct list_head *q;
	struct rpc_task *task;

	/*
	 * Service a batch of tasks from a single owner.
	 */
	q = &queue->tasks[queue->priority];
	if (!list_empty(q)) {
		task = list_entry(q->next, struct rpc_task, u.tk_wait.list);
		if (queue->owner == task->tk_owner) {
			if (--queue->nr)
				goto out;
			list_move_tail(&task->u.tk_wait.list, q);
		}
		/*
		 * Check if we need to switch queues.
		 */
		goto new_owner;
	}

	/*
	 * Service the next queue.
	 */
	do {
		if (q == &queue->tasks[0])
			q = &queue->tasks[queue->maxpriority];
		else
			q = q - 1;
		if (!list_empty(q)) {
			task = list_entry(q->next, struct rpc_task, u.tk_wait.list);
			goto new_queue;
		}
	} while (q != &queue->tasks[queue->priority]);

	rpc_reset_waitqueue_priority(queue);
	return NULL;

new_queue:
	rpc_set_waitqueue_priority(queue, (unsigned int)(q - &queue->tasks[0]));
new_owner:
	rpc_set_waitqueue_owner(queue, task->tk_owner);
out:
	return task;
}

static struct rpc_task *__rpc_find_next_queued(struct rpc_wait_queue *queue)
{
	if (RPC_IS_PRIORITY(queue))
		return __rpc_find_next_queued_priority(queue);
	if (!list_empty(&queue->tasks[0]))
		return list_first_entry(&queue->tasks[0], struct rpc_task, u.tk_wait.list);
	return NULL;
}
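/*
 * Worked example of the fallback scan above: with maxpriority = 3 and
 * queue->priority = 1, an empty tasks[1] makes the do/while step q
 * downward, wrapping from tasks[0] to tasks[maxpriority], so it visits
 * tasks[0], then tasks[3], then tasks[2] before stopping back at
 * tasks[1] and resetting the queue.
 */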
/*
 * Wake up the first task on the wait queue.
 */
struct rpc_task *rpc_wake_up_first(struct rpc_wait_queue *queue,
		bool (*func)(struct rpc_task *, void *), void *data)
{
	struct rpc_task	*task = NULL;

	dprintk("RPC:       wake_up_first(%p \"%s\")\n",
			queue, rpc_qname(queue));
	spin_lock_bh(&queue->lock);
	task = __rpc_find_next_queued(queue);
	if (task != NULL) {
		if (func(task, data))
			rpc_wake_up_task_queue_locked(queue, task);
		else
			task = NULL;
	}
	spin_unlock_bh(&queue->lock);

	return task;
}
EXPORT_SYMBOL_GPL(rpc_wake_up_first);

static bool rpc_wake_up_next_func(struct rpc_task *task, void *data)
{
	return true;
}

/*
 * Wake up the next task on the wait queue.
 */
struct rpc_task *rpc_wake_up_next(struct rpc_wait_queue *queue)
{
	return rpc_wake_up_first(queue, rpc_wake_up_next_func, NULL);
}
EXPORT_SYMBOL_GPL(rpc_wake_up_next);

/**
 * rpc_wake_up - wake up all rpc_tasks
 * @queue: rpc_wait_queue on which the tasks are sleeping
 *
 * Grabs queue->lock
 */
void rpc_wake_up(struct rpc_wait_queue *queue)
{
	struct list_head *head;

	spin_lock_bh(&queue->lock);
	head = &queue->tasks[queue->maxpriority];
	for (;;) {
		while (!list_empty(head)) {
			struct rpc_task *task;
			task = list_first_entry(head,
					struct rpc_task,
					u.tk_wait.list);
			rpc_wake_up_task_queue_locked(queue, task);
		}
		if (head == &queue->tasks[0])
			break;
		head--;
	}
	spin_unlock_bh(&queue->lock);
}
EXPORT_SYMBOL_GPL(rpc_wake_up);

/**
 * rpc_wake_up_status - wake up all rpc_tasks and set their status value.
 * @queue: rpc_wait_queue on which the tasks are sleeping
 * @status: status value to set
 *
 * Grabs queue->lock
 */
void rpc_wake_up_status(struct rpc_wait_queue *queue, int status)
{
	struct list_head *head;

	spin_lock_bh(&queue->lock);
	head = &queue->tasks[queue->maxpriority];
	for (;;) {
		while (!list_empty(head)) {
			struct rpc_task *task;
			task = list_first_entry(head,
					struct rpc_task,
					u.tk_wait.list);
			task->tk_status = status;
			rpc_wake_up_task_queue_locked(queue, task);
		}
		if (head == &queue->tasks[0])
			break;
		head--;
	}
	spin_unlock_bh(&queue->lock);
}
EXPORT_SYMBOL_GPL(rpc_wake_up_status);

static void __rpc_queue_timer_fn(unsigned long ptr)
{
	struct rpc_wait_queue *queue = (struct rpc_wait_queue *)ptr;
	struct rpc_task *task, *n;
	unsigned long expires, now, timeo;

	spin_lock(&queue->lock);
	expires = now = jiffies;
	list_for_each_entry_safe(task, n, &queue->timer_list.list, u.tk_wait.timer_list) {
		timeo = task->u.tk_wait.expires;
		if (time_after_eq(now, timeo)) {
			dprintk("RPC: %5u timeout\n", task->tk_pid);
			task->tk_status = -ETIMEDOUT;
			rpc_wake_up_task_queue_locked(queue, task);
			continue;
		}
		if (expires == now || time_after(expires, timeo))
			expires = timeo;
	}
	if (!list_empty(&queue->timer_list.list))
		rpc_set_queue_timer(queue, expires);
	spin_unlock(&queue->lock);
}

static void __rpc_atrun(struct rpc_task *task)
{
	task->tk_status = 0;
}

/*
 * Run a task at a later time
 */
void rpc_delay(struct rpc_task *task, unsigned long delay)
{
	task->tk_timeout = delay;
	rpc_sleep_on(&delay_queue, task, __rpc_atrun);
}
EXPORT_SYMBOL_GPL(rpc_delay);
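/*
 * Illustrative sketch (hypothetical names): rpc_delay() is normally
 * called from a state function to back off before retrying a step:
 *
 *	static void call_frob(struct rpc_task *task)
 *	{
 *		if (frob_server_busy(task)) {
 *			task->tk_action = call_frob;	// retry this step
 *			rpc_delay(task, 3 * HZ);	// ...in ~3 seconds
 *			return;
 *		}
 *		...
 *	}
 *
 * The task is woken from delay_queue with -ETIMEDOUT, and the
 * __rpc_atrun() callback then resets tk_status to 0 so the expired
 * timer is not mistaken for a real error.
 */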
/*
 * Helper to call task->tk_ops->rpc_call_prepare
 */
void rpc_prepare_task(struct rpc_task *task)
{
	task->tk_ops->rpc_call_prepare(task, task->tk_calldata);
}

static void
rpc_init_task_statistics(struct rpc_task *task)
{
	/* Initialize retry counters */
	task->tk_garb_retry = 2;
	task->tk_cred_retry = 2;
	task->tk_rebind_retry = 2;

	/* starting timestamp */
	task->tk_start = ktime_get();
}

static void
rpc_reset_task_statistics(struct rpc_task *task)
{
	task->tk_timeouts = 0;
	task->tk_flags &= ~(RPC_CALL_MAJORSEEN|RPC_TASK_KILLED|RPC_TASK_SENT);

	rpc_init_task_statistics(task);
}

/*
 * Helper that calls task->tk_ops->rpc_call_done if it exists
 */
void rpc_exit_task(struct rpc_task *task)
{
	task->tk_action = NULL;
	if (task->tk_ops->rpc_call_done != NULL) {
		task->tk_ops->rpc_call_done(task, task->tk_calldata);
		if (task->tk_action != NULL) {
			WARN_ON(RPC_ASSASSINATED(task));
			/* Always release the RPC slot and buffer memory */
			xprt_release(task);
			rpc_reset_task_statistics(task);
		}
	}
}

void rpc_exit(struct rpc_task *task, int status)
{
	task->tk_status = status;
	task->tk_action = rpc_exit_task;
	if (RPC_IS_QUEUED(task))
		rpc_wake_up_queued_task(task->tk_waitqueue, task);
}
EXPORT_SYMBOL_GPL(rpc_exit);

void rpc_release_calldata(const struct rpc_call_ops *ops, void *calldata)
{
	if (ops->rpc_release != NULL)
		ops->rpc_release(calldata);
}
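/*
 * Illustrative sketch (hypothetical names): the hooks invoked by
 * rpc_prepare_task(), rpc_exit_task() and rpc_release_calldata() are
 * supplied by the task's creator through an rpc_call_ops table:
 *
 *	static void frob_prepare(struct rpc_task *task, void *calldata)
 *	{
 *		...	// first FSM step, via rpc_prepare_task()
 *	}
 *
 *	static void frob_done(struct rpc_task *task, void *calldata)
 *	{
 *		...	// final status handling, via rpc_exit_task()
 *	}
 *
 *	static void frob_release(void *calldata)
 *	{
 *		kfree(calldata);	// cleanup, via rpc_release_calldata()
 *	}
 *
 *	static const struct rpc_call_ops frob_ops = {
 *		.rpc_call_prepare	= frob_prepare,
 *		.rpc_call_done		= frob_done,
 *		.rpc_release		= frob_release,
 *	};
 */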
/*
 * This is the RPC `scheduler' (or rather, the finite state machine).
 */
static void __rpc_execute(struct rpc_task *task)
{
	struct rpc_wait_queue *queue;
	int task_is_async = RPC_IS_ASYNC(task);
	int status = 0;

	dprintk("RPC: %5u __rpc_execute flags=0x%x\n",
			task->tk_pid, task->tk_flags);

	WARN_ON_ONCE(RPC_IS_QUEUED(task));
	if (RPC_IS_QUEUED(task))
		return;

	for (;;) {
		void (*do_action)(struct rpc_task *);

		/*
		 * Execute any pending callback first.
		 */
		do_action = task->tk_callback;
		task->tk_callback = NULL;
		if (do_action == NULL) {
			/*
			 * Perform the next FSM step.
			 * tk_action may be NULL if the task has been killed.
			 * In particular, note that rpc_killall_tasks may
			 * do this at any time, so beware when dereferencing.
			 */
			do_action = task->tk_action;
			if (do_action == NULL)
				break;
		}
		trace_rpc_task_run_action(task->tk_client, task, task->tk_action);
		do_action(task);

		/*
		 * Lockless check for whether task is sleeping or not.
		 */
		if (!RPC_IS_QUEUED(task))
			continue;
		/*
		 * The queue->lock protects against races with
		 * rpc_make_runnable().
		 *
		 * Note that once we clear RPC_TASK_RUNNING on an asynchronous
		 * rpc_task, rpc_make_runnable() can assign it to a
		 * different workqueue. We therefore cannot assume that the
		 * rpc_task pointer is still safe to dereference afterwards.
		 */
		queue = task->tk_waitqueue;
		spin_lock_bh(&queue->lock);
		if (!RPC_IS_QUEUED(task)) {
			spin_unlock_bh(&queue->lock);
			continue;
		}
		rpc_clear_running(task);
		spin_unlock_bh(&queue->lock);
		if (task_is_async)
			return;

		/* sync task: sleep here */
		dprintk("RPC: %5u sync task going to sleep\n", task->tk_pid);
		status = out_of_line_wait_on_bit(&task->tk_runstate,
				RPC_TASK_QUEUED, rpc_wait_bit_killable,
				TASK_KILLABLE);
		if (status == -ERESTARTSYS) {
			/*
			 * When a sync task receives a signal, it exits with
			 * -ERESTARTSYS. In order to catch any callbacks that
			 * clean up after sleeping on some queue, we don't
			 * break the loop here, but go around once more.
			 */
			dprintk("RPC: %5u got signal\n", task->tk_pid);
			task->tk_flags |= RPC_TASK_KILLED;
			rpc_exit(task, -ERESTARTSYS);
		}
		rpc_set_running(task);
		dprintk("RPC: %5u sync task resuming\n", task->tk_pid);
	}

	dprintk("RPC: %5u return %d, status %d\n", task->tk_pid, status,
			task->tk_status);
	/* Release all resources associated with the task */
	rpc_release_task(task);
}

/*
 * User-visible entry point to the scheduler.
 *
 * This may be called recursively if e.g. an async NFS task updates
 * the attributes and finds that dirty pages must be flushed.
 * NOTE: Upon exit of this function the task is guaranteed to be
 *	 released. In particular note that tk_release() will have
 *	 been called, so your task memory may have been freed.
 */
void rpc_execute(struct rpc_task *task)
{
	rpc_set_active(task);
	rpc_make_runnable(task);
	if (!RPC_IS_ASYNC(task))
		__rpc_execute(task);
}

static void rpc_async_schedule(struct work_struct *work)
{
	current->flags |= PF_FSTRANS;
	__rpc_execute(container_of(work, struct rpc_task, u.tk_work));
	current->flags &= ~PF_FSTRANS;
}
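/*
 * Summary of the tk_runstate bits driving __rpc_execute(), as used in
 * this file:
 *
 *	RPC_TASK_RUNNING  owned by the scheduler loop; cleared under
 *			  queue->lock before sleeping, set again on resume.
 *	RPC_TASK_QUEUED   set by __rpc_add_wait_queue(), cleared by
 *			  rpc_make_runnable(); a synchronous task sleeps
 *			  on this bit in out_of_line_wait_on_bit().
 *	RPC_TASK_ACTIVE   set by rpc_set_active(), cleared by
 *			  rpc_complete_task() when the task finishes.
 */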
/**
 * rpc_malloc - allocate an RPC buffer
 * @task: RPC task that will use this buffer
 * @size: requested byte size
 *
 * To prevent rpciod from hanging, this allocator never sleeps,
 * returning NULL if the request cannot be serviced immediately.
 * The caller can arrange to sleep in a way that is safe for rpciod.
 *
 * Most requests are 'small' (under 2KiB) and can be serviced from a
 * mempool, ensuring that NFS reads and writes can always proceed,
 * and that there is good locality of reference for these buffers.
 *
 * In order to avoid memory starvation triggering more writebacks of
 * NFS requests, we avoid using GFP_KERNEL.
 */
void *rpc_malloc(struct rpc_task *task, size_t size)
{
	struct rpc_buffer *buf;
	gfp_t gfp = GFP_NOWAIT;

	if (RPC_IS_SWAPPER(task))
		gfp |= __GFP_MEMALLOC;

	size += sizeof(struct rpc_buffer);
	if (size <= RPC_BUFFER_MAXSIZE)
		buf = mempool_alloc(rpc_buffer_mempool, gfp);
	else
		buf = kmalloc(size, gfp);

	if (!buf)
		return NULL;

	buf->len = size;
	dprintk("RPC: %5u allocated buffer of size %zu at %p\n",
			task->tk_pid, size, buf);
	return &buf->data;
}
EXPORT_SYMBOL_GPL(rpc_malloc);

/**
 * rpc_free - free buffer allocated via rpc_malloc
 * @buffer: buffer to free
 *
 */
void rpc_free(void *buffer)
{
	size_t size;
	struct rpc_buffer *buf;

	if (!buffer)
		return;

	buf = container_of(buffer, struct rpc_buffer, data);
	size = buf->len;

	dprintk("RPC:       freeing buffer of size %zu at %p\n",
			size, buf);

	if (size <= RPC_BUFFER_MAXSIZE)
		mempool_free(buf, rpc_buffer_mempool);
	else
		kfree(buf);
}
EXPORT_SYMBOL_GPL(rpc_free);

/*
 * Creation and deletion of RPC task structures
 */
static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *task_setup_data)
{
	memset(task, 0, sizeof(*task));
	atomic_set(&task->tk_count, 1);
	task->tk_flags  = task_setup_data->flags;
	task->tk_ops = task_setup_data->callback_ops;
	task->tk_calldata = task_setup_data->callback_data;
	INIT_LIST_HEAD(&task->tk_task);

	task->tk_priority = task_setup_data->priority - RPC_PRIORITY_LOW;
	task->tk_owner = current->tgid;

	/* Initialize workqueue for async tasks */
	task->tk_workqueue = task_setup_data->workqueue;

	if (task->tk_ops->rpc_call_prepare != NULL)
		task->tk_action = rpc_prepare_task;

	rpc_init_task_statistics(task);

	dprintk("RPC:       new task initialized, procpid %u\n",
				task_pid_nr(current));
}

static struct rpc_task *
rpc_alloc_task(void)
{
	return (struct rpc_task *)mempool_alloc(rpc_task_mempool, GFP_NOIO);
}

/*
 * Create a new task for the specified client.
 */
struct rpc_task *rpc_new_task(const struct rpc_task_setup *setup_data)
{
	struct rpc_task	*task = setup_data->task;
	unsigned short flags = 0;

	if (task == NULL) {
		task = rpc_alloc_task();
		if (task == NULL) {
			rpc_release_calldata(setup_data->callback_ops,
					setup_data->callback_data);
			return ERR_PTR(-ENOMEM);
		}
		flags = RPC_TASK_DYNAMIC;
	}

	rpc_init_task(task, setup_data);
	task->tk_flags |= flags;
	dprintk("RPC:       allocated task %p\n", task);
	return task;
}
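/*
 * Illustrative sketch (simplified): rpc_new_task() is normally driven
 * through rpc_run_task() in clnt.c, whose core is roughly:
 *
 *	task = rpc_new_task(task_setup_data);
 *	if (IS_ERR(task))
 *		return task;
 *	...				// bind task to client and message
 *	atomic_inc(&task->tk_count);	// extra reference for the caller
 *	rpc_execute(task);
 *	return task;			// caller drops it with rpc_put_task()
 */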
/*
 * rpc_free_task - release rpc task and perform cleanups
 *
 * Note that we free up the rpc_task _after_ rpc_release_calldata()
 * in order to work around a workqueue dependency issue.
 *
 * Tejun Heo states:
 * "Workqueue currently considers two work items to be the same if they're
 * on the same address and won't execute them concurrently - ie. it
 * makes a work item which is queued again while being executed wait
 * for the previous execution to complete.
 *
 * If a work function frees the work item, and then waits for an event
 * which should be performed by another work item and *that* work item
 * recycles the freed work item, it can create a false dependency loop.
 * There really is no reliable way to detect this short of verifying
 * every memory free."
 *
 */
static void rpc_free_task(struct rpc_task *task)
{
	unsigned short tk_flags = task->tk_flags;

	rpc_release_calldata(task->tk_ops, task->tk_calldata);

	if (tk_flags & RPC_TASK_DYNAMIC) {
		dprintk("RPC: %5u freeing task\n", task->tk_pid);
		mempool_free(task, rpc_task_mempool);
	}
}

static void rpc_async_release(struct work_struct *work)
{
	rpc_free_task(container_of(work, struct rpc_task, u.tk_work));
}

static void rpc_release_resources_task(struct rpc_task *task)
{
	xprt_release(task);
	if (task->tk_msg.rpc_cred) {
		put_rpccred(task->tk_msg.rpc_cred);
		task->tk_msg.rpc_cred = NULL;
	}
	rpc_task_release_client(task);
}

static void rpc_final_put_task(struct rpc_task *task,
		struct workqueue_struct *q)
{
	if (q != NULL) {
		INIT_WORK(&task->u.tk_work, rpc_async_release);
		queue_work(q, &task->u.tk_work);
	} else
		rpc_free_task(task);
}

static void rpc_do_put_task(struct rpc_task *task, struct workqueue_struct *q)
{
	if (atomic_dec_and_test(&task->tk_count)) {
		rpc_release_resources_task(task);
		rpc_final_put_task(task, q);
	}
}

void rpc_put_task(struct rpc_task *task)
{
	rpc_do_put_task(task, NULL);
}
EXPORT_SYMBOL_GPL(rpc_put_task);

void rpc_put_task_async(struct rpc_task *task)
{
	rpc_do_put_task(task, task->tk_workqueue);
}
EXPORT_SYMBOL_GPL(rpc_put_task_async);

static void rpc_release_task(struct rpc_task *task)
{
	dprintk("RPC: %5u release task\n", task->tk_pid);

	WARN_ON_ONCE(RPC_IS_QUEUED(task));

	rpc_release_resources_task(task);

	/*
	 * Note: at this point we have been removed from rpc_clnt->cl_tasks,
	 * so it should be safe to use task->tk_count as a test for whether
	 * or not any other processes still hold references to our rpc_task.
	 */
	if (atomic_read(&task->tk_count) != 1 + !RPC_IS_ASYNC(task)) {
		/* Wake up anyone who may be waiting for task completion */
		if (!rpc_complete_task(task))
			return;
	} else {
		if (!atomic_dec_and_test(&task->tk_count))
			return;
	}
	rpc_final_put_task(task, task->tk_workqueue);
}

int rpciod_up(void)
{
	return try_module_get(THIS_MODULE) ? 0 : -EINVAL;
}

void rpciod_down(void)
{
	module_put(THIS_MODULE);
}
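/*
 * rpciod_up()/rpciod_down() simply pin this module while RPC clients
 * exist; client creation and destruction in clnt.c are expected to pair
 * them, roughly (sketch, hypothetical error label):
 *
 *	err = rpciod_up();
 *	if (err)
 *		goto out_no_rpciod;
 *	...
 *	rpciod_down();
 */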
/*
 * Start up the rpciod workqueue.
 */
static int rpciod_start(void)
{
	struct workqueue_struct *wq;

	/*
	 * Create the rpciod thread and wait for it to start.
	 */
	dprintk("RPC:       creating workqueue rpciod\n");
	wq = alloc_workqueue("rpciod", WQ_MEM_RECLAIM, 1);
	rpciod_workqueue = wq;
	return rpciod_workqueue != NULL;
}

static void rpciod_stop(void)
{
	struct workqueue_struct *wq = NULL;

	if (rpciod_workqueue == NULL)
		return;
	dprintk("RPC:       destroying workqueue rpciod\n");

	wq = rpciod_workqueue;
	rpciod_workqueue = NULL;
	destroy_workqueue(wq);
}

void
rpc_destroy_mempool(void)
{
	rpciod_stop();
	if (rpc_buffer_mempool)
		mempool_destroy(rpc_buffer_mempool);
	if (rpc_task_mempool)
		mempool_destroy(rpc_task_mempool);
	if (rpc_task_slabp)
		kmem_cache_destroy(rpc_task_slabp);
	if (rpc_buffer_slabp)
		kmem_cache_destroy(rpc_buffer_slabp);
	rpc_destroy_wait_queue(&delay_queue);
}

int
rpc_init_mempool(void)
{
	/*
	 * The following is not strictly a mempool initialisation,
	 * but there is no harm in doing it here
	 */
	rpc_init_wait_queue(&delay_queue, "delayq");
	if (!rpciod_start())
		goto err_nomem;

	rpc_task_slabp = kmem_cache_create("rpc_tasks",
					     sizeof(struct rpc_task),
					     0, SLAB_HWCACHE_ALIGN,
					     NULL);
	if (!rpc_task_slabp)
		goto err_nomem;
	rpc_buffer_slabp = kmem_cache_create("rpc_buffers",
					     RPC_BUFFER_MAXSIZE,
					     0, SLAB_HWCACHE_ALIGN,
					     NULL);
	if (!rpc_buffer_slabp)
		goto err_nomem;
	rpc_task_mempool = mempool_create_slab_pool(RPC_TASK_POOLSIZE,
						    rpc_task_slabp);
	if (!rpc_task_mempool)
		goto err_nomem;
	rpc_buffer_mempool = mempool_create_slab_pool(RPC_BUFFER_POOLSIZE,
						      rpc_buffer_slabp);
	if (!rpc_buffer_mempool)
		goto err_nomem;
	return 0;
err_nomem:
	rpc_destroy_mempool();
	return -ENOMEM;
}
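/*
 * Illustrative sketch: rpc_init_mempool() and rpc_destroy_mempool() are
 * expected to be called once from the sunrpc module init/exit paths
 * (e.g. init_sunrpc()/cleanup_sunrpc() in sunrpc_syms.c), roughly:
 *
 *	static int __init init_sunrpc(void)
 *	{
 *		int err = rpc_init_mempool();
 *		if (err)
 *			return err;
 *		...
 *		return 0;
 *	}
 *
 *	static void __exit cleanup_sunrpc(void)
 *	{
 *		...
 *		rpc_destroy_mempool();
 *	}
 */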