// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/net/sunrpc/sched.c
 *
 * Scheduling for synchronous and asynchronous RPC requests.
 *
 * Copyright (C) 1996 Olaf Kirch, <okir@monad.swb.de>
 *
 * TCP NFS related read + write fixes
 * (C) 1999 Dave Airlie, University of Limerick, Ireland <airlied@linux.ie>
 */

#include <linux/module.h>

#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/mempool.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/freezer.h>
#include <linux/sched/mm.h>

#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/metrics.h>

#include "sunrpc.h"

#define CREATE_TRACE_POINTS
#include <trace/events/sunrpc.h>

/*
 * RPC slabs and memory pools
 */
#define RPC_BUFFER_MAXSIZE	(2048)
#define RPC_BUFFER_POOLSIZE	(8)
#define RPC_TASK_POOLSIZE	(8)
static struct kmem_cache	*rpc_task_slabp __read_mostly;
static struct kmem_cache	*rpc_buffer_slabp __read_mostly;
static mempool_t	*rpc_task_mempool __read_mostly;
static mempool_t	*rpc_buffer_mempool __read_mostly;

static void rpc_async_schedule(struct work_struct *);
static void rpc_release_task(struct rpc_task *task);
static void __rpc_queue_timer_fn(struct work_struct *);

/*
 * RPC tasks sit here while waiting for conditions to improve.
 */
static struct rpc_wait_queue delay_queue;

/*
 * rpciod-related stuff
 */
struct workqueue_struct *rpciod_workqueue __read_mostly;
struct workqueue_struct *xprtiod_workqueue __read_mostly;
EXPORT_SYMBOL_GPL(xprtiod_workqueue);

gfp_t rpc_task_gfp_mask(void)
{
	if (current->flags & PF_WQ_WORKER)
		return GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN;
	return GFP_KERNEL;
}
EXPORT_SYMBOL_GPL(rpc_task_gfp_mask);

unsigned long
rpc_task_timeout(const struct rpc_task *task)
{
	unsigned long timeout = READ_ONCE(task->tk_timeout);

	if (timeout != 0) {
		unsigned long now = jiffies;
		if (time_before(now, timeout))
			return timeout - now;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(rpc_task_timeout);

/*
 * Disable the timer for a given RPC task. Should be called with
 * queue->lock and bh_disabled in order to avoid races within
 * rpc_run_timer().
 */
static void
__rpc_disable_timer(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	if (list_empty(&task->u.tk_wait.timer_list))
		return;
	task->tk_timeout = 0;
	list_del(&task->u.tk_wait.timer_list);
	if (list_empty(&queue->timer_list.list))
		cancel_delayed_work(&queue->timer_list.dwork);
}

static void
rpc_set_queue_timer(struct rpc_wait_queue *queue, unsigned long expires)
{
	unsigned long now = jiffies;

	queue->timer_list.expires = expires;
	if (time_before_eq(expires, now))
		expires = 0;
	else
		expires -= now;
	mod_delayed_work(rpciod_workqueue, &queue->timer_list.dwork, expires);
}
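/*
 * Worked example for the conversion above (values are illustrative only):
 * with jiffies == 1000, arming the queue timer for expires == 1250 results
 * in mod_delayed_work() being scheduled 250 jiffies out, while an expires
 * value at or before the current jiffies collapses to 0 and fires the
 * delayed work immediately.
 */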
/*
 * Set up a timer for the current task.
 */
static void
__rpc_add_timer(struct rpc_wait_queue *queue, struct rpc_task *task,
		unsigned long timeout)
{
	task->tk_timeout = timeout;
	if (list_empty(&queue->timer_list.list) || time_before(timeout, queue->timer_list.expires))
		rpc_set_queue_timer(queue, timeout);
	list_add(&task->u.tk_wait.timer_list, &queue->timer_list.list);
}

static void rpc_set_waitqueue_priority(struct rpc_wait_queue *queue, int priority)
{
	if (queue->priority != priority) {
		queue->priority = priority;
		queue->nr = 1U << priority;
	}
}

static void rpc_reset_waitqueue_priority(struct rpc_wait_queue *queue)
{
	rpc_set_waitqueue_priority(queue, queue->maxpriority);
}

/*
 * Add a request to a queue list
 */
static void
__rpc_list_enqueue_task(struct list_head *q, struct rpc_task *task)
{
	struct rpc_task *t;

	list_for_each_entry(t, q, u.tk_wait.list) {
		if (t->tk_owner == task->tk_owner) {
			list_add_tail(&task->u.tk_wait.links,
					&t->u.tk_wait.links);
			/* Cache the queue head in task->u.tk_wait.list */
			task->u.tk_wait.list.next = q;
			task->u.tk_wait.list.prev = NULL;
			return;
		}
	}
	INIT_LIST_HEAD(&task->u.tk_wait.links);
	list_add_tail(&task->u.tk_wait.list, q);
}

/*
 * Remove request from a queue list
 */
static void
__rpc_list_dequeue_task(struct rpc_task *task)
{
	struct list_head *q;
	struct rpc_task *t;

	if (task->u.tk_wait.list.prev == NULL) {
		list_del(&task->u.tk_wait.links);
		return;
	}
	if (!list_empty(&task->u.tk_wait.links)) {
		t = list_first_entry(&task->u.tk_wait.links,
				struct rpc_task,
				u.tk_wait.links);
		/* Assume __rpc_list_enqueue_task() cached the queue head */
		q = t->u.tk_wait.list.next;
		list_add_tail(&t->u.tk_wait.list, q);
		list_del(&task->u.tk_wait.links);
	}
	list_del(&task->u.tk_wait.list);
}

/*
 * Add new request to a priority queue.
 */
static void __rpc_add_wait_queue_priority(struct rpc_wait_queue *queue,
		struct rpc_task *task,
		unsigned char queue_priority)
{
	if (unlikely(queue_priority > queue->maxpriority))
		queue_priority = queue->maxpriority;
	__rpc_list_enqueue_task(&queue->tasks[queue_priority], task);
}

/*
 * Add new request to wait queue.
 */
static void __rpc_add_wait_queue(struct rpc_wait_queue *queue,
		struct rpc_task *task,
		unsigned char queue_priority)
{
	INIT_LIST_HEAD(&task->u.tk_wait.timer_list);
	if (RPC_IS_PRIORITY(queue))
		__rpc_add_wait_queue_priority(queue, task, queue_priority);
	else
		list_add_tail(&task->u.tk_wait.list, &queue->tasks[0]);
	task->tk_waitqueue = queue;
	queue->qlen++;
	/* barrier matches the read in rpc_wake_up_task_queue_locked() */
	smp_wmb();
	rpc_set_queued(task);
}

/*
 * Remove request from a priority queue.
 */
static void __rpc_remove_wait_queue_priority(struct rpc_task *task)
{
	__rpc_list_dequeue_task(task);
}
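/*
 * Descriptive note on the list invariants used above: tasks sharing a
 * tk_owner are chained on u.tk_wait.links behind a single entry on the
 * queue's task list. For those secondary tasks, u.tk_wait.list.prev is set
 * to NULL as a sentinel and u.tk_wait.list.next caches the queue head,
 * which is what allows __rpc_list_dequeue_task() to promote the next task
 * from the same owner without rescanning the queue.
 */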
/*
 * Remove request from queue.
 * Note: must be called with spin lock held.
 */
static void __rpc_remove_wait_queue(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	__rpc_disable_timer(queue, task);
	if (RPC_IS_PRIORITY(queue))
		__rpc_remove_wait_queue_priority(task);
	else
		list_del(&task->u.tk_wait.list);
	queue->qlen--;
}

static void __rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname, unsigned char nr_queues)
{
	int i;

	spin_lock_init(&queue->lock);
	for (i = 0; i < ARRAY_SIZE(queue->tasks); i++)
		INIT_LIST_HEAD(&queue->tasks[i]);
	queue->maxpriority = nr_queues - 1;
	rpc_reset_waitqueue_priority(queue);
	queue->qlen = 0;
	queue->timer_list.expires = 0;
	INIT_DELAYED_WORK(&queue->timer_list.dwork, __rpc_queue_timer_fn);
	INIT_LIST_HEAD(&queue->timer_list.list);
	rpc_assign_waitqueue_name(queue, qname);
}

void rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname)
{
	__rpc_init_priority_wait_queue(queue, qname, RPC_NR_PRIORITY);
}
EXPORT_SYMBOL_GPL(rpc_init_priority_wait_queue);

void rpc_init_wait_queue(struct rpc_wait_queue *queue, const char *qname)
{
	__rpc_init_priority_wait_queue(queue, qname, 1);
}
EXPORT_SYMBOL_GPL(rpc_init_wait_queue);

void rpc_destroy_wait_queue(struct rpc_wait_queue *queue)
{
	cancel_delayed_work_sync(&queue->timer_list.dwork);
}
EXPORT_SYMBOL_GPL(rpc_destroy_wait_queue);

static int rpc_wait_bit_killable(struct wait_bit_key *key, int mode)
{
	freezable_schedule_unsafe();
	if (signal_pending_state(mode, current))
		return -ERESTARTSYS;
	return 0;
}

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) || IS_ENABLED(CONFIG_TRACEPOINTS)
static void rpc_task_set_debuginfo(struct rpc_task *task)
{
	struct rpc_clnt *clnt = task->tk_client;

	/* Might be a task carrying a reverse-direction operation */
	if (!clnt) {
		static atomic_t rpc_pid;

		task->tk_pid = atomic_inc_return(&rpc_pid);
		return;
	}

	task->tk_pid = atomic_inc_return(&clnt->cl_pid);
}
#else
static inline void rpc_task_set_debuginfo(struct rpc_task *task)
{
}
#endif

static void rpc_set_active(struct rpc_task *task)
{
	rpc_task_set_debuginfo(task);
	set_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
	trace_rpc_task_begin(task, NULL);
}

/*
 * Mark an RPC call as having completed by clearing the 'active' bit
 * and then waking up all tasks that were sleeping.
 */
static int rpc_complete_task(struct rpc_task *task)
{
	void *m = &task->tk_runstate;
	wait_queue_head_t *wq = bit_waitqueue(m, RPC_TASK_ACTIVE);
	struct wait_bit_key k = __WAIT_BIT_KEY_INITIALIZER(m, RPC_TASK_ACTIVE);
	unsigned long flags;
	int ret;

	trace_rpc_task_complete(task, NULL);

	spin_lock_irqsave(&wq->lock, flags);
	clear_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
	ret = atomic_dec_and_test(&task->tk_count);
	if (waitqueue_active(wq))
		__wake_up_locked_key(wq, TASK_NORMAL, &k);
	spin_unlock_irqrestore(&wq->lock, flags);
	return ret;
}
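/*
 * Illustrative call pattern (a sketch, not taken from this file): a caller
 * holding its own reference to an async task can wait for completion and
 * then drop the reference roughly as follows:
 *
 *	if (rpc_wait_for_completion_task(task) == 0)
 *		status = task->tk_status;
 *	rpc_put_task(task);
 */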
/*
 * Allow callers to wait for completion of an RPC call
 *
 * Note the use of out_of_line_wait_on_bit() rather than wait_on_bit()
 * to enforce taking of the wq->lock and hence avoid races with
 * rpc_complete_task().
 */
int __rpc_wait_for_completion_task(struct rpc_task *task, wait_bit_action_f *action)
{
	if (action == NULL)
		action = rpc_wait_bit_killable;
	return out_of_line_wait_on_bit(&task->tk_runstate, RPC_TASK_ACTIVE,
			action, TASK_KILLABLE);
}
EXPORT_SYMBOL_GPL(__rpc_wait_for_completion_task);

/*
 * Make an RPC task runnable.
 *
 * Note: If the task is ASYNC, and is being made runnable after sitting on an
 * rpc_wait_queue, this must be called with the queue spinlock held to protect
 * the wait queue operation.
 * Note the ordering of rpc_test_and_set_running() and rpc_clear_queued(),
 * which is needed to ensure that __rpc_execute() doesn't loop (due to the
 * lockless RPC_IS_QUEUED() test) before we've had a chance to test
 * the RPC_TASK_RUNNING flag.
 */
static void rpc_make_runnable(struct workqueue_struct *wq,
		struct rpc_task *task)
{
	bool need_wakeup = !rpc_test_and_set_running(task);

	rpc_clear_queued(task);
	if (!need_wakeup)
		return;
	if (RPC_IS_ASYNC(task)) {
		INIT_WORK(&task->u.tk_work, rpc_async_schedule);
		queue_work(wq, &task->u.tk_work);
	} else
		wake_up_bit(&task->tk_runstate, RPC_TASK_QUEUED);
}

/*
 * Prepare for sleeping on a wait queue.
 * By always appending tasks to the list we ensure FIFO behavior.
 * NB: An RPC task will only receive interrupt-driven events as long
 * as it's on a wait queue.
 */
static void __rpc_do_sleep_on_priority(struct rpc_wait_queue *q,
		struct rpc_task *task,
		unsigned char queue_priority)
{
	trace_rpc_task_sleep(task, q);

	__rpc_add_wait_queue(q, task, queue_priority);
}

static void __rpc_sleep_on_priority(struct rpc_wait_queue *q,
		struct rpc_task *task,
		unsigned char queue_priority)
{
	if (WARN_ON_ONCE(RPC_IS_QUEUED(task)))
		return;
	__rpc_do_sleep_on_priority(q, task, queue_priority);
}

static void __rpc_sleep_on_priority_timeout(struct rpc_wait_queue *q,
		struct rpc_task *task, unsigned long timeout,
		unsigned char queue_priority)
{
	if (WARN_ON_ONCE(RPC_IS_QUEUED(task)))
		return;
	if (time_is_after_jiffies(timeout)) {
		__rpc_do_sleep_on_priority(q, task, queue_priority);
		__rpc_add_timer(q, task, timeout);
	} else
		task->tk_status = -ETIMEDOUT;
}

static void rpc_set_tk_callback(struct rpc_task *task, rpc_action action)
{
	if (action && !WARN_ON_ONCE(task->tk_callback != NULL))
		task->tk_callback = action;
}

static bool rpc_sleep_check_activated(struct rpc_task *task)
{
	/* We shouldn't ever put an inactive task to sleep */
	if (WARN_ON_ONCE(!RPC_IS_ACTIVATED(task))) {
		task->tk_status = -EIO;
		rpc_put_task_async(task);
		return false;
	}
	return true;
}
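/*
 * The rpc_sleep_on*() entry points below differ only in whether they arm a
 * timeout and whether the caller supplies an explicit queue priority.
 * Illustrative use (sketch; the wait queue name is hypothetical):
 *
 *	rpc_sleep_on_timeout(&some_waitq, task, NULL, jiffies + 5 * HZ);
 *
 * puts the task to sleep and lets __rpc_queue_timer_fn() wake it with
 * tk_status set to -ETIMEDOUT if nothing else wakes it within five seconds.
 */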
void rpc_sleep_on_timeout(struct rpc_wait_queue *q, struct rpc_task *task,
		rpc_action action, unsigned long timeout)
{
	if (!rpc_sleep_check_activated(task))
		return;

	rpc_set_tk_callback(task, action);

	/*
	 * Protect the queue operations.
	 */
	spin_lock(&q->lock);
	__rpc_sleep_on_priority_timeout(q, task, timeout, task->tk_priority);
	spin_unlock(&q->lock);
}
EXPORT_SYMBOL_GPL(rpc_sleep_on_timeout);

void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
		rpc_action action)
{
	if (!rpc_sleep_check_activated(task))
		return;

	rpc_set_tk_callback(task, action);

	WARN_ON_ONCE(task->tk_timeout != 0);
	/*
	 * Protect the queue operations.
	 */
	spin_lock(&q->lock);
	__rpc_sleep_on_priority(q, task, task->tk_priority);
	spin_unlock(&q->lock);
}
EXPORT_SYMBOL_GPL(rpc_sleep_on);

void rpc_sleep_on_priority_timeout(struct rpc_wait_queue *q,
		struct rpc_task *task, unsigned long timeout, int priority)
{
	if (!rpc_sleep_check_activated(task))
		return;

	priority -= RPC_PRIORITY_LOW;
	/*
	 * Protect the queue operations.
	 */
	spin_lock(&q->lock);
	__rpc_sleep_on_priority_timeout(q, task, timeout, priority);
	spin_unlock(&q->lock);
}
EXPORT_SYMBOL_GPL(rpc_sleep_on_priority_timeout);

void rpc_sleep_on_priority(struct rpc_wait_queue *q, struct rpc_task *task,
		int priority)
{
	if (!rpc_sleep_check_activated(task))
		return;

	WARN_ON_ONCE(task->tk_timeout != 0);
	priority -= RPC_PRIORITY_LOW;
	/*
	 * Protect the queue operations.
	 */
	spin_lock(&q->lock);
	__rpc_sleep_on_priority(q, task, priority);
	spin_unlock(&q->lock);
}
EXPORT_SYMBOL_GPL(rpc_sleep_on_priority);
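/*
 * A task put to sleep by one of the variants above is normally woken either
 * by the queue timer (see __rpc_queue_timer_fn()) or by an explicit wake-up
 * such as rpc_wake_up_queued_task() below; both paths funnel through
 * rpc_make_runnable().
 */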
/**
 * __rpc_do_wake_up_task_on_wq - wake up a single rpc_task
 * @wq: workqueue on which to run task
 * @queue: wait queue
 * @task: task to be woken up
 *
 * Caller must hold queue->lock, and have cleared the task queued flag.
 */
static void __rpc_do_wake_up_task_on_wq(struct workqueue_struct *wq,
		struct rpc_wait_queue *queue,
		struct rpc_task *task)
{
	/* Has the task been executed yet? If not, we cannot wake it up! */
	if (!RPC_IS_ACTIVATED(task)) {
		printk(KERN_ERR "RPC: Inactive task (%p) being woken up!\n", task);
		return;
	}

	trace_rpc_task_wakeup(task, queue);

	__rpc_remove_wait_queue(queue, task);

	rpc_make_runnable(wq, task);
}

/*
 * Wake up a queued task while the queue lock is being held
 */
static struct rpc_task *
rpc_wake_up_task_on_wq_queue_action_locked(struct workqueue_struct *wq,
		struct rpc_wait_queue *queue, struct rpc_task *task,
		bool (*action)(struct rpc_task *, void *), void *data)
{
	if (RPC_IS_QUEUED(task)) {
		smp_rmb();
		if (task->tk_waitqueue == queue) {
			if (action == NULL || action(task, data)) {
				__rpc_do_wake_up_task_on_wq(wq, queue, task);
				return task;
			}
		}
	}
	return NULL;
}

/*
 * Wake up a queued task while the queue lock is being held
 */
static void rpc_wake_up_task_queue_locked(struct rpc_wait_queue *queue,
		struct rpc_task *task)
{
	rpc_wake_up_task_on_wq_queue_action_locked(rpciod_workqueue, queue,
			task, NULL, NULL);
}

/*
 * Wake up a task on a specific queue
 */
void rpc_wake_up_queued_task(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	if (!RPC_IS_QUEUED(task))
		return;
	spin_lock(&queue->lock);
	rpc_wake_up_task_queue_locked(queue, task);
	spin_unlock(&queue->lock);
}
EXPORT_SYMBOL_GPL(rpc_wake_up_queued_task);

static bool rpc_task_action_set_status(struct rpc_task *task, void *status)
{
	task->tk_status = *(int *)status;
	return true;
}

static void
rpc_wake_up_task_queue_set_status_locked(struct rpc_wait_queue *queue,
		struct rpc_task *task, int status)
{
	rpc_wake_up_task_on_wq_queue_action_locked(rpciod_workqueue, queue,
			task, rpc_task_action_set_status, &status);
}

/**
 * rpc_wake_up_queued_task_set_status - wake up a task and set task->tk_status
 * @queue: pointer to rpc_wait_queue
 * @task: pointer to rpc_task
 * @status: integer error value
 *
 * If @task is queued on @queue, then it is woken up, and @task->tk_status is
 * set to the value of @status.
 */
void
rpc_wake_up_queued_task_set_status(struct rpc_wait_queue *queue,
		struct rpc_task *task, int status)
{
	if (!RPC_IS_QUEUED(task))
		return;
	spin_lock(&queue->lock);
	rpc_wake_up_task_queue_set_status_locked(queue, task, status);
	spin_unlock(&queue->lock);
}
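/*
 * The priority dequeue below proceeds in three steps: service the
 * privileged level if it is populated, then keep draining the current
 * priority level while the batch counter queue->nr allows it, and finally
 * round-robin to the next non-empty level, which becomes the new current
 * priority (resetting the batch counter via rpc_set_waitqueue_priority()).
 */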
/*
 * Wake up the next task on a priority queue.
 */
static struct rpc_task *__rpc_find_next_queued_priority(struct rpc_wait_queue *queue)
{
	struct list_head *q;
	struct rpc_task *task;

	/*
	 * Service the privileged queue.
	 */
	q = &queue->tasks[RPC_NR_PRIORITY - 1];
	if (queue->maxpriority > RPC_PRIORITY_PRIVILEGED && !list_empty(q)) {
		task = list_first_entry(q, struct rpc_task, u.tk_wait.list);
		goto out;
	}

	/*
	 * Service a batch of tasks from a single owner.
	 */
	q = &queue->tasks[queue->priority];
	if (!list_empty(q) && queue->nr) {
		queue->nr--;
		task = list_first_entry(q, struct rpc_task, u.tk_wait.list);
		goto out;
	}

	/*
	 * Service the next queue.
	 */
	do {
		if (q == &queue->tasks[0])
			q = &queue->tasks[queue->maxpriority];
		else
			q = q - 1;
		if (!list_empty(q)) {
			task = list_first_entry(q, struct rpc_task, u.tk_wait.list);
			goto new_queue;
		}
	} while (q != &queue->tasks[queue->priority]);

	rpc_reset_waitqueue_priority(queue);
	return NULL;

new_queue:
	rpc_set_waitqueue_priority(queue, (unsigned int)(q - &queue->tasks[0]));
out:
	return task;
}

static struct rpc_task *__rpc_find_next_queued(struct rpc_wait_queue *queue)
{
	if (RPC_IS_PRIORITY(queue))
		return __rpc_find_next_queued_priority(queue);
	if (!list_empty(&queue->tasks[0]))
		return list_first_entry(&queue->tasks[0], struct rpc_task, u.tk_wait.list);
	return NULL;
}

/*
 * Wake up the first task on the wait queue.
 */
struct rpc_task *rpc_wake_up_first_on_wq(struct workqueue_struct *wq,
		struct rpc_wait_queue *queue,
		bool (*func)(struct rpc_task *, void *), void *data)
{
	struct rpc_task *task = NULL;

	spin_lock(&queue->lock);
	task = __rpc_find_next_queued(queue);
	if (task != NULL)
		task = rpc_wake_up_task_on_wq_queue_action_locked(wq, queue,
				task, func, data);
	spin_unlock(&queue->lock);

	return task;
}

/*
 * Wake up the first task on the wait queue.
 */
struct rpc_task *rpc_wake_up_first(struct rpc_wait_queue *queue,
		bool (*func)(struct rpc_task *, void *), void *data)
{
	return rpc_wake_up_first_on_wq(rpciod_workqueue, queue, func, data);
}
EXPORT_SYMBOL_GPL(rpc_wake_up_first);

static bool rpc_wake_up_next_func(struct rpc_task *task, void *data)
{
	return true;
}

/*
 * Wake up the next task on the wait queue.
 */
struct rpc_task *rpc_wake_up_next(struct rpc_wait_queue *queue)
{
	return rpc_wake_up_first(queue, rpc_wake_up_next_func, NULL);
}
EXPORT_SYMBOL_GPL(rpc_wake_up_next);

/**
 * rpc_wake_up_locked - wake up all rpc_tasks
 * @queue: rpc_wait_queue on which the tasks are sleeping
 *
 */
static void rpc_wake_up_locked(struct rpc_wait_queue *queue)
{
	struct rpc_task *task;

	for (;;) {
		task = __rpc_find_next_queued(queue);
		if (task == NULL)
			break;
		rpc_wake_up_task_queue_locked(queue, task);
	}
}

/**
 * rpc_wake_up - wake up all rpc_tasks
 * @queue: rpc_wait_queue on which the tasks are sleeping
 *
 * Grabs queue->lock
 */
void rpc_wake_up(struct rpc_wait_queue *queue)
{
	spin_lock(&queue->lock);
	rpc_wake_up_locked(queue);
	spin_unlock(&queue->lock);
}
EXPORT_SYMBOL_GPL(rpc_wake_up);

/**
 * rpc_wake_up_status_locked - wake up all rpc_tasks and set their status value.
 * @queue: rpc_wait_queue on which the tasks are sleeping
 * @status: status value to set
 */
static void rpc_wake_up_status_locked(struct rpc_wait_queue *queue, int status)
{
	struct rpc_task *task;

	for (;;) {
		task = __rpc_find_next_queued(queue);
		if (task == NULL)
			break;
		rpc_wake_up_task_queue_set_status_locked(queue, task, status);
	}
}
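/*
 * Typical use of the bulk wake-up below (illustrative; callers live outside
 * this file): a transport that loses its connection can flush every waiter
 * in one call, e.g.
 *
 *	rpc_wake_up_status(&some_waitq, -ENOTCONN);
 *
 * so that each task resumes with tk_status already set to the error.
 */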
/**
 * rpc_wake_up_status - wake up all rpc_tasks and set their status value.
 * @queue: rpc_wait_queue on which the tasks are sleeping
 * @status: status value to set
 *
 * Grabs queue->lock
 */
void rpc_wake_up_status(struct rpc_wait_queue *queue, int status)
{
	spin_lock(&queue->lock);
	rpc_wake_up_status_locked(queue, status);
	spin_unlock(&queue->lock);
}
EXPORT_SYMBOL_GPL(rpc_wake_up_status);

static void __rpc_queue_timer_fn(struct work_struct *work)
{
	struct rpc_wait_queue *queue = container_of(work,
			struct rpc_wait_queue,
			timer_list.dwork.work);
	struct rpc_task *task, *n;
	unsigned long expires, now, timeo;

	spin_lock(&queue->lock);
	expires = now = jiffies;
	list_for_each_entry_safe(task, n, &queue->timer_list.list, u.tk_wait.timer_list) {
		timeo = task->tk_timeout;
		if (time_after_eq(now, timeo)) {
			trace_rpc_task_timeout(task, task->tk_action);
			task->tk_status = -ETIMEDOUT;
			rpc_wake_up_task_queue_locked(queue, task);
			continue;
		}
		if (expires == now || time_after(expires, timeo))
			expires = timeo;
	}
	if (!list_empty(&queue->timer_list.list))
		rpc_set_queue_timer(queue, expires);
	spin_unlock(&queue->lock);
}

static void __rpc_atrun(struct rpc_task *task)
{
	if (task->tk_status == -ETIMEDOUT)
		task->tk_status = 0;
}

/*
 * Run a task at a later time
 */
void rpc_delay(struct rpc_task *task, unsigned long delay)
{
	rpc_sleep_on_timeout(&delay_queue, task, __rpc_atrun, jiffies + delay);
}
EXPORT_SYMBOL_GPL(rpc_delay);

/*
 * Helper to call task->tk_ops->rpc_call_prepare
 */
void rpc_prepare_task(struct rpc_task *task)
{
	task->tk_ops->rpc_call_prepare(task, task->tk_calldata);
}

static void
rpc_init_task_statistics(struct rpc_task *task)
{
	/* Initialize retry counters */
	task->tk_garb_retry = 2;
	task->tk_cred_retry = 2;
	task->tk_rebind_retry = 2;

	/* starting timestamp */
	task->tk_start = ktime_get();
}

static void
rpc_reset_task_statistics(struct rpc_task *task)
{
	task->tk_timeouts = 0;
	task->tk_flags &= ~(RPC_CALL_MAJORSEEN|RPC_TASK_SENT);
	rpc_init_task_statistics(task);
}

/*
 * Helper that calls task->tk_ops->rpc_call_done if it exists
 */
void rpc_exit_task(struct rpc_task *task)
{
	trace_rpc_task_end(task, task->tk_action);
	task->tk_action = NULL;
	if (task->tk_ops->rpc_count_stats)
		task->tk_ops->rpc_count_stats(task, task->tk_calldata);
	else if (task->tk_client)
		rpc_count_iostats(task, task->tk_client->cl_metrics);
	if (task->tk_ops->rpc_call_done != NULL) {
		trace_rpc_task_call_done(task, task->tk_ops->rpc_call_done);
		task->tk_ops->rpc_call_done(task, task->tk_calldata);
		if (task->tk_action != NULL) {
			/* Always release the RPC slot and buffer memory */
			xprt_release(task);
			rpc_reset_task_statistics(task);
		}
	}
}

void rpc_signal_task(struct rpc_task *task)
{
	struct rpc_wait_queue *queue;

	if (!RPC_IS_ACTIVATED(task))
		return;

	trace_rpc_task_signalled(task, task->tk_action);
	set_bit(RPC_TASK_SIGNALLED, &task->tk_runstate);
	smp_mb__after_atomic();
	queue = READ_ONCE(task->tk_waitqueue);
	if (queue)
		rpc_wake_up_queued_task_set_status(queue, task, -ERESTARTSYS);
}
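/*
 * rpc_exit() below terminates a task from any state: it records the final
 * status, points tk_action at rpc_exit_task and, if the task is currently
 * asleep on a wait queue, wakes it so that __rpc_execute() runs the
 * completion callbacks.
 */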
void rpc_exit(struct rpc_task *task, int status)
{
	task->tk_status = status;
	task->tk_action = rpc_exit_task;
	rpc_wake_up_queued_task(task->tk_waitqueue, task);
}
EXPORT_SYMBOL_GPL(rpc_exit);

void rpc_release_calldata(const struct rpc_call_ops *ops, void *calldata)
{
	if (ops->rpc_release != NULL)
		ops->rpc_release(calldata);
}

static bool xprt_needs_memalloc(struct rpc_xprt *xprt, struct rpc_task *tk)
{
	if (!xprt)
		return false;
	if (!atomic_read(&xprt->swapper))
		return false;
	return test_bit(XPRT_LOCKED, &xprt->state) && xprt->snd_task == tk;
}

/*
 * This is the RPC `scheduler' (or rather, the finite state machine).
 */
static void __rpc_execute(struct rpc_task *task)
{
	struct rpc_wait_queue *queue;
	int task_is_async = RPC_IS_ASYNC(task);
	int status = 0;
	unsigned long pflags = current->flags;

	WARN_ON_ONCE(RPC_IS_QUEUED(task));
	if (RPC_IS_QUEUED(task))
		return;

	for (;;) {
		void (*do_action)(struct rpc_task *);

		/*
		 * Perform the next FSM step or a pending callback.
		 *
		 * tk_action may be NULL if the task has been killed.
		 * In particular, note that rpc_killall_tasks may
		 * do this at any time, so beware when dereferencing.
		 */
		do_action = task->tk_action;
		if (task->tk_callback) {
			do_action = task->tk_callback;
			task->tk_callback = NULL;
		}
		if (!do_action)
			break;
		if (RPC_IS_SWAPPER(task) ||
		    xprt_needs_memalloc(task->tk_xprt, task))
			current->flags |= PF_MEMALLOC;

		trace_rpc_task_run_action(task, do_action);
		do_action(task);

		/*
		 * Lockless check for whether task is sleeping or not.
		 */
		if (!RPC_IS_QUEUED(task)) {
			cond_resched();
			continue;
		}

		/*
		 * Signalled tasks should exit rather than sleep.
		 */
		if (RPC_SIGNALLED(task)) {
			task->tk_rpc_status = -ERESTARTSYS;
			rpc_exit(task, -ERESTARTSYS);
		}

		/*
		 * The queue->lock protects against races with
		 * rpc_make_runnable().
		 *
		 * Note that once we clear RPC_TASK_RUNNING on an asynchronous
		 * rpc_task, rpc_make_runnable() can assign it to a
		 * different workqueue. We therefore cannot assume that the
		 * rpc_task pointer may still be dereferenced.
		 */
		queue = task->tk_waitqueue;
		spin_lock(&queue->lock);
		if (!RPC_IS_QUEUED(task)) {
			spin_unlock(&queue->lock);
			continue;
		}
		rpc_clear_running(task);
		spin_unlock(&queue->lock);
		if (task_is_async)
			goto out;

		/* sync task: sleep here */
		trace_rpc_task_sync_sleep(task, task->tk_action);
		status = out_of_line_wait_on_bit(&task->tk_runstate,
				RPC_TASK_QUEUED, rpc_wait_bit_killable,
				TASK_KILLABLE);
		if (status < 0) {
			/*
			 * When a sync task receives a signal, it exits with
			 * -ERESTARTSYS. In order to catch any callbacks that
			 * clean up after sleeping on some queue, we don't
			 * break the loop here, but go around once more.
			 */
			trace_rpc_task_signalled(task, task->tk_action);
			set_bit(RPC_TASK_SIGNALLED, &task->tk_runstate);
			task->tk_rpc_status = -ERESTARTSYS;
			rpc_exit(task, -ERESTARTSYS);
		}
		trace_rpc_task_sync_wake(task, task->tk_action);
	}

	/* Release all resources associated with the task */
	rpc_release_task(task);
out:
	current_restore_flags(pflags, PF_MEMALLOC);
}
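/*
 * In the loop above, each tk_action (or pending tk_callback) either leaves
 * the task runnable or queues it on a wait queue. A synchronous task then
 * sleeps right there until it is woken; an asynchronous task instead drops
 * out of the loop and is requeued to its workqueue by the next wake-up, so
 * the work item never blocks rpciod while waiting.
 */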
/*
 * User-visible entry point to the scheduler.
 *
 * This may be called recursively if e.g. an async NFS task updates
 * the attributes and finds that dirty pages must be flushed.
 * NOTE: Upon exit of this function the task is guaranteed to be
 *	 released. In particular note that tk_release() will have
 *	 been called, so your task memory may have been freed.
 */
void rpc_execute(struct rpc_task *task)
{
	bool is_async = RPC_IS_ASYNC(task);

	rpc_set_active(task);
	rpc_make_runnable(rpciod_workqueue, task);
	if (!is_async) {
		unsigned int pflags = memalloc_nofs_save();
		__rpc_execute(task);
		memalloc_nofs_restore(pflags);
	}
}

static void rpc_async_schedule(struct work_struct *work)
{
	unsigned int pflags = memalloc_nofs_save();

	__rpc_execute(container_of(work, struct rpc_task, u.tk_work));
	memalloc_nofs_restore(pflags);
}

/**
 * rpc_malloc - allocate RPC buffer resources
 * @task: RPC task
 *
 * A single memory region is allocated, which is split between the
 * RPC call and RPC reply that this task is being used for. When
 * this RPC is retired, the memory is released by calling rpc_free.
 *
 * To prevent rpciod from hanging, this allocator never sleeps,
 * returning -ENOMEM and suppressing warning if the request cannot
 * be serviced immediately. The caller can arrange to sleep in a
 * way that is safe for rpciod.
 *
 * Most requests are 'small' (under 2KiB) and can be serviced from a
 * mempool, ensuring that NFS reads and writes can always proceed,
 * and that there is good locality of reference for these buffers.
 */
int rpc_malloc(struct rpc_task *task)
{
	struct rpc_rqst *rqst = task->tk_rqstp;
	size_t size = rqst->rq_callsize + rqst->rq_rcvsize;
	struct rpc_buffer *buf;
	gfp_t gfp = rpc_task_gfp_mask();

	size += sizeof(struct rpc_buffer);
	if (size <= RPC_BUFFER_MAXSIZE) {
		buf = kmem_cache_alloc(rpc_buffer_slabp, gfp);
		/* Reach for the mempool if dynamic allocation fails */
		if (!buf && RPC_IS_ASYNC(task))
			buf = mempool_alloc(rpc_buffer_mempool, GFP_NOWAIT);
	} else
		buf = kmalloc(size, gfp);

	if (!buf)
		return -ENOMEM;

	buf->len = size;
	rqst->rq_buffer = buf->data;
	rqst->rq_rbuffer = (char *)rqst->rq_buffer + rqst->rq_callsize;
	return 0;
}
EXPORT_SYMBOL_GPL(rpc_malloc);

/**
 * rpc_free - free RPC buffer resources allocated via rpc_malloc
 * @task: RPC task
 *
 */
void rpc_free(struct rpc_task *task)
{
	void *buffer = task->tk_rqstp->rq_buffer;
	size_t size;
	struct rpc_buffer *buf;

	buf = container_of(buffer, struct rpc_buffer, data);
	size = buf->len;

	if (size <= RPC_BUFFER_MAXSIZE)
		mempool_free(buf, rpc_buffer_mempool);
	else
		kfree(buf);
}
EXPORT_SYMBOL_GPL(rpc_free);
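/*
 * Worked example of the buffer layout produced by rpc_malloc() (numbers are
 * illustrative): for rq_callsize == 800 and rq_rcvsize == 600, a single
 * region of 800 + 600 + sizeof(struct rpc_buffer) bytes is allocated;
 * rq_buffer points at buf->data and rq_rbuffer starts 800 bytes later.
 * Since the total stays below RPC_BUFFER_MAXSIZE (2048), it is served from
 * rpc_buffer_slabp (or rpc_buffer_mempool) rather than kmalloc().
 */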
/*
 * Creation and deletion of RPC task structures
 */
static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *task_setup_data)
{
	memset(task, 0, sizeof(*task));
	atomic_set(&task->tk_count, 1);
	task->tk_flags  = task_setup_data->flags;
	task->tk_ops = task_setup_data->callback_ops;
	task->tk_calldata = task_setup_data->callback_data;
	INIT_LIST_HEAD(&task->tk_task);

	task->tk_priority = task_setup_data->priority - RPC_PRIORITY_LOW;
	task->tk_owner = current->tgid;

	/* Initialize workqueue for async tasks */
	task->tk_workqueue = task_setup_data->workqueue;

	task->tk_xprt = rpc_task_get_xprt(task_setup_data->rpc_client,
			xprt_get(task_setup_data->rpc_xprt));

	task->tk_op_cred = get_rpccred(task_setup_data->rpc_op_cred);

	if (task->tk_ops->rpc_call_prepare != NULL)
		task->tk_action = rpc_prepare_task;

	rpc_init_task_statistics(task);
}

static struct rpc_task *rpc_alloc_task(void)
{
	struct rpc_task *task;

	task = kmem_cache_alloc(rpc_task_slabp, rpc_task_gfp_mask());
	if (task)
		return task;
	return mempool_alloc(rpc_task_mempool, GFP_NOWAIT);
}

/*
 * Create a new task for the specified client.
 */
struct rpc_task *rpc_new_task(const struct rpc_task_setup *setup_data)
{
	struct rpc_task *task = setup_data->task;
	unsigned short flags = 0;

	if (task == NULL) {
		task = rpc_alloc_task();
		if (task == NULL) {
			rpc_release_calldata(setup_data->callback_ops,
					setup_data->callback_data);
			return ERR_PTR(-ENOMEM);
		}
		flags = RPC_TASK_DYNAMIC;
	}

	rpc_init_task(task, setup_data);
	task->tk_flags |= flags;
	return task;
}
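/*
 * Illustrative caller sketch (field values are examples only): most users
 * reach rpc_new_task() through rpc_run_task() in clnt.c, roughly as
 *
 *	struct rpc_task_setup task_setup_data = {
 *		.rpc_client = clnt,
 *		.callback_ops = &my_call_ops,
 *		.flags = RPC_TASK_ASYNC,
 *	};
 *	task = rpc_run_task(&task_setup_data);
 *
 * where my_call_ops is a hypothetical struct rpc_call_ops providing the
 * rpc_call_prepare/rpc_call_done/rpc_release callbacks invoked by the
 * scheduler above.
 */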
1162 * 1163 */ 1164 static void rpc_free_task(struct rpc_task *task) 1165 { 1166 unsigned short tk_flags = task->tk_flags; 1167 1168 put_rpccred(task->tk_op_cred); 1169 rpc_release_calldata(task->tk_ops, task->tk_calldata); 1170 1171 if (tk_flags & RPC_TASK_DYNAMIC) 1172 mempool_free(task, rpc_task_mempool); 1173 } 1174 1175 static void rpc_async_release(struct work_struct *work) 1176 { 1177 unsigned int pflags = memalloc_nofs_save(); 1178 1179 rpc_free_task(container_of(work, struct rpc_task, u.tk_work)); 1180 memalloc_nofs_restore(pflags); 1181 } 1182 1183 static void rpc_release_resources_task(struct rpc_task *task) 1184 { 1185 xprt_release(task); 1186 if (task->tk_msg.rpc_cred) { 1187 if (!(task->tk_flags & RPC_TASK_CRED_NOREF)) 1188 put_cred(task->tk_msg.rpc_cred); 1189 task->tk_msg.rpc_cred = NULL; 1190 } 1191 rpc_task_release_client(task); 1192 } 1193 1194 static void rpc_final_put_task(struct rpc_task *task, 1195 struct workqueue_struct *q) 1196 { 1197 if (q != NULL) { 1198 INIT_WORK(&task->u.tk_work, rpc_async_release); 1199 queue_work(q, &task->u.tk_work); 1200 } else 1201 rpc_free_task(task); 1202 } 1203 1204 static void rpc_do_put_task(struct rpc_task *task, struct workqueue_struct *q) 1205 { 1206 if (atomic_dec_and_test(&task->tk_count)) { 1207 rpc_release_resources_task(task); 1208 rpc_final_put_task(task, q); 1209 } 1210 } 1211 1212 void rpc_put_task(struct rpc_task *task) 1213 { 1214 rpc_do_put_task(task, NULL); 1215 } 1216 EXPORT_SYMBOL_GPL(rpc_put_task); 1217 1218 void rpc_put_task_async(struct rpc_task *task) 1219 { 1220 rpc_do_put_task(task, task->tk_workqueue); 1221 } 1222 EXPORT_SYMBOL_GPL(rpc_put_task_async); 1223 1224 static void rpc_release_task(struct rpc_task *task) 1225 { 1226 WARN_ON_ONCE(RPC_IS_QUEUED(task)); 1227 1228 rpc_release_resources_task(task); 1229 1230 /* 1231 * Note: at this point we have been removed from rpc_clnt->cl_tasks, 1232 * so it should be safe to use task->tk_count as a test for whether 1233 * or not any other processes still hold references to our rpc_task. 1234 */ 1235 if (atomic_read(&task->tk_count) != 1 + !RPC_IS_ASYNC(task)) { 1236 /* Wake up anyone who may be waiting for task completion */ 1237 if (!rpc_complete_task(task)) 1238 return; 1239 } else { 1240 if (!atomic_dec_and_test(&task->tk_count)) 1241 return; 1242 } 1243 rpc_final_put_task(task, task->tk_workqueue); 1244 } 1245 1246 int rpciod_up(void) 1247 { 1248 return try_module_get(THIS_MODULE) ? 0 : -EINVAL; 1249 } 1250 1251 void rpciod_down(void) 1252 { 1253 module_put(THIS_MODULE); 1254 } 1255 1256 /* 1257 * Start up the rpciod workqueue. 1258 */ 1259 static int rpciod_start(void) 1260 { 1261 struct workqueue_struct *wq; 1262 1263 /* 1264 * Create the rpciod thread and wait for it to start. 
/*
 * Start up the rpciod workqueue.
 */
static int rpciod_start(void)
{
	struct workqueue_struct *wq;

	/*
	 * Create the rpciod thread and wait for it to start.
	 */
	wq = alloc_workqueue("rpciod", WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
	if (!wq)
		goto out_failed;
	rpciod_workqueue = wq;
	wq = alloc_workqueue("xprtiod", WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
	if (!wq)
		goto free_rpciod;
	xprtiod_workqueue = wq;
	return 1;
free_rpciod:
	wq = rpciod_workqueue;
	rpciod_workqueue = NULL;
	destroy_workqueue(wq);
out_failed:
	return 0;
}

static void rpciod_stop(void)
{
	struct workqueue_struct *wq = NULL;

	if (rpciod_workqueue == NULL)
		return;

	wq = rpciod_workqueue;
	rpciod_workqueue = NULL;
	destroy_workqueue(wq);
	wq = xprtiod_workqueue;
	xprtiod_workqueue = NULL;
	destroy_workqueue(wq);
}

void
rpc_destroy_mempool(void)
{
	rpciod_stop();
	mempool_destroy(rpc_buffer_mempool);
	mempool_destroy(rpc_task_mempool);
	kmem_cache_destroy(rpc_task_slabp);
	kmem_cache_destroy(rpc_buffer_slabp);
	rpc_destroy_wait_queue(&delay_queue);
}

int
rpc_init_mempool(void)
{
	/*
	 * The following is not strictly a mempool initialisation,
	 * but there is no harm in doing it here
	 */
	rpc_init_wait_queue(&delay_queue, "delayq");
	if (!rpciod_start())
		goto err_nomem;

	rpc_task_slabp = kmem_cache_create("rpc_tasks",
					   sizeof(struct rpc_task),
					   0, SLAB_HWCACHE_ALIGN,
					   NULL);
	if (!rpc_task_slabp)
		goto err_nomem;
	rpc_buffer_slabp = kmem_cache_create("rpc_buffers",
					     RPC_BUFFER_MAXSIZE,
					     0, SLAB_HWCACHE_ALIGN,
					     NULL);
	if (!rpc_buffer_slabp)
		goto err_nomem;
	rpc_task_mempool = mempool_create_slab_pool(RPC_TASK_POOLSIZE,
						    rpc_task_slabp);
	if (!rpc_task_mempool)
		goto err_nomem;
	rpc_buffer_mempool = mempool_create_slab_pool(RPC_BUFFER_POOLSIZE,
						      rpc_buffer_slabp);
	if (!rpc_buffer_mempool)
		goto err_nomem;
	return 0;
err_nomem:
	rpc_destroy_mempool();
	return -ENOMEM;
}