// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/net/sunrpc/sched.c
 *
 * Scheduling for synchronous and asynchronous RPC requests.
 *
 * Copyright (C) 1996 Olaf Kirch, <okir@monad.swb.de>
 *
 * TCP NFS related read + write fixes
 * (C) 1999 Dave Airlie, University of Limerick, Ireland <airlied@linux.ie>
 */

#include <linux/module.h>

#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/mempool.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/freezer.h>
#include <linux/sched/mm.h>

#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/metrics.h>

#include "sunrpc.h"

#define CREATE_TRACE_POINTS
#include <trace/events/sunrpc.h>

/*
 * RPC slabs and memory pools
 */
#define RPC_BUFFER_MAXSIZE	(2048)
#define RPC_BUFFER_POOLSIZE	(8)
#define RPC_TASK_POOLSIZE	(8)
static struct kmem_cache	*rpc_task_slabp __read_mostly;
static struct kmem_cache	*rpc_buffer_slabp __read_mostly;
static mempool_t	*rpc_task_mempool __read_mostly;
static mempool_t	*rpc_buffer_mempool __read_mostly;

static void rpc_async_schedule(struct work_struct *);
static void rpc_release_task(struct rpc_task *task);
static void __rpc_queue_timer_fn(struct work_struct *);

/*
 * RPC tasks sit here while waiting for conditions to improve.
 */
static struct rpc_wait_queue delay_queue;

/*
 * rpciod-related stuff
 */
struct workqueue_struct *rpciod_workqueue __read_mostly;
struct workqueue_struct *xprtiod_workqueue __read_mostly;
EXPORT_SYMBOL_GPL(xprtiod_workqueue);

gfp_t rpc_task_gfp_mask(void)
{
	if (current->flags & PF_WQ_WORKER)
		return GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN;
	return GFP_KERNEL;
}
EXPORT_SYMBOL_GPL(rpc_task_gfp_mask);

unsigned long
rpc_task_timeout(const struct rpc_task *task)
{
	unsigned long timeout = READ_ONCE(task->tk_timeout);

	if (timeout != 0) {
		unsigned long now = jiffies;
		if (time_before(now, timeout))
			return timeout - now;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(rpc_task_timeout);

/*
 * Disable the timer for a given RPC task. Should be called with
 * queue->lock and bh_disabled in order to avoid races within
 * rpc_run_timer().
 */
static void
__rpc_disable_timer(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	if (list_empty(&task->u.tk_wait.timer_list))
		return;
	task->tk_timeout = 0;
	list_del(&task->u.tk_wait.timer_list);
	if (list_empty(&queue->timer_list.list))
		cancel_delayed_work(&queue->timer_list.dwork);
}

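/*
 * Arm (or re-arm) the queue's delayed work so that __rpc_queue_timer_fn()
 * runs at @expires (in jiffies).  An @expires value in the past fires the
 * work immediately.
 */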
static void
rpc_set_queue_timer(struct rpc_wait_queue *queue, unsigned long expires)
{
	unsigned long now = jiffies;
	queue->timer_list.expires = expires;
	if (time_before_eq(expires, now))
		expires = 0;
	else
		expires -= now;
	mod_delayed_work(rpciod_workqueue, &queue->timer_list.dwork, expires);
}

/*
 * Set up a timer for the current task.
 */
static void
__rpc_add_timer(struct rpc_wait_queue *queue, struct rpc_task *task,
		unsigned long timeout)
{
	task->tk_timeout = timeout;
	if (list_empty(&queue->timer_list.list) || time_before(timeout, queue->timer_list.expires))
		rpc_set_queue_timer(queue, timeout);
	list_add(&task->u.tk_wait.timer_list, &queue->timer_list.list);
}

static void rpc_set_waitqueue_priority(struct rpc_wait_queue *queue, int priority)
{
	if (queue->priority != priority) {
		queue->priority = priority;
		queue->nr = 1U << priority;
	}
}

static void rpc_reset_waitqueue_priority(struct rpc_wait_queue *queue)
{
	rpc_set_waitqueue_priority(queue, queue->maxpriority);
}

/*
 * Add a request to a queue list
 */
static void
__rpc_list_enqueue_task(struct list_head *q, struct rpc_task *task)
{
	struct rpc_task *t;

	list_for_each_entry(t, q, u.tk_wait.list) {
		if (t->tk_owner == task->tk_owner) {
			list_add_tail(&task->u.tk_wait.links,
					&t->u.tk_wait.links);
			/* Cache the queue head in task->u.tk_wait.list */
			task->u.tk_wait.list.next = q;
			task->u.tk_wait.list.prev = NULL;
			return;
		}
	}
	INIT_LIST_HEAD(&task->u.tk_wait.links);
	list_add_tail(&task->u.tk_wait.list, q);
}

/*
 * Remove request from a queue list
 */
static void
__rpc_list_dequeue_task(struct rpc_task *task)
{
	struct list_head *q;
	struct rpc_task *t;

	if (task->u.tk_wait.list.prev == NULL) {
		list_del(&task->u.tk_wait.links);
		return;
	}
	if (!list_empty(&task->u.tk_wait.links)) {
		t = list_first_entry(&task->u.tk_wait.links,
				struct rpc_task,
				u.tk_wait.links);
		/* Assume __rpc_list_enqueue_task() cached the queue head */
		q = t->u.tk_wait.list.next;
		list_add_tail(&t->u.tk_wait.list, q);
		list_del(&task->u.tk_wait.links);
	}
	list_del(&task->u.tk_wait.list);
}

/*
 * Add new request to a priority queue.
 */
static void __rpc_add_wait_queue_priority(struct rpc_wait_queue *queue,
		struct rpc_task *task,
		unsigned char queue_priority)
{
	if (unlikely(queue_priority > queue->maxpriority))
		queue_priority = queue->maxpriority;
	__rpc_list_enqueue_task(&queue->tasks[queue_priority], task);
}

/*
 * Add new request to wait queue.
 */
static void __rpc_add_wait_queue(struct rpc_wait_queue *queue,
		struct rpc_task *task,
		unsigned char queue_priority)
{
	INIT_LIST_HEAD(&task->u.tk_wait.timer_list);
	if (RPC_IS_PRIORITY(queue))
		__rpc_add_wait_queue_priority(queue, task, queue_priority);
	else
		list_add_tail(&task->u.tk_wait.list, &queue->tasks[0]);
	task->tk_waitqueue = queue;
	queue->qlen++;
	/* barrier matches the read in rpc_wake_up_task_queue_locked() */
	smp_wmb();
	rpc_set_queued(task);
}

/*
 * Remove request from a priority queue.
 */
static void __rpc_remove_wait_queue_priority(struct rpc_task *task)
{
	__rpc_list_dequeue_task(task);
}

/*
 * Remove request from queue.
 * Note: must be called with spin lock held.
 */
static void __rpc_remove_wait_queue(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	__rpc_disable_timer(queue, task);
	if (RPC_IS_PRIORITY(queue))
		__rpc_remove_wait_queue_priority(task);
	else
		list_del(&task->u.tk_wait.list);
	queue->qlen--;
}

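/*
 * Initialise a wait queue with @nr_queues priority levels (a single level
 * gives an ordinary FIFO queue).
 */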
static void __rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname, unsigned char nr_queues)
{
	int i;

	spin_lock_init(&queue->lock);
	for (i = 0; i < ARRAY_SIZE(queue->tasks); i++)
		INIT_LIST_HEAD(&queue->tasks[i]);
	queue->maxpriority = nr_queues - 1;
	rpc_reset_waitqueue_priority(queue);
	queue->qlen = 0;
	queue->timer_list.expires = 0;
	INIT_DELAYED_WORK(&queue->timer_list.dwork, __rpc_queue_timer_fn);
	INIT_LIST_HEAD(&queue->timer_list.list);
	rpc_assign_waitqueue_name(queue, qname);
}

void rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname)
{
	__rpc_init_priority_wait_queue(queue, qname, RPC_NR_PRIORITY);
}
EXPORT_SYMBOL_GPL(rpc_init_priority_wait_queue);

void rpc_init_wait_queue(struct rpc_wait_queue *queue, const char *qname)
{
	__rpc_init_priority_wait_queue(queue, qname, 1);
}
EXPORT_SYMBOL_GPL(rpc_init_wait_queue);

void rpc_destroy_wait_queue(struct rpc_wait_queue *queue)
{
	cancel_delayed_work_sync(&queue->timer_list.dwork);
}
EXPORT_SYMBOL_GPL(rpc_destroy_wait_queue);

static int rpc_wait_bit_killable(struct wait_bit_key *key, int mode)
{
	schedule();
	if (signal_pending_state(mode, current))
		return -ERESTARTSYS;
	return 0;
}

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) || IS_ENABLED(CONFIG_TRACEPOINTS)
static void rpc_task_set_debuginfo(struct rpc_task *task)
{
	struct rpc_clnt *clnt = task->tk_client;

	/* Might be a task carrying a reverse-direction operation */
	if (!clnt) {
		static atomic_t rpc_pid;

		task->tk_pid = atomic_inc_return(&rpc_pid);
		return;
	}

	task->tk_pid = atomic_inc_return(&clnt->cl_pid);
}
#else
static inline void rpc_task_set_debuginfo(struct rpc_task *task)
{
}
#endif

static void rpc_set_active(struct rpc_task *task)
{
	rpc_task_set_debuginfo(task);
	set_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
	trace_rpc_task_begin(task, NULL);
}

/*
 * Mark an RPC call as having completed by clearing the 'active' bit
 * and then waking up all tasks that were sleeping.
 */
static int rpc_complete_task(struct rpc_task *task)
{
	void *m = &task->tk_runstate;
	wait_queue_head_t *wq = bit_waitqueue(m, RPC_TASK_ACTIVE);
	struct wait_bit_key k = __WAIT_BIT_KEY_INITIALIZER(m, RPC_TASK_ACTIVE);
	unsigned long flags;
	int ret;

	trace_rpc_task_complete(task, NULL);

	spin_lock_irqsave(&wq->lock, flags);
	clear_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
	ret = atomic_dec_and_test(&task->tk_count);
	if (waitqueue_active(wq))
		__wake_up_locked_key(wq, TASK_NORMAL, &k);
	spin_unlock_irqrestore(&wq->lock, flags);
	return ret;
}

/*
 * Allow callers to wait for completion of an RPC call
 *
 * Note the use of out_of_line_wait_on_bit() rather than wait_on_bit()
 * to enforce taking of the wq->lock and hence avoid races with
 * rpc_complete_task().
 */
int rpc_wait_for_completion_task(struct rpc_task *task)
{
	return out_of_line_wait_on_bit(&task->tk_runstate, RPC_TASK_ACTIVE,
			rpc_wait_bit_killable, TASK_KILLABLE|TASK_FREEZABLE_UNSAFE);
}
EXPORT_SYMBOL_GPL(rpc_wait_for_completion_task);

/*
 * Make an RPC task runnable.
 *
 * Note: If the task is ASYNC, and is being made runnable after sitting on an
 * rpc_wait_queue, this must be called with the queue spinlock held to protect
 * the wait queue operation.
 * Note the ordering of rpc_test_and_set_running() and rpc_clear_queued(),
 * which is needed to ensure that __rpc_execute() doesn't loop (due to the
 * lockless RPC_IS_QUEUED() test) before we've had a chance to test
 * the RPC_TASK_RUNNING flag.
 */
static void rpc_make_runnable(struct workqueue_struct *wq,
		struct rpc_task *task)
{
	bool need_wakeup = !rpc_test_and_set_running(task);

	rpc_clear_queued(task);
	if (!need_wakeup)
		return;
	if (RPC_IS_ASYNC(task)) {
		INIT_WORK(&task->u.tk_work, rpc_async_schedule);
		queue_work(wq, &task->u.tk_work);
	} else
		wake_up_bit(&task->tk_runstate, RPC_TASK_QUEUED);
}

/*
 * Prepare for sleeping on a wait queue.
 * By always appending tasks to the list we ensure FIFO behavior.
 * NB: An RPC task will only receive interrupt-driven events as long
 * as it's on a wait queue.
 */
static void __rpc_do_sleep_on_priority(struct rpc_wait_queue *q,
		struct rpc_task *task,
		unsigned char queue_priority)
{
	trace_rpc_task_sleep(task, q);

	__rpc_add_wait_queue(q, task, queue_priority);
}

static void __rpc_sleep_on_priority(struct rpc_wait_queue *q,
		struct rpc_task *task,
		unsigned char queue_priority)
{
	if (WARN_ON_ONCE(RPC_IS_QUEUED(task)))
		return;
	__rpc_do_sleep_on_priority(q, task, queue_priority);
}

static void __rpc_sleep_on_priority_timeout(struct rpc_wait_queue *q,
		struct rpc_task *task, unsigned long timeout,
		unsigned char queue_priority)
{
	if (WARN_ON_ONCE(RPC_IS_QUEUED(task)))
		return;
	if (time_is_after_jiffies(timeout)) {
		__rpc_do_sleep_on_priority(q, task, queue_priority);
		__rpc_add_timer(q, task, timeout);
	} else
		task->tk_status = -ETIMEDOUT;
}

static void rpc_set_tk_callback(struct rpc_task *task, rpc_action action)
{
	if (action && !WARN_ON_ONCE(task->tk_callback != NULL))
		task->tk_callback = action;
}

static bool rpc_sleep_check_activated(struct rpc_task *task)
{
	/* We shouldn't ever put an inactive task to sleep */
	if (WARN_ON_ONCE(!RPC_IS_ACTIVATED(task))) {
		task->tk_status = -EIO;
		rpc_put_task_async(task);
		return false;
	}
	return true;
}

void rpc_sleep_on_timeout(struct rpc_wait_queue *q, struct rpc_task *task,
		rpc_action action, unsigned long timeout)
{
	if (!rpc_sleep_check_activated(task))
		return;

	rpc_set_tk_callback(task, action);

	/*
	 * Protect the queue operations.
	 */
	spin_lock(&q->lock);
	__rpc_sleep_on_priority_timeout(q, task, timeout, task->tk_priority);
	spin_unlock(&q->lock);
}
EXPORT_SYMBOL_GPL(rpc_sleep_on_timeout);

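/*
 * rpc_sleep_on() queues the task with no timeout: it stays on @q until it
 * is explicitly woken up.  A purely illustrative pairing of the sleep/wake
 * primitives (not taken from this file; "some_queue" is a placeholder):
 *
 *	rpc_sleep_on(&some_queue, task, NULL);	from a tk_action callback
 *	rpc_wake_up_next(&some_queue);		from whatever event clears
 *						the condition
 */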
void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
		rpc_action action)
{
	if (!rpc_sleep_check_activated(task))
		return;

	rpc_set_tk_callback(task, action);

	WARN_ON_ONCE(task->tk_timeout != 0);
	/*
	 * Protect the queue operations.
	 */
	spin_lock(&q->lock);
	__rpc_sleep_on_priority(q, task, task->tk_priority);
	spin_unlock(&q->lock);
}
EXPORT_SYMBOL_GPL(rpc_sleep_on);

void rpc_sleep_on_priority_timeout(struct rpc_wait_queue *q,
		struct rpc_task *task, unsigned long timeout, int priority)
{
	if (!rpc_sleep_check_activated(task))
		return;

	priority -= RPC_PRIORITY_LOW;
	/*
	 * Protect the queue operations.
	 */
	spin_lock(&q->lock);
	__rpc_sleep_on_priority_timeout(q, task, timeout, priority);
	spin_unlock(&q->lock);
}
EXPORT_SYMBOL_GPL(rpc_sleep_on_priority_timeout);

void rpc_sleep_on_priority(struct rpc_wait_queue *q, struct rpc_task *task,
		int priority)
{
	if (!rpc_sleep_check_activated(task))
		return;

	WARN_ON_ONCE(task->tk_timeout != 0);
	priority -= RPC_PRIORITY_LOW;
	/*
	 * Protect the queue operations.
	 */
	spin_lock(&q->lock);
	__rpc_sleep_on_priority(q, task, priority);
	spin_unlock(&q->lock);
}
EXPORT_SYMBOL_GPL(rpc_sleep_on_priority);

/**
 * __rpc_do_wake_up_task_on_wq - wake up a single rpc_task
 * @wq: workqueue on which to run task
 * @queue: wait queue
 * @task: task to be woken up
 *
 * Caller must hold queue->lock, and have cleared the task queued flag.
 */
static void __rpc_do_wake_up_task_on_wq(struct workqueue_struct *wq,
		struct rpc_wait_queue *queue,
		struct rpc_task *task)
{
	/* Has the task been executed yet? If not, we cannot wake it up! */
	if (!RPC_IS_ACTIVATED(task)) {
		printk(KERN_ERR "RPC: Inactive task (%p) being woken up!\n", task);
		return;
	}

	trace_rpc_task_wakeup(task, queue);

	__rpc_remove_wait_queue(queue, task);

	rpc_make_runnable(wq, task);
}

/*
 * Wake up a queued task while the queue lock is being held
 */
static struct rpc_task *
rpc_wake_up_task_on_wq_queue_action_locked(struct workqueue_struct *wq,
		struct rpc_wait_queue *queue, struct rpc_task *task,
		bool (*action)(struct rpc_task *, void *), void *data)
{
	if (RPC_IS_QUEUED(task)) {
		smp_rmb();
		if (task->tk_waitqueue == queue) {
			if (action == NULL || action(task, data)) {
				__rpc_do_wake_up_task_on_wq(wq, queue, task);
				return task;
			}
		}
	}
	return NULL;
}

/*
 * Wake up a queued task while the queue lock is being held
 */
static void rpc_wake_up_task_queue_locked(struct rpc_wait_queue *queue,
		struct rpc_task *task)
{
	rpc_wake_up_task_on_wq_queue_action_locked(rpciod_workqueue, queue,
						   task, NULL, NULL);
}

/*
 * Wake up a task on a specific queue
 */
void rpc_wake_up_queued_task(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	if (!RPC_IS_QUEUED(task))
		return;
	spin_lock(&queue->lock);
	rpc_wake_up_task_queue_locked(queue, task);
	spin_unlock(&queue->lock);
}
EXPORT_SYMBOL_GPL(rpc_wake_up_queued_task);

static bool rpc_task_action_set_status(struct rpc_task *task, void *status)
{
	task->tk_status = *(int *)status;
	return true;
}

static void
rpc_wake_up_task_queue_set_status_locked(struct rpc_wait_queue *queue,
		struct rpc_task *task, int status)
{
	rpc_wake_up_task_on_wq_queue_action_locked(rpciod_workqueue, queue,
			task, rpc_task_action_set_status, &status);
}

/**
 * rpc_wake_up_queued_task_set_status - wake up a task and set task->tk_status
 * @queue: pointer to rpc_wait_queue
 * @task: pointer to rpc_task
 * @status: integer error value
 *
 * If @task is queued on @queue, then it is woken up, and @task->tk_status is
 * set to the value of @status.
 */
void
rpc_wake_up_queued_task_set_status(struct rpc_wait_queue *queue,
		struct rpc_task *task, int status)
{
	if (!RPC_IS_QUEUED(task))
		return;
	spin_lock(&queue->lock);
	rpc_wake_up_task_queue_set_status_locked(queue, task, status);
	spin_unlock(&queue->lock);
}

/*
 * Wake up the next task on a priority queue.
 */
static struct rpc_task *__rpc_find_next_queued_priority(struct rpc_wait_queue *queue)
{
	struct list_head *q;
	struct rpc_task *task;

	/*
	 * Service the privileged queue.
	 */
	q = &queue->tasks[RPC_NR_PRIORITY - 1];
	if (queue->maxpriority > RPC_PRIORITY_PRIVILEGED && !list_empty(q)) {
		task = list_first_entry(q, struct rpc_task, u.tk_wait.list);
		goto out;
	}

	/*
	 * Service a batch of tasks from a single owner.
	 */
	q = &queue->tasks[queue->priority];
	if (!list_empty(q) && queue->nr) {
		queue->nr--;
		task = list_first_entry(q, struct rpc_task, u.tk_wait.list);
		goto out;
	}

	/*
	 * Service the next queue.
	 */
	do {
		if (q == &queue->tasks[0])
			q = &queue->tasks[queue->maxpriority];
		else
			q = q - 1;
		if (!list_empty(q)) {
			task = list_first_entry(q, struct rpc_task, u.tk_wait.list);
			goto new_queue;
		}
	} while (q != &queue->tasks[queue->priority]);

	rpc_reset_waitqueue_priority(queue);
	return NULL;

new_queue:
	rpc_set_waitqueue_priority(queue, (unsigned int)(q - &queue->tasks[0]));
out:
	return task;
}

static struct rpc_task *__rpc_find_next_queued(struct rpc_wait_queue *queue)
{
	if (RPC_IS_PRIORITY(queue))
		return __rpc_find_next_queued_priority(queue);
	if (!list_empty(&queue->tasks[0]))
		return list_first_entry(&queue->tasks[0], struct rpc_task, u.tk_wait.list);
	return NULL;
}

/*
 * Wake up the first task on the wait queue.
 */
struct rpc_task *rpc_wake_up_first_on_wq(struct workqueue_struct *wq,
		struct rpc_wait_queue *queue,
		bool (*func)(struct rpc_task *, void *), void *data)
{
	struct rpc_task *task = NULL;

	spin_lock(&queue->lock);
	task = __rpc_find_next_queued(queue);
	if (task != NULL)
		task = rpc_wake_up_task_on_wq_queue_action_locked(wq, queue,
				task, func, data);
	spin_unlock(&queue->lock);

	return task;
}

/*
 * Wake up the first task on the wait queue.
 */
struct rpc_task *rpc_wake_up_first(struct rpc_wait_queue *queue,
		bool (*func)(struct rpc_task *, void *), void *data)
{
	return rpc_wake_up_first_on_wq(rpciod_workqueue, queue, func, data);
}
EXPORT_SYMBOL_GPL(rpc_wake_up_first);

static bool rpc_wake_up_next_func(struct rpc_task *task, void *data)
{
	return true;
}

/*
 * Wake up the next task on the wait queue.
 */
struct rpc_task *rpc_wake_up_next(struct rpc_wait_queue *queue)
{
	return rpc_wake_up_first(queue, rpc_wake_up_next_func, NULL);
}
EXPORT_SYMBOL_GPL(rpc_wake_up_next);

/**
 * rpc_wake_up_locked - wake up all rpc_tasks
 * @queue: rpc_wait_queue on which the tasks are sleeping
 *
 */
static void rpc_wake_up_locked(struct rpc_wait_queue *queue)
{
	struct rpc_task *task;

	for (;;) {
		task = __rpc_find_next_queued(queue);
		if (task == NULL)
			break;
		rpc_wake_up_task_queue_locked(queue, task);
	}
}

/**
 * rpc_wake_up - wake up all rpc_tasks
 * @queue: rpc_wait_queue on which the tasks are sleeping
 *
 * Grabs queue->lock
 */
void rpc_wake_up(struct rpc_wait_queue *queue)
{
	spin_lock(&queue->lock);
	rpc_wake_up_locked(queue);
	spin_unlock(&queue->lock);
}
EXPORT_SYMBOL_GPL(rpc_wake_up);

/**
 * rpc_wake_up_status_locked - wake up all rpc_tasks and set their status value.
 * @queue: rpc_wait_queue on which the tasks are sleeping
 * @status: status value to set
 */
static void rpc_wake_up_status_locked(struct rpc_wait_queue *queue, int status)
{
	struct rpc_task *task;

	for (;;) {
		task = __rpc_find_next_queued(queue);
		if (task == NULL)
			break;
		rpc_wake_up_task_queue_set_status_locked(queue, task, status);
	}
}

/**
 * rpc_wake_up_status - wake up all rpc_tasks and set their status value.
 * @queue: rpc_wait_queue on which the tasks are sleeping
 * @status: status value to set
 *
 * Grabs queue->lock
 */
void rpc_wake_up_status(struct rpc_wait_queue *queue, int status)
{
	spin_lock(&queue->lock);
	rpc_wake_up_status_locked(queue, status);
	spin_unlock(&queue->lock);
}
EXPORT_SYMBOL_GPL(rpc_wake_up_status);

static void __rpc_queue_timer_fn(struct work_struct *work)
{
	struct rpc_wait_queue *queue = container_of(work,
			struct rpc_wait_queue,
			timer_list.dwork.work);
	struct rpc_task *task, *n;
	unsigned long expires, now, timeo;

	spin_lock(&queue->lock);
	expires = now = jiffies;
	list_for_each_entry_safe(task, n, &queue->timer_list.list, u.tk_wait.timer_list) {
		timeo = task->tk_timeout;
		if (time_after_eq(now, timeo)) {
			trace_rpc_task_timeout(task, task->tk_action);
			task->tk_status = -ETIMEDOUT;
			rpc_wake_up_task_queue_locked(queue, task);
			continue;
		}
		if (expires == now || time_after(expires, timeo))
			expires = timeo;
	}
	if (!list_empty(&queue->timer_list.list))
		rpc_set_queue_timer(queue, expires);
	spin_unlock(&queue->lock);
}

static void __rpc_atrun(struct rpc_task *task)
{
	if (task->tk_status == -ETIMEDOUT)
		task->tk_status = 0;
}

/*
 * Run a task at a later time
 */
void rpc_delay(struct rpc_task *task, unsigned long delay)
{
	rpc_sleep_on_timeout(&delay_queue, task, __rpc_atrun, jiffies + delay);
}
EXPORT_SYMBOL_GPL(rpc_delay);

/*
 * Helper to call task->tk_ops->rpc_call_prepare
 */
void rpc_prepare_task(struct rpc_task *task)
{
	task->tk_ops->rpc_call_prepare(task, task->tk_calldata);
}

static void
rpc_init_task_statistics(struct rpc_task *task)
{
	/* Initialize retry counters */
	task->tk_garb_retry = 2;
	task->tk_cred_retry = 2;
	task->tk_rebind_retry = 2;

	/* starting timestamp */
	task->tk_start = ktime_get();
}

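/*
 * Reset the timeout count, request flags and retry counters before a task
 * is restarted (see rpc_exit_task()).
 */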
static void
rpc_reset_task_statistics(struct rpc_task *task)
{
	task->tk_timeouts = 0;
	task->tk_flags &= ~(RPC_CALL_MAJORSEEN|RPC_TASK_SENT);
	rpc_init_task_statistics(task);
}

/*
 * Helper that calls task->tk_ops->rpc_call_done if it exists
 */
void rpc_exit_task(struct rpc_task *task)
{
	trace_rpc_task_end(task, task->tk_action);
	task->tk_action = NULL;
	if (task->tk_ops->rpc_count_stats)
		task->tk_ops->rpc_count_stats(task, task->tk_calldata);
	else if (task->tk_client)
		rpc_count_iostats(task, task->tk_client->cl_metrics);
	if (task->tk_ops->rpc_call_done != NULL) {
		trace_rpc_task_call_done(task, task->tk_ops->rpc_call_done);
		task->tk_ops->rpc_call_done(task, task->tk_calldata);
		if (task->tk_action != NULL) {
			/* Always release the RPC slot and buffer memory */
			xprt_release(task);
			rpc_reset_task_statistics(task);
		}
	}
}

void rpc_signal_task(struct rpc_task *task)
{
	struct rpc_wait_queue *queue;

	if (!RPC_IS_ACTIVATED(task))
		return;

	trace_rpc_task_signalled(task, task->tk_action);
	set_bit(RPC_TASK_SIGNALLED, &task->tk_runstate);
	smp_mb__after_atomic();
	queue = READ_ONCE(task->tk_waitqueue);
	if (queue)
		rpc_wake_up_queued_task_set_status(queue, task, -ERESTARTSYS);
}

void rpc_exit(struct rpc_task *task, int status)
{
	task->tk_status = status;
	task->tk_action = rpc_exit_task;
	rpc_wake_up_queued_task(task->tk_waitqueue, task);
}
EXPORT_SYMBOL_GPL(rpc_exit);

void rpc_release_calldata(const struct rpc_call_ops *ops, void *calldata)
{
	if (ops->rpc_release != NULL)
		ops->rpc_release(calldata);
}

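/*
 * Return true if @tk is the task currently holding the transport send lock
 * on a transport that is being used for swap-out, in which case it must run
 * with PF_MEMALLOC set (see __rpc_execute()).
 */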
static bool xprt_needs_memalloc(struct rpc_xprt *xprt, struct rpc_task *tk)
{
	if (!xprt)
		return false;
	if (!atomic_read(&xprt->swapper))
		return false;
	return test_bit(XPRT_LOCKED, &xprt->state) && xprt->snd_task == tk;
}

/*
 * This is the RPC `scheduler' (or rather, the finite state machine).
 */
static void __rpc_execute(struct rpc_task *task)
{
	struct rpc_wait_queue *queue;
	int task_is_async = RPC_IS_ASYNC(task);
	int status = 0;
	unsigned long pflags = current->flags;

	WARN_ON_ONCE(RPC_IS_QUEUED(task));
	if (RPC_IS_QUEUED(task))
		return;

	for (;;) {
		void (*do_action)(struct rpc_task *);

		/*
		 * Perform the next FSM step or a pending callback.
		 *
		 * tk_action may be NULL if the task has been killed.
		 * In particular, note that rpc_killall_tasks may
		 * do this at any time, so beware when dereferencing.
		 */
		do_action = task->tk_action;
		if (task->tk_callback) {
			do_action = task->tk_callback;
			task->tk_callback = NULL;
		}
		if (!do_action)
			break;
		if (RPC_IS_SWAPPER(task) ||
		    xprt_needs_memalloc(task->tk_xprt, task))
			current->flags |= PF_MEMALLOC;

		trace_rpc_task_run_action(task, do_action);
		do_action(task);

		/*
		 * Lockless check for whether task is sleeping or not.
		 */
		if (!RPC_IS_QUEUED(task)) {
			cond_resched();
			continue;
		}

		/*
		 * Signalled tasks should exit rather than sleep.
		 */
		if (RPC_SIGNALLED(task)) {
			task->tk_rpc_status = -ERESTARTSYS;
			rpc_exit(task, -ERESTARTSYS);
		}

		/*
		 * The queue->lock protects against races with
		 * rpc_make_runnable().
		 *
		 * Note that once we clear RPC_TASK_RUNNING on an asynchronous
		 * rpc_task, rpc_make_runnable() can assign it to a
		 * different workqueue. We therefore cannot assume that the
		 * rpc_task pointer may still be dereferenced.
		 */
		queue = task->tk_waitqueue;
		spin_lock(&queue->lock);
		if (!RPC_IS_QUEUED(task)) {
			spin_unlock(&queue->lock);
			continue;
		}
		rpc_clear_running(task);
		spin_unlock(&queue->lock);
		if (task_is_async)
			goto out;

		/* sync task: sleep here */
		trace_rpc_task_sync_sleep(task, task->tk_action);
		status = out_of_line_wait_on_bit(&task->tk_runstate,
				RPC_TASK_QUEUED, rpc_wait_bit_killable,
				TASK_KILLABLE|TASK_FREEZABLE);
		if (status < 0) {
			/*
			 * When a sync task receives a signal, it exits with
			 * -ERESTARTSYS. In order to catch any callbacks that
			 * clean up after sleeping on some queue, we don't
			 * break the loop here, but go around once more.
			 */
			trace_rpc_task_signalled(task, task->tk_action);
			set_bit(RPC_TASK_SIGNALLED, &task->tk_runstate);
			task->tk_rpc_status = -ERESTARTSYS;
			rpc_exit(task, -ERESTARTSYS);
		}
		trace_rpc_task_sync_wake(task, task->tk_action);
	}

	/* Release all resources associated with the task */
	rpc_release_task(task);
out:
	current_restore_flags(pflags, PF_MEMALLOC);
}

/*
 * User-visible entry point to the scheduler.
 *
 * This may be called recursively if e.g. an async NFS task updates
 * the attributes and finds that dirty pages must be flushed.
 * NOTE: Upon exit of this function the task is guaranteed to be
 * released. In particular note that tk_release() will have
 * been called, so your task memory may have been freed.
 */
void rpc_execute(struct rpc_task *task)
{
	bool is_async = RPC_IS_ASYNC(task);

	rpc_set_active(task);
	rpc_make_runnable(rpciod_workqueue, task);
	if (!is_async) {
		unsigned int pflags = memalloc_nofs_save();
		__rpc_execute(task);
		memalloc_nofs_restore(pflags);
	}
}

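/*
 * Work function used to run asynchronous RPC tasks: rpc_make_runnable()
 * queues it on the supplied workqueue (normally rpciod), and each run
 * drives the task's state machine via __rpc_execute().
 */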
static void rpc_async_schedule(struct work_struct *work)
{
	unsigned int pflags = memalloc_nofs_save();

	__rpc_execute(container_of(work, struct rpc_task, u.tk_work));
	memalloc_nofs_restore(pflags);
}

/**
 * rpc_malloc - allocate RPC buffer resources
 * @task: RPC task
 *
 * A single memory region is allocated, which is split between the
 * RPC call and RPC reply that this task is being used for. When
 * this RPC is retired, the memory is released by calling rpc_free.
 *
 * To prevent rpciod from hanging, this allocator never sleeps,
 * returning -ENOMEM and suppressing warning if the request cannot
 * be serviced immediately. The caller can arrange to sleep in a
 * way that is safe for rpciod.
 *
 * Most requests are 'small' (under 2KiB) and can be serviced from a
 * mempool, ensuring that NFS reads and writes can always proceed,
 * and that there is good locality of reference for these buffers.
 */
int rpc_malloc(struct rpc_task *task)
{
	struct rpc_rqst *rqst = task->tk_rqstp;
	size_t size = rqst->rq_callsize + rqst->rq_rcvsize;
	struct rpc_buffer *buf;
	gfp_t gfp = rpc_task_gfp_mask();

	size += sizeof(struct rpc_buffer);
	if (size <= RPC_BUFFER_MAXSIZE) {
		buf = kmem_cache_alloc(rpc_buffer_slabp, gfp);
		/* Reach for the mempool if dynamic allocation fails */
		if (!buf && RPC_IS_ASYNC(task))
			buf = mempool_alloc(rpc_buffer_mempool, GFP_NOWAIT);
	} else
		buf = kmalloc(size, gfp);

	if (!buf)
		return -ENOMEM;

	buf->len = size;
	rqst->rq_buffer = buf->data;
	rqst->rq_rbuffer = (char *)rqst->rq_buffer + rqst->rq_callsize;
	return 0;
}
EXPORT_SYMBOL_GPL(rpc_malloc);

/**
 * rpc_free - free RPC buffer resources allocated via rpc_malloc
 * @task: RPC task
 *
 */
void rpc_free(struct rpc_task *task)
{
	void *buffer = task->tk_rqstp->rq_buffer;
	size_t size;
	struct rpc_buffer *buf;

	buf = container_of(buffer, struct rpc_buffer, data);
	size = buf->len;

	if (size <= RPC_BUFFER_MAXSIZE)
		mempool_free(buf, rpc_buffer_mempool);
	else
		kfree(buf);
}
EXPORT_SYMBOL_GPL(rpc_free);

/*
 * Creation and deletion of RPC task structures
 */
static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *task_setup_data)
{
	memset(task, 0, sizeof(*task));
	atomic_set(&task->tk_count, 1);
	task->tk_flags = task_setup_data->flags;
	task->tk_ops = task_setup_data->callback_ops;
	task->tk_calldata = task_setup_data->callback_data;
	INIT_LIST_HEAD(&task->tk_task);

	task->tk_priority = task_setup_data->priority - RPC_PRIORITY_LOW;
	task->tk_owner = current->tgid;

	/* Initialize workqueue for async tasks */
	task->tk_workqueue = task_setup_data->workqueue;

	task->tk_xprt = rpc_task_get_xprt(task_setup_data->rpc_client,
			xprt_get(task_setup_data->rpc_xprt));

	task->tk_op_cred = get_rpccred(task_setup_data->rpc_op_cred);

	if (task->tk_ops->rpc_call_prepare != NULL)
		task->tk_action = rpc_prepare_task;

	rpc_init_task_statistics(task);
}

static struct rpc_task *rpc_alloc_task(void)
{
	struct rpc_task *task;

	task = kmem_cache_alloc(rpc_task_slabp, rpc_task_gfp_mask());
	if (task)
		return task;
	return mempool_alloc(rpc_task_mempool, GFP_NOWAIT);
}

/*
 * Create a new task for the specified client.
 */
struct rpc_task *rpc_new_task(const struct rpc_task_setup *setup_data)
{
	struct rpc_task *task = setup_data->task;
	unsigned short flags = 0;

	if (task == NULL) {
		task = rpc_alloc_task();
		if (task == NULL) {
			rpc_release_calldata(setup_data->callback_ops,
					     setup_data->callback_data);
			return ERR_PTR(-ENOMEM);
		}
		flags = RPC_TASK_DYNAMIC;
	}

	rpc_init_task(task, setup_data);
	task->tk_flags |= flags;
	return task;
}

/*
 * rpc_free_task - release rpc task and perform cleanups
 *
 * Note that we free up the rpc_task _after_ rpc_release_calldata()
 * in order to work around a workqueue dependency issue.
 *
 * Tejun Heo states:
 * "Workqueue currently considers two work items to be the same if they're
 * on the same address and won't execute them concurrently - ie. it
 * makes a work item which is queued again while being executed wait
 * for the previous execution to complete.
 *
 * If a work function frees the work item, and then waits for an event
 * which should be performed by another work item and *that* work item
 * recycles the freed work item, it can create a false dependency loop.
 * There really is no reliable way to detect this short of verifying
 * every memory free."
 *
 */
static void rpc_free_task(struct rpc_task *task)
{
	unsigned short tk_flags = task->tk_flags;

	put_rpccred(task->tk_op_cred);
	rpc_release_calldata(task->tk_ops, task->tk_calldata);

	if (tk_flags & RPC_TASK_DYNAMIC)
		mempool_free(task, rpc_task_mempool);
}

static void rpc_async_release(struct work_struct *work)
{
	unsigned int pflags = memalloc_nofs_save();

	rpc_free_task(container_of(work, struct rpc_task, u.tk_work));
	memalloc_nofs_restore(pflags);
}

static void rpc_release_resources_task(struct rpc_task *task)
{
	xprt_release(task);
	if (task->tk_msg.rpc_cred) {
		if (!(task->tk_flags & RPC_TASK_CRED_NOREF))
			put_cred(task->tk_msg.rpc_cred);
		task->tk_msg.rpc_cred = NULL;
	}
	rpc_task_release_client(task);
}

static void rpc_final_put_task(struct rpc_task *task,
		struct workqueue_struct *q)
{
	if (q != NULL) {
		INIT_WORK(&task->u.tk_work, rpc_async_release);
		queue_work(q, &task->u.tk_work);
	} else
		rpc_free_task(task);
}

static void rpc_do_put_task(struct rpc_task *task, struct workqueue_struct *q)
{
	if (atomic_dec_and_test(&task->tk_count)) {
		rpc_release_resources_task(task);
		rpc_final_put_task(task, q);
	}
}

void rpc_put_task(struct rpc_task *task)
{
	rpc_do_put_task(task, NULL);
}
EXPORT_SYMBOL_GPL(rpc_put_task);

void rpc_put_task_async(struct rpc_task *task)
{
	rpc_do_put_task(task, task->tk_workqueue);
}
EXPORT_SYMBOL_GPL(rpc_put_task_async);

static void rpc_release_task(struct rpc_task *task)
{
	WARN_ON_ONCE(RPC_IS_QUEUED(task));

	rpc_release_resources_task(task);

	/*
	 * Note: at this point we have been removed from rpc_clnt->cl_tasks,
	 * so it should be safe to use task->tk_count as a test for whether
	 * or not any other processes still hold references to our rpc_task.
	 */
	if (atomic_read(&task->tk_count) != 1 + !RPC_IS_ASYNC(task)) {
		/* Wake up anyone who may be waiting for task completion */
		if (!rpc_complete_task(task))
			return;
	} else {
		if (!atomic_dec_and_test(&task->tk_count))
			return;
	}
	rpc_final_put_task(task, task->tk_workqueue);
}

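/*
 * rpciod_up()/rpciod_down() only pin and unpin this module; the rpciod and
 * xprtiod workqueues themselves are created at module init time via
 * rpc_init_mempool() -> rpciod_start() and torn down in rpciod_stop().
 */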
int rpciod_up(void)
{
	return try_module_get(THIS_MODULE) ? 0 : -EINVAL;
}

void rpciod_down(void)
{
	module_put(THIS_MODULE);
}

/*
 * Start up the rpciod workqueue.
 */
static int rpciod_start(void)
{
	struct workqueue_struct *wq;

	/*
	 * Create the rpciod thread and wait for it to start.
	 */
	wq = alloc_workqueue("rpciod", WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
	if (!wq)
		goto out_failed;
	rpciod_workqueue = wq;
	wq = alloc_workqueue("xprtiod", WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
	if (!wq)
		goto free_rpciod;
	xprtiod_workqueue = wq;
	return 1;
free_rpciod:
	wq = rpciod_workqueue;
	rpciod_workqueue = NULL;
	destroy_workqueue(wq);
out_failed:
	return 0;
}

static void rpciod_stop(void)
{
	struct workqueue_struct *wq = NULL;

	if (rpciod_workqueue == NULL)
		return;

	wq = rpciod_workqueue;
	rpciod_workqueue = NULL;
	destroy_workqueue(wq);
	wq = xprtiod_workqueue;
	xprtiod_workqueue = NULL;
	destroy_workqueue(wq);
}

void
rpc_destroy_mempool(void)
{
	rpciod_stop();
	mempool_destroy(rpc_buffer_mempool);
	mempool_destroy(rpc_task_mempool);
	kmem_cache_destroy(rpc_task_slabp);
	kmem_cache_destroy(rpc_buffer_slabp);
	rpc_destroy_wait_queue(&delay_queue);
}

int
rpc_init_mempool(void)
{
	/*
	 * The following is not strictly a mempool initialisation,
	 * but there is no harm in doing it here
	 */
	rpc_init_wait_queue(&delay_queue, "delayq");
	if (!rpciod_start())
		goto err_nomem;

	rpc_task_slabp = kmem_cache_create("rpc_tasks",
					   sizeof(struct rpc_task),
					   0, SLAB_HWCACHE_ALIGN,
					   NULL);
	if (!rpc_task_slabp)
		goto err_nomem;
	rpc_buffer_slabp = kmem_cache_create("rpc_buffers",
					     RPC_BUFFER_MAXSIZE,
					     0, SLAB_HWCACHE_ALIGN,
					     NULL);
	if (!rpc_buffer_slabp)
		goto err_nomem;
	rpc_task_mempool = mempool_create_slab_pool(RPC_TASK_POOLSIZE,
						    rpc_task_slabp);
	if (!rpc_task_mempool)
		goto err_nomem;
	rpc_buffer_mempool = mempool_create_slab_pool(RPC_BUFFER_POOLSIZE,
						      rpc_buffer_slabp);
	if (!rpc_buffer_mempool)
		goto err_nomem;
	return 0;
err_nomem:
	rpc_destroy_mempool();
	return -ENOMEM;
}