// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/net/sunrpc/sched.c
 *
 * Scheduling for synchronous and asynchronous RPC requests.
 *
 * Copyright (C) 1996 Olaf Kirch, <okir@monad.swb.de>
 *
 * TCP NFS related read + write fixes
 * (C) 1999 Dave Airlie, University of Limerick, Ireland <airlied@linux.ie>
 */

#include <linux/module.h>

#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/mempool.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/freezer.h>
#include <linux/sched/mm.h>

#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/metrics.h>

#include "sunrpc.h"

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
#define RPCDBG_FACILITY		RPCDBG_SCHED
#endif

#define CREATE_TRACE_POINTS
#include <trace/events/sunrpc.h>

/*
 * RPC slabs and memory pools
 */
#define RPC_BUFFER_MAXSIZE	(2048)
#define RPC_BUFFER_POOLSIZE	(8)
#define RPC_TASK_POOLSIZE	(8)
static struct kmem_cache	*rpc_task_slabp __read_mostly;
static struct kmem_cache	*rpc_buffer_slabp __read_mostly;
static mempool_t	*rpc_task_mempool __read_mostly;
static mempool_t	*rpc_buffer_mempool __read_mostly;

static void rpc_async_schedule(struct work_struct *);
static void rpc_release_task(struct rpc_task *task);
static void __rpc_queue_timer_fn(struct work_struct *);

/*
 * RPC tasks sit here while waiting for conditions to improve.
 */
static struct rpc_wait_queue delay_queue;

/*
 * rpciod-related stuff
 */
struct workqueue_struct *rpciod_workqueue __read_mostly;
struct workqueue_struct *xprtiod_workqueue __read_mostly;
EXPORT_SYMBOL_GPL(xprtiod_workqueue);

unsigned long
rpc_task_timeout(const struct rpc_task *task)
{
	unsigned long timeout = READ_ONCE(task->tk_timeout);

	if (timeout != 0) {
		unsigned long now = jiffies;
		if (time_before(now, timeout))
			return timeout - now;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(rpc_task_timeout);
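
/*
 * Illustrative sketch (not taken from this file): a caller holding a
 * reference to a task can use rpc_task_timeout() to convert the absolute
 * tk_timeout value into a remaining delay, e.g. to re-arm its own timer:
 *
 *	unsigned long left = rpc_task_timeout(task);
 *
 *	if (left != 0)
 *		mod_delayed_work(system_wq, &my_work, left);
 *
 * A return value of 0 means that no timeout is armed or that it has
 * already expired.
 */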

/*
 * Disable the timer for a given RPC task. Should be called with
 * queue->lock and bh_disabled in order to avoid races within
 * rpc_run_timer().
 */
static void
__rpc_disable_timer(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	if (list_empty(&task->u.tk_wait.timer_list))
		return;
	dprintk("RPC: %5u disabling timer\n", task->tk_pid);
	task->tk_timeout = 0;
	list_del(&task->u.tk_wait.timer_list);
	if (list_empty(&queue->timer_list.list))
		cancel_delayed_work(&queue->timer_list.dwork);
}

static void
rpc_set_queue_timer(struct rpc_wait_queue *queue, unsigned long expires)
{
	unsigned long now = jiffies;
	queue->timer_list.expires = expires;
	if (time_before_eq(expires, now))
		expires = 0;
	else
		expires -= now;
	mod_delayed_work(rpciod_workqueue, &queue->timer_list.dwork, expires);
}

/*
 * Set up a timer for the current task.
 */
static void
__rpc_add_timer(struct rpc_wait_queue *queue, struct rpc_task *task,
		unsigned long timeout)
{
	dprintk("RPC: %5u setting alarm for %u ms\n",
		task->tk_pid, jiffies_to_msecs(timeout - jiffies));

	task->tk_timeout = timeout;
	if (list_empty(&queue->timer_list.list) ||
	    time_before(timeout, queue->timer_list.expires))
		rpc_set_queue_timer(queue, timeout);
	list_add(&task->u.tk_wait.timer_list, &queue->timer_list.list);
}

static void rpc_set_waitqueue_priority(struct rpc_wait_queue *queue, int priority)
{
	if (queue->priority != priority) {
		queue->priority = priority;
		queue->nr = 1U << priority;
	}
}

static void rpc_reset_waitqueue_priority(struct rpc_wait_queue *queue)
{
	rpc_set_waitqueue_priority(queue, queue->maxpriority);
}

/*
 * Add a request to a queue list
 */
static void
__rpc_list_enqueue_task(struct list_head *q, struct rpc_task *task)
{
	struct rpc_task *t;

	list_for_each_entry(t, q, u.tk_wait.list) {
		if (t->tk_owner == task->tk_owner) {
			list_add_tail(&task->u.tk_wait.links,
					&t->u.tk_wait.links);
			/* Cache the queue head in task->u.tk_wait.list */
			task->u.tk_wait.list.next = q;
			task->u.tk_wait.list.prev = NULL;
			return;
		}
	}
	INIT_LIST_HEAD(&task->u.tk_wait.links);
	list_add_tail(&task->u.tk_wait.list, q);
}

/*
 * Remove request from a queue list
 */
static void
__rpc_list_dequeue_task(struct rpc_task *task)
{
	struct list_head *q;
	struct rpc_task *t;

	if (task->u.tk_wait.list.prev == NULL) {
		list_del(&task->u.tk_wait.links);
		return;
	}
	if (!list_empty(&task->u.tk_wait.links)) {
		t = list_first_entry(&task->u.tk_wait.links,
				struct rpc_task,
				u.tk_wait.links);
		/* Assume __rpc_list_enqueue_task() cached the queue head */
		q = t->u.tk_wait.list.next;
		list_add_tail(&t->u.tk_wait.list, q);
		list_del(&task->u.tk_wait.links);
	}
	list_del(&task->u.tk_wait.list);
}
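
/*
 * Worked example of the layout built above (illustrative only): with three
 * queued tasks A, B and C, where A and C share the same tk_owner,
 * __rpc_list_enqueue_task() leaves
 *
 *	queue list:	q -> A -> B -> q
 *	A's links:	A -> C -> A
 *
 * C never sits on the queue list itself: its u.tk_wait.list.next caches the
 * queue head q and .prev is NULL, which is how __rpc_list_dequeue_task()
 * tells the two cases apart.
 */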

/*
 * Add new request to a priority queue.
 */
static void __rpc_add_wait_queue_priority(struct rpc_wait_queue *queue,
		struct rpc_task *task,
		unsigned char queue_priority)
{
	if (unlikely(queue_priority > queue->maxpriority))
		queue_priority = queue->maxpriority;
	__rpc_list_enqueue_task(&queue->tasks[queue_priority], task);
}

/*
 * Add new request to wait queue.
 *
 * Swapper tasks always get inserted at the head of the queue.
 * This should avoid many nasty memory deadlocks and hopefully
 * improve overall performance.
 * Everyone else gets appended to the queue to ensure proper FIFO behavior.
 */
static void __rpc_add_wait_queue(struct rpc_wait_queue *queue,
		struct rpc_task *task,
		unsigned char queue_priority)
{
	WARN_ON_ONCE(RPC_IS_QUEUED(task));
	if (RPC_IS_QUEUED(task))
		return;

	INIT_LIST_HEAD(&task->u.tk_wait.timer_list);
	if (RPC_IS_PRIORITY(queue))
		__rpc_add_wait_queue_priority(queue, task, queue_priority);
	else if (RPC_IS_SWAPPER(task))
		list_add(&task->u.tk_wait.list, &queue->tasks[0]);
	else
		list_add_tail(&task->u.tk_wait.list, &queue->tasks[0]);
	task->tk_waitqueue = queue;
	queue->qlen++;
	/* barrier matches the read in rpc_wake_up_task_queue_locked() */
	smp_wmb();
	rpc_set_queued(task);

	dprintk("RPC: %5u added to queue %p \"%s\"\n",
			task->tk_pid, queue, rpc_qname(queue));
}

/*
 * Remove request from a priority queue.
 */
static void __rpc_remove_wait_queue_priority(struct rpc_task *task)
{
	__rpc_list_dequeue_task(task);
}

/*
 * Remove request from queue.
 * Note: must be called with spin lock held.
 */
static void __rpc_remove_wait_queue(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	__rpc_disable_timer(queue, task);
	if (RPC_IS_PRIORITY(queue))
		__rpc_remove_wait_queue_priority(task);
	else
		list_del(&task->u.tk_wait.list);
	queue->qlen--;
	dprintk("RPC: %5u removed from queue %p \"%s\"\n",
			task->tk_pid, queue, rpc_qname(queue));
}

static void __rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname, unsigned char nr_queues)
{
	int i;

	spin_lock_init(&queue->lock);
	for (i = 0; i < ARRAY_SIZE(queue->tasks); i++)
		INIT_LIST_HEAD(&queue->tasks[i]);
	queue->maxpriority = nr_queues - 1;
	rpc_reset_waitqueue_priority(queue);
	queue->qlen = 0;
	queue->timer_list.expires = 0;
	INIT_DELAYED_WORK(&queue->timer_list.dwork, __rpc_queue_timer_fn);
	INIT_LIST_HEAD(&queue->timer_list.list);
	rpc_assign_waitqueue_name(queue, qname);
}

void rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname)
{
	__rpc_init_priority_wait_queue(queue, qname, RPC_NR_PRIORITY);
}
EXPORT_SYMBOL_GPL(rpc_init_priority_wait_queue);

void rpc_init_wait_queue(struct rpc_wait_queue *queue, const char *qname)
{
	__rpc_init_priority_wait_queue(queue, qname, 1);
}
EXPORT_SYMBOL_GPL(rpc_init_wait_queue);

void rpc_destroy_wait_queue(struct rpc_wait_queue *queue)
{
	cancel_delayed_work_sync(&queue->timer_list.dwork);
}
EXPORT_SYMBOL_GPL(rpc_destroy_wait_queue);
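
/*
 * Minimal usage sketch (hypothetical caller, not taken from this file):
 *
 *	struct rpc_wait_queue backlog;
 *
 *	rpc_init_wait_queue(&backlog, "my_backlog");
 *	...
 *	rpc_destroy_wait_queue(&backlog);
 *
 * rpc_destroy_wait_queue() only cancels the queue's timer work; the caller
 * is responsible for ensuring that no tasks are still sleeping on it.
 */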

static int rpc_wait_bit_killable(struct wait_bit_key *key, int mode)
{
	freezable_schedule_unsafe();
	if (signal_pending_state(mode, current))
		return -ERESTARTSYS;
	return 0;
}

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) || IS_ENABLED(CONFIG_TRACEPOINTS)
static void rpc_task_set_debuginfo(struct rpc_task *task)
{
	static atomic_t rpc_pid;

	task->tk_pid = atomic_inc_return(&rpc_pid);
}
#else
static inline void rpc_task_set_debuginfo(struct rpc_task *task)
{
}
#endif

static void rpc_set_active(struct rpc_task *task)
{
	rpc_task_set_debuginfo(task);
	set_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
	trace_rpc_task_begin(task, NULL);
}

/*
 * Mark an RPC call as having completed by clearing the 'active' bit
 * and then waking up all tasks that were sleeping.
 */
static int rpc_complete_task(struct rpc_task *task)
{
	void *m = &task->tk_runstate;
	wait_queue_head_t *wq = bit_waitqueue(m, RPC_TASK_ACTIVE);
	struct wait_bit_key k = __WAIT_BIT_KEY_INITIALIZER(m, RPC_TASK_ACTIVE);
	unsigned long flags;
	int ret;

	trace_rpc_task_complete(task, NULL);

	spin_lock_irqsave(&wq->lock, flags);
	clear_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
	ret = atomic_dec_and_test(&task->tk_count);
	if (waitqueue_active(wq))
		__wake_up_locked_key(wq, TASK_NORMAL, &k);
	spin_unlock_irqrestore(&wq->lock, flags);
	return ret;
}

/*
 * Allow callers to wait for completion of an RPC call
 *
 * Note the use of out_of_line_wait_on_bit() rather than wait_on_bit()
 * to enforce taking of the wq->lock and hence avoid races with
 * rpc_complete_task().
 */
int __rpc_wait_for_completion_task(struct rpc_task *task, wait_bit_action_f *action)
{
	if (action == NULL)
		action = rpc_wait_bit_killable;
	return out_of_line_wait_on_bit(&task->tk_runstate, RPC_TASK_ACTIVE,
			action, TASK_KILLABLE);
}
EXPORT_SYMBOL_GPL(__rpc_wait_for_completion_task);
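
/*
 * Hedged usage sketch: a caller that started an asynchronous task and wants
 * to block until it finishes typically does something like the following
 * (rpc_wait_for_completion_task() is the NULL-action wrapper declared in
 * sched.h; the surrounding code is illustrative):
 *
 *	task = rpc_run_task(&task_setup_data);
 *	if (!IS_ERR(task)) {
 *		rpc_wait_for_completion_task(task);
 *		status = task->tk_status;
 *		rpc_put_task(task);
 *	}
 */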

/*
 * Make an RPC task runnable.
 *
 * Note: If the task is ASYNC, and is being made runnable after sitting on an
 * rpc_wait_queue, this must be called with the queue spinlock held to protect
 * the wait queue operation.
 * Note the ordering of rpc_test_and_set_running() and rpc_clear_queued(),
 * which is needed to ensure that __rpc_execute() doesn't loop (due to the
 * lockless RPC_IS_QUEUED() test) before we've had a chance to test
 * the RPC_TASK_RUNNING flag.
 */
static void rpc_make_runnable(struct workqueue_struct *wq,
		struct rpc_task *task)
{
	bool need_wakeup = !rpc_test_and_set_running(task);

	rpc_clear_queued(task);
	if (!need_wakeup)
		return;
	if (RPC_IS_ASYNC(task)) {
		INIT_WORK(&task->u.tk_work, rpc_async_schedule);
		queue_work(wq, &task->u.tk_work);
	} else
		wake_up_bit(&task->tk_runstate, RPC_TASK_QUEUED);
}

/*
 * Prepare for sleeping on a wait queue.
 * By always appending tasks to the list we ensure FIFO behavior.
 * NB: An RPC task will only receive interrupt-driven events as long
 * as it's on a wait queue.
 */
static void __rpc_sleep_on_priority(struct rpc_wait_queue *q,
		struct rpc_task *task,
		unsigned char queue_priority)
{
	dprintk("RPC: %5u sleep_on(queue \"%s\" time %lu)\n",
			task->tk_pid, rpc_qname(q), jiffies);

	trace_rpc_task_sleep(task, q);

	__rpc_add_wait_queue(q, task, queue_priority);
}

static void __rpc_sleep_on_priority_timeout(struct rpc_wait_queue *q,
		struct rpc_task *task, unsigned long timeout,
		unsigned char queue_priority)
{
	if (time_is_after_jiffies(timeout)) {
		__rpc_sleep_on_priority(q, task, queue_priority);
		__rpc_add_timer(q, task, timeout);
	} else
		task->tk_status = -ETIMEDOUT;
}

static void rpc_set_tk_callback(struct rpc_task *task, rpc_action action)
{
	if (action && !WARN_ON_ONCE(task->tk_callback != NULL))
		task->tk_callback = action;
}

static bool rpc_sleep_check_activated(struct rpc_task *task)
{
	/* We shouldn't ever put an inactive task to sleep */
	if (WARN_ON_ONCE(!RPC_IS_ACTIVATED(task))) {
		task->tk_status = -EIO;
		rpc_put_task_async(task);
		return false;
	}
	return true;
}

void rpc_sleep_on_timeout(struct rpc_wait_queue *q, struct rpc_task *task,
				rpc_action action, unsigned long timeout)
{
	if (!rpc_sleep_check_activated(task))
		return;

	rpc_set_tk_callback(task, action);

	/*
	 * Protect the queue operations.
	 */
	spin_lock(&q->lock);
	__rpc_sleep_on_priority_timeout(q, task, timeout, task->tk_priority);
	spin_unlock(&q->lock);
}
EXPORT_SYMBOL_GPL(rpc_sleep_on_timeout);

void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
				rpc_action action)
{
	if (!rpc_sleep_check_activated(task))
		return;

	rpc_set_tk_callback(task, action);

	WARN_ON_ONCE(task->tk_timeout != 0);
	/*
	 * Protect the queue operations.
	 */
	spin_lock(&q->lock);
	__rpc_sleep_on_priority(q, task, task->tk_priority);
	spin_unlock(&q->lock);
}
EXPORT_SYMBOL_GPL(rpc_sleep_on);

void rpc_sleep_on_priority_timeout(struct rpc_wait_queue *q,
		struct rpc_task *task, unsigned long timeout, int priority)
{
	if (!rpc_sleep_check_activated(task))
		return;

	priority -= RPC_PRIORITY_LOW;
	/*
	 * Protect the queue operations.
	 */
	spin_lock(&q->lock);
	__rpc_sleep_on_priority_timeout(q, task, timeout, priority);
	spin_unlock(&q->lock);
}
EXPORT_SYMBOL_GPL(rpc_sleep_on_priority_timeout);

void rpc_sleep_on_priority(struct rpc_wait_queue *q, struct rpc_task *task,
		int priority)
{
	if (!rpc_sleep_check_activated(task))
		return;

	WARN_ON_ONCE(task->tk_timeout != 0);
	priority -= RPC_PRIORITY_LOW;
	/*
	 * Protect the queue operations.
	 */
	spin_lock(&q->lock);
	__rpc_sleep_on_priority(q, task, priority);
	spin_unlock(&q->lock);
}
EXPORT_SYMBOL_GPL(rpc_sleep_on_priority);
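
/*
 * Illustrative pattern (hypothetical tk_action step, not taken from this
 * file): state-machine callbacks usually name the next action and then put
 * the task to sleep on a queue; the queue owner later wakes it:
 *
 *	static void my_call_reserve(struct rpc_task *task)
 *	{
 *		task->tk_action = my_call_reserveresult;
 *		rpc_sleep_on(&some_queue, task, NULL);
 *	}
 *
 * The task must already be ACTIVATED, and tk_timeout must be zero when the
 * non-timeout variants are used.
 */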

/**
 * __rpc_do_wake_up_task_on_wq - wake up a single rpc_task
 * @wq: workqueue on which to run task
 * @queue: wait queue
 * @task: task to be woken up
 *
 * Caller must hold queue->lock, and have cleared the task queued flag.
 */
static void __rpc_do_wake_up_task_on_wq(struct workqueue_struct *wq,
		struct rpc_wait_queue *queue,
		struct rpc_task *task)
{
	dprintk("RPC: %5u __rpc_wake_up_task (now %lu)\n",
			task->tk_pid, jiffies);

	/* Has the task been executed yet? If not, we cannot wake it up! */
	if (!RPC_IS_ACTIVATED(task)) {
		printk(KERN_ERR "RPC: Inactive task (%p) being woken up!\n", task);
		return;
	}

	trace_rpc_task_wakeup(task, queue);

	__rpc_remove_wait_queue(queue, task);

	rpc_make_runnable(wq, task);

	dprintk("RPC: __rpc_wake_up_task done\n");
}

/*
 * Wake up a queued task while the queue lock is being held
 */
static struct rpc_task *
rpc_wake_up_task_on_wq_queue_action_locked(struct workqueue_struct *wq,
		struct rpc_wait_queue *queue, struct rpc_task *task,
		bool (*action)(struct rpc_task *, void *), void *data)
{
	if (RPC_IS_QUEUED(task)) {
		smp_rmb();
		if (task->tk_waitqueue == queue) {
			if (action == NULL || action(task, data)) {
				__rpc_do_wake_up_task_on_wq(wq, queue, task);
				return task;
			}
		}
	}
	return NULL;
}

/*
 * Wake up a queued task while the queue lock is being held
 */
static void rpc_wake_up_task_queue_locked(struct rpc_wait_queue *queue,
		struct rpc_task *task)
{
	rpc_wake_up_task_on_wq_queue_action_locked(rpciod_workqueue, queue,
						   task, NULL, NULL);
}

/*
 * Wake up a task on a specific queue
 */
void rpc_wake_up_queued_task(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	if (!RPC_IS_QUEUED(task))
		return;
	spin_lock(&queue->lock);
	rpc_wake_up_task_queue_locked(queue, task);
	spin_unlock(&queue->lock);
}
EXPORT_SYMBOL_GPL(rpc_wake_up_queued_task);

static bool rpc_task_action_set_status(struct rpc_task *task, void *status)
{
	task->tk_status = *(int *)status;
	return true;
}

static void
rpc_wake_up_task_queue_set_status_locked(struct rpc_wait_queue *queue,
		struct rpc_task *task, int status)
{
	rpc_wake_up_task_on_wq_queue_action_locked(rpciod_workqueue, queue,
			task, rpc_task_action_set_status, &status);
}

/**
 * rpc_wake_up_queued_task_set_status - wake up a task and set task->tk_status
 * @queue: pointer to rpc_wait_queue
 * @task: pointer to rpc_task
 * @status: integer error value
 *
 * If @task is queued on @queue, then it is woken up, and @task->tk_status is
 * set to the value of @status.
 */
void
rpc_wake_up_queued_task_set_status(struct rpc_wait_queue *queue,
		struct rpc_task *task, int status)
{
	if (!RPC_IS_QUEUED(task))
		return;
	spin_lock(&queue->lock);
	rpc_wake_up_task_queue_set_status_locked(queue, task, status);
	spin_unlock(&queue->lock);
}
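
/*
 * Example (sketch only, caller names are illustrative): transport code that
 * detects a fatal connection error while a task is waiting on its queue can
 * wake the task and hand it the error in one step:
 *
 *	rpc_wake_up_queued_task_set_status(&xprt->pending, task, -ENOTCONN);
 *
 * The call is a no-op if the task is not currently queued on that queue.
 */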

/*
 * Wake up the next task on a priority queue.
 */
static struct rpc_task *__rpc_find_next_queued_priority(struct rpc_wait_queue *queue)
{
	struct list_head *q;
	struct rpc_task *task;

	/*
	 * Service a batch of tasks from a single owner.
	 */
	q = &queue->tasks[queue->priority];
	if (!list_empty(q) && --queue->nr) {
		task = list_first_entry(q, struct rpc_task, u.tk_wait.list);
		goto out;
	}

	/*
	 * Service the next queue.
	 */
	do {
		if (q == &queue->tasks[0])
			q = &queue->tasks[queue->maxpriority];
		else
			q = q - 1;
		if (!list_empty(q)) {
			task = list_first_entry(q, struct rpc_task, u.tk_wait.list);
			goto new_queue;
		}
	} while (q != &queue->tasks[queue->priority]);

	rpc_reset_waitqueue_priority(queue);
	return NULL;

new_queue:
	rpc_set_waitqueue_priority(queue, (unsigned int)(q - &queue->tasks[0]));
out:
	return task;
}

static struct rpc_task *__rpc_find_next_queued(struct rpc_wait_queue *queue)
{
	if (RPC_IS_PRIORITY(queue))
		return __rpc_find_next_queued_priority(queue);
	if (!list_empty(&queue->tasks[0]))
		return list_first_entry(&queue->tasks[0], struct rpc_task, u.tk_wait.list);
	return NULL;
}

/*
 * Wake up the first task on the wait queue.
 */
struct rpc_task *rpc_wake_up_first_on_wq(struct workqueue_struct *wq,
		struct rpc_wait_queue *queue,
		bool (*func)(struct rpc_task *, void *), void *data)
{
	struct rpc_task	*task = NULL;

	dprintk("RPC: wake_up_first(%p \"%s\")\n",
			queue, rpc_qname(queue));
	spin_lock(&queue->lock);
	task = __rpc_find_next_queued(queue);
	if (task != NULL)
		task = rpc_wake_up_task_on_wq_queue_action_locked(wq, queue,
				task, func, data);
	spin_unlock(&queue->lock);

	return task;
}

/*
 * Wake up the first task on the wait queue.
 */
struct rpc_task *rpc_wake_up_first(struct rpc_wait_queue *queue,
		bool (*func)(struct rpc_task *, void *), void *data)
{
	return rpc_wake_up_first_on_wq(rpciod_workqueue, queue, func, data);
}
EXPORT_SYMBOL_GPL(rpc_wake_up_first);
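
/*
 * Sketch of a wake-one-with-predicate caller (all names below are
 * hypothetical, shown only to illustrate the calling convention):
 *
 *	static bool my_grab_resource(struct rpc_task *task, void *data)
 *	{
 *		struct my_pool *pool = data;
 *
 *		return my_pool_assign(pool, task);
 *	}
 *
 *	...
 *	rpc_wake_up_first(&pool->backlog, my_grab_resource, pool);
 *
 * Only the first queued candidate is tested; if the predicate returns false,
 * the task stays on the queue and NULL is returned.
 */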

static bool rpc_wake_up_next_func(struct rpc_task *task, void *data)
{
	return true;
}

/*
 * Wake up the next task on the wait queue.
 */
struct rpc_task *rpc_wake_up_next(struct rpc_wait_queue *queue)
{
	return rpc_wake_up_first(queue, rpc_wake_up_next_func, NULL);
}
EXPORT_SYMBOL_GPL(rpc_wake_up_next);

/**
 * rpc_wake_up - wake up all rpc_tasks
 * @queue: rpc_wait_queue on which the tasks are sleeping
 *
 * Grabs queue->lock
 */
void rpc_wake_up(struct rpc_wait_queue *queue)
{
	struct list_head *head;

	spin_lock(&queue->lock);
	head = &queue->tasks[queue->maxpriority];
	for (;;) {
		while (!list_empty(head)) {
			struct rpc_task *task;
			task = list_first_entry(head,
					struct rpc_task,
					u.tk_wait.list);
			rpc_wake_up_task_queue_locked(queue, task);
		}
		if (head == &queue->tasks[0])
			break;
		head--;
	}
	spin_unlock(&queue->lock);
}
EXPORT_SYMBOL_GPL(rpc_wake_up);

/**
 * rpc_wake_up_status - wake up all rpc_tasks and set their status value.
 * @queue: rpc_wait_queue on which the tasks are sleeping
 * @status: status value to set
 *
 * Grabs queue->lock
 */
void rpc_wake_up_status(struct rpc_wait_queue *queue, int status)
{
	struct list_head *head;

	spin_lock(&queue->lock);
	head = &queue->tasks[queue->maxpriority];
	for (;;) {
		while (!list_empty(head)) {
			struct rpc_task *task;
			task = list_first_entry(head,
					struct rpc_task,
					u.tk_wait.list);
			task->tk_status = status;
			rpc_wake_up_task_queue_locked(queue, task);
		}
		if (head == &queue->tasks[0])
			break;
		head--;
	}
	spin_unlock(&queue->lock);
}
EXPORT_SYMBOL_GPL(rpc_wake_up_status);

static void __rpc_queue_timer_fn(struct work_struct *work)
{
	struct rpc_wait_queue *queue = container_of(work,
			struct rpc_wait_queue,
			timer_list.dwork.work);
	struct rpc_task *task, *n;
	unsigned long expires, now, timeo;

	spin_lock(&queue->lock);
	expires = now = jiffies;
	list_for_each_entry_safe(task, n, &queue->timer_list.list, u.tk_wait.timer_list) {
		timeo = task->tk_timeout;
		if (time_after_eq(now, timeo)) {
			dprintk("RPC: %5u timeout\n", task->tk_pid);
			task->tk_status = -ETIMEDOUT;
			rpc_wake_up_task_queue_locked(queue, task);
			continue;
		}
		if (expires == now || time_after(expires, timeo))
			expires = timeo;
	}
	if (!list_empty(&queue->timer_list.list))
		rpc_set_queue_timer(queue, expires);
	spin_unlock(&queue->lock);
}

static void __rpc_atrun(struct rpc_task *task)
{
	if (task->tk_status == -ETIMEDOUT)
		task->tk_status = 0;
}

/*
 * Run a task at a later time
 */
void rpc_delay(struct rpc_task *task, unsigned long delay)
{
	rpc_sleep_on_timeout(&delay_queue, task, __rpc_atrun, jiffies + delay);
}
EXPORT_SYMBOL_GPL(rpc_delay);
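
/*
 * Typical retry pattern (illustrative; call_bind is named here only as an
 * example of a tk_action step): a step that hits a transient error can back
 * off and rerun itself later:
 *
 *	task->tk_action = call_bind;
 *	rpc_delay(task, 3 * HZ);
 *
 * When the delay queue timer fires it sets tk_status to -ETIMEDOUT and wakes
 * the task; the __rpc_atrun() callback then clears that status so the retried
 * step starts with tk_status == 0.
 */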

/*
 * Helper to call task->tk_ops->rpc_call_prepare
 */
void rpc_prepare_task(struct rpc_task *task)
{
	task->tk_ops->rpc_call_prepare(task, task->tk_calldata);
}

static void
rpc_init_task_statistics(struct rpc_task *task)
{
	/* Initialize retry counters */
	task->tk_garb_retry = 2;
	task->tk_cred_retry = 2;
	task->tk_rebind_retry = 2;

	/* starting timestamp */
	task->tk_start = ktime_get();
}

static void
rpc_reset_task_statistics(struct rpc_task *task)
{
	task->tk_timeouts = 0;
	task->tk_flags &= ~(RPC_CALL_MAJORSEEN|RPC_TASK_SENT);
	rpc_init_task_statistics(task);
}

/*
 * Helper that calls task->tk_ops->rpc_call_done if it exists
 */
void rpc_exit_task(struct rpc_task *task)
{
	trace_rpc_task_end(task, task->tk_action);
	task->tk_action = NULL;
	if (task->tk_ops->rpc_count_stats)
		task->tk_ops->rpc_count_stats(task, task->tk_calldata);
	else if (task->tk_client)
		rpc_count_iostats(task, task->tk_client->cl_metrics);
	if (task->tk_ops->rpc_call_done != NULL) {
		task->tk_ops->rpc_call_done(task, task->tk_calldata);
		if (task->tk_action != NULL) {
			/* Always release the RPC slot and buffer memory */
			xprt_release(task);
			rpc_reset_task_statistics(task);
		}
	}
}

void rpc_signal_task(struct rpc_task *task)
{
	struct rpc_wait_queue *queue;

	if (!RPC_IS_ACTIVATED(task))
		return;
	set_bit(RPC_TASK_SIGNALLED, &task->tk_runstate);
	smp_mb__after_atomic();
	queue = READ_ONCE(task->tk_waitqueue);
	if (queue)
		rpc_wake_up_queued_task_set_status(queue, task, -ERESTARTSYS);
}

void rpc_exit(struct rpc_task *task, int status)
{
	task->tk_status = status;
	task->tk_action = rpc_exit_task;
	rpc_wake_up_queued_task(task->tk_waitqueue, task);
}
EXPORT_SYMBOL_GPL(rpc_exit);

void rpc_release_calldata(const struct rpc_call_ops *ops, void *calldata)
{
	if (ops->rpc_release != NULL)
		ops->rpc_release(calldata);
}

/*
 * This is the RPC `scheduler' (or rather, the finite state machine).
 */
static void __rpc_execute(struct rpc_task *task)
{
	struct rpc_wait_queue *queue;
	int task_is_async = RPC_IS_ASYNC(task);
	int status = 0;

	dprintk("RPC: %5u __rpc_execute flags=0x%x\n",
			task->tk_pid, task->tk_flags);

	WARN_ON_ONCE(RPC_IS_QUEUED(task));
	if (RPC_IS_QUEUED(task))
		return;

	for (;;) {
		void (*do_action)(struct rpc_task *);

		/*
		 * Perform the next FSM step or a pending callback.
		 *
		 * tk_action may be NULL if the task has been killed.
		 * In particular, note that rpc_killall_tasks may
		 * do this at any time, so beware when dereferencing.
		 */
		do_action = task->tk_action;
		if (task->tk_callback) {
			do_action = task->tk_callback;
			task->tk_callback = NULL;
		}
		if (!do_action)
			break;
		trace_rpc_task_run_action(task, do_action);
		do_action(task);

		/*
		 * Lockless check for whether task is sleeping or not.
		 */
		if (!RPC_IS_QUEUED(task))
			continue;

		/*
		 * Signalled tasks should exit rather than sleep.
		 */
		if (RPC_SIGNALLED(task)) {
			task->tk_rpc_status = -ERESTARTSYS;
			rpc_exit(task, -ERESTARTSYS);
		}

		/*
		 * The queue->lock protects against races with
		 * rpc_make_runnable().
		 *
		 * Note that once we clear RPC_TASK_RUNNING on an asynchronous
		 * rpc_task, rpc_make_runnable() can assign it to a
		 * different workqueue. We therefore cannot assume that the
		 * rpc_task pointer may still be dereferenced.
		 */
		queue = task->tk_waitqueue;
		spin_lock(&queue->lock);
		if (!RPC_IS_QUEUED(task)) {
			spin_unlock(&queue->lock);
			continue;
		}
		rpc_clear_running(task);
		spin_unlock(&queue->lock);
		if (task_is_async)
			return;

		/* sync task: sleep here */
		dprintk("RPC: %5u sync task going to sleep\n", task->tk_pid);
		status = out_of_line_wait_on_bit(&task->tk_runstate,
				RPC_TASK_QUEUED, rpc_wait_bit_killable,
				TASK_KILLABLE);
		if (status < 0) {
			/*
			 * When a sync task receives a signal, it exits with
			 * -ERESTARTSYS. In order to catch any callbacks that
			 * clean up after sleeping on some queue, we don't
			 * break the loop here, but go around once more.
			 */
			dprintk("RPC: %5u got signal\n", task->tk_pid);
			set_bit(RPC_TASK_SIGNALLED, &task->tk_runstate);
			task->tk_rpc_status = -ERESTARTSYS;
			rpc_exit(task, -ERESTARTSYS);
		}
		dprintk("RPC: %5u sync task resuming\n", task->tk_pid);
	}

	dprintk("RPC: %5u return %d, status %d\n", task->tk_pid, status,
			task->tk_status);
	/* Release all resources associated with the task */
	rpc_release_task(task);
}

/*
 * User-visible entry point to the scheduler.
 *
 * This may be called recursively if e.g. an async NFS task updates
 * the attributes and finds that dirty pages must be flushed.
 * NOTE: Upon exit of this function the task is guaranteed to be
 *	 released. In particular note that tk_release() will have
 *	 been called, so your task memory may have been freed.
 */
void rpc_execute(struct rpc_task *task)
{
	bool is_async = RPC_IS_ASYNC(task);

	rpc_set_active(task);
	rpc_make_runnable(rpciod_workqueue, task);
	if (!is_async)
		__rpc_execute(task);
}

static void rpc_async_schedule(struct work_struct *work)
{
	unsigned int pflags = memalloc_nofs_save();

	__rpc_execute(container_of(work, struct rpc_task, u.tk_work));
	memalloc_nofs_restore(pflags);
}

/**
 * rpc_malloc - allocate RPC buffer resources
 * @task: RPC task
 *
 * A single memory region is allocated, which is split between the
 * RPC call and RPC reply that this task is being used for. When
 * this RPC is retired, the memory is released by calling rpc_free.
 *
 * To prevent rpciod from hanging, this allocator never sleeps,
 * returning -ENOMEM and suppressing warning if the request cannot
 * be serviced immediately. The caller can arrange to sleep in a
 * way that is safe for rpciod.
 *
 * Most requests are 'small' (under 2KiB) and can be serviced from a
 * mempool, ensuring that NFS reads and writes can always proceed,
 * and that there is good locality of reference for these buffers.
 */
int rpc_malloc(struct rpc_task *task)
{
	struct rpc_rqst *rqst = task->tk_rqstp;
	size_t size = rqst->rq_callsize + rqst->rq_rcvsize;
	struct rpc_buffer *buf;
	gfp_t gfp = GFP_NOFS;

	if (RPC_IS_SWAPPER(task))
		gfp = __GFP_MEMALLOC | GFP_NOWAIT | __GFP_NOWARN;

	size += sizeof(struct rpc_buffer);
	if (size <= RPC_BUFFER_MAXSIZE)
		buf = mempool_alloc(rpc_buffer_mempool, gfp);
	else
		buf = kmalloc(size, gfp);

	if (!buf)
		return -ENOMEM;

	buf->len = size;
	dprintk("RPC: %5u allocated buffer of size %zu at %p\n",
			task->tk_pid, size, buf);
	rqst->rq_buffer = buf->data;
	rqst->rq_rbuffer = (char *)rqst->rq_buffer + rqst->rq_callsize;
	return 0;
}
EXPORT_SYMBOL_GPL(rpc_malloc);
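
/*
 * Buffer layout sketch (derived from rpc_malloc()/rpc_free()): the single
 * allocation is carved up as
 *
 *	[ struct rpc_buffer | call buffer (rq_callsize) | reply buffer (rq_rcvsize) ]
 *
 * rq_buffer and rq_rbuffer therefore point into the same region and are never
 * freed separately; rpc_free() recovers the containing struct rpc_buffer with
 * container_of() and returns the whole allocation.
 */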

/**
 * rpc_free - free RPC buffer resources allocated via rpc_malloc
 * @task: RPC task
 *
 */
void rpc_free(struct rpc_task *task)
{
	void *buffer = task->tk_rqstp->rq_buffer;
	size_t size;
	struct rpc_buffer *buf;

	buf = container_of(buffer, struct rpc_buffer, data);
	size = buf->len;

	dprintk("RPC: freeing buffer of size %zu at %p\n",
			size, buf);

	if (size <= RPC_BUFFER_MAXSIZE)
		mempool_free(buf, rpc_buffer_mempool);
	else
		kfree(buf);
}
EXPORT_SYMBOL_GPL(rpc_free);

/*
 * Creation and deletion of RPC task structures
 */
static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *task_setup_data)
{
	memset(task, 0, sizeof(*task));
	atomic_set(&task->tk_count, 1);
	task->tk_flags = task_setup_data->flags;
	task->tk_ops = task_setup_data->callback_ops;
	task->tk_calldata = task_setup_data->callback_data;
	INIT_LIST_HEAD(&task->tk_task);

	task->tk_priority = task_setup_data->priority - RPC_PRIORITY_LOW;
	task->tk_owner = current->tgid;

	/* Initialize workqueue for async tasks */
	task->tk_workqueue = task_setup_data->workqueue;

	task->tk_xprt = rpc_task_get_xprt(task_setup_data->rpc_client,
			xprt_get(task_setup_data->rpc_xprt));

	task->tk_op_cred = get_rpccred(task_setup_data->rpc_op_cred);

	if (task->tk_ops->rpc_call_prepare != NULL)
		task->tk_action = rpc_prepare_task;

	rpc_init_task_statistics(task);

	dprintk("RPC: new task initialized, procpid %u\n",
			task_pid_nr(current));
}

static struct rpc_task *
rpc_alloc_task(void)
{
	return (struct rpc_task *)mempool_alloc(rpc_task_mempool, GFP_NOFS);
}

/*
 * Create a new task for the specified client.
 */
struct rpc_task *rpc_new_task(const struct rpc_task_setup *setup_data)
{
	struct rpc_task	*task = setup_data->task;
	unsigned short flags = 0;

	if (task == NULL) {
		task = rpc_alloc_task();
		flags = RPC_TASK_DYNAMIC;
	}

	rpc_init_task(task, setup_data);
	task->tk_flags |= flags;
	dprintk("RPC: allocated task %p\n", task);
	return task;
}
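
/*
 * Minimal caller sketch (field values are illustrative only): most users do
 * not call rpc_new_task() directly but go through rpc_run_task() in clnt.c,
 * which builds the task from an rpc_task_setup and then calls rpc_execute():
 *
 *	struct rpc_task_setup task_setup_data = {
 *		.rpc_client	= clnt,
 *		.rpc_message	= &msg,
 *		.callback_ops	= &my_call_ops,
 *		.flags		= RPC_TASK_ASYNC,
 *	};
 *
 *	task = rpc_run_task(&task_setup_data);
 */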

/*
 * rpc_free_task - release rpc task and perform cleanups
 *
 * Note that we free up the rpc_task _after_ rpc_release_calldata()
 * in order to work around a workqueue dependency issue.
 *
 * Tejun Heo states:
 * "Workqueue currently considers two work items to be the same if they're
 * on the same address and won't execute them concurrently - ie. it
 * makes a work item which is queued again while being executed wait
 * for the previous execution to complete.
 *
 * If a work function frees the work item, and then waits for an event
 * which should be performed by another work item and *that* work item
 * recycles the freed work item, it can create a false dependency loop.
 * There really is no reliable way to detect this short of verifying
 * every memory free."
 *
 */
static void rpc_free_task(struct rpc_task *task)
{
	unsigned short tk_flags = task->tk_flags;

	put_rpccred(task->tk_op_cred);
	rpc_release_calldata(task->tk_ops, task->tk_calldata);

	if (tk_flags & RPC_TASK_DYNAMIC) {
		dprintk("RPC: %5u freeing task\n", task->tk_pid);
		mempool_free(task, rpc_task_mempool);
	}
}

static void rpc_async_release(struct work_struct *work)
{
	unsigned int pflags = memalloc_nofs_save();

	rpc_free_task(container_of(work, struct rpc_task, u.tk_work));
	memalloc_nofs_restore(pflags);
}

static void rpc_release_resources_task(struct rpc_task *task)
{
	xprt_release(task);
	if (task->tk_msg.rpc_cred) {
		put_cred(task->tk_msg.rpc_cred);
		task->tk_msg.rpc_cred = NULL;
	}
	rpc_task_release_client(task);
}

static void rpc_final_put_task(struct rpc_task *task,
		struct workqueue_struct *q)
{
	if (q != NULL) {
		INIT_WORK(&task->u.tk_work, rpc_async_release);
		queue_work(q, &task->u.tk_work);
	} else
		rpc_free_task(task);
}

static void rpc_do_put_task(struct rpc_task *task, struct workqueue_struct *q)
{
	if (atomic_dec_and_test(&task->tk_count)) {
		rpc_release_resources_task(task);
		rpc_final_put_task(task, q);
	}
}

void rpc_put_task(struct rpc_task *task)
{
	rpc_do_put_task(task, NULL);
}
EXPORT_SYMBOL_GPL(rpc_put_task);

void rpc_put_task_async(struct rpc_task *task)
{
	rpc_do_put_task(task, task->tk_workqueue);
}
EXPORT_SYMBOL_GPL(rpc_put_task_async);

static void rpc_release_task(struct rpc_task *task)
{
	dprintk("RPC: %5u release task\n", task->tk_pid);

	WARN_ON_ONCE(RPC_IS_QUEUED(task));

	rpc_release_resources_task(task);

	/*
	 * Note: at this point we have been removed from rpc_clnt->cl_tasks,
	 * so it should be safe to use task->tk_count as a test for whether
	 * or not any other processes still hold references to our rpc_task.
	 */
	if (atomic_read(&task->tk_count) != 1 + !RPC_IS_ASYNC(task)) {
		/* Wake up anyone who may be waiting for task completion */
		if (!rpc_complete_task(task))
			return;
	} else {
		if (!atomic_dec_and_test(&task->tk_count))
			return;
	}
	rpc_final_put_task(task, task->tk_workqueue);
}

int rpciod_up(void)
{
	return try_module_get(THIS_MODULE) ? 0 : -EINVAL;
}

void rpciod_down(void)
{
	module_put(THIS_MODULE);
}

/*
 * Start up the rpciod workqueue.
 */
static int rpciod_start(void)
{
	struct workqueue_struct *wq;

	/*
	 * Create the rpciod thread and wait for it to start.
	 */
	dprintk("RPC: creating workqueue rpciod\n");
	wq = alloc_workqueue("rpciod", WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
	if (!wq)
		goto out_failed;
	rpciod_workqueue = wq;
	/* Note: highpri because network receive is latency sensitive */
	wq = alloc_workqueue("xprtiod", WQ_UNBOUND|WQ_MEM_RECLAIM|WQ_HIGHPRI, 0);
	if (!wq)
		goto free_rpciod;
	xprtiod_workqueue = wq;
	return 1;
free_rpciod:
	wq = rpciod_workqueue;
	rpciod_workqueue = NULL;
	destroy_workqueue(wq);
out_failed:
	return 0;
}

static void rpciod_stop(void)
{
	struct workqueue_struct *wq = NULL;

	if (rpciod_workqueue == NULL)
		return;
	dprintk("RPC: destroying workqueue rpciod\n");

	wq = rpciod_workqueue;
	rpciod_workqueue = NULL;
	destroy_workqueue(wq);
	wq = xprtiod_workqueue;
	xprtiod_workqueue = NULL;
	destroy_workqueue(wq);
}

void
rpc_destroy_mempool(void)
{
	rpciod_stop();
	mempool_destroy(rpc_buffer_mempool);
	mempool_destroy(rpc_task_mempool);
	kmem_cache_destroy(rpc_task_slabp);
	kmem_cache_destroy(rpc_buffer_slabp);
	rpc_destroy_wait_queue(&delay_queue);
}

int
rpc_init_mempool(void)
{
	/*
	 * The following is not strictly a mempool initialisation,
	 * but there is no harm in doing it here
	 */
	rpc_init_wait_queue(&delay_queue, "delayq");
	if (!rpciod_start())
		goto err_nomem;

	rpc_task_slabp = kmem_cache_create("rpc_tasks",
					   sizeof(struct rpc_task),
					   0, SLAB_HWCACHE_ALIGN,
					   NULL);
	if (!rpc_task_slabp)
		goto err_nomem;
	rpc_buffer_slabp = kmem_cache_create("rpc_buffers",
					     RPC_BUFFER_MAXSIZE,
					     0, SLAB_HWCACHE_ALIGN,
					     NULL);
	if (!rpc_buffer_slabp)
		goto err_nomem;
	rpc_task_mempool = mempool_create_slab_pool(RPC_TASK_POOLSIZE,
						    rpc_task_slabp);
	if (!rpc_task_mempool)
		goto err_nomem;
	rpc_buffer_mempool = mempool_create_slab_pool(RPC_BUFFER_POOLSIZE,
						      rpc_buffer_slabp);
	if (!rpc_buffer_mempool)
		goto err_nomem;
	return 0;
err_nomem:
	rpc_destroy_mempool();
	return -ENOMEM;
}
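
/*
 * Note (informational): rpc_init_mempool() and rpc_destroy_mempool() are
 * invoked from the sunrpc module's init/exit paths (see sunrpc_syms.c), so
 * the slabs, mempools and the rpciod/xprtiod workqueues exist for the
 * lifetime of the module.
 */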