/*
 * padata.c - generic interface to process data streams in parallel
 *
 * See Documentation/padata.txt for API documentation.
 *
 * Copyright (C) 2008, 2009 secunet Security Networks AG
 * Copyright (C) 2008, 2009 Steffen Klassert <steffen.klassert@secunet.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/export.h>
#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/padata.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/rcupdate.h>
#include <linux/module.h>

#define MAX_OBJ_NUM 1000

static int padata_index_to_cpu(struct parallel_data *pd, int cpu_index)
{
	int cpu, target_cpu;

	target_cpu = cpumask_first(pd->cpumask.pcpu);
	for (cpu = 0; cpu < cpu_index; cpu++)
		target_cpu = cpumask_next(target_cpu, pd->cpumask.pcpu);

	return target_cpu;
}

static int padata_cpu_hash(struct parallel_data *pd)
{
	unsigned int seq_nr;
	int cpu_index;

	/*
	 * Hash the sequence numbers to the cpus by taking
	 * seq_nr mod. number of cpus in use.
	 */

	seq_nr = atomic_inc_return(&pd->seq_nr);
	cpu_index = seq_nr % cpumask_weight(pd->cpumask.pcpu);

	return padata_index_to_cpu(pd, cpu_index);
}

static void padata_parallel_worker(struct work_struct *parallel_work)
{
	struct padata_parallel_queue *pqueue;
	LIST_HEAD(local_list);

	local_bh_disable();
	pqueue = container_of(parallel_work,
			      struct padata_parallel_queue, work);

	spin_lock(&pqueue->parallel.lock);
	list_replace_init(&pqueue->parallel.list, &local_list);
	spin_unlock(&pqueue->parallel.lock);

	while (!list_empty(&local_list)) {
		struct padata_priv *padata;

		padata = list_entry(local_list.next,
				    struct padata_priv, list);

		list_del_init(&padata->list);

		padata->parallel(padata);
	}

	local_bh_enable();
}

/**
 * padata_do_parallel - padata parallelization function
 *
 * @pinst: padata instance
 * @padata: object to be parallelized
 * @cb_cpu: cpu the serialization callback function will run on,
 *          must be in the serial cpumask of padata (i.e. cpumask.cbcpu).
 *
 * The parallelization callback function will run with BHs off.
 * Note: Every object which is parallelized by padata_do_parallel
 * must be seen by padata_do_serial.
 */
int padata_do_parallel(struct padata_instance *pinst,
		       struct padata_priv *padata, int cb_cpu)
{
	int target_cpu, err;
	struct padata_parallel_queue *queue;
	struct parallel_data *pd;

	rcu_read_lock_bh();

	pd = rcu_dereference_bh(pinst->pd);

	err = -EINVAL;
	if (!(pinst->flags & PADATA_INIT) || pinst->flags & PADATA_INVALID)
		goto out;

	if (!cpumask_test_cpu(cb_cpu, pd->cpumask.cbcpu))
		goto out;

	err = -EBUSY;
	if ((pinst->flags & PADATA_RESET))
		goto out;

	if (atomic_read(&pd->refcnt) >= MAX_OBJ_NUM)
		goto out;

	err = 0;
	atomic_inc(&pd->refcnt);
	padata->pd = pd;
	padata->cb_cpu = cb_cpu;

	target_cpu = padata_cpu_hash(pd);
	queue = per_cpu_ptr(pd->pqueue, target_cpu);

	spin_lock(&queue->parallel.lock);
	list_add_tail(&padata->list, &queue->parallel.list);
	spin_unlock(&queue->parallel.lock);

	queue_work_on(target_cpu, pinst->wq, &queue->work);

out:
	rcu_read_unlock_bh();

	return err;
}
EXPORT_SYMBOL(padata_do_parallel);

/*
 * padata_get_next - Get the next object that needs serialization.
 *
 * Return values are:
 *
 * A pointer to the control struct of the next object that needs
 * serialization, if present in one of the percpu reorder queues.
 *
 * NULL, if all percpu reorder queues are empty.
 *
 * -EINPROGRESS, if the next object that needs serialization will
 *  be parallel processed by another cpu and is not yet present in
 *  the cpu's reorder queue.
 *
 * -ENODATA, if this cpu has to do the parallel processing for
 *  the next object.
 */
static struct padata_priv *padata_get_next(struct parallel_data *pd)
{
	int cpu, num_cpus;
	unsigned int next_nr, next_index;
	struct padata_parallel_queue *next_queue;
	struct padata_priv *padata;
	struct padata_list *reorder;

	num_cpus = cpumask_weight(pd->cpumask.pcpu);

	/*
	 * Calculate the percpu reorder queue and the sequence
	 * number of the next object.
	 */
	next_nr = pd->processed;
	next_index = next_nr % num_cpus;
	cpu = padata_index_to_cpu(pd, next_index);
	next_queue = per_cpu_ptr(pd->pqueue, cpu);

	padata = NULL;

	reorder = &next_queue->reorder;

	spin_lock(&reorder->lock);
	if (!list_empty(&reorder->list)) {
		padata = list_entry(reorder->list.next,
				    struct padata_priv, list);

		list_del_init(&padata->list);
		atomic_dec(&pd->reorder_objects);

		pd->processed++;

		spin_unlock(&reorder->lock);
		goto out;
	}
	spin_unlock(&reorder->lock);

	if (__this_cpu_read(pd->pqueue->cpu_index) == next_queue->cpu_index) {
		padata = ERR_PTR(-ENODATA);
		goto out;
	}

	padata = ERR_PTR(-EINPROGRESS);
out:
	return padata;
}

static void padata_reorder(struct parallel_data *pd)
{
	int cb_cpu;
	struct padata_priv *padata;
	struct padata_serial_queue *squeue;
	struct padata_instance *pinst = pd->pinst;

	/*
	 * We need to ensure that only one cpu can work on dequeueing of
	 * the reorder queue at a time. Calculating in which percpu reorder
	 * queue the next object will arrive takes some time. A spinlock
	 * would be highly contended. Also it is not clear in which order
	 * the objects arrive to the reorder queues. So a cpu could wait to
	 * get the lock just to notice that there is nothing to do at the
	 * moment.
	 * Therefore we use a trylock and let the holder of the lock
	 * care for all the objects enqueued during the holdtime of the lock.
	 */
	if (!spin_trylock_bh(&pd->lock))
		return;

	while (1) {
		padata = padata_get_next(pd);

		/*
		 * All reorder queues are empty, or the next object that needs
		 * serialization is parallel processed by another cpu and is
		 * still on its way to the cpu's reorder queue, nothing to
		 * do for now.
		 */
		if (!padata || PTR_ERR(padata) == -EINPROGRESS)
			break;

		/*
		 * This cpu has to do the parallel processing of the next
		 * object. It's waiting in the cpu's parallelization queue,
		 * so exit immediately.
		 */
		if (PTR_ERR(padata) == -ENODATA) {
			del_timer(&pd->timer);
			spin_unlock_bh(&pd->lock);
			return;
		}

		cb_cpu = padata->cb_cpu;
		squeue = per_cpu_ptr(pd->squeue, cb_cpu);

		spin_lock(&squeue->serial.lock);
		list_add_tail(&padata->list, &squeue->serial.list);
		spin_unlock(&squeue->serial.lock);

		queue_work_on(cb_cpu, pinst->wq, &squeue->work);
	}

	spin_unlock_bh(&pd->lock);

	/*
	 * The next object that needs serialization might have arrived to
	 * the reorder queues in the meantime, we will be called again
	 * from the timer function if no one else cares for it.
	 */
	if (atomic_read(&pd->reorder_objects)
			&& !(pinst->flags & PADATA_RESET))
		mod_timer(&pd->timer, jiffies + HZ);
	else
		del_timer(&pd->timer);

	return;
}

static void padata_reorder_timer(unsigned long arg)
{
	struct parallel_data *pd = (struct parallel_data *)arg;

	padata_reorder(pd);
}

static void padata_serial_worker(struct work_struct *serial_work)
{
	struct padata_serial_queue *squeue;
	struct parallel_data *pd;
	LIST_HEAD(local_list);

	local_bh_disable();
	squeue = container_of(serial_work, struct padata_serial_queue, work);
	pd = squeue->pd;

	spin_lock(&squeue->serial.lock);
	list_replace_init(&squeue->serial.list, &local_list);
	spin_unlock(&squeue->serial.lock);

	while (!list_empty(&local_list)) {
		struct padata_priv *padata;

		padata = list_entry(local_list.next,
				    struct padata_priv, list);

		list_del_init(&padata->list);

		padata->serial(padata);
		atomic_dec(&pd->refcnt);
	}
	local_bh_enable();
}

/**
 * padata_do_serial - padata serialization function
 *
 * @padata: object to be serialized.
 *
 * padata_do_serial must be called for every parallelized object.
 * The serialization callback function will run with BHs off.
 */
void padata_do_serial(struct padata_priv *padata)
{
	int cpu;
	struct padata_parallel_queue *pqueue;
	struct parallel_data *pd;

	pd = padata->pd;

	cpu = get_cpu();
	pqueue = per_cpu_ptr(pd->pqueue, cpu);

	spin_lock(&pqueue->reorder.lock);
	atomic_inc(&pd->reorder_objects);
	list_add_tail(&padata->list, &pqueue->reorder.list);
	spin_unlock(&pqueue->reorder.lock);

	put_cpu();

	padata_reorder(pd);
}
EXPORT_SYMBOL(padata_do_serial);
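/*
 * Usage sketch (illustrative only, not part of padata itself): a user embeds
 * struct padata_priv into its own job structure, fills in the parallel and
 * serial callbacks and submits the object with padata_do_parallel().  The
 * parallel callback does the cpu-intensive work and must eventually hand the
 * object back via padata_do_serial(); the serial callback then runs on
 * @cb_cpu in submission order.  All "my_" names below are hypothetical.
 *
 *	struct my_request {
 *		struct padata_priv	padata;
 *		void			*data;
 *		unsigned int		len;
 *	};
 *
 *	static void my_parallel(struct padata_priv *padata)
 *	{
 *		struct my_request *req;
 *
 *		req = container_of(padata, struct my_request, padata);
 *		my_crunch(req->data, req->len);
 *		padata_do_serial(padata);
 *	}
 *
 *	static void my_serial(struct padata_priv *padata)
 *	{
 *		struct my_request *req;
 *
 *		req = container_of(padata, struct my_request, padata);
 *		my_complete(req);
 *	}
 *
 *	static int my_submit(struct padata_instance *pinst,
 *			     struct my_request *req, int cb_cpu)
 *	{
 *		req->padata.parallel = my_parallel;
 *		req->padata.serial = my_serial;
 *
 *		return padata_do_parallel(pinst, &req->padata, cb_cpu);
 *	}
 */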
static int padata_setup_cpumasks(struct parallel_data *pd,
				 const struct cpumask *pcpumask,
				 const struct cpumask *cbcpumask)
{
	if (!alloc_cpumask_var(&pd->cpumask.pcpu, GFP_KERNEL))
		return -ENOMEM;

	cpumask_and(pd->cpumask.pcpu, pcpumask, cpu_online_mask);
	if (!alloc_cpumask_var(&pd->cpumask.cbcpu, GFP_KERNEL)) {
		free_cpumask_var(pd->cpumask.pcpu);
		return -ENOMEM;
	}

	cpumask_and(pd->cpumask.cbcpu, cbcpumask, cpu_online_mask);
	return 0;
}

static void __padata_list_init(struct padata_list *pd_list)
{
	INIT_LIST_HEAD(&pd_list->list);
	spin_lock_init(&pd_list->lock);
}

/* Initialize all percpu queues used by serial workers */
static void padata_init_squeues(struct parallel_data *pd)
{
	int cpu;
	struct padata_serial_queue *squeue;

	for_each_cpu(cpu, pd->cpumask.cbcpu) {
		squeue = per_cpu_ptr(pd->squeue, cpu);
		squeue->pd = pd;
		__padata_list_init(&squeue->serial);
		INIT_WORK(&squeue->work, padata_serial_worker);
	}
}

/* Initialize all percpu queues used by parallel workers */
static void padata_init_pqueues(struct parallel_data *pd)
{
	int cpu_index, cpu;
	struct padata_parallel_queue *pqueue;

	cpu_index = 0;
	for_each_cpu(cpu, pd->cpumask.pcpu) {
		pqueue = per_cpu_ptr(pd->pqueue, cpu);
		pqueue->pd = pd;
		pqueue->cpu_index = cpu_index;
		cpu_index++;

		__padata_list_init(&pqueue->reorder);
		__padata_list_init(&pqueue->parallel);
		INIT_WORK(&pqueue->work, padata_parallel_worker);
		atomic_set(&pqueue->num_obj, 0);
	}
}

/* Allocate and initialize the internal cpumask dependent resources. */
static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
					     const struct cpumask *pcpumask,
					     const struct cpumask *cbcpumask)
{
	struct parallel_data *pd;

	pd = kzalloc(sizeof(struct parallel_data), GFP_KERNEL);
	if (!pd)
		goto err;

	pd->pqueue = alloc_percpu(struct padata_parallel_queue);
	if (!pd->pqueue)
		goto err_free_pd;

	pd->squeue = alloc_percpu(struct padata_serial_queue);
	if (!pd->squeue)
		goto err_free_pqueue;
	if (padata_setup_cpumasks(pd, pcpumask, cbcpumask) < 0)
		goto err_free_squeue;

	padata_init_pqueues(pd);
	padata_init_squeues(pd);
	setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
	atomic_set(&pd->seq_nr, -1);
	atomic_set(&pd->reorder_objects, 0);
	atomic_set(&pd->refcnt, 0);
	pd->pinst = pinst;
	spin_lock_init(&pd->lock);

	return pd;

err_free_squeue:
	free_percpu(pd->squeue);
err_free_pqueue:
	free_percpu(pd->pqueue);
err_free_pd:
	kfree(pd);
err:
	return NULL;
}

static void padata_free_pd(struct parallel_data *pd)
{
	free_cpumask_var(pd->cpumask.pcpu);
	free_cpumask_var(pd->cpumask.cbcpu);
	free_percpu(pd->pqueue);
	free_percpu(pd->squeue);
	kfree(pd);
}

/* Flush all objects out of the padata queues. */
static void padata_flush_queues(struct parallel_data *pd)
{
	int cpu;
	struct padata_parallel_queue *pqueue;
	struct padata_serial_queue *squeue;

	for_each_cpu(cpu, pd->cpumask.pcpu) {
		pqueue = per_cpu_ptr(pd->pqueue, cpu);
		flush_work(&pqueue->work);
	}

	del_timer_sync(&pd->timer);

	if (atomic_read(&pd->reorder_objects))
		padata_reorder(pd);

	for_each_cpu(cpu, pd->cpumask.cbcpu) {
		squeue = per_cpu_ptr(pd->squeue, cpu);
		flush_work(&squeue->work);
	}

	BUG_ON(atomic_read(&pd->refcnt) != 0);
}

static void __padata_start(struct padata_instance *pinst)
{
	pinst->flags |= PADATA_INIT;
}

static void __padata_stop(struct padata_instance *pinst)
{
	if (!(pinst->flags & PADATA_INIT))
		return;

	pinst->flags &= ~PADATA_INIT;

	synchronize_rcu();

	get_online_cpus();
	padata_flush_queues(pinst->pd);
	put_online_cpus();
}

/* Replace the internal control structure with a new one. */
static void padata_replace(struct padata_instance *pinst,
			   struct parallel_data *pd_new)
{
	struct parallel_data *pd_old = pinst->pd;
	int notification_mask = 0;

	pinst->flags |= PADATA_RESET;

	rcu_assign_pointer(pinst->pd, pd_new);

	synchronize_rcu();

	if (!cpumask_equal(pd_old->cpumask.pcpu, pd_new->cpumask.pcpu))
		notification_mask |= PADATA_CPU_PARALLEL;
	if (!cpumask_equal(pd_old->cpumask.cbcpu, pd_new->cpumask.cbcpu))
		notification_mask |= PADATA_CPU_SERIAL;

	padata_flush_queues(pd_old);
	padata_free_pd(pd_old);

	if (notification_mask)
		blocking_notifier_call_chain(&pinst->cpumask_change_notifier,
					     notification_mask,
					     &pd_new->cpumask);

	pinst->flags &= ~PADATA_RESET;
}

/**
 * padata_register_cpumask_notifier - Registers a notifier that will be called
 *                                    if either pcpu or cbcpu or both cpumasks
 *                                    change.
 *
 * @pinst: A pointer to the padata instance
 * @nblock: A pointer to notifier block.
 */
int padata_register_cpumask_notifier(struct padata_instance *pinst,
				     struct notifier_block *nblock)
{
	return blocking_notifier_chain_register(&pinst->cpumask_change_notifier,
						nblock);
}
EXPORT_SYMBOL(padata_register_cpumask_notifier);

/**
 * padata_unregister_cpumask_notifier - Unregisters cpumask notifier
 *                                      registered earlier using
 *                                      padata_register_cpumask_notifier
 *
 * @pinst: A pointer to the padata instance.
 * @nblock: A pointer to notifier block.
 */
int padata_unregister_cpumask_notifier(struct padata_instance *pinst,
				       struct notifier_block *nblock)
{
	return blocking_notifier_chain_unregister(
		&pinst->cpumask_change_notifier,
		nblock);
}
EXPORT_SYMBOL(padata_unregister_cpumask_notifier);
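/*
 * Usage sketch (illustrative only, not part of padata itself): users that
 * keep state sized to the cpumasks can register a notifier to learn about
 * cpumask changes.  The callback receives the PADATA_CPU_PARALLEL and/or
 * PADATA_CPU_SERIAL bits in @val and a pointer to the new
 * struct padata_cpumask in @data.  The "my_" names are hypothetical.
 *
 *	static int my_cpumask_change(struct notifier_block *self,
 *				     unsigned long val, void *data)
 *	{
 *		struct padata_cpumask *new_mask = data;
 *
 *		if (val & PADATA_CPU_PARALLEL)
 *			my_resize_parallel_state(new_mask->pcpu);
 *		if (val & PADATA_CPU_SERIAL)
 *			my_resize_serial_state(new_mask->cbcpu);
 *
 *		return 0;
 *	}
 *
 *	static struct notifier_block my_nblock = {
 *		.notifier_call = my_cpumask_change,
 *	};
 *
 *	padata_register_cpumask_notifier(pinst, &my_nblock);
 */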
/* If cpumask contains no active cpu, we mark the instance as invalid. */
static bool padata_validate_cpumask(struct padata_instance *pinst,
				    const struct cpumask *cpumask)
{
	if (!cpumask_intersects(cpumask, cpu_online_mask)) {
		pinst->flags |= PADATA_INVALID;
		return false;
	}

	pinst->flags &= ~PADATA_INVALID;
	return true;
}

static int __padata_set_cpumasks(struct padata_instance *pinst,
				 cpumask_var_t pcpumask,
				 cpumask_var_t cbcpumask)
{
	int valid;
	struct parallel_data *pd;

	valid = padata_validate_cpumask(pinst, pcpumask);
	if (!valid) {
		__padata_stop(pinst);
		goto out_replace;
	}

	valid = padata_validate_cpumask(pinst, cbcpumask);
	if (!valid)
		__padata_stop(pinst);

out_replace:
	pd = padata_alloc_pd(pinst, pcpumask, cbcpumask);
	if (!pd)
		return -ENOMEM;

	cpumask_copy(pinst->cpumask.pcpu, pcpumask);
	cpumask_copy(pinst->cpumask.cbcpu, cbcpumask);

	padata_replace(pinst, pd);

	if (valid)
		__padata_start(pinst);

	return 0;
}

/**
 * padata_set_cpumask: Sets the cpumask selected by @cpumask_type to the
 *                     value of @cpumask.
 *
 * @pinst: padata instance
 * @cpumask_type: PADATA_CPU_PARALLEL or PADATA_CPU_SERIAL, selecting the
 *                parallel or the serial cpumask respectively.
 * @cpumask: the cpumask to use
 */
int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type,
		       cpumask_var_t cpumask)
{
	struct cpumask *serial_mask, *parallel_mask;
	int err = -EINVAL;

	mutex_lock(&pinst->lock);
	get_online_cpus();

	switch (cpumask_type) {
	case PADATA_CPU_PARALLEL:
		serial_mask = pinst->cpumask.cbcpu;
		parallel_mask = cpumask;
		break;
	case PADATA_CPU_SERIAL:
		parallel_mask = pinst->cpumask.pcpu;
		serial_mask = cpumask;
		break;
	default:
		goto out;
	}

	err = __padata_set_cpumasks(pinst, parallel_mask, serial_mask);

out:
	put_online_cpus();
	mutex_unlock(&pinst->lock);

	return err;
}
EXPORT_SYMBOL(padata_set_cpumask);
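/*
 * Usage sketch (illustrative only, not part of padata itself): reconfigure
 * the parallel cpumask of a running instance, here to exclude cpu 0.
 * Hypothetical caller; padata_set_cpumask() copies the mask, so it may be
 * freed right after the call.
 *
 *	cpumask_var_t new_mask;
 *	int err;
 *
 *	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
 *		return -ENOMEM;
 *
 *	cpumask_copy(new_mask, cpu_online_mask);
 *	cpumask_clear_cpu(0, new_mask);
 *
 *	err = padata_set_cpumask(pinst, PADATA_CPU_PARALLEL, new_mask);
 *	free_cpumask_var(new_mask);
 */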
/**
 * padata_start - start the parallel processing
 *
 * @pinst: padata instance to start
 */
int padata_start(struct padata_instance *pinst)
{
	int err = 0;

	mutex_lock(&pinst->lock);

	if (pinst->flags & PADATA_INVALID)
		err = -EINVAL;

	__padata_start(pinst);

	mutex_unlock(&pinst->lock);

	return err;
}
EXPORT_SYMBOL(padata_start);

/**
 * padata_stop - stop the parallel processing
 *
 * @pinst: padata instance to stop
 */
void padata_stop(struct padata_instance *pinst)
{
	mutex_lock(&pinst->lock);
	__padata_stop(pinst);
	mutex_unlock(&pinst->lock);
}
EXPORT_SYMBOL(padata_stop);

#ifdef CONFIG_HOTPLUG_CPU

static int __padata_add_cpu(struct padata_instance *pinst, int cpu)
{
	struct parallel_data *pd;

	if (cpumask_test_cpu(cpu, cpu_online_mask)) {
		pd = padata_alloc_pd(pinst, pinst->cpumask.pcpu,
				     pinst->cpumask.cbcpu);
		if (!pd)
			return -ENOMEM;

		padata_replace(pinst, pd);

		if (padata_validate_cpumask(pinst, pinst->cpumask.pcpu) &&
		    padata_validate_cpumask(pinst, pinst->cpumask.cbcpu))
			__padata_start(pinst);
	}

	return 0;
}

static int __padata_remove_cpu(struct padata_instance *pinst, int cpu)
{
	struct parallel_data *pd = NULL;

	if (cpumask_test_cpu(cpu, cpu_online_mask)) {

		if (!padata_validate_cpumask(pinst, pinst->cpumask.pcpu) ||
		    !padata_validate_cpumask(pinst, pinst->cpumask.cbcpu))
			__padata_stop(pinst);

		pd = padata_alloc_pd(pinst, pinst->cpumask.pcpu,
				     pinst->cpumask.cbcpu);
		if (!pd)
			return -ENOMEM;

		padata_replace(pinst, pd);

		cpumask_clear_cpu(cpu, pd->cpumask.cbcpu);
		cpumask_clear_cpu(cpu, pd->cpumask.pcpu);
	}

	return 0;
}

/**
 * padata_remove_cpu - remove a cpu from one or both (serial and parallel)
 *                     padata cpumasks.
 *
 * @pinst: padata instance
 * @cpu: cpu to remove
 * @mask: bitmask specifying from which cpumask @cpu should be removed
 *        The @mask may be any combination of the following flags:
 *          PADATA_CPU_SERIAL   - serial cpumask
 *          PADATA_CPU_PARALLEL - parallel cpumask
 */
int padata_remove_cpu(struct padata_instance *pinst, int cpu, int mask)
{
	int err;

	if (!(mask & (PADATA_CPU_SERIAL | PADATA_CPU_PARALLEL)))
		return -EINVAL;

	mutex_lock(&pinst->lock);

	get_online_cpus();
	if (mask & PADATA_CPU_SERIAL)
		cpumask_clear_cpu(cpu, pinst->cpumask.cbcpu);
	if (mask & PADATA_CPU_PARALLEL)
		cpumask_clear_cpu(cpu, pinst->cpumask.pcpu);

	err = __padata_remove_cpu(pinst, cpu);
	put_online_cpus();

	mutex_unlock(&pinst->lock);

	return err;
}
EXPORT_SYMBOL(padata_remove_cpu);

static inline int pinst_has_cpu(struct padata_instance *pinst, int cpu)
{
	return cpumask_test_cpu(cpu, pinst->cpumask.pcpu) ||
		cpumask_test_cpu(cpu, pinst->cpumask.cbcpu);
}

static int padata_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	struct padata_instance *pinst;
	int ret;

	pinst = hlist_entry_safe(node, struct padata_instance, node);
	if (!pinst_has_cpu(pinst, cpu))
		return 0;

	mutex_lock(&pinst->lock);
	ret = __padata_add_cpu(pinst, cpu);
	mutex_unlock(&pinst->lock);
	return ret;
}

static int padata_cpu_prep_down(unsigned int cpu, struct hlist_node *node)
{
	struct padata_instance *pinst;
	int ret;

	pinst = hlist_entry_safe(node, struct padata_instance, node);
	if (!pinst_has_cpu(pinst, cpu))
		return 0;

	mutex_lock(&pinst->lock);
	ret = __padata_remove_cpu(pinst, cpu);
	mutex_unlock(&pinst->lock);
	return ret;
}

static enum cpuhp_state hp_online;
#endif

static void __padata_free(struct padata_instance *pinst)
{
#ifdef CONFIG_HOTPLUG_CPU
	cpuhp_state_remove_instance_nocalls(hp_online, &pinst->node);
#endif

	padata_stop(pinst);
	padata_free_pd(pinst->pd);
	free_cpumask_var(pinst->cpumask.pcpu);
	free_cpumask_var(pinst->cpumask.cbcpu);
	kfree(pinst);
}

#define kobj2pinst(_kobj)					\
	container_of(_kobj, struct padata_instance, kobj)
#define attr2pentry(_attr)					\
	container_of(_attr, struct padata_sysfs_entry, attr)

static void padata_sysfs_release(struct kobject *kobj)
{
	struct padata_instance *pinst = kobj2pinst(kobj);
	__padata_free(pinst);
}

struct padata_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct padata_instance *, struct attribute *, char *);
	ssize_t (*store)(struct padata_instance *, struct attribute *,
			 const char *, size_t);
};

static ssize_t show_cpumask(struct padata_instance *pinst,
			    struct attribute *attr, char *buf)
{
	struct cpumask *cpumask;
	ssize_t len;

	mutex_lock(&pinst->lock);
	if (!strcmp(attr->name, "serial_cpumask"))
		cpumask = pinst->cpumask.cbcpu;
	else
		cpumask = pinst->cpumask.pcpu;

	len = snprintf(buf, PAGE_SIZE, "%*pb\n",
		       nr_cpu_ids, cpumask_bits(cpumask));
	mutex_unlock(&pinst->lock);
	return len < PAGE_SIZE ?
		len : -EINVAL;
}

static ssize_t store_cpumask(struct padata_instance *pinst,
			     struct attribute *attr,
			     const char *buf, size_t count)
{
	cpumask_var_t new_cpumask;
	ssize_t ret;
	int mask_type;

	if (!alloc_cpumask_var(&new_cpumask, GFP_KERNEL))
		return -ENOMEM;

	ret = bitmap_parse(buf, count, cpumask_bits(new_cpumask),
			   nr_cpumask_bits);
	if (ret < 0)
		goto out;

	mask_type = !strcmp(attr->name, "serial_cpumask") ?
		PADATA_CPU_SERIAL : PADATA_CPU_PARALLEL;
	ret = padata_set_cpumask(pinst, mask_type, new_cpumask);
	if (!ret)
		ret = count;

out:
	free_cpumask_var(new_cpumask);
	return ret;
}

#define PADATA_ATTR_RW(_name, _show_name, _store_name)		\
	static struct padata_sysfs_entry _name##_attr =		\
		__ATTR(_name, 0644, _show_name, _store_name)
#define PADATA_ATTR_RO(_name, _show_name)			\
	static struct padata_sysfs_entry _name##_attr =	\
		__ATTR(_name, 0400, _show_name, NULL)

PADATA_ATTR_RW(serial_cpumask, show_cpumask, store_cpumask);
PADATA_ATTR_RW(parallel_cpumask, show_cpumask, store_cpumask);

/*
 * Padata sysfs provides the following objects:
 * serial_cpumask   [RW] - cpumask for serial workers
 * parallel_cpumask [RW] - cpumask for parallel workers
 */
static struct attribute *padata_default_attrs[] = {
	&serial_cpumask_attr.attr,
	&parallel_cpumask_attr.attr,
	NULL,
};

static ssize_t padata_sysfs_show(struct kobject *kobj,
				 struct attribute *attr, char *buf)
{
	struct padata_instance *pinst;
	struct padata_sysfs_entry *pentry;
	ssize_t ret = -EIO;

	pinst = kobj2pinst(kobj);
	pentry = attr2pentry(attr);
	if (pentry->show)
		ret = pentry->show(pinst, attr, buf);

	return ret;
}

static ssize_t padata_sysfs_store(struct kobject *kobj, struct attribute *attr,
				  const char *buf, size_t count)
{
	struct padata_instance *pinst;
	struct padata_sysfs_entry *pentry;
	ssize_t ret = -EIO;

	pinst = kobj2pinst(kobj);
	pentry = attr2pentry(attr);
	if (pentry->store)
		ret = pentry->store(pinst, attr, buf, count);

	return ret;
}

static const struct sysfs_ops padata_sysfs_ops = {
	.show = padata_sysfs_show,
	.store = padata_sysfs_store,
};

static struct kobj_type padata_attr_type = {
	.sysfs_ops = &padata_sysfs_ops,
	.default_attrs = padata_default_attrs,
	.release = padata_sysfs_release,
};

/**
 * padata_alloc_possible - Allocate and initialize padata instance.
 *                         Use the cpu_possible_mask for serial and
 *                         parallel workers.
 *
 * @wq: workqueue to use for the allocated padata instance
 */
struct padata_instance *padata_alloc_possible(struct workqueue_struct *wq)
{
	return padata_alloc(wq, cpu_possible_mask, cpu_possible_mask);
}
EXPORT_SYMBOL(padata_alloc_possible);
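/*
 * Usage sketch (illustrative only, not part of padata itself): typical
 * lifecycle of an instance.  The workqueue name and the error handling are
 * hypothetical; the caller provides the workqueue, e.g. via
 * alloc_workqueue(), and destroys it after freeing the instance.
 *
 *	struct workqueue_struct *wq;
 *	struct padata_instance *pinst;
 *
 *	wq = alloc_workqueue("my_padata",
 *			     WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, 1);
 *	if (!wq)
 *		return -ENOMEM;
 *
 *	pinst = padata_alloc_possible(wq);
 *	if (!pinst) {
 *		destroy_workqueue(wq);
 *		return -ENOMEM;
 *	}
 *
 *	padata_start(pinst);
 *
 *	... submit objects with padata_do_parallel() ...
 *
 *	padata_stop(pinst);
 *	padata_free(pinst);
 *	destroy_workqueue(wq);
 */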
/**
 * padata_alloc - allocate and initialize a padata instance and specify
 *                cpumasks for serial and parallel workers.
 *
 * @wq: workqueue to use for the allocated padata instance
 * @pcpumask: cpumask that will be used for padata parallelization
 * @cbcpumask: cpumask that will be used for padata serialization
 */
struct padata_instance *padata_alloc(struct workqueue_struct *wq,
				     const struct cpumask *pcpumask,
				     const struct cpumask *cbcpumask)
{
	struct padata_instance *pinst;
	struct parallel_data *pd = NULL;

	pinst = kzalloc(sizeof(struct padata_instance), GFP_KERNEL);
	if (!pinst)
		goto err;

	get_online_cpus();
	if (!alloc_cpumask_var(&pinst->cpumask.pcpu, GFP_KERNEL))
		goto err_free_inst;
	if (!alloc_cpumask_var(&pinst->cpumask.cbcpu, GFP_KERNEL)) {
		free_cpumask_var(pinst->cpumask.pcpu);
		goto err_free_inst;
	}
	if (!padata_validate_cpumask(pinst, pcpumask) ||
	    !padata_validate_cpumask(pinst, cbcpumask))
		goto err_free_masks;

	pd = padata_alloc_pd(pinst, pcpumask, cbcpumask);
	if (!pd)
		goto err_free_masks;

	rcu_assign_pointer(pinst->pd, pd);

	pinst->wq = wq;

	cpumask_copy(pinst->cpumask.pcpu, pcpumask);
	cpumask_copy(pinst->cpumask.cbcpu, cbcpumask);

	pinst->flags = 0;

	put_online_cpus();

	BLOCKING_INIT_NOTIFIER_HEAD(&pinst->cpumask_change_notifier);
	kobject_init(&pinst->kobj, &padata_attr_type);
	mutex_init(&pinst->lock);

#ifdef CONFIG_HOTPLUG_CPU
	cpuhp_state_add_instance_nocalls(hp_online, &pinst->node);
#endif
	return pinst;

err_free_masks:
	free_cpumask_var(pinst->cpumask.pcpu);
	free_cpumask_var(pinst->cpumask.cbcpu);
err_free_inst:
	kfree(pinst);
	put_online_cpus();
err:
	return NULL;
}

/**
 * padata_free - free a padata instance
 *
 * @pinst: padata instance to free
 */
void padata_free(struct padata_instance *pinst)
{
	kobject_put(&pinst->kobj);
}
EXPORT_SYMBOL(padata_free);

#ifdef CONFIG_HOTPLUG_CPU

static __init int padata_driver_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "padata:online",
				      padata_cpu_online,
				      padata_cpu_prep_down);
	if (ret < 0)
		return ret;
	hp_online = ret;
	return 0;
}
module_init(padata_driver_init);

static __exit void padata_driver_exit(void)
{
	cpuhp_remove_multi_state(hp_online);
}
module_exit(padata_driver_exit);
#endif