Lines matching "per-vpe" in arch/mips/kernel/smp.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
34 #include <asm/r4k-timer.h>
35 #include <asm/mips-cps.h>
47 /* Number of TCs (or siblings in Intel speak) per CPU core */
55 /* representing the core map of multi-core chips of each logical CPU */
65 * A logical cpu mask containing only one VPE per core to
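The three comments above annotate the CPU-topology state near the top of the file. A minimal sketch of those declarations, modelled on mainline smp.c (exact attributes recalled from memory):

int smp_num_siblings = 1;
EXPORT_SYMBOL(smp_num_siblings);

/* representing the core map of multi-core chips of each logical CPU */
cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_core_map);

/*
 * A logical cpu mask containing only one VPE per core to
 * reduce the number of IPIs on large MT systems.
 */
cpumask_t cpu_foreign_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_foreign_map);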
144 /* Re-calculate the mask */ in calculate_cpu_foreign_map()
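The re-calculation keeps exactly one VPE per core: it scans the online CPUs, skips any CPU whose core is already represented, then removes each CPU's own siblings from its foreign map. A sketch following the mainline algorithm (cpus_are_siblings() and cpu_sibling_map[] assumed from asm/smp.h and this file):

void calculate_cpu_foreign_map(void)
{
	int i, k, core_present;
	cpumask_t temp_foreign_map;

	/* Re-calculate the mask: pick one representative VPE per core */
	cpumask_clear(&temp_foreign_map);
	for_each_online_cpu(i) {
		core_present = 0;
		for_each_cpu(k, &temp_foreign_map)
			if (cpus_are_siblings(i, k))
				core_present = 1;
		if (!core_present)
			cpumask_set_cpu(i, &temp_foreign_map);
	}

	/* Each CPU's foreign map excludes the VPEs on its own core */
	for_each_online_cpu(i)
		cpumask_andnot(&cpu_foreign_map[i],
			       &temp_foreign_map, &cpu_sibling_map[i]);
}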
365 mp_ops->init_secondary(); in start_secondary()
407 * irq will be enabled in ->smp_finish(), enabling it too early in start_secondary()
411 mp_ops->smp_finish(); in start_secondary()
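All the mp_ops-> calls in this file dispatch through the per-platform callback table. A trimmed sketch of struct plat_smp_ops as declared in asm/smp-ops.h (kexec and other version-dependent fields elided):

struct plat_smp_ops {
	void (*send_ipi_single)(int cpu, unsigned int action);
	void (*send_ipi_mask)(const struct cpumask *mask, unsigned int action);
	void (*init_secondary)(void);	/* early per-CPU setup in start_secondary() */
	void (*smp_finish)(void);	/* final setup; enables IRQs on the new CPU */
	int (*boot_secondary)(int cpu, struct task_struct *idle);
	void (*prepare_cpus)(unsigned int max_cpus);
	void (*prepare_boot_cpu)(void);	/* optional, NULL-checked by callers */
#ifdef CONFIG_HOTPLUG_CPU
	int (*cpu_disable)(void);
	void (*cpu_die)(unsigned int cpu);
	void (*cleanup_dead_cpu)(unsigned int cpu);	/* optional */
#endif
};

The comment at line 407 explains why smp_finish() owns IRQ enabling: turning interrupts on earlier in start_secondary() could let an IPI arrive before the CPU is fully initialized.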
441 current_thread_info()->cpu = 0; in smp_prepare_cpus()
442 mp_ops->prepare_cpus(max_cpus); in smp_prepare_cpus()
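Around these two matches, smp_prepare_cpus() wires up the boot CPU before handing off to the platform hook; a condensed sketch of the mainline flow:

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	init_new_context(current, &init_mm);
	current_thread_info()->cpu = 0;	/* the boot CPU is logical CPU 0 */
	mp_ops->prepare_cpus(max_cpus);
	set_cpu_sibling_map(0);
	set_cpu_core_map(0);
	calculate_cpu_foreign_map();
	/* ... present-mask and coherency setup follows ... */
}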
455 if (mp_ops->prepare_boot_cpu) in smp_prepare_boot_cpu()
456 mp_ops->prepare_boot_cpu(); in smp_prepare_boot_cpu()
464 return mp_ops->boot_secondary(cpu, tidle); in arch_cpuhp_kick_ap_alive()
471 err = mp_ops->boot_secondary(cpu, tidle); in __cpu_up()
479 return -EIO; in __cpu_up()
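The -EIO above is the bring-up timeout: boot_secondary() only kicks the CPU, and __cpu_up() then waits on completions signalled from start_secondary(). A sketch of that path, assuming the cpu_starting/cpu_running completions used by mainline (synchronise_count_master() is the asm/r4k-timer.h hook included at line 34):

int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	int err;

	err = mp_ops->boot_secondary(cpu, tidle);
	if (err)
		return err;

	/* Wait for CPU to start and be ready to sync counters */
	if (!wait_for_completion_timeout(&cpu_starting,
					 msecs_to_jiffies(1000))) {
		pr_crit("CPU%u: failed to start\n", cpu);
		return -EIO;
	}

	/* Sync the r4k count registers of boot and secondary CPU */
	synchronise_count_master(cpu);

	/* Wait for CPU to finish startup and mark itself online */
	wait_for_completion(&cpu_running);

	return 0;
}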
549 * multithreaded address spaces, inter-CPU interrupts have to be sent.
550 * Another case where inter-CPU interrupts are required is when the target
561 if (atomic_read(&mm->mm_users) == 0) in flush_tlb_mm()
568 * No need to worry about other CPUs - the ginvt in in flush_tlb_mm()
571 } else if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) { in flush_tlb_mm()
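Taken together, the matched lines are the branch structure of flush_tlb_mm(): bail out on a dead mm, rely on a globalized ginvt when MemoryMapIDs are in use, IPI other CPUs when the mm may be live elsewhere, and otherwise just stale the remote contexts. A sketch along mainline lines (smp_on_other_tlbs(), drop_mmu_context() and set_cpu_context() as defined in this file and asm/mmu_context.h):

void flush_tlb_mm(struct mm_struct *mm)
{
	if (atomic_read(&mm->mm_users) == 0)
		return;		/* happens as a result of exit_mmap() */

	preempt_disable();

	if (cpu_has_mmid) {
		/*
		 * No need to worry about other CPUs - the ginvt in
		 * drop_mmu_context() below is globalized.
		 */
	} else if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		/* mm may be active on other CPUs: flush them by IPI */
		smp_on_other_tlbs(flush_tlb_mm_ipi, mm);
	} else {
		unsigned int cpu;

		/* mm is only live here: mark remote contexts stale */
		for_each_online_cpu(cpu) {
			if (cpu != smp_processor_id() && cpu_context(cpu, mm))
				set_cpu_context(cpu, mm, 0);
		}
	}
	drop_mmu_context(mm);

	preempt_enable();
}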
596 local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2); in flush_tlb_range_ipi()
601 struct mm_struct *mm = vma->vm_mm; in flush_tlb_range()
621 } else if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) { in flush_tlb_range()
632 int exec = vma->vm_flags & VM_EXEC; in flush_tlb_range()
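All the ranged-flush IPIs share one pattern: pack the arguments into a small descriptor on the caller's stack and point the remote handler at it. A sketch of the descriptor and the handler matched above (names as in mainline):

struct flush_tlb_data {
	struct vm_area_struct *vma;
	unsigned long addr1;
	unsigned long addr2;
};

static void flush_tlb_range_ipi(void *info)
{
	struct flush_tlb_data *fd = info;

	local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}

Passing a stack descriptor is safe because smp_on_other_tlbs() dispatches with wait semantics, so the remote handlers finish before the caller's frame can go out of scope. The exec flag at line 632 decides how the local-only branch invalidates remote contexts: non-executable VMAs can fully stale the context, executable ones must preserve it so the icache is flushed properly later.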
653 local_flush_tlb_kernel_range(fd->addr1, fd->addr2); in flush_tlb_kernel_range_ipi()
670 local_flush_tlb_page(fd->vma, fd->addr1); in flush_tlb_page_ipi()
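Kernel-address flushes have no owning mm to filter against, so they broadcast to every online CPU instead of consulting cpu_foreign_map. A sketch of the caller that pairs with the matched flush_tlb_kernel_range_ipi() handler:

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	struct flush_tlb_data fd = {
		.addr1 = start,
		.addr2 = end,
	};

	/* run the flush on all CPUs, including this one, and wait */
	on_each_cpu(flush_tlb_kernel_range_ipi, &fd, 1);
}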
681 write_c0_memorymapid(cpu_asid(0, vma->vm_mm)); in flush_tlb_page()
688 } else if ((atomic_read(&vma->vm_mm->mm_users) != 1) || in flush_tlb_page()
689 (current->mm != vma->vm_mm)) { in flush_tlb_page()
707 if (cpu != smp_processor_id() && cpu_context(cpu, vma->vm_mm)) in flush_tlb_page()
708 set_cpu_context(cpu, vma->vm_mm, 1); in flush_tlb_page()
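The write_c0_memorymapid() match is the MMID fast path of flush_tlb_page(): with global invalidates available, the CPU temporarily switches to the target mm's MemoryMapID and issues a globalized ginvt for the one VA, so no IPI is needed. A sketch of that branch; the exact hazard-barrier sequence is recalled from mainline and may differ between kernel versions:

	if (cpu_has_mmid) {
		htw_stop();
		old_mmid = read_c0_memorymapid();
		write_c0_memorymapid(cpu_asid(0, vma->vm_mm));
		mtc0_tlbw_hazard();
		ginvt_va_mmid(page);	/* globalized invalidate of one VA */
		sync_ginv();
		write_c0_memorymapid(old_mmid);
		instruction_hazard();
		htw_start();
	}

The set_cpu_context(cpu, vma->vm_mm, 1) fallback at line 708 covers CPUs where the mm is not currently running: rather than IPI them, their context is marked stale so they allocate a fresh ASID on next activation.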
733 if (mp_ops->cleanup_dead_cpu) in arch_cpuhp_cleanup_dead_cpu()
734 mp_ops->cleanup_dead_cpu(cpu); in arch_cpuhp_cleanup_dead_cpu()
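mp_ops itself is installed once during early platform setup; optional hooks such as cleanup_dead_cpu are NULL-checked at each call site, as above. A sketch of the registration helper from this file, with an illustrative caller:

void register_smp_ops(const struct plat_smp_ops *ops)
{
	if (mp_ops)
		printk(KERN_WARNING "Overriding previously set SMP ops\n");

	mp_ops = ops;
}

A platform's plat_smp_setup() would pass its own table, e.g. register_smp_ops(&vsmp_smp_ops); the vsmp name here is illustrative.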