Lines matching +full:is +full:- +full:wired in arch/mips/mm/tlb-r4k.c (MIPS R4K-family TLB handling)
2 * This file is subject to the terms and conditions of the GNU General Public
22 #include <asm/cpu-type.h>
32 * LOONGSON-2 has a 4 entry itlb which is a subset of jtlb, LOONGSON-3 has
52 if (vma->vm_flags & VM_EXEC) in flush_micro_tlb_vm()
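
These two matches are the whole micro-TLB story: Loongson cores keep small micro-TLBs that shadow the joint TLB and must be flushed through the diagnostic register whenever the jTLB changes under them. A sketch of the helper pair, following mainline tlb-r4k.c (details vary by kernel version):

    static inline void flush_micro_tlb(void)
    {
            switch (current_cpu_type()) {
            case CPU_LOONGSON2EF:
                    write_c0_diag(LOONGSON_DIAG_ITLB);
                    break;
            case CPU_LOONGSON64:
                    write_c0_diag(LOONGSON_DIAG_ITLB | LOONGSON_DIAG_DTLB);
                    break;
            default:
                    break;
            }
    }

    static inline void flush_micro_tlb_vm(struct vm_area_struct *vma)
    {
            /* Only mappings that may be executed need the
             * micro-TLB flushed; skip plain data VMAs. */
            if (vma->vm_flags & VM_EXEC)
                    flush_micro_tlb();
    }
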
73 * If there are any wired entries, fall back to iterating in local_flush_tlb_all()
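
The comment belongs to the fast-path choice in local_flush_tlb_all(): cores with the tlbinvf instruction can invalidate the whole VTLB and each FTLB set in a few shots, but tlbinvf cannot spare wired entries, so the code walks entry-by-entry whenever any are wired. Roughly (a sketch of the mainline loop, abbreviated):

    entry = num_wired_entries();
    if (cpu_has_tlbinv && !entry) {
            /* Fast path: tlbinvf the VTLB, then each FTLB set. */
    } else {
            while (entry < current_cpu_data.tlbsize) {
                    /* Park each slot on a unique, unmapped VPN so
                     * no two entries can ever match an address. */
                    write_c0_entryhi(UNIQUE_ENTRYHI(entry));
                    write_c0_index(entry);
                    mtc0_tlbw_hazard();
                    tlb_write_indexed();
                    entry++;
            }
    }
    tlbw_use_hazard();
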
111 struct mm_struct *mm = vma->vm_mm; in local_flush_tlb_range()
120 size = (end - start) >> (PAGE_SHIFT + 1); in local_flush_tlb_range()
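
The shift by PAGE_SHIFT + 1 is deliberate: an R4K-style TLB entry maps an even/odd pair of pages through EntryLo0/EntryLo1, so ranges are counted in double pages; with 4 KiB pages, a 64 KiB range costs 8 entries, not 16. The surrounding logic then picks between a per-entry probe and dropping the whole context (a sketch; mainline also lowers the threshold on FTLB-equipped cores):

    if (cpu_context(cpu, mm) != 0) {
            local_irq_save(flags);
            start = round_down(start, PAGE_SIZE << 1);
            end = round_up(end, PAGE_SIZE << 1);
            size = (end - start) >> (PAGE_SHIFT + 1);   /* double pages */
            if (size <= current_cpu_data.tlbsize / 2) {
                    /* probe and invalidate each double page in turn */
            } else {
                    /* cheaper to retire the ASID and refill lazily */
                    drop_mmu_context(mm);
            }
            local_irq_restore(flags);
    }
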
173 size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT; in local_flush_tlb_kernel_range()
181 end += ((PAGE_SIZE << 1) - 1); in local_flush_tlb_kernel_range()
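
Both matched lines serve the same double-page granularity for kernel ranges: the size check is done in single pages, but the probe loop walks VPN2-sized steps, so start is aligned down and end rounded up to an even/odd pair boundary. Sketch of the loop (after mainline):

    start &= (PAGE_MASK << 1);          /* align down to an even page   */
    end += ((PAGE_SIZE << 1) - 1);      /* round up...                  */
    end &= (PAGE_MASK << 1);            /* ...to the next pair boundary */
    while (start < end) {
            write_c0_entryhi(start);    /* kernel entries carry no ASID */
            start += (PAGE_SIZE << 1);
            mtc0_tlbw_hazard();
            tlb_probe();
            tlb_probe_hazard();
            /* on a hit, overwrite the slot with UNIQUE_ENTRYHI(idx) */
    }
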
217 if (cpu_context(cpu, vma->vm_mm) != 0) { in local_flush_tlb_page()
229 write_c0_memorymapid(cpu_asid(cpu, vma->vm_mm)); in local_flush_tlb_page()
231 write_c0_entryhi(page | cpu_asid(cpu, vma->vm_mm)); in local_flush_tlb_page()
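
Lines 229 and 231 are the two ways of tagging the lookup with an address space: cores with MMID keep the memory map ID in its own register, while classic cores OR the ASID into EntryHi. Either way the function then probes for the page and, on a hit, parks that slot; sketch of the tail (after mainline, abbreviated):

    write_c0_entryhi(page | cpu_asid(cpu, vma->vm_mm));
    mtc0_tlbw_hazard();
    tlb_probe();
    tlb_probe_hazard();
    idx = read_c0_index();              /* negative: no matching entry */
    write_c0_entrylo0(0);
    write_c0_entrylo1(0);
    if (idx >= 0) {
            /* Make sure all entries differ. */
            write_c0_entryhi(UNIQUE_ENTRYHI(idx));
            mtc0_tlbw_hazard();
            tlb_write_indexed();
            tlbw_use_hazard();
    }
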
258 * This one is only used for pages with the global bit set so we don't care
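
This comment opens local_flush_tlb_one(), the flush for kernel pages whose PTEs have the global (G) bit set: a global entry matches in every address space, so EntryHi gets the bare VPN2 with no ASID at all. The distinctive first steps (sketch):

    page &= (PAGE_MASK << 1);   /* VPN2 addresses an even/odd page pair */
    write_c0_entryhi(page);     /* no ASID or'd in: G-bit entries match
                                 * regardless of the current ASID */
    mtc0_tlbw_hazard();
    tlb_probe();
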
308 if (current->active_mm != vma->vm_mm) in __update_tlb()
321 pgdp = pgd_offset(vma->vm_mm, address); in __update_tlb()
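
After the early bail-out on line 308 (faults against an mm other than the current one are ignored), __update_tlb() does the generic five-level descent from the pgd to the PTE; on two-level 32-bit configurations the intermediate levels fold away at compile time. Sketch of the walk (the huge-page branch taken at the pmd level is omitted):

    pgdp = pgd_offset(vma->vm_mm, address);
    p4dp = p4d_offset(pgdp, address);
    pudp = pud_offset(p4dp, address);
    pmdp = pmd_offset(pudp, address);
    /* mainline checks for a huge pmd here and, if found, loads it
     * straight into EntryLo0/1 under the huge page mask */
    ptep = pte_offset_map(pmdp, address);
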
351 * update_mmu_cache() is called between pte_offset_map_lock() in __update_tlb()
352 * and pte_unmap_unlock(), so we can assume that ptep is not in __update_tlb()
358 write_c0_entrylo0(pte_to_entrylo(ptep->pte_high)); in __update_tlb()
360 writex_c0_entrylo0(ptep->pte_low & _PFNX_MASK); in __update_tlb()
362 write_c0_entrylo1(pte_to_entrylo(ptep->pte_high)); in __update_tlb()
364 writex_c0_entrylo1(ptep->pte_low & _PFNX_MASK); in __update_tlb()
366 write_c0_entrylo0(ptep->pte_high); in __update_tlb()
368 write_c0_entrylo1(ptep->pte_high); in __update_tlb()
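
The write/writex pairs are the 32-bit kernel with 64-bit physical addresses case: each PTE is split into pte_high/pte_low halves, and with XPA the upper physical-address bits leave through the extended writex_c0_* (MTHC0) accessors. The ifdef ladder around these four matches looks roughly like this (sketch following mainline):

    #ifdef CONFIG_XPA
            write_c0_entrylo0(pte_to_entrylo(ptep->pte_high));
            if (cpu_has_xpa)
                    writex_c0_entrylo0(ptep->pte_low & _PFNX_MASK);
            ptep++;                     /* on to the odd page's PTE */
            write_c0_entrylo1(pte_to_entrylo(ptep->pte_high));
            if (cpu_has_xpa)
                    writex_c0_entrylo1(ptep->pte_low & _PFNX_MASK);
    #else
            write_c0_entrylo0(ptep->pte_high);
            ptep++;
            write_c0_entrylo1(ptep->pte_high);
    #endif
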
397 unsigned long wired; in add_wired_entry()
410 wired = num_wired_entries(); in add_wired_entry()
411 write_c0_wired(wired + 1); in add_wired_entry()
412 write_c0_index(wired); in add_wired_entry()
413 tlbw_use_hazard(); /* What is the hazard here? */ in add_wired_entry()
425 tlbw_use_hazard(); /* What is the hazard here? */ in add_wired_entry()
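
Lines 410-412 are the allocation step: grow the wired region by one and aim Index at the slot just claimed, so the upcoming tlb_write_indexed() fills it. Entries below the c0_wired watermark are never chosen by tlbwr, which is what platform code relies on for permanent mappings. The whole sequence, roughly (sketch after mainline; both questioning comments are verbatim in the source):

    old_ctx = read_c0_entryhi();
    old_pagemask = read_c0_pagemask();
    wired = num_wired_entries();
    write_c0_wired(wired + 1);          /* grow the wired region      */
    write_c0_index(wired);              /* ...and target the new slot */
    tlbw_use_hazard();                  /* What is the hazard here?   */
    write_c0_pagemask(pagemask);
    write_c0_entryhi(entryhi);
    write_c0_entrylo0(entrylo0);
    write_c0_entrylo1(entrylo1);
    mtc0_tlbw_hazard();
    tlb_write_indexed();
    tlbw_use_hazard();
    write_c0_entryhi(old_ctx);
    write_c0_pagemask(old_pagemask);
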
437 static unsigned int mask = -1; in has_transparent_hugepage()
439 if (mask == -1) { /* first call comes during __init */ in has_transparent_hugepage()
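
The -1 sentinel caches a one-time hardware probe: write PM_HUGE_MASK into c0_pagemask and read back what the MMU actually latched; a core without huge-page support will not hold the value. Every later call reuses the cached answer. Sketch (close to the mainline function):

    int has_transparent_hugepage(void)
    {
            static unsigned int mask = -1;

            if (mask == -1) {       /* first call comes during __init */
                    unsigned long flags;

                    local_irq_save(flags);
                    write_c0_pagemask(PM_HUGE_MASK);
                    back_to_back_c0_hazard();
                    mask = read_c0_pagemask();  /* what the MMU kept */
                    write_c0_pagemask(PM_DEFAULT_MASK);
                    local_irq_restore(flags);
            }
            return mask == PM_HUGE_MASK;
    }
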
457 * don't actually want to add a wired entry which remains throughout the
469 unsigned long wired; in add_temporary_entry()
478 wired = num_wired_entries(); in add_temporary_entry()
479 if (--temp_tlb_entry < wired) { in add_temporary_entry()
482 ret = -ENOSPC; in add_temporary_entry()
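
Temporary entries are handed out from the top of the TLB downward while wired entries grow upward from index 0; the pre-decrement is the collision test between the two cursors. On a 64-entry TLB with three wired entries, temp_tlb_entry starts at 63 and -ENOSPC fires once it would sink below 3. Sketch of the check:

    wired = num_wired_entries();
    if (--temp_tlb_entry < wired) {
            printk(KERN_WARNING
                   "No TLB space left for add_temporary_entry\n");
            ret = -ENOSPC;
            goto out;
    }
    write_c0_index(temp_tlb_entry);     /* fill the slot just reserved */
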
517 long v = *(unsigned long *)a - *(unsigned long *)b; in r4k_vpn_cmp()
518 int s = sizeof(long) > sizeof(int) ? sizeof(long) * 8 - 1: 0; in r4k_vpn_cmp()
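
The odd-looking second line is overflow protection for the comparator's narrowing return: on 64-bit kernels the long difference may not survive truncation to int, so it is collapsed to its sign instead. An arithmetic right shift by BITS_PER_LONG - 1 yields -1 or 0, and OR-ing in !!v turns "greater" into 1. Restated as a whole (a sketch; the return line is how mainline completes the two matched lines):

    static int r4k_vpn_cmp(const void *a, const void *b)
    {
            long v = *(unsigned long *)a - *(unsigned long *)b;
            /* 63 on 64-bit kernels, 0 on 32-bit ones */
            int s = sizeof(long) > sizeof(int) ? sizeof(long) * 8 - 1 : 0;

            return v >> s | !!v;        /* -1, 0 or 1 always fits */
    }
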
536 vpn_mask = GENMASK(cpu_vmbits - 1, 13); in r4k_tlb_uniquify()
544 return; /* Pray local_flush_tlb_all() is good enough. */ in r4k_tlb_uniquify()
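
r4k_tlb_uniquify() exists because some cores raise a machine check the moment two TLB entries could match the same address, and boot firmware may leave such duplicates behind. GENMASK(cpu_vmbits - 1, 13) selects EntryHi's VPN2 bits; the routine reads every entry's VPN and rewrites clashes with unique unmapped values, using r4k_vpn_cmp() above to sort and spot duplicates. The matched return is the bail-out when that pass cannot run (a sketch of the shape only, reconstructed from the matched lines):

    vpn_mask = GENMASK(cpu_vmbits - 1, 13);     /* EntryHi VPN2 field */
    /* ... read back every EntryHi, sort the VPNs, rewrite any
     * duplicate with a unique unmapped address; if that is not
     * possible, settle for a plain flush: */
    local_flush_tlb_all();
    return;     /* Pray local_flush_tlb_all() is good enough. */
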
611 * - On R4600 1.7 the tlbp never hits for pages smaller than in r4k_tlb_configure()
613 * - The entire mm handling assumes the c0_pagemask register to in r4k_tlb_configure()
614 * be set to fixed-size pages. in r4k_tlb_configure()
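
In the source this warning sits directly above the write it protects: c0_pagemask is programmed once for the base page size and then verified, since both the TLB refill handlers and every flush routine above assume fixed-size pages. Roughly:

    write_c0_pagemask(PM_DEFAULT_MASK);
    back_to_back_c0_hazard();
    if (read_c0_pagemask() != PM_DEFAULT_MASK)
            panic("MMU doesn't support PAGE_SIZE=0x%lx", PAGE_SIZE);
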
640 temp_tlb_entry = current_cpu_data.tlbsize - 1; in r4k_tlb_configure()
642 /* From this point on the ARC firmware is dead. */ in r4k_tlb_configure()
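
temp_tlb_entry starting at tlbsize - 1 seeds the top-down cursor used by add_temporary_entry() above, and the "ARC firmware is dead" comment marks the hand-over point where the firmware's own TLB contents are wiped. In a tree that carries the uniquify pass, the next line is presumably the call into it (a sketch, not verified against this exact source):

    temp_tlb_entry = current_cpu_data.tlbsize - 1;

    /* From this point on the ARC firmware is dead. */
    r4k_tlb_uniquify();
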
655 int wired = current_cpu_data.tlbsize - ntlb; in tlb_init()
656 write_c0_wired(wired); in tlb_init()
657 write_c0_index(wired-1); in tlb_init()
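
The last three matches implement the "ntlb=" kernel parameter: passing ntlb=N marks the bottom tlbsize - N entries wired, and since tlbwr never selects an index below c0_wired those slots simply drop out of service, shrinking the effective TLB to N entries (a bring-up and debugging aid). Sketch of the tail of tlb_init() (after mainline):

    if (ntlb) {     /* set via the "ntlb=" boot parameter */
            if (ntlb > 1 && ntlb <= current_cpu_data.tlbsize) {
                    int wired = current_cpu_data.tlbsize - ntlb;
                    write_c0_wired(wired);
                    write_c0_index(wired - 1);
                    printk("Restricting TLB to %d entries\n", ntlb);
            } else
                    printk("Ignoring invalid argument ntlb=%d\n", ntlb);
    }

    build_tlb_refill_handler();
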