--- tlbex.c (036313316d3a38bfde9ba49b3d00f73b7d8019d2)
+++ tlbex.c (268a2d60013049cfd9a0aada77284aa6ea8ad26a)
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Synthesize TLB refill handlers at runtime.
 *
 * Copyright (C) 2004, 2005, 2006, 2008 Thiemo Seufer

--- 557 unchanged lines hidden ---

	case CPU_4KSC:
	case CPU_20KC:
	case CPU_25KF:
	case CPU_BMIPS32:
	case CPU_BMIPS3300:
	case CPU_BMIPS4350:
	case CPU_BMIPS4380:
	case CPU_BMIPS5000:
-	case CPU_LOONGSON2:
-	case CPU_LOONGSON3:
+	case CPU_LOONGSON2EF:
+	case CPU_LOONGSON64:
	case CPU_R5500:
		if (m4kc_tlbp_war())
			uasm_i_nop(p);
		/* fall through */
	case CPU_ALCHEMY:
		tlbw(p);
		break;

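A note on the hunk above: nothing in this file is assembled at build time. Each uasm_i_*() call appends one encoded MIPS instruction to the buffer that p points into, and the CPU-type switch decides which sequence gets emitted (here, an extra nop before tlbw on cores that need the tlbp workaround). A minimal sketch of that emit-into-a-buffer idea, using a hypothetical emit() helper rather than the real uasm API; only the nop encoding is architectural:

    #include <stdint.h>

    /* Toy emitter in the spirit of uasm (the real one lives in
     * arch/mips/mm/uasm.c): append one instruction word. */
    static inline void emit(uint32_t **p, uint32_t insn)
    {
        *(*p)++ = insn;
    }

    static inline void emit_nop(uint32_t **p)
    {
        emit(p, 0x00000000);    /* sll $0, $0, 0 == nop */
    }
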
--- 64 unchanged lines hidden ---

#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT

static void build_restore_pagemask(u32 **p, struct uasm_reloc **r,
				   unsigned int tmp, enum label_id lid,
				   int restore_scratch)
{
	if (restore_scratch) {
-		/*
-		 * Ensure the MFC0 below observes the value written to the
-		 * KScratch register by the prior MTC0.
-		 */
-		if (scratch_reg >= 0)
-			uasm_i_ehb(p);
-
		/* Reset default page size */
		if (PM_DEFAULT_MASK >> 16) {
			uasm_i_lui(p, tmp, PM_DEFAULT_MASK >> 16);
			uasm_i_ori(p, tmp, tmp, PM_DEFAULT_MASK & 0xffff);
			uasm_i_mtc0(p, tmp, C0_PAGEMASK);
			uasm_il_b(p, r, lid);
		} else if (PM_DEFAULT_MASK) {
			uasm_i_ori(p, tmp, 0, PM_DEFAULT_MASK);
			uasm_i_mtc0(p, tmp, C0_PAGEMASK);
			uasm_il_b(p, r, lid);
		} else {
			uasm_i_mtc0(p, 0, C0_PAGEMASK);
			uasm_il_b(p, r, lid);
		}
-		if (scratch_reg >= 0)
+		if (scratch_reg >= 0) {
+			uasm_i_ehb(p);
			UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg);
-		else
+		} else {
			UASM_i_LW(p, 1, scratchpad_offset(0), 0);
+		}
	} else {
		/* Reset default page size */
		if (PM_DEFAULT_MASK >> 16) {
			uasm_i_lui(p, tmp, PM_DEFAULT_MASK >> 16);
			uasm_i_ori(p, tmp, tmp, PM_DEFAULT_MASK & 0xffff);
			uasm_il_b(p, r, lid);
			uasm_i_mtc0(p, tmp, C0_PAGEMASK);
		} else if (PM_DEFAULT_MASK) {

--- 232 unchanged lines hidden ---
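The two hunks above are the substance of this diff. Both versions clear the same hazard: after an MTC0 writes a coprocessor-0 register, an EHB (execution hazard barrier) is needed before an MFC0 can reliably read the value back. They differ in placement: the left-hand version emits one EHB up front, with an explanatory comment, while the right-hand version emits it immediately before the MFC0, inside new braces. Placement is delicate here because uasm_il_b() emits a raw branch, so the next instruction emitted lands in its delay slot and still executes. A hedged sketch of the barrier itself; emit() and emit_hazard_barrier() are hypothetical helpers, but the MIPS32r2 encodings are architectural:

    #include <stdint.h>

    #define MIPS_INSN_NOP   0x00000000u    /* sll $0, $0, 0 */
    #define MIPS_INSN_SSNOP 0x00000040u    /* sll $0, $0, 1 */
    #define MIPS_INSN_EHB   0x000000c0u    /* sll $0, $0, 3 */

    static inline void emit(uint32_t **p, uint32_t insn)
    {
        *(*p)++ = insn;
    }

    /* Clear the execution hazard between an mtc0 write and a
     * subsequent mfc0 read of the same CP0 register. */
    static inline void emit_hazard_barrier(uint32_t **p)
    {
        emit(p, MIPS_INSN_EHB);
    }

Because EHB reuses the SLL opcode space, older cores decode it as a harmless shift of $zero, so it can be emitted without a CPU-revision check.
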

			if (uasm_in_compat_space_p(swpd))
				uasm_i_addiu(p, ptr, ptr, uasm_rel_lo(swpd));
			else
				uasm_i_daddiu(p, ptr, ptr, uasm_rel_lo(swpd));
		}
	}
	if (mode != not_refill && check_for_high_segbits) {
		uasm_l_large_segbits_fault(l, *p);
-
-		if (mode == refill_scratch && scratch_reg >= 0)
-			uasm_i_ehb(p);
-
		/*
		 * We get here if we are an xsseg address, or if we are
		 * an xuseg address above (PGDIR_SHIFT+PGDIR_BITS) boundary.
		 *
		 * Ignoring xsseg (assume disabled so would generate
		 * (address errors?), the only remaining possibility
		 * is the upper xuseg addresses. On processors with
		 * TLB_SEGBITS <= PGDIR_SHIFT+PGDIR_BITS, these
		 * addresses would have taken an address error. We try
		 * to mimic that here by taking a load/istream page
		 * fault.
		 */
		if (IS_ENABLED(CONFIG_CPU_LOONGSON3_WORKAROUNDS))
			uasm_i_sync(p, 0);
		UASM_i_LA(p, ptr, (unsigned long)tlb_do_page_fault_0);
		uasm_i_jr(p, ptr);

		if (mode == refill_scratch) {
-			if (scratch_reg >= 0)
+			if (scratch_reg >= 0) {
+				uasm_i_ehb(p);
				UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg);
-			else
+			} else {
				UASM_i_LW(p, 1, scratchpad_offset(0), 0);
+			}
		} else {
			uasm_i_nop(p);
		}
	}
}

#else /* !CONFIG_64BIT */

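A subtlety in the hunk above: uasm_i_jr() emits a plain jr, and whichever single instruction is emitted next occupies its delay slot, executing before control reaches tlb_do_page_fault_0 (which is why the non-scratch path emits an explicit nop). That makes the placement of the added uasm_i_ehb() significant, since it changes what lands in the slot. A sketch of the fill-the-delay-slot pattern; emit(), emit_jr() and emit_jump_with_restore() are hypothetical, while the jr encoding is architectural:

    #include <stdint.h>

    static inline void emit(uint32_t **p, uint32_t insn)
    {
        *(*p)++ = insn;
    }

    static inline void emit_jr(uint32_t **p, unsigned int rs)
    {
        /* SPECIAL opcode (0), funct 0x08: jr $rs */
        emit(p, (uint32_t)(rs & 0x1f) << 21 | 0x08);
    }

    /* The instruction passed in executes in the jump's delay slot,
     * i.e. before control reaches the jump target. */
    static void emit_jump_with_restore(uint32_t **p, unsigned int rs,
                                       uint32_t restore_insn)
    {
        emit_jr(p, rs);
        emit(p, restore_insn);
    }
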
--- 409 unchanged lines hidden ---

	 * free instruction slot for the wrap-around branch. In worst
	 * case, if the intended insertion point is a delay slot, we
	 * need three, with the second nop'ed and the third being
	 * unused.
	 */
	switch (boot_cpu_type()) {
	default:
		if (sizeof(long) == 4) {
-	case CPU_LOONGSON2:
+	case CPU_LOONGSON2EF:
		/* Loongson2 ebase is different than r4k, we have more space */
			if ((p - tlb_handler) > 64)
				panic("TLB refill handler space exceeded");
			/*
			 * Now fold the handler in the TLB refill handler space.
			 */
			f = final_handler;
			/* Simplest case, just copy the handler. */

--- 1266 unchanged lines hidden ---
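Finally, on the 64-instruction check in the hunk above: tlb_handler is an array of u32 opcode words, so the pointer difference (p - tlb_handler) counts emitted instructions rather than bytes, and per the comment the Loongson2-family ebase layout leaves room for 64 of them (256 bytes). A self-contained illustration of that counting; the buffer here is a stand-in, the two encodings are real:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t tlb_handler[64];    /* stand-in handler buffer */
        uint32_t *p = tlb_handler;

        *p++ = 0x00000000;    /* nop */
        *p++ = 0x000000c0;    /* ehb */

        /* pointer subtraction counts u32 slots, not bytes */
        assert(p - tlb_handler == 2);
        return 0;
    }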