tlb.c (88715b6e5d529f4ef3830ad2a893e4624c6af0b8, a898530eea3d0ba08c17a60865995a3bb468d1bc)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * This file contains the routines for TLB flushing.
 * On machines where the MMU does not use a hash table to store virtual to
 * physical translations (i.e., SW-loaded TLBs or Book3E-compliant processors;
 * this does -not- include the 603, which shares the implementation with
 * hash-based processors)
 *

--- 96 unchanged lines hidden ---

		.shift = 19,
	},
	[MMU_PAGE_8M] = {
		.shift = 23,
	},
};
#endif

/* The variables below are currently only used on 64-bit Book3E
 * though this will probably be made common with other nohash
 * implementations at some point
 */
#ifdef CONFIG_PPC64

int mmu_pte_psize;		/* Page size used for PTE pages */
int mmu_vmemmap_psize;		/* Page size used for the virtual mem map */
int book3e_htw_mode;		/* HW tablewalk?  Value is PPC_HTW_* */
unsigned long linear_map_top;	/* Top of linear mapping */

/*
 * Number of bytes to add to SPRN_SPRG_TLB_EXFRAME on crit/mcheck/debug
 * exceptions.  This is used for bolted and e6500 TLB miss handlers which
 * do not modify this SPRG in the TLB miss code; for other TLB miss handlers,
 * this is set to zero.
 */
int extlb_level_exc;

#endif /* CONFIG_PPC64 */

#ifdef CONFIG_PPC_E500
/* next_tlbcam_idx is used to round-robin tlbcam entry assignment */
DEFINE_PER_CPU(int, next_tlbcam_idx);
EXPORT_PER_CPU_SYMBOL(next_tlbcam_idx);
#endif

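/*
 * Illustrative sketch (not part of this file): a consumer of
 * next_tlbcam_idx would pick the next victim TLB1 slot roughly like
 * this, where 'ncams' and 'first_free_idx' are hypothetical names for
 * the number of TLB1 entries and the first non-bolted slot:
 *
 *	idx = this_cpu_read(next_tlbcam_idx);
 *	if (unlikely(idx == ncams - 1))		// wrap past the last entry
 *		__this_cpu_write(next_tlbcam_idx, first_free_idx);
 *	else
 *		__this_cpu_inc(next_tlbcam_idx);
 */
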
/*
 * Base TLB flushing operations:

--- 210 unchanged lines hidden ---

}
EXPORT_SYMBOL(flush_tlb_range);

void tlb_flush(struct mmu_gather *tlb)
{
	flush_tlb_mm(tlb->mm);
}

/*
 * Below are functions specific to the 64-bit variant of Book3E, though
 * that may change in the future
 */

#ifdef CONFIG_PPC64

/*
 * Handling of virtual linear page tables or indirect TLB entries
 * flushing when PTE pages are freed
 */
void tlb_flush_pgtable(struct mmu_gather *tlb, unsigned long address)
{
	int tsize = mmu_psize_defs[mmu_pte_psize].enc;

	if (book3e_htw_mode != PPC_HTW_NONE) {
		unsigned long start = address & PMD_MASK;
		unsigned long end = address + PMD_SIZE;
		unsigned long size = 1UL << mmu_psize_defs[mmu_pte_psize].shift;

		/* This isn't the most optimal; ideally we would factor out
		 * the preempt & CPU mask mucking around, or even the IPI,
		 * but it will do for now
		 */
		while (start < end) {
			__flush_tlb_page(tlb->mm, start, tsize, 1);
			start += size;
		}
	} else {
		unsigned long rmask = 0xf000000000000000ul;
		unsigned long rid = (address & rmask) | 0x1000000000000000ul;
		unsigned long vpte = address & ~rmask;

		vpte = (vpte >> (PAGE_SHIFT - 3)) & ~0xffful;
		vpte |= rid;
		__flush_tlb_page(tlb->mm, vpte, tsize, 0);
	}
}

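/*
 * Worked example for the no-HTW branch above (illustrative only,
 * assuming a 64-bit build with 4K pages, i.e. PAGE_SHIFT == 12):
 *
 *	address = 0x0000000012345000	(address covered by the PTE page)
 *	rid     = 0x1000000000000000	(region of the virtual page table)
 *	vpte    = 0x12345000 >> (12 - 3) == 0x91a28, & ~0xfff == 0x91000
 *	vpte   |= rid  ->  0x1000000000091000
 *
 * Each 8-byte PTE maps one 4K page, so one 4K page of the virtual
 * linear page table covers 512 PTEs, i.e. the same 2M PMD-sized range
 * that the HTW branch above walks page by page.
 */
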
static void __init setup_page_sizes(void)
{
	unsigned int tlb0cfg;
	unsigned int eptcfg;
	int psize;

#ifdef CONFIG_PPC_E500
	unsigned int mmucfg = mfspr(SPRN_MMUCFG);
	int fsl_mmu = mmu_has_feature(MMU_FTR_TYPE_FSL_E);

	if (fsl_mmu && (mmucfg & MMUCFG_MAVN) == MMUCFG_MAVN_V1) {
		unsigned int tlb1cfg = mfspr(SPRN_TLB1CFG);
		unsigned int min_pg, max_pg;

		min_pg = (tlb1cfg & TLBnCFG_MINSIZE) >> TLBnCFG_MINSIZE_SHIFT;
		max_pg = (tlb1cfg & TLBnCFG_MAXSIZE) >> TLBnCFG_MAXSIZE_SHIFT;

		for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
			struct mmu_psize_def *def;
			unsigned int shift;

			def = &mmu_psize_defs[psize];
			shift = def->shift;

			if (shift == 0 || shift & 1)
				continue;

			/* adjust to be in terms of 4^shift KB */
			shift = (shift - 10) >> 1;

			if ((shift >= min_pg) && (shift <= max_pg))
				def->flags |= MMU_PAGE_SIZE_DIRECT;
		}

		goto out;
	}

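/*
 * Worked example for the MAV1 encoding above (illustrative only): the
 * size field counts pages of 4^n KB, so def->shift == 12 (4K) maps to
 * (12 - 10) >> 1 == 1 (4^1 KB) and def->shift == 24 (16M) maps to
 * (24 - 10) >> 1 == 7 (4^7 KB == 16384 KB).  Odd shifts are skipped
 * above because MAV1 only supports power-of-4 page sizes.
 */
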
	if (fsl_mmu && (mmucfg & MMUCFG_MAVN) == MMUCFG_MAVN_V2) {
		u32 tlb1cfg, tlb1ps;

		tlb0cfg = mfspr(SPRN_TLB0CFG);
		tlb1cfg = mfspr(SPRN_TLB1CFG);
		tlb1ps = mfspr(SPRN_TLB1PS);
		eptcfg = mfspr(SPRN_EPTCFG);

		if ((tlb1cfg & TLBnCFG_IND) && (tlb0cfg & TLBnCFG_PT))
			book3e_htw_mode = PPC_HTW_E6500;

		/*
		 * We expect 4K subpage size and unrestricted indirect size.
		 * The lack of a restriction on indirect size is a Freescale
		 * extension, indicated by PSn = 0 but SPSn != 0.
		 */
		if (eptcfg != 2)
			book3e_htw_mode = PPC_HTW_NONE;

		for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
			struct mmu_psize_def *def = &mmu_psize_defs[psize];

			if (!def->shift)
				continue;

			if (tlb1ps & (1U << (def->shift - 10))) {
				def->flags |= MMU_PAGE_SIZE_DIRECT;

				if (book3e_htw_mode && psize == MMU_PAGE_2M)
					def->flags |= MMU_PAGE_SIZE_INDIRECT;
			}
		}

		goto out;
	}
#endif
out:
	/* Cleanup array and print summary */
	pr_info("MMU: Supported page sizes\n");
	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
		struct mmu_psize_def *def = &mmu_psize_defs[psize];
		const char *__page_type_names[] = {
			"unsupported",
			"direct",
			"indirect",
			"direct & indirect"
		};
		if (def->flags == 0) {
			def->shift = 0;
			continue;
		}
		pr_info(" %8ld KB as %s\n", 1ul << (def->shift - 10),
			__page_type_names[def->flags & 0x3]);
	}
}

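/*
 * Illustrative decode of the summary above, assuming the flag values
 * implied by the '& 0x3' indexing (MMU_PAGE_SIZE_DIRECT == 0x1,
 * MMU_PAGE_SIZE_INDIRECT == 0x2): a 4K size with flags == 0x1 prints
 * "       4 KB as direct", while a 2M size with flags == 0x3 prints
 * "    2048 KB as direct & indirect".
 */
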
static void __init setup_mmu_htw(void)
{
	/*
	 * If we want to use HW tablewalk, enable it by patching the TLB miss
	 * handlers to branch to the one dedicated to it.
	 */

	switch (book3e_htw_mode) {
#ifdef CONFIG_PPC_E500
	case PPC_HTW_E6500:
		extlb_level_exc = EX_TLB_SIZE;
		patch_exception(0x1c0, exc_data_tlb_miss_e6500_book3e);
		patch_exception(0x1e0, exc_instruction_tlb_miss_e6500_book3e);
		break;
#endif
	}
	pr_info("MMU: Book3E HW tablewalk %s\n",
		book3e_htw_mode != PPC_HTW_NONE ? "enabled" : "not supported");
}

/*
 * Early initialization of the MMU TLB code
 */
static void early_init_this_mmu(void)
{
	unsigned int mas4;

	/* Set MAS4 based on page table setting */

	mas4 = 0x4 << MAS4_WIMGED_SHIFT;
	switch (book3e_htw_mode) {
	case PPC_HTW_E6500:
		mas4 |= MAS4_INDD;
		mas4 |= BOOK3E_PAGESZ_2M << MAS4_TSIZED_SHIFT;
		mas4 |= MAS4_TLBSELD(1);
		mmu_pte_psize = MMU_PAGE_2M;
		break;

	case PPC_HTW_NONE:
		mas4 |= BOOK3E_PAGESZ_4K << MAS4_TSIZED_SHIFT;
		mmu_pte_psize = mmu_virtual_psize;
		break;
	}
	mtspr(SPRN_MAS4, mas4);

#ifdef CONFIG_PPC_E500
	if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
		unsigned int num_cams;
		bool map = true;

		/* use a quarter of the TLBCAM for bolted linear map */
		num_cams = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) / 4;

		/*
		 * Only do the mapping once per core, or else the
		 * transient mapping would cause problems.
		 */
#ifdef CONFIG_SMP
		if (hweight32(get_tensr()) > 1)
			map = false;
#endif

		if (map)
			linear_map_top = map_mem_in_cams(linear_map_top,
							 num_cams, false, true);
	}
#endif

	/* A sync won't hurt us after mucking around with
	 * the MMU configuration
	 */
	mb();
}

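/*
 * Illustrative arithmetic for the bolted linear map above: with a
 * hypothetical TLB1 of 64 entries (TLB1CFG[NENTRY] == 64), num_cams is
 * 64 / 4 == 16, i.e. a quarter of the TLBCAM slots are handed to
 * map_mem_in_cams() for the bolted linear mapping, leaving the rest
 * free for other uses.
 */
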
static void __init early_init_mmu_global(void)
{
	/* XXX This should be decided at runtime based on supported
	 * page sizes in the TLB, but for now let's assume 16M is
	 * always there and a good fit (which it probably is)
	 *
	 * Freescale booke only supports 4K pages in TLB0, so use that.
	 */
	if (mmu_has_feature(MMU_FTR_TYPE_FSL_E))
		mmu_vmemmap_psize = MMU_PAGE_4K;
	else
		mmu_vmemmap_psize = MMU_PAGE_16M;

	/* XXX This code only checks for TLB 0 capabilities and doesn't
	 * check what page size combos are supported by the HW.  It
	 * also doesn't handle the case where a separate array holds
	 * the IND entries from the array loaded by the PT.
	 */
	/* Look for supported page sizes */
	setup_page_sizes();

	/* Look for HW tablewalk support */
	setup_mmu_htw();

#ifdef CONFIG_PPC_E500
	if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
		if (book3e_htw_mode == PPC_HTW_NONE) {
			extlb_level_exc = EX_TLB_SIZE;
			patch_exception(0x1c0, exc_data_tlb_miss_bolted_book3e);
			patch_exception(0x1e0,
				exc_instruction_tlb_miss_bolted_book3e);
		}
	}
#endif

	/* Set the global containing the top of the linear mapping
	 * for use by the TLB miss code
	 */
	linear_map_top = memblock_end_of_DRAM();

	ioremap_bot = IOREMAP_BASE;
}

static void __init early_mmu_set_memory_limit(void)
{
#ifdef CONFIG_PPC_E500
	if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
		/*
		 * Limit memory so we don't have linear faults.
		 * Unlike memblock_set_current_limit, which limits
		 * memory available during early boot, this permanently
		 * reduces the memory available to Linux.  We need to
		 * do this because highmem is not supported on 64-bit.
		 */
		memblock_enforce_memory_limit(linear_map_top);
	}
#endif

	memblock_set_current_limit(linear_map_top);
}

/* boot cpu only */
void __init early_init_mmu(void)
{
	early_init_mmu_global();
	early_init_this_mmu();
	early_mmu_set_memory_limit();
}

void early_init_mmu_secondary(void)
{
	early_init_this_mmu();
}

void setup_initial_memory_limit(phys_addr_t first_memblock_base,
				phys_addr_t first_memblock_size)
{
	/* On non-FSL Embedded 64-bit, we adjust the RMA size to match
	 * the bolted TLB entry.  We know for now that only 1G
	 * entries are supported, though that may eventually
	 * change.
	 *
	 * On FSL Embedded 64-bit, usually all RAM is bolted, but with
	 * unusual memory sizes it's possible for some RAM to not be mapped
	 * (such RAM is not used at all by Linux, since we don't support
	 * highmem on 64-bit).  We limit ppc64_rma_size to what would be
	 * mappable if this memblock is the only one.  Additional memblocks
	 * can only increase, not decrease, the amount that ends up getting
	 * mapped.  We still limit the max to 1G even if we'll eventually
	 * map more, because of how the early init code is set up.
	 *
	 * We crop it to the size of the first MEMBLOCK to
	 * avoid going over total available memory just in case...
	 */
#ifdef CONFIG_PPC_E500
	if (early_mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
		unsigned long linear_sz;
		unsigned int num_cams;

		/* use a quarter of the TLBCAM for bolted linear map */
		num_cams = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) / 4;

		linear_sz = map_mem_in_cams(first_memblock_size, num_cams,
					    true, true);

		ppc64_rma_size = min_t(u64, linear_sz, 0x40000000);
	} else
#endif
		ppc64_rma_size = min_t(u64, first_memblock_size, 0x40000000);

	/* Finally limit subsequent allocations */
	memblock_set_current_limit(first_memblock_base + ppc64_rma_size);
}
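
/*
 * Illustrative clamp (not from the original source): with an 8G first
 * memblock on non-FSL Book3E, ppc64_rma_size becomes min(8G, 1G) == 1G
 * (0x40000000) and subsequent memblock allocations are limited to
 * first_memblock_base + 1G.
 */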
#else /* ! CONFIG_PPC64 */
void __init early_init_mmu(void)
{
	unsigned long root = of_get_flat_dt_root();

	if (IS_ENABLED(CONFIG_PPC_47x) && IS_ENABLED(CONFIG_SMP) &&
	    of_get_flat_dt_prop(root, "cooperative-partition", NULL))
		mmu_clear_feature(MMU_FTR_USE_TLBIVAX_BCAST);
}
#endif /* CONFIG_PPC64 */