gmap.c: 8ecb1a59d6c6674bc98e4eee0c2482490748e21a (old) vs. b2d73b2a0ad1c758cb0c1acb01a911744b845942 (new)
1/*
2 * KVM guest address space mapping code
3 *
4 * Copyright IBM Corp. 2007, 2016
5 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
6 */
7
8#include <linux/kernel.h>

--- 539 unchanged lines hidden ---

548 up_read(&gmap->mm->mmap_sem);
549}
550EXPORT_SYMBOL_GPL(gmap_discard);
551
552static LIST_HEAD(gmap_notifier_list);
553static DEFINE_SPINLOCK(gmap_notifier_lock);
554
555/**
556 * gmap_register_ipte_notifier - register a pte invalidation callback
556 * gmap_register_pte_notifier - register a pte invalidation callback
557 * @nb: pointer to the gmap notifier block
558 */
559void gmap_register_ipte_notifier(struct gmap_notifier *nb)
559void gmap_register_pte_notifier(struct gmap_notifier *nb)
560{
561 spin_lock(&gmap_notifier_lock);
562 list_add_rcu(&nb->list, &gmap_notifier_list);
563 spin_unlock(&gmap_notifier_lock);
564}
565EXPORT_SYMBOL_GPL(gmap_register_ipte_notifier);
565EXPORT_SYMBOL_GPL(gmap_register_pte_notifier);
566
567/**
568 * gmap_unregister_ipte_notifier - remove a pte invalidation callback
568 * gmap_unregister_pte_notifier - remove a pte invalidation callback
569 * @nb: pointer to the gmap notifier block
570 */
571void gmap_unregister_ipte_notifier(struct gmap_notifier *nb)
571void gmap_unregister_pte_notifier(struct gmap_notifier *nb)
572{
573 spin_lock(&gmap_notifier_lock);
574 list_del_rcu(&nb->list);
575 spin_unlock(&gmap_notifier_lock);
576 synchronize_rcu();
577}
578EXPORT_SYMBOL_GPL(gmap_unregister_ipte_notifier);
578EXPORT_SYMBOL_GPL(gmap_unregister_pte_notifier);
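For orientation, a minimal sketch of a consumer of this registration API. Everything named demo_* is hypothetical; only struct gmap_notifier, its notifier_call member and the register/unregister functions come from the code above (the declarations live in asm/gmap.h in this tree).

static void demo_gmap_notifier_call(struct gmap *gmap, unsigned long start,
				    unsigned long end)
{
	/* Invoked via gmap_call_notifier() below for the affected guest
	 * address range, e.g. to drop cached state for those pages. */
}

static struct gmap_notifier demo_gmap_notifier = {
	.notifier_call = demo_gmap_notifier_call,
};

static void demo_setup(void)
{
	gmap_register_pte_notifier(&demo_gmap_notifier);
}

static void demo_teardown(void)
{
	/* list_del_rcu() plus synchronize_rcu() in the unregister path
	 * guarantee the callback is no longer running once this returns. */
	gmap_unregister_pte_notifier(&demo_gmap_notifier);
}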
579
580/**
581 * gmap_call_notifier - call all registered invalidation callbacks
582 * @gmap: pointer to guest mapping meta data structure
583 * @start: start virtual address in the guest address space
584 * @end: end virtual address in the guest address space
585 */
586static void gmap_call_notifier(struct gmap *gmap, unsigned long start,
587 unsigned long end)
588{
589 struct gmap_notifier *nb;
590
591 list_for_each_entry(nb, &gmap_notifier_list, list)
592 nb->notifier_call(gmap, start, end);
593}
594
595/**
596 * gmap_ipte_notify - mark a range of ptes for invalidation notification
596 * gmap_table_walk - walk the gmap page tables
597 * @gmap: pointer to guest mapping meta data structure
598 * @gaddr: virtual address in the guest address space
599 *
600 * Returns a table pointer for the given guest address.
601 */
602static inline unsigned long *gmap_table_walk(struct gmap *gmap,
603 unsigned long gaddr)
604{
605 unsigned long *table;
606
607 table = gmap->table;
608 switch (gmap->asce & _ASCE_TYPE_MASK) {
609 case _ASCE_TYPE_REGION1:
610 table += (gaddr >> 53) & 0x7ff;
611 if (*table & _REGION_ENTRY_INVALID)
612 return NULL;
613 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
614 /* Fallthrough */
615 case _ASCE_TYPE_REGION2:
616 table += (gaddr >> 42) & 0x7ff;
617 if (*table & _REGION_ENTRY_INVALID)
618 return NULL;
619 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
620 /* Fallthrough */
621 case _ASCE_TYPE_REGION3:
622 table += (gaddr >> 31) & 0x7ff;
623 if (*table & _REGION_ENTRY_INVALID)
624 return NULL;
625 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
626 /* Fallthrough */
627 case _ASCE_TYPE_SEGMENT:
628 table += (gaddr >> 20) & 0x7ff;
629 }
630 return table;
631}
632
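As a reading aid (not part of gmap.c), the shifts above split a guest address into one 11-bit index per translation level, i.e. each table has 2048 entries: a region-first-table entry covers 2^53 bytes, a region-second-table entry 4 TB, a region-third-table entry 2 GB and a segment-table entry 1 MB. The helper name below is made up for illustration.

static inline unsigned long gmap_demo_index(unsigned long gaddr,
					    unsigned int shift)
{
	return (gaddr >> shift) & 0x7ff;	/* 11-bit index, 2048 entries */
}

/*
 * gmap_demo_index(gaddr, 53): region-first index,  entry maps 2^53 bytes
 * gmap_demo_index(gaddr, 42): region-second index, entry maps 4 TB
 * gmap_demo_index(gaddr, 31): region-third index,  entry maps 2 GB
 * gmap_demo_index(gaddr, 20): segment index,       entry maps 1 MB
 */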
633/**
634 * gmap_pte_op_walk - walk the gmap page table, get the page table lock
635 * and return the pte pointer
636 * @gmap: pointer to guest mapping meta data structure
637 * @gaddr: virtual address in the guest address space
638 * @ptl: pointer to the spinlock pointer
639 *
640 * Returns a pointer to the locked pte for a guest address, or NULL
641 */
642static pte_t *gmap_pte_op_walk(struct gmap *gmap, unsigned long gaddr,
643 spinlock_t **ptl)
644{
645 unsigned long *table;
646
647 /* Walk the gmap page table, lock and get pte pointer */
648 table = gmap_table_walk(gmap, gaddr);
649 if (!table || *table & _SEGMENT_ENTRY_INVALID)
650 return NULL;
651 return pte_alloc_map_lock(gmap->mm, (pmd_t *) table, gaddr, ptl);
652}
653
654/**
655 * gmap_pte_op_fixup - force a page in and connect the gmap page table
656 * @gmap: pointer to guest mapping meta data structure
657 * @gaddr: virtual address in the guest address space
658 * @vmaddr: address in the host process address space
659 *
660 * Returns 0 if the caller can retry __gmap_translate (might fail again),
661 * -ENOMEM if out of memory and -EFAULT if anything goes wrong while fixing
662 * up or connecting the gmap page table.
663 */
664static int gmap_pte_op_fixup(struct gmap *gmap, unsigned long gaddr,
665 unsigned long vmaddr)
666{
667 struct mm_struct *mm = gmap->mm;
668 bool unlocked = false;
669
670 if (fixup_user_fault(current, mm, vmaddr, FAULT_FLAG_WRITE, &unlocked))
671 return -EFAULT;
672 if (unlocked)
673 /* lost mmap_sem, caller has to retry __gmap_translate */
674 return 0;
675 /* Connect the page tables */
676 return __gmap_link(gmap, gaddr, vmaddr);
677}
678
679/**
680 * gmap_pte_op_end - release the page table lock
681 * @ptl: pointer to the spinlock pointer
682 */
683static void gmap_pte_op_end(spinlock_t *ptl)
684{
685 spin_unlock(ptl);
686}
687
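The three helpers above are meant to be combined: try gmap_pte_op_walk() first, work on the pte under the returned page table lock, release it with gmap_pte_op_end(), and fall back to __gmap_translate() plus gmap_pte_op_fixup() when the walk fails, then retry. gmap_mprotect_notify() below is the in-tree user of this pattern; the loop here only distills it, with an illustrative name and a placeholder pte operation.

static int gmap_demo_pte_op(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long vmaddr;
	spinlock_t *ptl;
	pte_t *ptep;
	int rc;

	down_read(&gmap->mm->mmap_sem);
	do {
		rc = -EAGAIN;	/* assume the fast path fails */
		ptep = gmap_pte_op_walk(gmap, gaddr, &ptl);
		if (ptep) {
			/* ... operate on *ptep under the lock ... */
			rc = 0;
			gmap_pte_op_end(ptl);
			break;
		}
		/* Walk failed: fault the page in, link the gmap table, retry. */
		vmaddr = __gmap_translate(gmap, gaddr);
		if (IS_ERR_VALUE(vmaddr)) {
			rc = vmaddr;
			break;
		}
		rc = gmap_pte_op_fixup(gmap, gaddr, vmaddr);
	} while (!rc);
	up_read(&gmap->mm->mmap_sem);
	return rc;
}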
688/**
689 * gmap_mprotect_notify - change access rights for a range of ptes and
690 * call the notifier if any pte changes again
691 * @gmap: pointer to guest mapping meta data structure
692 * @gaddr: virtual address in the guest address space
599 * @len: size of area
693 * @len: size of area
694 * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
600 *
695 *
601 * Returns 0 if for each page in the given range a gmap mapping exists and
602 * the invalidation notification could be set. If the gmap mapping is missing
603 * for one or more pages -EFAULT is returned. If no memory could be allocated
604 * -ENOMEM is returned. This function establishes missing page table entries.
696 * Returns 0 if for each page in the given range a gmap mapping exists,
697 * the new access rights could be set and the notifier could be armed.
698 * If the gmap mapping is missing for one or more pages -EFAULT is
699 * returned. If no memory could be allocated -ENOMEM is returned.
700 * This function establishes missing page table entries.
605 */
701 */
606int gmap_ipte_notify(struct gmap *gmap, unsigned long gaddr, unsigned long len)
702int gmap_mprotect_notify(struct gmap *gmap, unsigned long gaddr,
703 unsigned long len, int prot)
607{
704{
608 unsigned long addr;
705 unsigned long vmaddr;
609 spinlock_t *ptl;
610 pte_t *ptep;
706 spinlock_t *ptl;
707 pte_t *ptep;
611 bool unlocked;
612 int rc = 0;
613
614 if ((gaddr & ~PAGE_MASK) || (len & ~PAGE_MASK))
615 return -EINVAL;
708 int rc = 0;
709
710 if ((gaddr & ~PAGE_MASK) || (len & ~PAGE_MASK))
711 return -EINVAL;
712 if (!MACHINE_HAS_ESOP && prot == PROT_READ)
713 return -EINVAL;
616 down_read(&gmap->mm->mmap_sem);
617 while (len) {
714 down_read(&gmap->mm->mmap_sem);
715 while (len) {
618 unlocked = false;
619 /* Convert gmap address and connect the page tables */
620 addr = __gmap_translate(gmap, gaddr);
621 if (IS_ERR_VALUE(addr)) {
622 rc = addr;
623 break;
716 rc = -EAGAIN;
717 ptep = gmap_pte_op_walk(gmap, gaddr, &ptl);
718 if (ptep) {
719 rc = ptep_force_prot(gmap->mm, gaddr, ptep, prot);
720 gmap_pte_op_end(ptl);
624 }
721 }
625 /* Get the page mapped */
626 if (fixup_user_fault(current, gmap->mm, addr, FAULT_FLAG_WRITE,
627 &unlocked)) {
628 rc = -EFAULT;
629 break;
630 }
631 /* While trying to map mmap_sem got unlocked. Let us retry */
632 if (unlocked)
722 if (rc) {
723 vmaddr = __gmap_translate(gmap, gaddr);
724 if (IS_ERR_VALUE(vmaddr)) {
725 rc = vmaddr;
726 break;
727 }
728 rc = gmap_pte_op_fixup(gmap, gaddr, vmaddr);
729 if (rc)
730 break;
633 continue;
731 continue;
634 rc = __gmap_link(gmap, gaddr, addr);
635 if (rc)
636 break;
637 /* Walk the process page table, lock and get pte pointer */
638 ptep = get_locked_pte(gmap->mm, addr, &ptl);
639 VM_BUG_ON(!ptep);
640 /* Set notification bit in the pgste of the pte */
641 if ((pte_val(*ptep) & (_PAGE_INVALID | _PAGE_PROTECT)) == 0) {
642 ptep_set_notify(gmap->mm, addr, ptep);
643 gaddr += PAGE_SIZE;
644 len -= PAGE_SIZE;
645 }
732 }
646 pte_unmap_unlock(ptep, ptl);
733 gaddr += PAGE_SIZE;
734 len -= PAGE_SIZE;
647 }
648 up_read(&gmap->mm->mmap_sem);
649 return rc;
650}
735 }
736 up_read(&gmap->mm->mmap_sem);
737 return rc;
738}
651EXPORT_SYMBOL_GPL(gmap_ipte_notify);
739EXPORT_SYMBOL_GPL(gmap_mprotect_notify);
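A hedged usage sketch: with the notifier armed, the callbacks registered via gmap_register_pte_notifier() above are invoked when one of the protected ptes changes. The wrapper below is purely illustrative; note that PROT_READ is rejected with -EINVAL on machines without ESOP, as checked at the top of gmap_mprotect_notify().

/* Illustrative only: make one guest page read-only and arm the notifier. */
static int gmap_demo_watch_page(struct gmap *gmap, unsigned long gaddr)
{
	return gmap_mprotect_notify(gmap, gaddr & PAGE_MASK, PAGE_SIZE,
				    PROT_READ);
}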
740
741/**
742 * ptep_notify - call all invalidation callbacks for a specific pte.
743 * @mm: pointer to the process mm_struct
744 * @addr: virtual address in the process address space
745 * @pte: pointer to the page table entry
746 *
747 * This function is assumed to be called with the page table lock held

--- 135 unchanged lines hidden ---