--- mm/memory.c (0d71d10a4252a3938e6b70189bc776171c02e076)
+++ mm/memory.c (28b2ee20c7cba812b6f2ccf6d722cf86d00a84dc)
 /*
  * linux/mm/memory.c
  *
  * Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
  */

 /*
  * demand-loading started 01.12.91 - seems it is high on the list of

--- 2737 unchanged lines hidden ---

        if ((addr >= FIXADDR_USER_START) && (addr < FIXADDR_USER_END))
                return 1;
 #endif
        return 0;
 }

 #endif  /* __HAVE_ARCH_GATE_AREA */

+#ifdef CONFIG_HAVE_IOREMAP_PROT
+static resource_size_t follow_phys(struct vm_area_struct *vma,
+                       unsigned long address, unsigned int flags,
+                       unsigned long *prot)
+{
+       pgd_t *pgd;
+       pud_t *pud;
+       pmd_t *pmd;
+       pte_t *ptep, pte;
+       spinlock_t *ptl;
+       resource_size_t phys_addr = 0;
+       struct mm_struct *mm = vma->vm_mm;
+
+       VM_BUG_ON(!(vma->vm_flags & (VM_IO | VM_PFNMAP)));
+
+       pgd = pgd_offset(mm, address);
+       if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
+               goto no_page_table;
+
+       pud = pud_offset(pgd, address);
+       if (pud_none(*pud) || unlikely(pud_bad(*pud)))
+               goto no_page_table;
+
+       pmd = pmd_offset(pud, address);
+       if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
+               goto no_page_table;
+
+       /* We cannot handle huge page PFN maps. Luckily they don't exist. */
+       if (pmd_huge(*pmd))
+               goto no_page_table;
+
+       ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
+       if (!ptep)
+               goto out;
+
+       pte = *ptep;
+       if (!pte_present(pte))
+               goto unlock;
+       if ((flags & FOLL_WRITE) && !pte_write(pte))
+               goto unlock;
+       phys_addr = pte_pfn(pte);
+       phys_addr <<= PAGE_SHIFT; /* Shift here to avoid overflow on PAE */
+
+       *prot = pgprot_val(pte_pgprot(pte));
+
+unlock:
+       pte_unmap_unlock(ptep, ptl);
+out:
+       return phys_addr;
+no_page_table:
+       return 0;
+}
+
+int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
+                       void *buf, int len, int write)
+{
+       resource_size_t phys_addr;
+       unsigned long prot = 0;
+       void *maddr;
+       int offset = addr & (PAGE_SIZE-1);
+
+       if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
+               return -EINVAL;
+
+       phys_addr = follow_phys(vma, addr, write, &prot);
+
+       if (!phys_addr)
+               return -EINVAL;
+
+       maddr = ioremap_prot(phys_addr, PAGE_SIZE, prot);
+       if (write)
+               memcpy_toio(maddr + offset, buf, len);
+       else
+               memcpy_fromio(buf, maddr + offset, len);
+       iounmap(maddr);
+
+       return len;
+}
+#endif
+
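
generic_access_phys() only takes effect once a driver points vm_ops->access at it; /dev/mem is wired up this way when CONFIG_HAVE_IOREMAP_PROT is set. Below is a minimal sketch of that glue, assuming a hypothetical character driver (all mydrv_* names are illustrative, not existing kernel symbols) that maps device registers with remap_pfn_range():

        /* Hypothetical driver glue -- mydrv_* names are illustrative only. */
        static struct vm_operations_struct mydrv_vm_ops = {
                .access = generic_access_phys,  /* reached via vm_ops->access */
        };

        static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
        {
                vma->vm_ops = &mydrv_vm_ops;
                /* remap_pfn_range() marks the vma VM_IO | VM_PFNMAP */
                return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
                                       vma->vm_end - vma->vm_start,
                                       vma->vm_page_prot);
        }
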
 /*
  * Access another process' address space.
  * Source/target buffer must be kernel space,
  * Do not walk the page table directly, use get_user_pages
  */
 int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
 {
        struct mm_struct *mm;
        struct vm_area_struct *vma;
-       struct page *page;
        void *old_buf = buf;

        mm = get_task_mm(tsk);
        if (!mm)
                return 0;

        down_read(&mm->mmap_sem);
        /* ignore errors, just check how much was successfully transferred */
        while (len) {
                int bytes, ret, offset;
                void *maddr;
+               struct page *page = NULL;

                ret = get_user_pages(tsk, mm, addr, 1,
                                write, 1, &page, &vma);
-               if (ret <= 0)
-                       break;
-
-               bytes = len;
-               offset = addr & (PAGE_SIZE-1);
-               if (bytes > PAGE_SIZE-offset)
-                       bytes = PAGE_SIZE-offset;
-
-               maddr = kmap(page);
-               if (write) {
-                       copy_to_user_page(vma, page, addr,
-                                         maddr + offset, buf, bytes);
-                       set_page_dirty_lock(page);
+               if (ret <= 0) {
+                       /*
+                        * Check if this is a VM_IO | VM_PFNMAP VMA, which
+                        * we can access using slightly different code.
+                        */
+#ifdef CONFIG_HAVE_IOREMAP_PROT
+                       vma = find_vma(mm, addr);
+                       if (!vma)
+                               break;
+                       if (vma->vm_ops && vma->vm_ops->access)
+                               ret = vma->vm_ops->access(vma, addr, buf,
+                                                         len, write);
+                       if (ret <= 0)
+#endif
+                               break;
+                       bytes = ret;
                } else {
-                       copy_from_user_page(vma, page, addr,
-                                           buf, maddr + offset, bytes);
+                       bytes = len;
+                       offset = addr & (PAGE_SIZE-1);
+                       if (bytes > PAGE_SIZE-offset)
+                               bytes = PAGE_SIZE-offset;
+
+                       maddr = kmap(page);
+                       if (write) {
+                               copy_to_user_page(vma, page, addr,
+                                                 maddr + offset, buf, bytes);
+                               set_page_dirty_lock(page);
+                       } else {
+                               copy_from_user_page(vma, page, addr,
+                                                   buf, maddr + offset, bytes);
+                       }
+                       kunmap(page);
+                       page_cache_release(page);
                }
-               kunmap(page);
-               page_cache_release(page);
                len -= bytes;
                buf += bytes;
                addr += bytes;
        }
        up_read(&mm->mmap_sem);
        mmput(mm);

        return buf - old_buf;

--- 39 unchanged lines hidden ---
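
For the caller nothing changes: access_process_vm() still returns the number of bytes actually transferred, and IO mappings are now handled transparently through the vm_ops->access fallback above. A minimal, hypothetical caller in the style of ptrace peekdata (peek_task_word is an illustrative name, not an existing kernel helper):

        /* Hypothetical helper -- read one word from another task's address
         * space, failing unless the whole word could be copied. */
        static int peek_task_word(struct task_struct *child,
                                  unsigned long addr, unsigned long *val)
        {
                int copied;

                copied = access_process_vm(child, addr, val, sizeof(*val), 0);
                return (copied == sizeof(*val)) ? 0 : -EIO;
        }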