// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/binfmts.h>
#include <linux/elf.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/slab.h>

#include <asm/page.h>
#include <vdso/datapage.h>
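
/*
 * vdso_start[] and vdso_end[] bound the vDSO code image that is linked
 * into the kernel (typically pulled in by an assembly stub that
 * .incbin's the vDSO shared object) and later mapped into user processes.
 */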
extern char vdso_start[], vdso_end[];

static unsigned int vdso_pages;
static struct page **vdso_pagelist;
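
/*
 * One page-aligned block of kernel-maintained data exported to userspace
 * through the vDSO; vdso_data points at the generic struct vdso_data
 * inside that page.
 */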
static union vdso_data_store vdso_data_store __page_aligned_data;
struct vdso_data *vdso_data = vdso_data_store.data;
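
/*
 * Build the page array used when mapping the vDSO: one entry per page of
 * vDSO code, plus a trailing entry for the vdso_data page.
 */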
static int __init vdso_init(void)
{
	unsigned int i;

	vdso_pages = (vdso_end - vdso_start) >> PAGE_SHIFT;
	vdso_pagelist =
		kcalloc(vdso_pages + 1, sizeof(struct page *), GFP_KERNEL);
	if (unlikely(vdso_pagelist == NULL)) {
		pr_err("vdso: pagelist allocation failed\n");
		return -ENOMEM;
	}

	for (i = 0; i < vdso_pages; i++) {
		struct page *pg;

		pg = virt_to_page(vdso_start + (i << PAGE_SHIFT));
		vdso_pagelist[i] = pg;
	}
	vdso_pagelist[i] = virt_to_page(vdso_data);

	return 0;
}
arch_initcall(vdso_init);
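
/*
 * Map the vDSO into a new process at exec time: the vDSO code pages are
 * installed first, followed by a single read-only data page immediately
 * after them. mm->context.vdso records the base address so that
 * arch_vma_name() can identify the mapping.
 */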
int arch_setup_additional_pages(struct linux_binprm *bprm,
	int uses_interp)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long vdso_base, vdso_len;
	int ret;
	static struct vm_special_mapping vdso_mapping = {
		.name = "[vdso]",
	};
	static struct vm_special_mapping vvar_mapping = {
		.name = "[vvar]",
	};

	vdso_len = (vdso_pages + 1) << PAGE_SHIFT;

	mmap_write_lock(mm);
	vdso_base = get_unmapped_area(NULL, 0, vdso_len, 0, 0);
	if (IS_ERR_VALUE(vdso_base)) {
		ret = vdso_base;
		goto end;
	}

	/*
	 * Put vDSO base into mm struct. We need to do this before calling
	 * install_special_mapping or the perf counter mmap tracking code
	 * will fail to recognise it as a vDSO (since arch_vma_name fails).
	 */
	mm->context.vdso = (void *)vdso_base;

	vdso_mapping.pages = vdso_pagelist;
	vma = _install_special_mapping(mm, vdso_base, vdso_pages << PAGE_SHIFT,
		(VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC),
		&vdso_mapping);

	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		mm->context.vdso = NULL;
		goto end;
	}

	vdso_base += (vdso_pages << PAGE_SHIFT);
	vvar_mapping.pages = &vdso_pagelist[vdso_pages];
	vma = _install_special_mapping(mm, vdso_base, PAGE_SIZE,
		(VM_READ | VM_MAYREAD), &vvar_mapping);

	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		mm->context.vdso = NULL;
		goto end;
	}
	ret = 0;
end:
	mmap_write_unlock(mm);
	return ret;
}
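
/*
 * Name the vDSO mappings so they show up as "[vdso]"/"[vdso_data]" in
 * /proc/<pid>/maps and are recognisable to the perf mmap tracking code.
 */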
const char *arch_vma_name(struct vm_area_struct *vma)
{
	if (vma->vm_mm && (vma->vm_start == (long)vma->vm_mm->context.vdso))
		return "[vdso]";
	if (vma->vm_mm && (vma->vm_start ==
			   (long)vma->vm_mm->context.vdso + PAGE_SIZE))
		return "[vdso_data]";
	return NULL;
}