xref: /linux/arch/arm64/kernel/elfcore.c (revision 5a48b7433a5aee719ab242d2feadaf4c9e065989)
// SPDX-License-Identifier: GPL-2.0-only

#include <linux/coredump.h>
#include <linux/elfcore.h>
#include <linux/kernel.h>
#include <linux/mm.h>

#include <asm/cpufeature.h>
#include <asm/mte.h>

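/*
 * Compatibility fallback, assuming a pre-v6.1 kernel: VMA_ITERATOR and
 * for_each_vma() only exist once the maple-tree VMA iterator is in. On
 * older kernels the VMAs are still chained on mm->mmap, so the "iterator"
 * degrades to a plain mm_struct pointer and a linked-list walk.
 */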
#ifndef VMA_ITERATOR
#define VMA_ITERATOR(name, mm, addr)	\
	struct mm_struct *name = mm
#define for_each_vma(vmi, vma)		\
	for (vma = vmi->mmap; vma; vma = vma->vm_next)
#endif

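/*
 * Walk every VM_MTE vma of the dumped process; compiles down to nothing
 * on hardware without MTE. Both if() levels are deliberately brace-less
 * so the macro composes with a trailing statement or block, like a for
 * loop.
 */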
#define for_each_mte_vma(vmi, vma)					\
	if (system_supports_mte())					\
		for_each_vma(vmi, vma)					\
			if (vma->vm_flags & VM_MTE)

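/*
 * Bytes of tag storage a vma contributes to the core file:
 * MTE_PAGE_TAG_STORAGE per page (PAGE_SIZE / 32, i.e. one 4-bit tag per
 * 16-byte granule), or nothing at all for VM_DONTDUMP mappings.
 */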
static unsigned long mte_vma_tag_dump_size(struct vm_area_struct *vma)
{
	if (vma->vm_flags & VM_DONTDUMP)
		return 0;

	return vma_pages(vma) * MTE_PAGE_TAG_STORAGE;
}

/* Derived from dump_user_range(); start/end must be page-aligned */
static int mte_dump_tag_range(struct coredump_params *cprm,
			      unsigned long start, unsigned long end)
{
	unsigned long addr;

	for (addr = start; addr < end; addr += PAGE_SIZE) {
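		/*
		 * Note: MTE_PAGE_TAG_STORAGE scales with PAGE_SIZE, so this
		 * buffer is 128 bytes with 4K pages but 2K of stack with
		 * 64K pages; a heap buffer would be kinder to the frame
		 * there.
		 */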
		char tags[MTE_PAGE_TAG_STORAGE];
		struct page *page = get_dump_page(addr);

		/*
		 * get_dump_page() returns NULL when encountering an empty
		 * page table entry that would otherwise have been filled with
		 * the zero page. Skip the equivalent tag dump which would
		 * have been all zeros.
		 */
		if (!page) {
			dump_skip(cprm, MTE_PAGE_TAG_STORAGE);
			continue;
		}

		/*
		 * Pages mapped in user space as !pte_access_permitted() (e.g.
		 * PROT_EXEC only) may not have the PG_mte_tagged flag set.
		 */
		if (!test_bit(PG_mte_tagged, &page->flags)) {
			put_page(page);
			dump_skip(cprm, MTE_PAGE_TAG_STORAGE);
			continue;
		}

		mte_save_page_tags(page_address(page), tags);
		put_page(page);
		if (!dump_emit(cprm, tags, MTE_PAGE_TAG_STORAGE))
			return 0;
	}

	return 1;
}

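/*
 * The elf_core_extra_* hooks below are invoked from the generic ELF
 * coredump code in fs/binfmt_elf.c: one PT_ARM_MEMTAG_MTE program header
 * is emitted per VM_MTE vma, with the packed tag data appended after the
 * regular core contents. The walks take no mmap_lock; they rely on the
 * coredump path having quiesced all other users of the mm, and each walk
 * must observe the same vma list for the offsets to line up.
 */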
Elf_Half elf_core_extra_phdrs(void)
{
	struct vm_area_struct *vma;
	int vma_count = 0;
	VMA_ITERATOR(vmi, current->mm, 0);

	for_each_mte_vma(vmi, vma)
		vma_count++;

	return vma_count;
}

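/*
 * @offset is the file position where the first chunk of tag data will
 * land; it advances by p_filesz per vma and must stay in lockstep with
 * what elf_core_write_extra_data() later emits.
 */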
int elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset)
{
	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, current->mm, 0);

	for_each_mte_vma(vmi, vma) {
		struct elf_phdr phdr;

		phdr.p_type = PT_ARM_MEMTAG_MTE;
		phdr.p_offset = offset;
		phdr.p_vaddr = vma->vm_start;
		phdr.p_paddr = 0;
		phdr.p_filesz = mte_vma_tag_dump_size(vma);
		phdr.p_memsz = vma->vm_end - vma->vm_start;
		offset += phdr.p_filesz;
		phdr.p_flags = 0;
		phdr.p_align = 0;

		if (!dump_emit(cprm, &phdr, sizeof(phdr)))
			return 0;
	}

	return 1;
}

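/* Total tag payload, used by the core dumper to size the file up front. */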
size_t elf_core_extra_data_size(void)
{
	struct vm_area_struct *vma;
	size_t data_size = 0;
	VMA_ITERATOR(vmi, current->mm, 0);

	for_each_mte_vma(vmi, vma)
		data_size += mte_vma_tag_dump_size(vma);

	return data_size;
}

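/*
 * Emit the tag data itself. Skipping VM_DONTDUMP vmas here mirrors
 * mte_vma_tag_dump_size() returning 0 for them, so the payload matches
 * the p_filesz values already written into the extra phdrs.
 */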
int elf_core_write_extra_data(struct coredump_params *cprm)
{
	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, current->mm, 0);

	for_each_mte_vma(vmi, vma) {
		if (vma->vm_flags & VM_DONTDUMP)
			continue;

		if (!mte_dump_tag_range(cprm, vma->vm_start, vma->vm_end))
			return 0;
	}

	return 1;
}
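
/*
 * Example reader (not part of the kernel file): a minimal user-space sketch
 * of how a consumer might locate the tag segments emitted above. It assumes
 * a native-endian ELF64 core, and it elides PN_XNUM handling and most error
 * reporting; dump_mte_phdrs() is a made-up name for illustration.
 */
#include <elf.h>
#include <stdio.h>

#ifndef PT_ARM_MEMTAG_MTE
#define PT_ARM_MEMTAG_MTE	0x70000002	/* PT_LOPROC + 0x2 */
#endif

static void dump_mte_phdrs(FILE *core)
{
	Elf64_Ehdr ehdr;
	Elf64_Phdr phdr;
	int i;

	if (fread(&ehdr, sizeof(ehdr), 1, core) != 1)
		return;

	for (i = 0; i < ehdr.e_phnum; i++) {
		if (fseek(core, (long)(ehdr.e_phoff + i * sizeof(phdr)),
			  SEEK_SET) ||
		    fread(&phdr, sizeof(phdr), 1, core) != 1)
			return;
		if (phdr.p_type != PT_ARM_MEMTAG_MTE)
			continue;
		/*
		 * p_vaddr/p_memsz give the tagged vma; p_offset/p_filesz give
		 * the packed tags, MTE_PAGE_TAG_STORAGE bytes per page.
		 */
		printf("MTE tags for [%#llx, %#llx) at offset %#llx (%llu bytes)\n",
		       (unsigned long long)phdr.p_vaddr,
		       (unsigned long long)(phdr.p_vaddr + phdr.p_memsz),
		       (unsigned long long)phdr.p_offset,
		       (unsigned long long)phdr.p_filesz);
	}
}

int main(int argc, char **argv)
{
	FILE *core = argc == 2 ? fopen(argv[1], "rb") : NULL;

	if (!core)
		return 1;
	dump_mte_phdrs(core);
	fclose(core);
	return 0;
}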