// SPDX-License-Identifier: GPL-2.0

#include <linux/buildid.h>
#include <linux/cache.h>
#include <linux/elf.h>
#include <linux/kernel.h>
#include <linux/pagemap.h>

#define BUILD_ID 3

#define MAX_PHDR_CNT 256

struct freader {
	void *buf;
	u32 buf_sz;
	int err;
	union {
		struct {
			struct file *file;
			struct folio *folio;
			void *addr;
			loff_t folio_off;
			bool may_fault;
		};
		struct {
			const char *data;
			u64 data_sz;
		};
	};
};

static void freader_init_from_file(struct freader *r, void *buf, u32 buf_sz,
				   struct file *file, bool may_fault)
{
	memset(r, 0, sizeof(*r));
	r->buf = buf;
	r->buf_sz = buf_sz;
	r->file = file;
	r->may_fault = may_fault;
}

static void freader_init_from_mem(struct freader *r, const char *data, u64 data_sz)
{
	memset(r, 0, sizeof(*r));
	r->data = data;
	r->data_sz = data_sz;
}

static void freader_put_folio(struct freader *r)
{
	if (!r->folio)
		return;
	kunmap_local(r->addr);
	folio_put(r->folio);
	r->folio = NULL;
}

static int freader_get_folio(struct freader *r, loff_t file_off)
{
	/* check if we can just reuse current folio */
	if (r->folio && file_off >= r->folio_off &&
	    file_off < r->folio_off + folio_size(r->folio))
		return 0;

	freader_put_folio(r);

	r->folio = filemap_get_folio(r->file->f_mapping, file_off >> PAGE_SHIFT);

	/* if sleeping is allowed, wait for the page, if necessary */
	if (r->may_fault && (IS_ERR(r->folio) || !folio_test_uptodate(r->folio))) {
		filemap_invalidate_lock_shared(r->file->f_mapping);
		r->folio = read_cache_folio(r->file->f_mapping, file_off >> PAGE_SHIFT,
					    NULL, r->file);
		filemap_invalidate_unlock_shared(r->file->f_mapping);
	}

	if (IS_ERR(r->folio) || !folio_test_uptodate(r->folio)) {
		if (!IS_ERR(r->folio))
			folio_put(r->folio);
		r->folio = NULL;
		return -EFAULT;
	}

	r->folio_off = folio_pos(r->folio);
	r->addr = kmap_local_folio(r->folio, 0);

	return 0;
}

static const void *freader_fetch(struct freader *r, loff_t file_off, size_t sz)
{
	size_t folio_sz;

	/* provided internal temporary buffer should be sized correctly */
	if (WARN_ON(r->buf && sz > r->buf_sz)) {
		r->err = -E2BIG;
		return NULL;
	}

	if (unlikely(file_off + sz < file_off)) {
		r->err = -EOVERFLOW;
		return NULL;
	}

	/* working with memory buffer is much more straightforward */
	if (!r->buf) {
		if (file_off + sz > r->data_sz) {
			r->err = -ERANGE;
			return NULL;
		}
		return r->data + file_off;
	}

	/* fetch or reuse folio for given file offset */
	r->err = freader_get_folio(r, file_off);
	if (r->err)
		return NULL;

	/* if requested data is crossing folio boundaries, we have to copy
	 * everything into our local buffer to keep a simple linear memory
	 * access interface
	 */
	folio_sz = folio_size(r->folio);
	if (file_off + sz > r->folio_off + folio_sz) {
		int part_sz = r->folio_off + folio_sz - file_off;

		/* copy the part that resides in the current folio */
		memcpy(r->buf, r->addr + (file_off - r->folio_off), part_sz);

		/* fetch next folio */
		r->err = freader_get_folio(r, r->folio_off + folio_sz);
		if (r->err)
			return NULL;

		/* copy the rest of requested data */
		memcpy(r->buf + part_sz, r->addr, sz - part_sz);

		return r->buf;
	}

	/* if data fits in a single folio, just return direct pointer */
	return r->addr + (file_off - r->folio_off);
}

static void freader_cleanup(struct freader *r)
{
	if (!r->buf)
		return; /* non-file-backed mode */

	freader_put_folio(r);
}

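/*
 * Layout of each note record that parse_build_id() below walks: an
 * Elf32_Nhdr (n_namesz, n_descsz, n_type), immediately followed by the
 * owner name padded to a 4-byte boundary, then the descriptor padded to a
 * 4-byte boundary. For the GNU build ID note the owner name is "GNU\0"
 * (n_namesz == 4), n_type is 3 (NT_GNU_BUILD_ID, the BUILD_ID constant
 * above), and the descriptor holds the raw build ID bytes (e.g. 20 bytes
 * for SHA-1 style IDs).
 */
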
/*
 * Parse build id from the note segment. This logic can be shared between
 * 32-bit and 64-bit systems, because Elf32_Nhdr and Elf64_Nhdr are
 * identical.
 */
static int parse_build_id(struct freader *r, unsigned char *build_id, __u32 *size,
			  loff_t note_off, Elf32_Word note_size)
{
	const char note_name[] = "GNU";
	const size_t note_name_sz = sizeof(note_name);
	u32 build_id_off, new_off, note_end, name_sz, desc_sz;
	const Elf32_Nhdr *nhdr;
	const char *data;

	if (check_add_overflow(note_off, note_size, &note_end))
		return -EINVAL;

	while (note_end - note_off > sizeof(Elf32_Nhdr) + note_name_sz) {
		nhdr = freader_fetch(r, note_off, sizeof(Elf32_Nhdr) + note_name_sz);
		if (!nhdr)
			return r->err;

		name_sz = READ_ONCE(nhdr->n_namesz);
		desc_sz = READ_ONCE(nhdr->n_descsz);

		new_off = note_off + sizeof(Elf32_Nhdr);
		if (check_add_overflow(new_off, ALIGN(name_sz, 4), &new_off) ||
		    check_add_overflow(new_off, ALIGN(desc_sz, 4), &new_off) ||
		    new_off > note_end)
			break;

		if (nhdr->n_type == BUILD_ID &&
		    name_sz == note_name_sz &&
		    memcmp(nhdr + 1, note_name, note_name_sz) == 0 &&
		    desc_sz > 0 && desc_sz <= BUILD_ID_SIZE_MAX) {
			build_id_off = note_off + sizeof(Elf32_Nhdr) + ALIGN(note_name_sz, 4);

			/* freader_fetch() will invalidate nhdr pointer */
			data = freader_fetch(r, build_id_off, desc_sz);
			if (!data)
				return r->err;

			memcpy(build_id, data, desc_sz);
			memset(build_id + desc_sz, 0, BUILD_ID_SIZE_MAX - desc_sz);
			if (size)
				*size = desc_sz;
			return 0;
		}

		note_off = new_off;
	}

	return -EINVAL;
}

/* Parse build ID from 32-bit ELF */
static int get_build_id_32(struct freader *r, unsigned char *build_id, __u32 *size)
{
	const Elf32_Ehdr *ehdr;
	const Elf32_Phdr *phdr;
	__u32 phnum, phoff, i;

	ehdr = freader_fetch(r, 0, sizeof(Elf32_Ehdr));
	if (!ehdr)
		return r->err;

	/* subsequent freader_fetch() calls invalidate pointers, so remember locally */
	phnum = READ_ONCE(ehdr->e_phnum);
	phoff = READ_ONCE(ehdr->e_phoff);

	/* set upper bound on the number of segments (phdrs) we iterate */
	if (phnum > MAX_PHDR_CNT)
		phnum = MAX_PHDR_CNT;

	/* check that phoff is not large enough to cause an overflow */
	if (phoff + phnum * sizeof(Elf32_Phdr) < phoff)
		return -EINVAL;

	for (i = 0; i < phnum; ++i) {
		phdr = freader_fetch(r, phoff + i * sizeof(Elf32_Phdr), sizeof(Elf32_Phdr));
		if (!phdr)
			return r->err;

		if (phdr->p_type == PT_NOTE &&
		    !parse_build_id(r, build_id, size, READ_ONCE(phdr->p_offset),
				    READ_ONCE(phdr->p_filesz)))
			return 0;
	}
	return -EINVAL;
}

/* Parse build ID from 64-bit ELF */
static int get_build_id_64(struct freader *r, unsigned char *build_id, __u32 *size)
{
	const Elf64_Ehdr *ehdr;
	const Elf64_Phdr *phdr;
	__u32 phnum, i;
	__u64 phoff;

	ehdr = freader_fetch(r, 0, sizeof(Elf64_Ehdr));
	if (!ehdr)
		return r->err;

	/* subsequent freader_fetch() calls invalidate pointers, so remember locally */
	phnum = READ_ONCE(ehdr->e_phnum);
	phoff = READ_ONCE(ehdr->e_phoff);

	/* set upper bound on the number of segments (phdrs) we iterate */
	if (phnum > MAX_PHDR_CNT)
		phnum = MAX_PHDR_CNT;

	/* check that phoff is not large enough to cause an overflow */
	if (phoff + phnum * sizeof(Elf64_Phdr) < phoff)
		return -EINVAL;

	for (i = 0; i < phnum; ++i) {
		phdr = freader_fetch(r, phoff + i * sizeof(Elf64_Phdr), sizeof(Elf64_Phdr));
		if (!phdr)
			return r->err;

		if (phdr->p_type == PT_NOTE &&
		    !parse_build_id(r, build_id, size, READ_ONCE(phdr->p_offset),
				    READ_ONCE(phdr->p_filesz)))
			return 0;
	}

	return -EINVAL;
}

/* enough for Elf64_Ehdr, Elf64_Phdr, and all the smaller requests */
#define MAX_FREADER_BUF_SZ 64

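/*
 * Concretely, sizeof(Elf64_Ehdr) is 64 bytes and sizeof(Elf64_Phdr) is 56
 * bytes, while the Elf32 counterparts and the note-header and descriptor
 * fetches issued by parse_build_id() are well under 64 bytes, so the
 * on-stack bounce buffer in __build_id_parse() below is large enough for
 * any single freader_fetch() request.
 */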

static int __build_id_parse(struct vm_area_struct *vma, unsigned char *build_id,
			    __u32 *size, bool may_fault)
{
	const Elf32_Ehdr *ehdr;
	struct freader r;
	char buf[MAX_FREADER_BUF_SZ];
	int ret;

	/* only works for page backed storage */
	if (!vma->vm_file)
		return -EINVAL;

	freader_init_from_file(&r, buf, sizeof(buf), vma->vm_file, may_fault);

	/* fetch first 18 bytes of ELF header for checks */
	ehdr = freader_fetch(&r, 0, offsetofend(Elf32_Ehdr, e_type));
	if (!ehdr) {
		ret = r.err;
		goto out;
	}

	ret = -EINVAL;

	/* compare magic x7f "ELF" */
	if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG) != 0)
		goto out;

	/* only support executable and shared object files */
	if (ehdr->e_type != ET_EXEC && ehdr->e_type != ET_DYN)
		goto out;

	if (ehdr->e_ident[EI_CLASS] == ELFCLASS32)
		ret = get_build_id_32(&r, build_id, size);
	else if (ehdr->e_ident[EI_CLASS] == ELFCLASS64)
		ret = get_build_id_64(&r, build_id, size);
out:
	freader_cleanup(&r);
	return ret;
}

/*
 * Parse build ID of ELF file mapped to vma
 * @vma: vma object
 * @build_id: buffer to store build id, at least BUILD_ID_SIZE long
 * @size: returns actual build id size in case of success
 *
 * Assumes no page fault can be taken, so if relevant portions of the ELF file
 * are not already paged in, fetching the build ID fails.
 *
 * Return: 0 on success; negative error otherwise
 */
int build_id_parse_nofault(struct vm_area_struct *vma, unsigned char *build_id, __u32 *size)
{
	return __build_id_parse(vma, build_id, size, false /* !may_fault */);
}

/*
 * Parse build ID of ELF file mapped to VMA
 * @vma: vma object
 * @build_id: buffer to store build id, at least BUILD_ID_SIZE long
 * @size: returns actual build id size in case of success
 *
 * Assumes faultable context and can cause page faults to bring file data
 * into the page cache.
 *
 * Return: 0 on success; negative error otherwise
 */
int build_id_parse(struct vm_area_struct *vma, unsigned char *build_id, __u32 *size)
{
	return __build_id_parse(vma, build_id, size, true /* may_fault */);
}

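/*
 * Minimal usage sketch (illustrative only): callers that must not take page
 * faults use the nofault variant, while sleepable callers can use
 * build_id_parse(). The record_build_id() consumer below is hypothetical and
 * not defined in this file:
 *
 *	unsigned char id[BUILD_ID_SIZE_MAX];
 *	__u32 id_sz;
 *
 *	if (!build_id_parse_nofault(vma, id, &id_sz))
 *		record_build_id(id, id_sz);
 */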

/**
 * build_id_parse_buf - Get build ID from a buffer
 * @buf: ELF note section(s) to parse
 * @buf_size: Size of @buf in bytes
 * @build_id: Build ID parsed from @buf, at least BUILD_ID_SIZE_MAX long
 *
 * Return: 0 on success; negative error otherwise
 */
int build_id_parse_buf(const void *buf, unsigned char *build_id, u32 buf_size)
{
	struct freader r;
	int err;

	freader_init_from_mem(&r, buf, buf_size);

	err = parse_build_id(&r, build_id, NULL, 0, buf_size);

	freader_cleanup(&r);
	return err;
}

#if IS_ENABLED(CONFIG_STACKTRACE_BUILD_ID) || IS_ENABLED(CONFIG_VMCORE_INFO)
unsigned char vmlinux_build_id[BUILD_ID_SIZE_MAX] __ro_after_init;

/**
 * init_vmlinux_build_id - Compute and stash the running kernel's build ID
 */
void __init init_vmlinux_build_id(void)
{
	extern const void __start_notes;
	extern const void __stop_notes;
	unsigned int size = &__stop_notes - &__start_notes;

	build_id_parse_buf(&__start_notes, vmlinux_build_id, size);
}
#endif