Lines matching full:image

105 static struct page *kimage_alloc_page(struct kimage *image,
109 int sanity_check_segment_list(struct kimage *image) in sanity_check_segment_list() argument
112 unsigned long nr_segments = image->nr_segments; in sanity_check_segment_list()
119 * the new image into invalid or reserved areas of RAM. This in sanity_check_segment_list()
132 mstart = image->segment[i].mem; in sanity_check_segment_list()
133 mend = mstart + image->segment[i].memsz; in sanity_check_segment_list()
151 mstart = image->segment[i].mem; in sanity_check_segment_list()
152 mend = mstart + image->segment[i].memsz; in sanity_check_segment_list()
156 pstart = image->segment[j].mem; in sanity_check_segment_list()
157 pend = pstart + image->segment[j].memsz; in sanity_check_segment_list()
170 if (image->segment[i].bufsz > image->segment[i].memsz) in sanity_check_segment_list()
180 if (PAGE_COUNT(image->segment[i].memsz) > nr_pages / 2) in sanity_check_segment_list()
183 total_pages += PAGE_COUNT(image->segment[i].memsz); in sanity_check_segment_list()
193 * attempt to load the new image into invalid or reserved in sanity_check_segment_list()
200 if (image->type == KEXEC_TYPE_CRASH) { in sanity_check_segment_list()
204 mstart = image->segment[i].mem; in sanity_check_segment_list()
205 mend = mstart + image->segment[i].memsz - 1; in sanity_check_segment_list()
222 accept_memory(image->segment[i].mem, image->segment[i].memsz); in sanity_check_segment_list()
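This listing is a full-text search for "image"; every match falls in the kexec core (kernel/kexec_core.c in recent Linux trees). The matches above trace sanity_check_segment_list(), which vets a proposed segment list before anything is loaded: no destination range may intersect another, a segment's bufsz may not exceed its memsz, each segment (and the running total) is capped at half of all pages, and crash segments must sit inside the reserved crash region. A minimal userspace model of the first two checks, assuming a cut-down segment struct with only the fields those checks touch (the real struct kexec_segment also carries the user buffer pointer):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Cut-down stand-in for struct kexec_segment. */
    struct seg {
        unsigned long mem;    /* destination physical address */
        unsigned long memsz;  /* size of the destination window */
        unsigned long bufsz;  /* size of the data to copy into it */
    };

    /* Model of the overlap and bufsz checks in sanity_check_segment_list(). */
    static bool segments_sane(const struct seg *s, size_t n)
    {
        for (size_t i = 0; i < n; i++) {
            unsigned long mstart = s[i].mem;
            unsigned long mend   = mstart + s[i].memsz;

            /* The data to copy may not be larger than its window. */
            if (s[i].bufsz > s[i].memsz)
                return false;

            /* No destination window may intersect an earlier one. */
            for (size_t j = 0; j < i; j++) {
                unsigned long pstart = s[j].mem;
                unsigned long pend   = pstart + s[j].memsz;
                if (mend > pstart && mstart < pend)
                    return false;
            }
        }
        return true;
    }

    int main(void)
    {
        struct seg ok[]  = { { 0x1000, 0x1000, 0x800 }, { 0x3000, 0x1000, 0x1000 } };
        struct seg bad[] = { { 0x1000, 0x2000, 0x800 }, { 0x2000, 0x1000, 0x100 } };
        printf("%d %d\n", segments_sane(ok, 2), segments_sane(bad, 2));  /* 1 0 */
        return 0;
    }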
229 struct kimage *image; in do_kimage_alloc_init() local
232 image = kzalloc(sizeof(*image), GFP_KERNEL); in do_kimage_alloc_init()
233 if (!image) in do_kimage_alloc_init()
236 image->entry = &image->head; in do_kimage_alloc_init()
237 image->last_entry = &image->head; in do_kimage_alloc_init()
238 image->control_page = ~0; /* By default this does not apply */ in do_kimage_alloc_init()
239 image->type = KEXEC_TYPE_DEFAULT; in do_kimage_alloc_init()
242 INIT_LIST_HEAD(&image->control_pages); in do_kimage_alloc_init()
245 INIT_LIST_HEAD(&image->dest_pages); in do_kimage_alloc_init()
248 INIT_LIST_HEAD(&image->unusable_pages); in do_kimage_alloc_init()
251 image->hp_action = KEXEC_CRASH_HP_NONE; in do_kimage_alloc_init()
252 image->elfcorehdr_index = -1; in do_kimage_alloc_init()
253 image->elfcorehdr_updated = false; in do_kimage_alloc_init()
256 return image; in do_kimage_alloc_init()
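do_kimage_alloc_init() builds the empty image: kzalloc() zeroes the struct, both entry-list cursors start at the in-struct head word, control_page gets an "unset" sentinel, and the three bookkeeping page lists (control_pages, dest_pages, unusable_pages) are initialized. A sketch of the same pattern with plain C stand-ins (the list heads and the crash-hotplug fields are omitted):

    #include <stdlib.h>

    typedef unsigned long kimage_entry_t;

    /* Cut-down model of struct kimage: only the fields initialized here. */
    struct kimage_model {
        kimage_entry_t head;         /* first entry-list slot, in the struct */
        kimage_entry_t *entry;       /* where the next entry is written */
        kimage_entry_t *last_entry;  /* slot reserved for the next chain link */
        unsigned long control_page;
        int type;                    /* KEXEC_TYPE_DEFAULT here */
    };

    static struct kimage_model *alloc_init(void)
    {
        struct kimage_model *image = calloc(1, sizeof(*image)); /* kzalloc analogue */
        if (!image)
            return NULL;
        /* Both cursors point at head, so the very first add lands there;
         * the list grows later via indirection pages. */
        image->entry = &image->head;
        image->last_entry = &image->head;
        image->control_page = ~0UL;  /* "by default this does not apply" */
        return image;
    }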
259 int kimage_is_destination_range(struct kimage *image, in kimage_is_destination_range() argument
265 for (i = 0; i < image->nr_segments; i++) { in kimage_is_destination_range()
268 mstart = image->segment[i].mem; in kimage_is_destination_range()
269 mend = mstart + image->segment[i].memsz - 1; in kimage_is_destination_range()
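kimage_is_destination_range() reports whether a candidate physical range touches any segment's destination window; the allocators below use it to refuse pages the incoming image would later overwrite. Note the inclusive upper bound (memsz - 1), unlike the half-open ranges in the sanity check above. The same test as a standalone predicate:

    #include <stdbool.h>
    #include <stddef.h>

    struct seg { unsigned long mem, memsz; };  /* destination window only */

    /* true if [start, end] touches any segment's [mem, mem + memsz - 1] */
    static bool is_destination_range(const struct seg *s, size_t n,
                                     unsigned long start, unsigned long end)
    {
        for (size_t i = 0; i < n; i++) {
            unsigned long mstart = s[i].mem;
            unsigned long mend   = mstart + s[i].memsz - 1;
            if (end >= mstart && start <= mend)
                return true;
        }
        return false;
    }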
328 static struct page *kimage_alloc_normal_control_pages(struct kimage *image, in kimage_alloc_normal_control_pages() argument
342 * At worst this runs in O(N) of the image size. in kimage_alloc_normal_control_pages()
365 kimage_is_destination_range(image, addr, eaddr)) { in kimage_alloc_normal_control_pages()
373 list_add(&pages->lru, &image->control_pages); in kimage_alloc_normal_control_pages()
379 * to give it an entry in image->segment[]. in kimage_alloc_normal_control_pages()
385 * page allocations, and add everything to image->dest_pages. in kimage_alloc_normal_control_pages()
395 static struct page *kimage_alloc_crash_control_pages(struct kimage *image, in kimage_alloc_crash_control_pages() argument
424 hole_start = ALIGN(image->control_page, size); in kimage_alloc_crash_control_pages()
434 for (i = 0; i < image->nr_segments; i++) { in kimage_alloc_crash_control_pages()
437 mstart = image->segment[i].mem; in kimage_alloc_crash_control_pages()
438 mend = mstart + image->segment[i].memsz - 1; in kimage_alloc_crash_control_pages()
447 if (i == image->nr_segments) { in kimage_alloc_crash_control_pages()
449 image->control_page = hole_end + 1; in kimage_alloc_crash_control_pages()
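For crash images, control pages must come out of the already-reserved crash region, so kimage_alloc_crash_control_pages() searches for a size-aligned hole that misses every segment, starting at image->control_page and bumping it past the hole on success (line 449) so the next allocation continues from there. A standalone model of just the hole search; it restructures the kernel's loop slightly (restart-on-collision) and assumes the caller bounds the result against the region end, which the kernel does against the crash region:

    #include <stddef.h>

    struct seg { unsigned long mem, memsz; };

    #define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

    static unsigned long find_hole(const struct seg *s, size_t n,
                                   unsigned long start, unsigned long size)
    {
        unsigned long hole_start = ALIGN_UP(start, size);
        unsigned long hole_end   = hole_start + size - 1;

        for (size_t i = 0; i < n; ) {
            unsigned long mstart = s[i].mem;
            unsigned long mend   = mstart + s[i].memsz - 1;

            if (hole_end >= mstart && hole_start <= mend) {
                /* Collision: hop past this segment and rescan, since the
                 * moved hole may now hit a segment checked earlier. */
                hole_start = ALIGN_UP(mend + 1, size);
                hole_end   = hole_start + size - 1;
                i = 0;
                continue;
            }
            i++;
        }
        return hole_start;
    }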
463 struct page *kimage_alloc_control_pages(struct kimage *image, in kimage_alloc_control_pages() argument
468 switch (image->type) { in kimage_alloc_control_pages()
470 pages = kimage_alloc_normal_control_pages(image, order); in kimage_alloc_control_pages()
474 pages = kimage_alloc_crash_control_pages(image, order); in kimage_alloc_control_pages()
482 static int kimage_add_entry(struct kimage *image, kimage_entry_t entry) in kimage_add_entry() argument
484 if (*image->entry != 0) in kimage_add_entry()
485 image->entry++; in kimage_add_entry()
487 if (image->entry == image->last_entry) { in kimage_add_entry()
491 page = kimage_alloc_page(image, GFP_KERNEL, KIMAGE_NO_DEST); in kimage_add_entry()
496 *image->entry = virt_to_boot_phys(ind_page) | IND_INDIRECTION; in kimage_add_entry()
497 image->entry = ind_page; in kimage_add_entry()
498 image->last_entry = ind_page + in kimage_add_entry()
501 *image->entry = entry; in kimage_add_entry()
502 image->entry++; in kimage_add_entry()
503 *image->entry = 0; in kimage_add_entry()
508 static int kimage_set_destination(struct kimage *image, in kimage_set_destination() argument
513 return kimage_add_entry(image, destination | IND_DESTINATION); in kimage_set_destination()
517 static int kimage_add_page(struct kimage *image, unsigned long page) in kimage_add_page() argument
521 return kimage_add_entry(image, page | IND_SOURCE); in kimage_add_page()
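kimage_add_entry() and its two wrappers build the kimage entry list, the structure the reboot code later walks to copy the image into place. Each entry is one machine word: a page address tagged in its low bits with IND_DESTINATION (set the copy cursor), IND_SOURCE (copy this page to the cursor), IND_INDIRECTION (the rest of the list lives on this page), or IND_DONE (stop). When the current page of entries fills up (lines 487-498), a fresh page is chained in through the reserved last slot. A self-contained model using aligned_alloc()'ed "pages" and pointer values in place of physical addresses (the kernel stores boot-view physical addresses via virt_to_boot_phys()); terminate() here plays the role of kimage_terminate(), matched further down:

    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>

    #define IND_DESTINATION 0x1
    #define IND_INDIRECTION 0x2
    #define IND_DONE        0x4
    #define IND_SOURCE      0x8

    #define MODEL_PAGE 4096
    #define ENTRIES_PER_PAGE (MODEL_PAGE / sizeof(kimage_entry_t))

    typedef uintptr_t kimage_entry_t;

    struct klist {
        kimage_entry_t head;
        kimage_entry_t *entry;       /* next slot to write */
        kimage_entry_t *last_entry;  /* slot reserved for the chain link */
    };

    static int add_entry(struct klist *l, kimage_entry_t entry)
    {
        if (*l->entry != 0)              /* slot occupied: move to the next */
            l->entry++;

        if (l->entry == l->last_entry) {
            /* Out of room: chain in a fresh, zeroed page of entries. */
            kimage_entry_t *ind = aligned_alloc(MODEL_PAGE, MODEL_PAGE);
            if (!ind)
                return -1;
            memset(ind, 0, MODEL_PAGE);
            *l->entry = (kimage_entry_t)ind | IND_INDIRECTION;
            l->entry = ind;
            l->last_entry = ind + (ENTRIES_PER_PAGE - 1);
        }
        *l->entry = entry;
        l->entry++;
        *l->entry = 0;                   /* keep the tail 0-terminated */
        return 0;
    }

    /* kimage_terminate() equivalent: seal the list. */
    static void terminate(struct klist *l)
    {
        if (*l->entry != 0)
            l->entry++;
        *l->entry = IND_DONE;
    }

    int main(void)
    {
        struct klist l = { 0, &l.head, &l.head };
        add_entry(&l, 0x100000 | IND_DESTINATION);  /* set copy cursor */
        add_entry(&l, 0x200000 | IND_SOURCE);       /* page to copy there */
        terminate(&l);
        return 0;
    }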
525 static void kimage_free_extra_pages(struct kimage *image) in kimage_free_extra_pages() argument
528 kimage_free_page_list(&image->dest_pages); in kimage_free_extra_pages()
531 kimage_free_page_list(&image->unusable_pages); in kimage_free_extra_pages()
535 void kimage_terminate(struct kimage *image) in kimage_terminate() argument
537 if (*image->entry != 0) in kimage_terminate()
538 image->entry++; in kimage_terminate()
540 *image->entry = IND_DONE; in kimage_terminate()
543 #define for_each_kimage_entry(image, ptr, entry) \ argument
544 for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \
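for_each_kimage_entry() is the matching traversal: fetch the word at ptr, stop on 0 or IND_DONE, and step either to ptr + 1 or, when the word is an IND_INDIRECTION link, to the page it names (via boot_phys_to_virt() in the kernel). A standalone walker using the same conventions as the previous sketch:

    #include <stdint.h>

    #define IND_INDIRECTION 0x2
    #define IND_DONE        0x4
    #define IND_SOURCE      0x8
    #define MODEL_PAGE_MASK (~(uintptr_t)0xfff)

    typedef uintptr_t kimage_entry_t;

    /* Count IND_SOURCE entries in a chained list built as in the previous
     * sketch, following the same stepping rule as for_each_kimage_entry(). */
    static unsigned long count_source_pages(const kimage_entry_t *head)
    {
        const kimage_entry_t *ptr = head;
        kimage_entry_t entry;
        unsigned long n = 0;

        while ((entry = *ptr) && !(entry & IND_DONE)) {
            if (entry & IND_SOURCE)
                n++;
            ptr = (entry & IND_INDIRECTION)
                ? (const kimage_entry_t *)(entry & MODEL_PAGE_MASK)
                : ptr + 1;
        }
        return n;
    }

kimage_free() below drives the same walk to tear everything down, freeing each IND_SOURCE page as it passes and each indirection page only after stepping off it, while kimage_dst_used() and kimage_map_segment() use it to look entries up by destination address.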
556 static void kimage_free_cma(struct kimage *image) in kimage_free_cma() argument
560 for (i = 0; i < image->nr_segments; i++) { in kimage_free_cma()
561 struct page *cma = image->segment_cma[i]; in kimage_free_cma()
562 u32 nr_pages = image->segment[i].memsz >> PAGE_SHIFT; in kimage_free_cma()
569 image->segment_cma[i] = NULL; in kimage_free_cma()
574 void kimage_free(struct kimage *image) in kimage_free() argument
579 if (!image) in kimage_free()
583 if (image->vmcoreinfo_data_copy) { in kimage_free()
585 vunmap(image->vmcoreinfo_data_copy); in kimage_free()
589 kimage_free_extra_pages(image); in kimage_free()
590 for_each_kimage_entry(image, ptr, entry) { in kimage_free()
607 machine_kexec_cleanup(image); in kimage_free()
610 kimage_free_page_list(&image->control_pages); in kimage_free()
613 kimage_free_cma(image); in kimage_free()
619 if (image->file_mode) in kimage_free()
620 kimage_file_post_load_cleanup(image); in kimage_free()
622 kfree(image); in kimage_free()
625 static kimage_entry_t *kimage_dst_used(struct kimage *image, in kimage_dst_used() argument
631 for_each_kimage_entry(image, ptr, entry) { in kimage_dst_used()
644 static struct page *kimage_alloc_page(struct kimage *image, in kimage_alloc_page() argument
673 list_for_each_entry(page, &image->dest_pages, lru) { in kimage_alloc_page()
691 list_add(&page->lru, &image->unusable_pages); in kimage_alloc_page()
701 if (!kimage_is_destination_range(image, addr, in kimage_alloc_page()
710 old = kimage_dst_used(image, addr); in kimage_alloc_page()
734 list_add(&page->lru, &image->dest_pages); in kimage_alloc_page()
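kimage_alloc_page() solves the awkward part of loading: the pages that hold the image now must not sit anywhere the image will be copied to later, or the copy would trample them. It first tries to reuse a page from image->dest_pages (line 673), parks pages the reboot code cannot address on image->unusable_pages (line 691), and otherwise keeps allocating: a page inside some destination range is either swapped with the page already destined there or held back until a safe one turns up. A toy model of that retry loop, treating pointer values as addresses and omitting the reuse and swap cases; rejects are parked so the allocator cannot hand them back, which is the role of the kernel's hold lists:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <stdlib.h>

    struct seg { uintptr_t mem, memsz; };

    /* Singly linked hold list for pages we may not hand out. */
    struct held { struct held *next; };

    static bool in_dest_range(const struct seg *s, size_t n,
                              uintptr_t start, uintptr_t end)
    {
        for (size_t i = 0; i < n; i++)
            if (end >= s[i].mem && start <= s[i].mem + s[i].memsz - 1)
                return true;
        return false;
    }

    /* pagesz must be at least sizeof(struct held). */
    static void *alloc_safe_page(const struct seg *s, size_t n,
                                 size_t pagesz, struct held **rejects)
    {
        for (;;) {
            void *p = malloc(pagesz);
            if (!p)
                return NULL;
            uintptr_t a = (uintptr_t)p;
            if (!in_dest_range(s, n, a, a + pagesz - 1))
                return p;
            /* Collides with a destination: park it and try again. */
            struct held *h = p;
            h->next = *rejects;
            *rejects = h;
        }
    }

    /* Release the parked rejects once loading is done, as
     * kimage_free_extra_pages() does for dest_pages/unusable_pages. */
    static void free_rejects(struct held *h)
    {
        while (h) {
            struct held *next = h->next;
            free(h);
            h = next;
        }
    }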
740 static int kimage_load_cma_segment(struct kimage *image, int idx) in kimage_load_cma_segment() argument
742 struct kexec_segment *segment = &image->segment[idx]; in kimage_load_cma_segment()
743 struct page *cma = image->segment_cma[idx]; in kimage_load_cma_segment()
751 if (image->file_mode) in kimage_load_cma_segment()
770 if (image->file_mode) in kimage_load_cma_segment()
775 if (image->file_mode) in kimage_load_cma_segment()
800 static int kimage_load_normal_segment(struct kimage *image, int idx) in kimage_load_normal_segment() argument
802 struct kexec_segment *segment = &image->segment[idx]; in kimage_load_normal_segment()
809 if (image->file_mode) in kimage_load_normal_segment()
817 if (image->segment_cma[idx]) in kimage_load_normal_segment()
818 return kimage_load_cma_segment(image, idx); in kimage_load_normal_segment()
820 result = kimage_set_destination(image, maddr); in kimage_load_normal_segment()
829 page = kimage_alloc_page(image, GFP_HIGHUSER, maddr); in kimage_load_normal_segment()
834 result = kimage_add_page(image, page_to_boot_pfn(page) in kimage_load_normal_segment()
849 if (image->file_mode) in kimage_load_normal_segment()
854 if (image->file_mode) in kimage_load_normal_segment()
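kimage_load_normal_segment() records an IND_DESTINATION entry for the segment's start (line 820), then loops: allocate a safe page for the current destination, append it as an IND_SOURCE entry, and fill it page by page with the payload, zero-padding once bufsz runs out before memsz. The recurring image->file_mode tests pick the copy source: a kernel buffer for kexec_file_load() versus copy_from_user() for kexec_load(). A model of just the chunking and padding arithmetic, with a flat destination buffer standing in for the sequence of allocated pages:

    #include <stddef.h>
    #include <string.h>

    #define MODEL_PAGE 4096

    /* The segment window is memsz bytes but only bufsz bytes of payload
     * exist, so each page is zeroed first and then receives at most a
     * page of data. */
    static void load_segment(unsigned char *dst, size_t memsz,
                             const unsigned char *buf, size_t bufsz)
    {
        size_t maddr = 0;           /* offset into the destination window */
        size_t ubytes = bufsz;      /* payload bytes still to copy */

        while (maddr < memsz) {
            size_t mchunk = memsz - maddr;
            if (mchunk > MODEL_PAGE)
                mchunk = MODEL_PAGE;
            size_t uchunk = ubytes < mchunk ? ubytes : mchunk;

            memset(dst + maddr, 0, mchunk);           /* zero-pad the page */
            if (uchunk)
                memcpy(dst + maddr, buf + (bufsz - ubytes), uchunk);

            ubytes -= uchunk;
            maddr += mchunk;
        }
    }

kimage_load_crash_segment() and kimage_load_cma_segment(), matched just below and above, do the same padding but copy straight into their final, already-reserved pages at load time, which is why neither needs the entry list.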
874 static int kimage_load_crash_segment(struct kimage *image, int idx) in kimage_load_crash_segment() argument
880 struct kexec_segment *segment = &image->segment[idx]; in kimage_load_crash_segment()
888 if (image->file_mode) in kimage_load_crash_segment()
918 if (image->file_mode) in kimage_load_crash_segment()
923 if (image->file_mode) in kimage_load_crash_segment()
945 int kimage_load_segment(struct kimage *image, int idx) in kimage_load_segment() argument
949 switch (image->type) { in kimage_load_segment()
951 result = kimage_load_normal_segment(image, idx); in kimage_load_segment()
955 result = kimage_load_crash_segment(image, idx); in kimage_load_segment()
963 void *kimage_map_segment(struct kimage *image, in kimage_map_segment() argument
985 for_each_kimage_entry(image, ptr, entry) { in kimage_map_segment()
1153 * before creating an image and before jumping from the in kernel_kexec()
1154 * restore kernel to the image one, so it uses the same in kernel_kexec()
1208 * creating an image and after the image kernel has got control in kernel_kexec()