Lines matching "disable", "hibernation", and "mode" in the kexec system call core code (kexec.c)

1 // SPDX-License-Identifier: GPL-2.0-only
3 * kexec.c - kexec system call core code.
4 * Copyright (C) 2002-2004 Eric Biederman <[email protected]>
58 * When kexec transitions to the new kernel there is a one-to-one
60 * where you can disable the MMU this is trivial and easy. For
77 * 0 - TASK_SIZE, as only the user space mappings are arbitrarily
84 * be self-contained.
92 * - allocating a page table with the control code buffer identity
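The comment fragments above describe the hand-off contract: by transition time, a self-contained list of descriptor pages tells the relocation stub which source pages to copy to which destinations, with no reliance on the old kernel's data structures. A hedged sketch of how such a list is consumed (modeled on the IND_* flag encoding used by kexec, not any architecture's actual relocation stub; copy_page() stands in for the arch's page-copy primitive):

    /*
     * Each kimage_entry_t is a physical address with flag bits in its
     * low bits.  The transition environment is identity-mapped, so a
     * physical address can be cast straight to a pointer.
     */
    static void relocate_pages(unsigned long *entry)
    {
        unsigned long dest = 0;

        for (;;) {
            unsigned long e = *entry;

            if (e & IND_DONE)
                break;                      /* end of the list */
            if (e & IND_DESTINATION) {
                dest = e & PAGE_MASK;       /* where the next copies land */
                entry++;
            } else if (e & IND_INDIRECTION) {
                /* chain into the next page full of entries */
                entry = (unsigned long *)(e & PAGE_MASK);
            } else if (e & IND_SOURCE) {
                copy_page((void *)dest, (void *)(e & PAGE_MASK));
                dest += PAGE_SIZE;
                entry++;
            } else {
                entry++;                    /* skip an empty slot */
            }
        }
    }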
101 #define KIMAGE_NO_DEST (-1UL)
102 #define PAGE_COUNT(x) (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
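PAGE_COUNT() converts a byte count into the number of pages it occupies, i.e. division by PAGE_SIZE rounded up. For example, assuming 4 KiB pages (PAGE_SHIFT == 12):

    /*
     *   PAGE_COUNT(0)    == 0
     *   PAGE_COUNT(1)    == 1    one byte still costs a whole page
     *   PAGE_COUNT(4096) == 1
     *   PAGE_COUNT(4097) == 2    always rounds up, never truncates
     */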
111 unsigned long nr_segments = image->nr_segments; in sanity_check_segment_list()
131 mstart = image->segment[i].mem; in sanity_check_segment_list()
132 mend = mstart + image->segment[i].memsz; in sanity_check_segment_list()
134 return -EADDRNOTAVAIL; in sanity_check_segment_list()
136 return -EADDRNOTAVAIL; in sanity_check_segment_list()
138 return -EADDRNOTAVAIL; in sanity_check_segment_list()
150 mstart = image->segment[i].mem; in sanity_check_segment_list()
151 mend = mstart + image->segment[i].memsz; in sanity_check_segment_list()
155 pstart = image->segment[j].mem; in sanity_check_segment_list()
156 pend = pstart + image->segment[j].memsz; in sanity_check_segment_list()
159 return -EINVAL; in sanity_check_segment_list()
169 if (image->segment[i].bufsz > image->segment[i].memsz) in sanity_check_segment_list()
170 return -EINVAL; in sanity_check_segment_list()
179 if (PAGE_COUNT(image->segment[i].memsz) > nr_pages / 2) in sanity_check_segment_list()
180 return -EINVAL; in sanity_check_segment_list()
182 total_pages += PAGE_COUNT(image->segment[i].memsz); in sanity_check_segment_list()
186 return -EINVAL; in sanity_check_segment_list()
199 if (image->type == KEXEC_TYPE_CRASH) { in sanity_check_segment_list()
203 mstart = image->segment[i].mem; in sanity_check_segment_list()
204 mend = mstart + image->segment[i].memsz - 1; in sanity_check_segment_list()
208 return -EADDRNOTAVAIL; in sanity_check_segment_list()
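The sanity_check_segment_list() fragments above cover its checks in order: segment addresses must be representable and in range (-EADDRNOTAVAIL), segments may not overlap one another, a segment's bufsz may never exceed its memsz, neither one segment nor the total may claim more than half of system RAM, and for crash images every segment must sit inside the reserved crash region. The overlap test is the least obvious; a condensed, hypothetical restatement of just that part:

    /*
     * Segment j (j < i) collides with segment i when the half-open
     * ranges [mstart, mend) and [pstart, pend) intersect.  Sketch only;
     * the real check lives inline in sanity_check_segment_list().
     */
    static bool segments_overlap(const struct kexec_segment *seg,
                                 unsigned long nr_segments)
    {
        unsigned long i, j;

        for (i = 0; i < nr_segments; i++) {
            unsigned long mstart = seg[i].mem;
            unsigned long mend = mstart + seg[i].memsz;

            for (j = 0; j < i; j++) {
                unsigned long pstart = seg[j].mem;
                unsigned long pend = pstart + seg[j].memsz;

                if (mend > pstart && mstart < pend)
                    return true;    /* ranges intersect */
            }
        }
        return false;
    }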
225 image->head = 0; in do_kimage_alloc_init()
226 image->entry = &image->head; in do_kimage_alloc_init()
227 image->last_entry = &image->head; in do_kimage_alloc_init()
228 image->control_page = ~0; /* By default this does not apply */ in do_kimage_alloc_init()
229 image->type = KEXEC_TYPE_DEFAULT; in do_kimage_alloc_init()
232 INIT_LIST_HEAD(&image->control_pages); in do_kimage_alloc_init()
235 INIT_LIST_HEAD(&image->dest_pages); in do_kimage_alloc_init()
238 INIT_LIST_HEAD(&image->unusable_pages); in do_kimage_alloc_init()
241 image->hp_action = KEXEC_CRASH_HP_NONE; in do_kimage_alloc_init()
242 image->elfcorehdr_index = -1; in do_kimage_alloc_init()
243 image->elfcorehdr_updated = false; in do_kimage_alloc_init()
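These initializers set up the in-place entry list in its smallest possible form. A short illustration of the state they leave behind (annotation only, not kernel code):

    /*
     * After do_kimage_alloc_init():
     *
     *   image->head       == 0             no entries recorded yet
     *   image->entry      == &image->head  the next store lands in head
     *   image->last_entry == &image->head  ...which is also the final
     *                                      slot, so the first append
     *                                      that needs more room must
     *                                      chain in an indirection page
     *                                      (see the kimage_add_entry()
     *                                      sketch further down)
     */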
255 for (i = 0; i < image->nr_segments; i++) { in kimage_is_destination_range()
258 mstart = image->segment[i].mem; in kimage_is_destination_range()
259 mend = mstart + image->segment[i].memsz - 1; in kimage_is_destination_range()
277 pages->mapping = NULL; in kimage_alloc_pages()
313 list_del(&page->lru); in kimage_free_page_list()
328 * these are for architectures where we cannot disable in kimage_alloc_normal_control_pages()
353 eaddr = (epfn << PAGE_SHIFT) - 1; in kimage_alloc_normal_control_pages()
356 list_add(&pages->lru, &extra_pages); in kimage_alloc_normal_control_pages()
363 list_add(&pages->lru, &image->control_pages); in kimage_alloc_normal_control_pages()
369 * to give it an entry in image->segment[]. in kimage_alloc_normal_control_pages()
374 * Ideally I would convert multi-page allocations into single in kimage_alloc_normal_control_pages()
375 * page allocations, and add everything to image->dest_pages. in kimage_alloc_normal_control_pages()
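These fragments show the strategy for architectures that cannot simply turn the MMU off: control pages must avoid every destination range of the new kernel, so any allocation that lands somewhere unsuitable is parked on a local extra_pages list, which keeps the page allocator from handing the same block straight back, and everything parked is returned in one sweep once a usable block is found. A simplified, hedged restatement (error paths and the image->control_pages bookkeeping of the real function are omitted):

    static struct page *alloc_suitable_control_pages(struct kimage *image,
                                                     unsigned int order)
    {
        struct list_head extra_pages;
        struct page *pages = NULL;
        unsigned int count = 1 << order;

        INIT_LIST_HEAD(&extra_pages);
        do {
            unsigned long pfn, epfn, addr, eaddr;

            pages = kimage_alloc_pages(KEXEC_CONTROL_MEMORY_GFP, order);
            if (!pages)
                break;                      /* out of memory: give up */
            pfn   = page_to_boot_pfn(pages);
            epfn  = pfn + count;
            addr  = pfn << PAGE_SHIFT;
            eaddr = (epfn << PAGE_SHIFT) - 1;
            if (epfn >= KEXEC_CONTROL_MEMORY_LIMIT ||
                kimage_is_destination_range(image, addr, eaddr)) {
                /* unsuitable: park it so we don't see it again */
                list_add(&pages->lru, &extra_pages);
                pages = NULL;
            }
        } while (!pages);

        /* hand back everything that had to be parked along the way */
        kimage_free_page_list(&extra_pages);
        return pages;
    }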
400 * these are for architectures where we cannot disable in kimage_alloc_crash_control_pages()
414 hole_start = ALIGN(image->control_page, size); in kimage_alloc_crash_control_pages()
415 hole_end = hole_start + size - 1; in kimage_alloc_crash_control_pages()
424 for (i = 0; i < image->nr_segments; i++) { in kimage_alloc_crash_control_pages()
427 mstart = image->segment[i].mem; in kimage_alloc_crash_control_pages()
428 mend = mstart + image->segment[i].memsz - 1; in kimage_alloc_crash_control_pages()
432 hole_end = hole_start + size - 1; in kimage_alloc_crash_control_pages()
437 if (i == image->nr_segments) { in kimage_alloc_crash_control_pages()
439 image->control_page = hole_end + 1; in kimage_alloc_crash_control_pages()
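The crash-kernel variant allocates nothing: control pages are carved from the already-reserved crash region by sliding a size-aligned hole upward past every segment it collides with. A hedged sketch, with crash_end standing in for the real region bound used by the kernel:

    static unsigned long find_crash_hole(struct kimage *image,
                                         unsigned long size,
                                         unsigned long crash_end)
    {
        unsigned long hole_start = ALIGN(image->control_page, size);
        unsigned long hole_end = hole_start + size - 1;

        while (hole_end <= crash_end) {
            unsigned long i;

            for (i = 0; i < image->nr_segments; i++) {
                unsigned long mstart = image->segment[i].mem;
                unsigned long mend = mstart + image->segment[i].memsz - 1;

                if (hole_end >= mstart && hole_start <= mend) {
                    /* collision: restart just past this segment */
                    hole_start = ALIGN(mend + 1, size);
                    hole_end = hole_start + size - 1;
                    break;
                }
            }
            if (i == image->nr_segments)
                return hole_start;  /* fits: no segment intersects */
        }
        return 0;                   /* no hole of this size left */
    }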
458 switch (image->type) { in kimage_alloc_control_pages()
474 if (*image->entry != 0) in kimage_add_entry()
475 image->entry++; in kimage_add_entry()
477 if (image->entry == image->last_entry) { in kimage_add_entry()
483 return -ENOMEM; in kimage_add_entry()
486 *image->entry = virt_to_boot_phys(ind_page) | IND_INDIRECTION; in kimage_add_entry()
487 image->entry = ind_page; in kimage_add_entry()
488 image->last_entry = ind_page + in kimage_add_entry()
489 ((PAGE_SIZE/sizeof(kimage_entry_t)) - 1); in kimage_add_entry()
491 *image->entry = entry; in kimage_add_entry()
492 image->entry++; in kimage_add_entry()
493 *image->entry = 0; in kimage_add_entry()
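The growth path running between these fragments is worth reading as one piece: when the cursor reaches last_entry, a fresh page is chained in, its physical address written into the final slot tagged IND_INDIRECTION, and the cursor jumps into the new page, whose own final slot is in turn held back for the next link. A hedged reconstruction of that branch (close to, but not guaranteed identical with, the mainline code):

    if (image->entry == image->last_entry) {
        kimage_entry_t *ind_page;
        struct page *page;

        /* a page with no fixed destination, to hold more entries */
        page = kimage_alloc_page(image, GFP_KERNEL, KIMAGE_NO_DEST);
        if (!page)
            return -ENOMEM;

        ind_page = page_address(page);
        /* the old page's last slot now points at the new page */
        *image->entry = virt_to_boot_phys(ind_page) | IND_INDIRECTION;
        image->entry = ind_page;
        /* keep the new page's final slot free for the next link */
        image->last_entry = ind_page +
                            ((PAGE_SIZE / sizeof(kimage_entry_t)) - 1);
    }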
518 kimage_free_page_list(&image->dest_pages); in kimage_free_extra_pages()
521 kimage_free_page_list(&image->unusable_pages); in kimage_free_extra_pages()
527 if (*image->entry != 0) in kimage_terminate()
528 image->entry++; in kimage_terminate()
530 *image->entry = IND_DONE; in kimage_terminate()
534 for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \
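Only this single line of the list-walking macro matched the search. For context, the full for_each_kimage_entry() in mainline reads approximately as follows; the ternary hops into the next descriptor page on an indirection entry and otherwise just advances the cursor:

    #define for_each_kimage_entry(image, ptr, entry) \
        for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \
            ptr = (entry & IND_INDIRECTION) ? \
                boot_phys_to_virt((entry & PAGE_MASK)) : ptr + 1)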
555 if (image->vmcoreinfo_data_copy) { in kimage_free()
557 vunmap(image->vmcoreinfo_data_copy); in kimage_free()
582 kimage_free_page_list(&image->control_pages); in kimage_free()
588 if (image->file_mode) in kimage_free()
642 list_for_each_entry(page, &image->dest_pages, lru) { in kimage_alloc_page()
645 list_del(&page->lru); in kimage_alloc_page()
660 list_add(&page->lru, &image->unusable_pages); in kimage_alloc_page()
671 addr + PAGE_SIZE - 1)) in kimage_alloc_page()
703 list_add(&page->lru, &image->dest_pages); in kimage_alloc_page()
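kimage_alloc_page() is the subtlest allocator here, and the fragments above show all three of its holding lists in play. A hedged outline of its decision ladder (annotation only; the real function also swaps a freshly allocated page with an existing source page when their destinations collide):

    /*
     *  1. Reuse: scan image->dest_pages for a page already sitting at
     *     the wanted destination; if found, unlink and return it.
     *  2. Otherwise allocate a fresh page and classify it:
     *       not addressable at boot time -> park on image->unusable_pages
     *                                       and allocate again;
     *       exactly the wanted page      -> use it;
     *       nobody's destination         -> safe to use as a source page;
     *       someone else's destination   -> swap with that destination's
     *                                       source page if one exists,
     *                                       else park on image->dest_pages
     *                                       for later reuse and allocate
     *                                       again.
     */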
718 if (image->file_mode) in kimage_load_normal_segment()
719 kbuf = segment->kbuf; in kimage_load_normal_segment()
721 buf = segment->buf; in kimage_load_normal_segment()
722 ubytes = segment->bufsz; in kimage_load_normal_segment()
723 mbytes = segment->memsz; in kimage_load_normal_segment()
724 maddr = segment->mem; in kimage_load_normal_segment()
737 result = -ENOMEM; in kimage_load_normal_segment()
750 PAGE_SIZE - (maddr & ~PAGE_MASK)); in kimage_load_normal_segment()
755 if (image->file_mode) in kimage_load_normal_segment()
759 ubytes -= uchunk; in kimage_load_normal_segment()
760 if (image->file_mode) in kimage_load_normal_segment()
767 result = -EFAULT; in kimage_load_normal_segment()
771 mbytes -= mchunk; in kimage_load_normal_segment()
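Both segment loaders, the normal one above and the crash one below, share the same chunking arithmetic: a copy never crosses a page boundary, and once the caller's buffer (bufsz) is exhausted, the rest of memsz is zero-filled instead of copied. One loop iteration, sketched with the same variable names as the fragments:

    /*
     * mchunk: how far we may copy without leaving the current page;
     * uchunk: how much real caller data is left within that bound.
     * Everything between uchunk and mchunk gets zeroed, which is what
     * pads a segment's bufsz out to its memsz.
     */
    mchunk = min_t(size_t, mbytes,
                   PAGE_SIZE - (maddr & ~PAGE_MASK));
    uchunk = min(ubytes, mchunk);

    /* ... copy uchunk bytes (from user or kernel memory), zero the rest ... */

    maddr  += mchunk;
    mbytes -= mchunk;
    ubytes -= uchunk;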
794 if (image->file_mode) in kimage_load_crash_segment()
795 kbuf = segment->kbuf; in kimage_load_crash_segment()
797 buf = segment->buf; in kimage_load_crash_segment()
798 ubytes = segment->bufsz; in kimage_load_crash_segment()
799 mbytes = segment->memsz; in kimage_load_crash_segment()
800 maddr = segment->mem; in kimage_load_crash_segment()
808 result = -ENOMEM; in kimage_load_crash_segment()
815 PAGE_SIZE - (maddr & ~PAGE_MASK)); in kimage_load_crash_segment()
819 memset(ptr + uchunk, 0, mchunk - uchunk); in kimage_load_crash_segment()
824 if (image->file_mode) in kimage_load_crash_segment()
828 ubytes -= uchunk; in kimage_load_crash_segment()
829 if (image->file_mode) in kimage_load_crash_segment()
838 result = -EFAULT; in kimage_load_crash_segment()
842 mbytes -= mchunk; in kimage_load_crash_segment()
854 int result = -ENOMEM; in kimage_load_segment()
856 switch (image->type) { in kimage_load_segment()
878 .limit = -1,
883 .limit = -1,
894 struct kexec_load_limit *limit = table->data; in kexec_limit_handler()
899 .mode = table->mode, in kexec_limit_handler()
909 return -EINVAL; in kexec_limit_handler()
911 mutex_lock(&limit->mutex); in kexec_limit_handler()
912 if (limit->limit != -1 && val >= limit->limit) in kexec_limit_handler()
913 ret = -EINVAL; in kexec_limit_handler()
915 limit->limit = val; in kexec_limit_handler()
916 mutex_unlock(&limit->mutex); in kexec_limit_handler()
921 mutex_lock(&limit->mutex); in kexec_limit_handler()
922 val = limit->limit; in kexec_limit_handler()
923 mutex_unlock(&limit->mutex); in kexec_limit_handler()
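The handler enforces a one-way ratchet. In mainline these limits surface as the kexec_load_limit_panic and kexec_load_limit_reboot sysctls: -1 means unlimited, and a written value is accepted only if it is lower than the current limit, so the budget can be tightened but never relaxed. The rule restated as a hypothetical standalone helper (not the kernel's API):

    static int limit_store(int *limit, int val)
    {
        if (*limit != -1 && val >= *limit)
            return -EINVAL;     /* raising or repeating is refused */
        *limit = val;           /* -1 may be lowered exactly once */
        return 0;
    }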
933 .mode = 0644,
942 .mode = 0644,
948 .mode = 0644,
975 mutex_lock(&limit->mutex); in kexec_load_permitted()
976 if (!limit->limit) { in kexec_load_permitted()
977 mutex_unlock(&limit->mutex); in kexec_load_permitted()
980 if (limit->limit != -1) in kexec_load_permitted()
981 limit->limit--; in kexec_load_permitted()
982 mutex_unlock(&limit->mutex); in kexec_load_permitted()
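The consumption side of the same limit is a countdown, roughly as follows (hypothetical helper, same caveats as above): 0 refuses the load outright, -1 is never decremented, and any other value is spent one successful check at a time.

    static bool limit_consume(int *limit)
    {
        if (*limit == 0)
            return false;       /* budget exhausted: refuse the load */
        if (*limit != -1)
            (*limit)--;         /* finite budget: spend one charge */
        return true;            /* -1 stays unlimited forever */
    }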
996 return -EBUSY; in kernel_kexec()
998 error = -EINVAL; in kernel_kexec()
1003 if (kexec_image->preserve_context) { in kernel_kexec()
1005 * This flow is analogous to hibernation flows that occur in kernel_kexec()
1013 error = -EBUSY; in kernel_kexec()
1022 * to complete the transition, like in the hibernation flows in kernel_kexec()
1047 * CPU hotplug again; so re-enable it here. in kernel_kexec()
1058 if (kexec_image->preserve_context) { in kernel_kexec()
1060 * This flow is analogous to hibernation flows that occur after in kernel_kexec()
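Taken together, the kernel_kexec() fragments trace the preserve_context (kexec jump) round trip, which deliberately mirrors hibernation's enter and leave ordering. A hedged outline (pseudocode only; the exact helper calls vary across kernel versions):

    /*
     *   freeze user space, suspend devices       (like hibernation entry)
     *   disable non-boot CPUs, disable interrupts, syscore_suspend()
     *       machine_kexec(kexec_image);          jump to the new kernel
     *   ...and if control comes back, unwind in reverse:
     *   syscore_resume(), enable interrupts, re-enable CPU hotplug,
     *   resume devices, thaw user space          (like hibernation exit)
     */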