/*
 * kexec.c - kexec system call
 * Copyright (C) 2002-2004 Eric Biederman <ebiederm@xmission.com>
 * This edition is maintained by Matthew Veety (aliasxerog) <mveety@gmail.com>
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2.  See the file COPYING for more details.
 */
#include <linux/capability.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/kexec.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/syscalls.h>
#include <linux/reboot.h>
#include <linux/ioport.h>
#include <linux/hardirq.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/utsrelease.h>
#include <linux/utsname.h>
#include <linux/numa.h>
#include <linux/suspend.h>
#include <linux/device.h>
#include <linux/freezer.h>
#include <linux/pm.h>
#include <linux/cpu.h>
#include <linux/console.h>
#include <linux/vmalloc.h>
#include <linux/module.h>	/* MODULE_LICENSE, module_init */

#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/system.h>
#include <asm/sections.h>
#include <asm/unistd.h>
MODULE_LICENSE("GPL");

/* System call table, located at runtime by find_sys_call_table(). */
void **sys_call_table;
/* original and new reboot syscall */
asmlinkage long (*original_reboot)(int magic1, int magic2, unsigned int cmd,
		void __user *arg);
extern asmlinkage long reboot(int magic1, int magic2, unsigned int cmd,
		void __user *arg);
/* Per cpu memory for storing cpu states in case of system crash. */
note_buf_t *crash_notes;

/* vmcoreinfo stuff */
unsigned char vmcoreinfo_data[VMCOREINFO_BYTES];
u32 vmcoreinfo_note[VMCOREINFO_NOTE_SIZE/4];
size_t vmcoreinfo_size;
size_t vmcoreinfo_max_size = sizeof(vmcoreinfo_data);
/* Location of the reserved area for the crash kernel */
struct resource crashk_res = {
	.name  = "Crash kernel",
	.start = 0,
	.end   = 0,
	.flags = IORESOURCE_BUSY | IORESOURCE_MEM
};

int kexec_should_crash(struct task_struct *p)
{
	if (in_interrupt() || !p->pid || is_global_init(p))
		return 1;
	return 0;
}
/*
 * When kexec transitions to the new kernel there is a one-to-one
 * mapping between physical and virtual addresses.  On processors
 * where you can disable the MMU this is trivial, and easy.  For
 * others it is still a simple predictable page table to setup.
 *
 * In that environment kexec copies the new kernel to its final
 * resting place.  This means I can only support memory whose
 * physical address can fit in an unsigned long.  In particular
 * addresses where (pfn << PAGE_SHIFT) > ULONG_MAX cannot be handled.
 * If the assembly stub has more restrictive requirements
 * KEXEC_SOURCE_MEMORY_LIMIT and KEXEC_DEST_MEMORY_LIMIT can be
 * defined more restrictively in <asm/kexec.h>.
 *
 * The code for the transition from the current kernel to the
 * new kernel is placed in the control_code_buffer, whose size
 * is given by KEXEC_CONTROL_PAGE_SIZE.  In the best case only a single
 * page of memory is necessary, but some architectures require more.
 * Because this memory must be identity mapped in the transition from
 * virtual to physical addresses it must live in the range
 * 0 - TASK_SIZE, as only the user space mappings are arbitrarily
 * modifiable.
 *
 * The assembly stub in the control code buffer is passed a linked list
 * of descriptor pages detailing the source pages of the new kernel,
 * and the destination addresses of those source pages.  As this data
 * structure is not used in the context of the current OS, it must
 * be self-contained.
 *
 * The code has been made to work with highmem pages and will use a
 * destination page in its final resting place (if it happens
 * to allocate it).  The end product of this is that most of the
 * physical address space, and most of RAM can be used.
 *
 * Future directions include:
 *  - allocating a page table with the control code buffer identity
 *    mapped, to simplify machine_kexec and make kexec_on_panic more
 *    reliable.
 */

/*
 * KIMAGE_NO_DEST is an impossible destination address..., for
 * allocating pages whose destination address we do not care about.
 */
#define KIMAGE_NO_DEST (-1UL)
static int kimage_is_destination_range(struct kimage *image,
				       unsigned long start, unsigned long end);
static struct page *kimage_alloc_page(struct kimage *image,
				      gfp_t gfp_mask,
				      unsigned long dest);
static int do_kimage_alloc(struct kimage **rimage, unsigned long entry,
			   unsigned long nr_segments,
			   struct kexec_segment __user *segments)
{
	size_t segment_bytes;
	struct kimage *image;
	unsigned long i;
	int result;

	/* Allocate a controlling structure */
	result = -ENOMEM;
	image = kzalloc(sizeof(*image), GFP_KERNEL);
	if (!image)
		goto out;

	image->head = 0;
	image->entry = &image->head;
	image->last_entry = &image->head;
	image->control_page = ~0; /* By default this does not apply */
	image->start = entry;
	image->type = KEXEC_TYPE_DEFAULT;

	/* Initialize the list of control pages */
	INIT_LIST_HEAD(&image->control_pages);

	/* Initialize the list of destination pages */
	INIT_LIST_HEAD(&image->dest_pages);

	/* Initialize the list of unusable pages */
	INIT_LIST_HEAD(&image->unuseable_pages);

	/* Read in the segments */
	image->nr_segments = nr_segments;
	segment_bytes = nr_segments * sizeof(*segments);
	result = copy_from_user(image->segment, segments, segment_bytes);
	if (result) {
		result = -EFAULT;
		goto out;
	}

	/*
	 * Verify we have good destination addresses.  The caller is
	 * responsible for making certain we don't attempt to load
	 * the new image into invalid or reserved areas of RAM.  This
	 * just verifies it is an address we can use.
	 *
	 * Since the kernel does everything in page size chunks ensure
	 * the destination addresses are page aligned.  Too many
	 * special cases crop up when we don't do this.  The most
	 * insidious is getting overlapping destination addresses
	 * simply because addresses are changed to page size
	 * granularity.
	 */
	result = -EADDRNOTAVAIL;
	for (i = 0; i < nr_segments; i++) {
		unsigned long mstart, mend;

		mstart = image->segment[i].mem;
		mend   = mstart + image->segment[i].memsz;
		if ((mstart & ~PAGE_MASK) || (mend & ~PAGE_MASK))
			goto out;
		if (mend >= KEXEC_DESTINATION_MEMORY_LIMIT)
			goto out;
	}

	/* Verify our destination addresses do not overlap.
	 * If we allowed overlapping destination addresses
	 * through very weird things can happen with no
	 * easy explanation as one segment stops on another.
	 */
	result = -EINVAL;
	for (i = 0; i < nr_segments; i++) {
		unsigned long mstart, mend;
		unsigned long j;

		mstart = image->segment[i].mem;
		mend   = mstart + image->segment[i].memsz;
		for (j = 0; j < i; j++) {
			unsigned long pstart, pend;

			pstart = image->segment[j].mem;
			pend   = pstart + image->segment[j].memsz;
			/* Do the segments overlap ? */
			if ((mend > pstart) && (mstart < pend))
				goto out;
		}
	}

	/* Ensure our buffer sizes are strictly less than
	 * our memory sizes.  This should always be the case,
	 * and it is easier to check up front than to be surprised
	 * later on.
	 */
	result = -EINVAL;
	for (i = 0; i < nr_segments; i++) {
		if (image->segment[i].bufsz > image->segment[i].memsz)
			goto out;
	}

	result = 0;
out:
	if (result == 0)
		*rimage = image;
	else
		kfree(image);

	return result;
}
static int kimage_normal_alloc(struct kimage **rimage, unsigned long entry,
			       unsigned long nr_segments,
			       struct kexec_segment __user *segments)
{
	int result;
	struct kimage *image;

	/* Allocate and initialize a controlling structure */
	image = NULL;
	result = do_kimage_alloc(&image, entry, nr_segments, segments);
	if (result)
		goto out;

	*rimage = image;

	/*
	 * Find a location for the control code buffer, and add it
	 * to the vector of segments so that its pages will also be
	 * counted as destination pages.
	 */
	result = -ENOMEM;
	image->control_code_page = kimage_alloc_control_pages(image,
					   get_order(KEXEC_CONTROL_PAGE_SIZE));
	if (!image->control_code_page) {
		printk(KERN_ERR "Could not allocate control_code_buffer\n");
		goto out;
	}

	image->swap_page = kimage_alloc_control_pages(image, 0);
	if (!image->swap_page) {
		printk(KERN_ERR "Could not allocate swap buffer\n");
		goto out;
	}

	result = 0;
out:
	if (result == 0)
		*rimage = image;
	else
		kimage_free(image);

	return result;
}
static int kimage_crash_alloc(struct kimage **rimage, unsigned long entry,
			      unsigned long nr_segments,
			      struct kexec_segment __user *segments)
{
	int result;
	struct kimage *image;
	unsigned long i;

	image = NULL;
	/* Verify we have a valid entry point */
	if ((entry < crashk_res.start) || (entry > crashk_res.end)) {
		result = -EADDRNOTAVAIL;
		goto out;
	}

	/* Allocate and initialize a controlling structure */
	result = do_kimage_alloc(&image, entry, nr_segments, segments);
	if (result)
		goto out;

	/* Enable the special crash kernel control page
	 * allocation policy.
	 */
	image->control_page = crashk_res.start;
	image->type = KEXEC_TYPE_CRASH;

	/*
	 * Verify we have good destination addresses.  Normally
	 * the caller is responsible for making certain we don't
	 * attempt to load the new image into invalid or reserved
	 * areas of RAM.  But crash kernels are preloaded into a
	 * reserved area of ram.  We must ensure the addresses
	 * are in the reserved area otherwise preloading the
	 * kernel could corrupt things.
	 */
	result = -EADDRNOTAVAIL;
	for (i = 0; i < nr_segments; i++) {
		unsigned long mstart, mend;

		mstart = image->segment[i].mem;
		mend = mstart + image->segment[i].memsz - 1;
		/* Ensure we are within the crash kernel limits */
		if ((mstart < crashk_res.start) || (mend > crashk_res.end))
			goto out;
	}

	/*
	 * Find a location for the control code buffer, and add
	 * it to the vector of segments so that its pages will also
	 * be counted as destination pages.
	 */
	result = -ENOMEM;
	image->control_code_page = kimage_alloc_control_pages(image,
					   get_order(KEXEC_CONTROL_PAGE_SIZE));
	if (!image->control_code_page) {
		printk(KERN_ERR "Could not allocate control_code_buffer\n");
		goto out;
	}

	result = 0;
out:
	if (result == 0)
		*rimage = image;
	else
		kimage_free(image);

	return result;
}
static int kimage_is_destination_range(struct kimage *image,
				       unsigned long start,
				       unsigned long end)
{
	unsigned long i;

	for (i = 0; i < image->nr_segments; i++) {
		unsigned long mstart, mend;

		mstart = image->segment[i].mem;
		mend = mstart + image->segment[i].memsz;
		if ((end > mstart) && (start < mend))
			return 1;
	}

	return 0;
}
static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order)
{
	struct page *pages;

	pages = alloc_pages(gfp_mask, order);
	if (pages) {
		unsigned int count, i;

		pages->mapping = NULL;
		set_page_private(pages, order);
		count = 1 << order;
		for (i = 0; i < count; i++)
			SetPageReserved(pages + i);
	}

	return pages;
}
static void kimage_free_pages(struct page *page)
{
	unsigned int order, count, i;

	order = page_private(page);
	count = 1 << order;
	for (i = 0; i < count; i++)
		ClearPageReserved(page + i);
	__free_pages(page, order);
}
static void kimage_free_page_list(struct list_head *list)
{
	struct list_head *pos, *next;

	list_for_each_safe(pos, next, list) {
		struct page *page;

		page = list_entry(pos, struct page, lru);
		list_del(&page->lru);
		kimage_free_pages(page);
	}
}
static struct page *kimage_alloc_normal_control_pages(struct kimage *image,
						      unsigned int order)
{
	/* Control pages are special, they are the intermediaries
	 * that are needed while we copy the rest of the pages
	 * to their final resting place.  As such they must
	 * not conflict with either the destination addresses
	 * or memory the kernel is already using.
	 *
	 * The only case where we really need more than one of
	 * these are for architectures where we cannot disable
	 * the MMU and must instead generate an identity mapped
	 * page table for all of the memory.
	 *
	 * At worst this runs in O(N) of the image size.
	 */
	struct list_head extra_pages;
	struct page *pages;
	unsigned int count;

	count = 1 << order;
	INIT_LIST_HEAD(&extra_pages);

	/* Loop while I can allocate a page and the page allocated
	 * is a destination page.
	 */
	do {
		unsigned long pfn, epfn, addr, eaddr;

		pages = kimage_alloc_pages(GFP_KERNEL, order);
		if (!pages)
			break;
		pfn   = page_to_pfn(pages);
		epfn  = pfn + count;
		addr  = pfn << PAGE_SHIFT;
		eaddr = epfn << PAGE_SHIFT;
		if ((epfn >= (KEXEC_CONTROL_MEMORY_LIMIT >> PAGE_SHIFT)) ||
			      kimage_is_destination_range(image, addr, eaddr)) {
			list_add(&pages->lru, &extra_pages);
			pages = NULL;
		}
	} while (!pages);

	if (pages) {
		/* Remember the allocated page... */
		list_add(&pages->lru, &image->control_pages);

		/* Because the page is already in its destination
		 * location we will never allocate another page at
		 * that address.  Therefore kimage_alloc_pages
		 * will not return it (again) and we don't need
		 * to give it an entry in image->segment[].
		 */
	}
	/* Deal with the destination pages I have inadvertently allocated.
	 *
	 * Ideally I would convert multi-page allocations into single
	 * page allocations, and add everything to image->dest_pages.
	 *
	 * For now it is simpler to just free the pages.
	 */
	kimage_free_page_list(&extra_pages);

	return pages;
}
static struct page *kimage_alloc_crash_control_pages(struct kimage *image,
						     unsigned int order)
{
	/* Control pages are special, they are the intermediaries
	 * that are needed while we copy the rest of the pages
	 * to their final resting place.  As such they must
	 * not conflict with either the destination addresses
	 * or memory the kernel is already using.
	 *
	 * Control pages are also the only pages we must allocate
	 * when loading a crash kernel.  All of the other pages
	 * are specified by the segments and we just memcpy
	 * into them directly.
	 *
	 * The only case where we really need more than one of
	 * these are for architectures where we cannot disable
	 * the MMU and must instead generate an identity mapped
	 * page table for all of the memory.
	 *
	 * Given the low demand this implements a very simple
	 * allocator that finds the first hole of the appropriate
	 * size in the reserved memory region, and allocates all
	 * of the memory up to and including the hole.
	 */
	unsigned long hole_start, hole_end, size;
	struct page *pages;

	pages = NULL;
	size = (1 << order) << PAGE_SHIFT;
	hole_start = (image->control_page + (size - 1)) & ~(size - 1);
	hole_end   = hole_start + size - 1;
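	/*
	 * A worked example of the round-up above (illustrative values,
	 * not taken from the source): with 4K pages and order = 1,
	 * size = 0x2000; starting from control_page = 0x1000:
	 *
	 *	hole_start = (0x1000 + 0x1fff) & ~0x1fff = 0x2000
	 *	hole_end   = 0x2000 + 0x2000 - 1         = 0x3fff
	 *
	 * i.e. the candidate hole is the first size-aligned block at
	 * or above image->control_page.
	 */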
	while (hole_end <= crashk_res.end) {
		unsigned long i;

		if (hole_end > KEXEC_CONTROL_MEMORY_LIMIT)
			break;
		if (hole_end > crashk_res.end)
			break;
		/* See if I overlap any of the segments */
		for (i = 0; i < image->nr_segments; i++) {
			unsigned long mstart, mend;

			mstart = image->segment[i].mem;
			mend   = mstart + image->segment[i].memsz - 1;
			if ((hole_end >= mstart) && (hole_start <= mend)) {
				/* Advance the hole to the end of the segment */
				hole_start = (mend + (size - 1)) & ~(size - 1);
				hole_end   = hole_start + size - 1;
				break;
			}
		}
		/* If I don't overlap any segments I have found my hole! */
		if (i == image->nr_segments) {
			pages = pfn_to_page(hole_start >> PAGE_SHIFT);
			break;
		}
	}
	if (pages)
		image->control_page = hole_end;

	return pages;
}
struct page *kimage_alloc_control_pages(struct kimage *image,
					unsigned int order)
{
	struct page *pages = NULL;

	switch (image->type) {
	case KEXEC_TYPE_DEFAULT:
		pages = kimage_alloc_normal_control_pages(image, order);
		break;
	case KEXEC_TYPE_CRASH:
		pages = kimage_alloc_crash_control_pages(image, order);
		break;
	}

	return pages;
}
static int kimage_add_entry(struct kimage *image, kimage_entry_t entry)
{
	if (*image->entry != 0)
		image->entry++;

	if (image->entry == image->last_entry) {
		kimage_entry_t *ind_page;
		struct page *page;

		page = kimage_alloc_page(image, GFP_KERNEL, KIMAGE_NO_DEST);
		if (!page)
			return -ENOMEM;

		ind_page = page_address(page);
		*image->entry = virt_to_phys(ind_page) | IND_INDIRECTION;
		image->entry = ind_page;
		image->last_entry = ind_page +
				      ((PAGE_SIZE/sizeof(kimage_entry_t)) - 1);
	}
	*image->entry = entry;
	image->entry++;
	*image->entry = 0;

	return 0;
}
static int kimage_set_destination(struct kimage *image,
				  unsigned long destination)
{
	int result;

	destination &= PAGE_MASK;
	result = kimage_add_entry(image, destination | IND_DESTINATION);
	if (result == 0)
		image->destination = destination;

	return result;
}
static int kimage_add_page(struct kimage *image, unsigned long page)
{
	int result;

	page &= PAGE_MASK;
	result = kimage_add_entry(image, page | IND_SOURCE);
	if (result == 0)
		image->destination += PAGE_SIZE;

	return result;
}
static void kimage_free_extra_pages(struct kimage *image)
{
	/* Walk through and free any extra destination pages I may have */
	kimage_free_page_list(&image->dest_pages);

	/* Walk through and free any unusable pages I have cached */
	kimage_free_page_list(&image->unuseable_pages);
}
static void kimage_terminate(struct kimage *image)
{
	if (*image->entry != 0)
		image->entry++;

	*image->entry = IND_DONE;
}
#define for_each_kimage_entry(image, ptr, entry) \
	for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \
		ptr = (entry & IND_INDIRECTION) ? \
			phys_to_virt((entry & PAGE_MASK)) : ptr + 1)
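/*
 * A sketch of the entry list this macro walks (addresses illustrative,
 * flags as defined in <linux/kexec.h>).  Each kimage_entry_t is a
 * physical address with a flag or'd into the low bits:
 *
 *	IND_DESTINATION | dest	set the running destination address
 *	IND_SOURCE      | src	copy a page here, destination += PAGE_SIZE
 *	IND_INDIRECTION | next	continue walking in the page at next
 *	IND_DONE		end of the list
 */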
static void kimage_free_entry(kimage_entry_t entry)
{
	struct page *page;

	page = pfn_to_page(entry >> PAGE_SHIFT);
	kimage_free_pages(page);
}
static void kimage_free(struct kimage *image)
{
	kimage_entry_t *ptr, entry;
	kimage_entry_t ind = 0;

	if (!image)
		return;

	kimage_free_extra_pages(image);
	for_each_kimage_entry(image, ptr, entry) {
		if (entry & IND_INDIRECTION) {
			/* Free the previous indirection page */
			if (ind & IND_INDIRECTION)
				kimage_free_entry(ind);
			/* Save this indirection page until we are
			 * done with it.
			 */
			ind = entry;
		}
		else if (entry & IND_SOURCE)
			kimage_free_entry(entry);
	}
	/* Free the final indirection page */
	if (ind & IND_INDIRECTION)
		kimage_free_entry(ind);

	/* Handle any machine specific cleanup */
	machine_kexec_cleanup(image);

	/* Free the kexec control pages... */
	kimage_free_page_list(&image->control_pages);
	kfree(image);
}
static kimage_entry_t *kimage_dst_used(struct kimage *image,
				       unsigned long page)
{
	kimage_entry_t *ptr, entry;
	unsigned long destination = 0;

	for_each_kimage_entry(image, ptr, entry) {
		if (entry & IND_DESTINATION)
			destination = entry & PAGE_MASK;
		else if (entry & IND_SOURCE) {
			if (page == destination)
				return ptr;
			destination += PAGE_SIZE;
		}
	}

	return NULL;
}
static struct page *kimage_alloc_page(struct kimage *image,
				      gfp_t gfp_mask,
				      unsigned long destination)
{
	/*
	 * Here we implement safeguards to ensure that a source page
	 * is not copied to its destination page before the data on
	 * the destination page is no longer useful.
	 *
	 * To do this we maintain the invariant that a source page is
	 * either its own destination page, or it is not a
	 * destination page at all.
	 *
	 * That is slightly stronger than required, but the proof
	 * that no problems will occur is trivial, and the
	 * implementation is simply to verify.
	 *
	 * When allocating all pages normally this algorithm will run
	 * in O(N) time, but in the worst case it will run in O(N^2)
	 * time.  If the runtime is a problem the data structures can
	 * be fixed up.
	 */
	struct page *page;
	unsigned long addr;

	/*
	 * Walk through the list of destination pages, and see if I
	 * have a match.
	 */
	list_for_each_entry(page, &image->dest_pages, lru) {
		addr = page_to_pfn(page) << PAGE_SHIFT;
		if (addr == destination) {
			list_del(&page->lru);
			return page;
		}
	}
	page = NULL;
	while (1) {
		kimage_entry_t *old;

		/* Allocate a page, if we run out of memory give up */
		page = kimage_alloc_pages(gfp_mask, 0);
		if (!page)
			return NULL;
		/* If the page cannot be used file it away */
		if (page_to_pfn(page) >
				(KEXEC_SOURCE_MEMORY_LIMIT >> PAGE_SHIFT)) {
			list_add(&page->lru, &image->unuseable_pages);
			continue;
		}
		addr = page_to_pfn(page) << PAGE_SHIFT;

		/* If it is the destination page we want use it */
		if (addr == destination)
			break;

		/* If the page is not a destination page use it */
		if (!kimage_is_destination_range(image, addr,
						 addr + PAGE_SIZE))
			break;

		/*
		 * I know that the page is someone's destination page.
		 * See if there is already a source page for this
		 * destination page.  And if so swap the source pages.
		 */
		old = kimage_dst_used(image, addr);
		if (old) {
			/* If so move it */
			unsigned long old_addr;
			struct page *old_page;

			old_addr = *old & PAGE_MASK;
			old_page = pfn_to_page(old_addr >> PAGE_SHIFT);
			copy_highpage(page, old_page);
			*old = addr | (*old & ~PAGE_MASK);

			/* The old page I have found cannot be a
			 * destination page, so return it if its
			 * gfp_flags honor the ones passed in.
			 */
			if (!(gfp_mask & __GFP_HIGHMEM) &&
			    PageHighMem(old_page)) {
				kimage_free_pages(old_page);
				continue;
			}
			addr = old_addr;
			page = old_page;
			break;
		}
		else {
			/* Place the page on the destination list I
			 * will use it later.
			 */
			list_add(&page->lru, &image->dest_pages);
		}
	}

	return page;
}
static int kimage_load_normal_segment(struct kimage *image,
				      struct kexec_segment *segment)
{
	unsigned long maddr;
	unsigned long ubytes, mbytes;
	int result;
	unsigned char __user *buf;

	result = 0;
	buf = segment->buf;
	ubytes = segment->bufsz;
	mbytes = segment->memsz;
	maddr = segment->mem;

	result = kimage_set_destination(image, maddr);
	if (result < 0)
		goto out;

	while (mbytes) {
		struct page *page;
		char *ptr;
		size_t uchunk, mchunk;

		page = kimage_alloc_page(image, GFP_HIGHUSER, maddr);
		if (!page) {
			result = -ENOMEM;
			goto out;
		}
		result = kimage_add_page(image, page_to_pfn(page)
								<< PAGE_SHIFT);
		if (result < 0)
			goto out;

		ptr = kmap(page);
		/* Start with a clear page */
		clear_page(ptr);
		ptr += maddr & ~PAGE_MASK;
		mchunk = PAGE_SIZE - (maddr & ~PAGE_MASK);
		if (mchunk > mbytes)
			mchunk = mbytes;

		uchunk = mchunk;
		if (uchunk > ubytes)
			uchunk = ubytes;

		result = copy_from_user(ptr, buf, uchunk);
		kunmap(page);
		if (result) {
			result = -EFAULT;
			goto out;
		}
		ubytes -= uchunk;
		maddr  += mchunk;
		buf    += mchunk;
		mbytes -= mchunk;
	}
out:
	return result;
}
static int kimage_load_crash_segment(struct kimage *image,
				     struct kexec_segment *segment)
{
	/* For crash dump kernels we simply copy the data from
	 * user space to its destination.
	 * We do things a page at a time for the sake of kmap.
	 */
	unsigned long maddr;
	unsigned long ubytes, mbytes;
	int result;
	unsigned char __user *buf;

	result = 0;
	buf = segment->buf;
	ubytes = segment->bufsz;
	mbytes = segment->memsz;
	maddr = segment->mem;
	while (mbytes) {
		struct page *page;
		char *ptr;
		size_t uchunk, mchunk;

		page = pfn_to_page(maddr >> PAGE_SHIFT);
		if (!page) {
			result = -ENOMEM;
			goto out;
		}
		ptr = kmap(page);
		ptr += maddr & ~PAGE_MASK;
		mchunk = PAGE_SIZE - (maddr & ~PAGE_MASK);
		if (mchunk > mbytes)
			mchunk = mbytes;

		uchunk = mchunk;
		if (uchunk > ubytes) {
			uchunk = ubytes;
			/* Zero the trailing part of the page */
			memset(ptr + uchunk, 0, mchunk - uchunk);
		}
		result = copy_from_user(ptr, buf, uchunk);
		kexec_flush_icache_page(page);
		kunmap(page);
		if (result) {
			result = -EFAULT;
			goto out;
		}
		ubytes -= uchunk;
		maddr  += mchunk;
		buf    += mchunk;
		mbytes -= mchunk;
	}
out:
	return result;
}
static int kimage_load_segment(struct kimage *image,
			       struct kexec_segment *segment)
{
	int result = -ENOMEM;

	switch (image->type) {
	case KEXEC_TYPE_DEFAULT:
		result = kimage_load_normal_segment(image, segment);
		break;
	case KEXEC_TYPE_CRASH:
		result = kimage_load_crash_segment(image, segment);
		break;
	}

	return result;
}
/*
 * Exec Kernel system call: for obvious reasons only root may call it.
 *
 * This call breaks up into three pieces.
 * - A generic part which loads the new kernel from the current
 *   address space, and very carefully places the data in the
 *   allocated pages.
 *
 * - A generic part that interacts with the kernel and tells all of
 *   the devices to shut down.  Preventing on-going dmas, and placing
 *   the devices in a consistent state so a later kernel can
 *   reinitialize them.
 *
 * - A machine specific part that includes the syscall number
 *   and then copies the image to its final destination.  And
 *   jumps into the image at entry.
 *
 * kexec does not sync, or unmount filesystems so if you need
 * that to happen you need to do that yourself.
 */
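/*
 * A minimal sketch of a userspace invocation with a single segment
 * (illustrative only -- in practice kexec-tools builds the segment
 * list; the buffer names and the destination address below are made
 * up):
 *
 *	struct kexec_segment seg = {
 *		.buf   = image_buf,		(image bytes in our space)
 *		.bufsz = image_len,
 *		.mem   = (void *)0x100000,	(page-aligned physical dest)
 *		.memsz = (image_len + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1),
 *	};
 *	syscall(__NR_kexec_load, entry, 1, &seg, KEXEC_ARCH_DEFAULT);
 */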
struct kimage *kexec_image;
struct kimage *kexec_crash_image;

static DEFINE_MUTEX(kexec_mutex);
asmlinkage long kexec_load(unsigned long entry, unsigned long nr_segments,
			   struct kexec_segment __user *segments,
			   unsigned long flags)
{
	struct kimage **dest_image, *image;
	int result;

	/* We only trust the superuser with rebooting the system. */
	if (!capable(CAP_SYS_BOOT))
		return -EPERM;

	/*
	 * Verify we have a legal set of flags
	 * This leaves us room for future extensions.
	 */
	if ((flags & KEXEC_FLAGS) != (flags & ~KEXEC_ARCH_MASK))
		return -EINVAL;

	/* Verify we are on the appropriate architecture */
	if (((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH) &&
		((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH_DEFAULT))
		return -EINVAL;

	/* Put an artificial cap on the number
	 * of segments passed to kexec_load.
	 */
	if (nr_segments > KEXEC_SEGMENT_MAX)
		return -EINVAL;

	image = NULL;
	result = 0;

	/* Because we write directly to the reserved memory
	 * region when loading crash kernels we need a mutex here to
	 * prevent multiple crash kernels from attempting to load
	 * simultaneously, and to prevent a crash kernel from loading
	 * over the top of an in-use crash kernel.
	 *
	 * KISS: always take the mutex.
	 */
	if (!mutex_trylock(&kexec_mutex))
		return -EBUSY;

	dest_image = &kexec_image;
	if (flags & KEXEC_ON_CRASH)
		dest_image = &kexec_crash_image;
	if (nr_segments > 0) {
		unsigned long i;

		/* Loading another kernel to reboot into */
		if ((flags & KEXEC_ON_CRASH) == 0)
			result = kimage_normal_alloc(&image, entry,
						     nr_segments, segments);
		/* Loading another kernel to switch to if this one crashes */
		else if (flags & KEXEC_ON_CRASH) {
			/* Free any current crash dump kernel before
			 * we corrupt it.
			 */
			kimage_free(xchg(&kexec_crash_image, NULL));
			result = kimage_crash_alloc(&image, entry,
						    nr_segments, segments);
		}
		if (result)
			goto out;

		if (flags & KEXEC_PRESERVE_CONTEXT)
			image->preserve_context = 1;
		result = machine_kexec_prepare(image);
		if (result)
			goto out;

		for (i = 0; i < nr_segments; i++) {
			result = kimage_load_segment(image, &image->segment[i]);
			if (result)
				goto out;
		}
		kimage_terminate(image);
	}
	/* Install the new kernel, and uninstall the old */
	image = xchg(dest_image, image);

out:
	mutex_unlock(&kexec_mutex);
	kimage_free(image);

	return result;
}
#ifdef CONFIG_COMPAT
asmlinkage long compat_sys_kexec_load(unsigned long entry,
				unsigned long nr_segments,
				struct compat_kexec_segment __user *segments,
				unsigned long flags)
{
	struct compat_kexec_segment in;
	struct kexec_segment out, __user *ksegments;
	unsigned long i, result;

	/* Don't allow clients that don't understand the native
	 * architecture to do anything.
	 */
	if ((flags & KEXEC_ARCH_MASK) == KEXEC_ARCH_DEFAULT)
		return -EINVAL;

	if (nr_segments > KEXEC_SEGMENT_MAX)
		return -EINVAL;

	ksegments = compat_alloc_user_space(nr_segments * sizeof(out));
	for (i = 0; i < nr_segments; i++) {
		result = copy_from_user(&in, &segments[i], sizeof(in));
		if (result)
			return -EFAULT;

		out.buf   = compat_ptr(in.buf);
		out.bufsz = in.bufsz;
		out.mem   = in.mem;
		out.memsz = in.memsz;

		result = copy_to_user(&ksegments[i], &out, sizeof(out));
		if (result)
			return -EFAULT;
	}

	/* Call this module's kexec_load() rather than the (absent)
	 * in-kernel sys_kexec_load().
	 */
	return kexec_load(entry, nr_segments, ksegments, flags);
}
#endif
void crash_kexec(struct pt_regs *regs)
{
	/* Take the kexec_mutex here to prevent sys_kexec_load
	 * running on one cpu from replacing the crash kernel
	 * we are using after a panic on a different cpu.
	 *
	 * If the crash kernel was not located in a fixed area
	 * of memory the xchg(&kexec_crash_image) would be
	 * sufficient.  But since I reuse the memory...
	 */
	if (mutex_trylock(&kexec_mutex)) {
		if (kexec_crash_image) {
			struct pt_regs fixed_regs;

			crash_setup_regs(&fixed_regs, regs);
			crash_save_vmcoreinfo();
			machine_crash_shutdown(&fixed_regs);
			machine_kexec(kexec_crash_image);
		}
		mutex_unlock(&kexec_mutex);
	}
}
static u32 *append_elf_note(u32 *buf, char *name, unsigned type, void *data,
			    size_t data_len)
{
	struct elf_note note;

	note.n_namesz = strlen(name) + 1;
	note.n_descsz = data_len;
	note.n_type   = type;
	memcpy(buf, &note, sizeof(note));
	buf += (sizeof(note) + 3)/4;
	memcpy(buf, name, note.n_namesz);
	buf += (note.n_namesz + 3)/4;
	memcpy(buf, data, note.n_descsz);
	buf += (note.n_descsz + 3)/4;

	return buf;
}
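/*
 * Resulting note layout (the (x + 3)/4 arithmetic above keeps each
 * field 4-byte aligned, buf being a u32 pointer):
 *
 *	struct elf_note { n_namesz, n_descsz, n_type }
 *	name bytes (n_namesz of them, including the NUL), padded
 *	desc bytes (n_descsz of them), padded
 */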
static void final_note(u32 *buf)
{
	struct elf_note note;

	note.n_namesz = 0;
	note.n_descsz = 0;
	note.n_type   = 0;
	memcpy(buf, &note, sizeof(note));
}
void crash_save_cpu(struct pt_regs *regs, int cpu)
{
	struct elf_prstatus prstatus;
	u32 *buf;

	if ((cpu < 0) || (cpu >= nr_cpu_ids))
		return;

	/* Using ELF notes here is opportunistic.
	 * I need a well defined structure format
	 * for the data I pass, and I need tags
	 * on the data to indicate what information I have
	 * squirrelled away.  ELF notes happen to provide
	 * all of that, so there is no need to invent something new.
	 */
	buf = (u32 *)per_cpu_ptr(crash_notes, cpu);
	if (!buf)
		return;
	memset(&prstatus, 0, sizeof(prstatus));
	prstatus.pr_pid = current->pid;
	elf_core_copy_kernel_regs(&prstatus.pr_reg, regs);
	buf = append_elf_note(buf, KEXEC_CORE_NOTE_NAME, NT_PRSTATUS,
			      &prstatus, sizeof(prstatus));
	final_note(buf);
}
/*
 * parsing the "crashkernel" commandline
 *
 * this code is intended to be called from architecture specific code
 */

/*
 * This function parses command lines in the format
 *
 *	crashkernel=ramsize-range:size[,...][@offset]
 *
 * The function returns 0 on success and -EINVAL on failure.
 */
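/*
 * An example (a sketch of how the ranges are read; the sizes are
 * made up):
 *
 *	crashkernel=512M-2G:64M,2G-:128M@16M
 *
 * reserves 64M when the system has at least 512M but less than 2G of
 * RAM, 128M when it has 2G or more, and places the reservation at
 * offset 16M.
 */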
static int __init parse_crashkernel_mem(char *cmdline,
					unsigned long long system_ram,
					unsigned long long *crash_size,
					unsigned long long *crash_base)
{
	char *cur = cmdline, *tmp;

	/* for each entry of the comma-separated list */
	do {
		unsigned long long start, end = ULLONG_MAX, size;

		/* get the start of the range */
		start = memparse(cur, &tmp);
		if (cur == tmp) {
			pr_warning("crashkernel: Memory value expected\n");
			return -EINVAL;
		}
		cur = tmp;
		if (*cur != '-') {
			pr_warning("crashkernel: '-' expected\n");
			return -EINVAL;
		}
		cur++;

		/* if no ':' is here, then we read the end */
		if (*cur != ':') {
			end = memparse(cur, &tmp);
			if (cur == tmp) {
				pr_warning("crashkernel: Memory "
						"value expected\n");
				return -EINVAL;
			}
			cur = tmp;
			if (end <= start) {
				pr_warning("crashkernel: end <= start\n");
				return -EINVAL;
			}
		}

		if (*cur != ':') {
			pr_warning("crashkernel: ':' expected\n");
			return -EINVAL;
		}
		cur++;

		size = memparse(cur, &tmp);
		if (cur == tmp) {
			pr_warning("Memory value expected\n");
			return -EINVAL;
		}
		cur = tmp;
		if (size >= system_ram) {
			pr_warning("crashkernel: invalid size\n");
			return -EINVAL;
		}

		/* match ? */
		if (system_ram >= start && system_ram < end) {
			*crash_size = size;
			break;
		}
	} while (*cur++ == ',');

	if (*crash_size > 0) {
		while (*cur && *cur != ' ' && *cur != '@')
			cur++;
		if (*cur == '@') {
			cur++;
			*crash_base = memparse(cur, &tmp);
			if (cur == tmp) {
				pr_warning("Memory value expected "
						"after '@'\n");
				return -EINVAL;
			}
		}
	}

	return 0;
}
/*
 * This function parses "simple" (old) crashkernel command lines like
 *
 *	crashkernel=size[@offset]
 *
 * It returns 0 on success and -EINVAL on failure.
 */
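/*
 * An example (illustrative):
 *
 *	crashkernel=64M@16M
 *
 * reserves 64M for the crash kernel at physical offset 16M.
 */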
static int __init parse_crashkernel_simple(char *cmdline,
					   unsigned long long *crash_size,
					   unsigned long long *crash_base)
{
	char *cur = cmdline;

	*crash_size = memparse(cmdline, &cur);
	if (cmdline == cur) {
		pr_warning("crashkernel: memory value expected\n");
		return -EINVAL;
	}

	if (*cur == '@')
		*crash_base = memparse(cur+1, &cur);

	return 0;
}
/*
 * This function is the entry point for command line parsing and should be
 * called from the arch-specific code.
 */
int __init parse_crashkernel(char *cmdline,
			     unsigned long long system_ram,
			     unsigned long long *crash_size,
			     unsigned long long *crash_base)
{
	char *p = cmdline, *ck_cmdline = NULL;
	char *first_colon, *first_space;

	BUG_ON(!crash_size || !crash_base);
	*crash_size = 0;
	*crash_base = 0;

	/* find crashkernel and use the last one if there are more */
	p = strstr(p, "crashkernel=");
	while (p) {
		ck_cmdline = p;
		p = strstr(p+1, "crashkernel=");
	}

	if (!ck_cmdline)
		return -EINVAL;

	ck_cmdline += 12; /* strlen("crashkernel=") */

	/*
	 * if the commandline contains a ':', then that's the extended
	 * syntax -- if not, it must be the classic syntax
	 */
	first_colon = strchr(ck_cmdline, ':');
	first_space = strchr(ck_cmdline, ' ');
	if (first_colon && (!first_space || first_colon < first_space))
		return parse_crashkernel_mem(ck_cmdline, system_ram,
				crash_size, crash_base);
	else
		return parse_crashkernel_simple(ck_cmdline, crash_size,
				crash_base);

	return 0;
}
1320 if (!vmcoreinfo_size
)
1323 vmcoreinfo_append_str("CRASHTIME=%ld", get_seconds());
1325 buf
= (u32
*)vmcoreinfo_note
;
1327 buf
= append_elf_note(buf
, VMCOREINFO_NOTE_NAME
, 0, vmcoreinfo_data
,
void vmcoreinfo_append_str(const char *fmt, ...)
{
	va_list args;
	char buf[0x50];
	int r;

	va_start(args, fmt);
	r = vsnprintf(buf, sizeof(buf), fmt, args);
	va_end(args);

	if (r + vmcoreinfo_size > vmcoreinfo_max_size)
		r = vmcoreinfo_max_size - vmcoreinfo_size;

	memcpy(&vmcoreinfo_data[vmcoreinfo_size], buf, r);

	vmcoreinfo_size += r;
}
/*
 * provide an empty default implementation here -- architecture
 * code may override this
 */
void __attribute__ ((weak)) arch_crash_save_vmcoreinfo(void)
{}

unsigned long __attribute__ ((weak)) paddr_vmcoreinfo_note(void)
{
	return __pa((unsigned long)(char *)&vmcoreinfo_note);
}
/*
 * Move into place and start executing a preloaded standalone
 * executable.  If nothing was preloaded return an error.
 */
int kernel_kexec(void)
{
	int error = 0;

	if (!mutex_trylock(&kexec_mutex))
		return -EBUSY;
	if (!kexec_image) {
		error = -EINVAL;
		goto Unlock;
	}

#ifdef CONFIG_KEXEC_JUMP
	if (kexec_image->preserve_context) {
		mutex_lock(&pm_mutex);
		pm_prepare_console();
		error = freeze_processes();
		if (error) {
			error = -EBUSY;
			goto Restore_console;
		}
		suspend_console();
		error = dpm_suspend_start(PMSG_FREEZE);
		if (error)
			goto Resume_console;
		/* At this point, dpm_suspend_start() has been called,
		 * but *not* dpm_suspend_noirq().  We *must* call
		 * dpm_suspend_noirq() now.  Otherwise, drivers for
		 * some devices (e.g. interrupt controllers) become
		 * desynchronized with the actual state of the
		 * hardware at resume time, and evil weirdness ensues.
		 */
		error = dpm_suspend_noirq(PMSG_FREEZE);
		if (error)
			goto Resume_devices;
		error = disable_nonboot_cpus();
		if (error)
			goto Enable_cpus;
		local_irq_disable();
		error = syscore_suspend();
		if (error)
			goto Enable_irqs;
	} else
#endif
	{
		kernel_restart_prepare(NULL);
		printk(KERN_EMERG "Starting new kernel\n");
		//machine_shutdown();
	}

	machine_kexec(kexec_image);

#ifdef CONFIG_KEXEC_JUMP
	if (kexec_image->preserve_context) {
		syscore_resume();
 Enable_irqs:
		local_irq_enable();
 Enable_cpus:
		enable_nonboot_cpus();
		dpm_resume_noirq(PMSG_RESTORE);
 Resume_devices:
		dpm_resume_end(PMSG_RESTORE);
 Resume_console:
		resume_console();
		thaw_processes();
 Restore_console:
		pm_restore_console();
		mutex_unlock(&pm_mutex);
	}
#endif

 Unlock:
	mutex_unlock(&kexec_mutex);
	return error;
}
unsigned long **find_sys_call_table(void)
{
	unsigned long **sctable;
	unsigned long ptr;
	extern int loops_per_jiffy;

	/* Scan kernel memory between unlock_kernel and loops_per_jiffy
	 * for a table whose __NR_close slot points at sys_close.
	 */
	sctable = NULL;
	for (ptr = (unsigned long)&unlock_kernel;
	     ptr < (unsigned long)&loops_per_jiffy;
	     ptr += sizeof(void *)) {
		unsigned long *p;

		p = (unsigned long *)ptr;
		if (p[__NR_close] == (unsigned long) sys_close) {
			sctable = (unsigned long **)p;
			return &sctable[0];
		}
	}
	return NULL;
}
static int __init kexec_module_init(void)
{
#ifndef SYS_CALL_TABLE
	sys_call_table = (void **)find_sys_call_table();
	if (sys_call_table == NULL) {
		printk(KERN_ERR "Cannot find the system call address\n");
		return -1; /* do not load */
	}
	printk(KERN_INFO "kexec: Found sys_call_table at: %p\n",
	       sys_call_table);
#else
	/* Use the address forced at build time (assumed: SYS_CALL_TABLE
	 * is a compile-time define selecting this branch).
	 */
	sys_call_table = (void **)SYS_CALL_TABLE;
	printk(KERN_INFO "kexec: Force sys_call_table at: %p\n",
	       sys_call_table);
#endif

	/* Set kexec_load() syscall. */
	sys_call_table[__NR_kexec_load] = kexec_load;

	/* Swap reboot() syscall and store original */
	original_reboot = sys_call_table[__NR_reboot];
	sys_call_table[__NR_reboot] = reboot;

	/* crash_notes_memory_init */
	/* Allocate memory for saving cpu registers. */
	crash_notes = alloc_percpu(note_buf_t);
	if (!crash_notes) {
		printk("Kexec: Memory allocation for saving cpu register"
		       " states failed\n");
		return -ENOMEM;
	}

	/* crash_vmcoreinfo_init */
	VMCOREINFO_OSRELEASE(init_uts_ns.name.release);
	VMCOREINFO_PAGESIZE(PAGE_SIZE);

	VMCOREINFO_SYMBOL(init_uts_ns);
	VMCOREINFO_SYMBOL(node_online_map);

#ifndef CONFIG_NEED_MULTIPLE_NODES
	VMCOREINFO_SYMBOL(mem_map);
	VMCOREINFO_SYMBOL(contig_page_data);
#endif
#ifdef CONFIG_SPARSEMEM
	VMCOREINFO_SYMBOL(mem_section);
	VMCOREINFO_LENGTH(mem_section, NR_SECTION_ROOTS);
	VMCOREINFO_STRUCT_SIZE(mem_section);
	VMCOREINFO_OFFSET(mem_section, section_mem_map);
#endif
	VMCOREINFO_STRUCT_SIZE(page);
	VMCOREINFO_STRUCT_SIZE(pglist_data);
	VMCOREINFO_STRUCT_SIZE(zone);
	VMCOREINFO_STRUCT_SIZE(free_area);
	VMCOREINFO_STRUCT_SIZE(list_head);
	VMCOREINFO_SIZE(nodemask_t);
	VMCOREINFO_OFFSET(page, flags);
	VMCOREINFO_OFFSET(page, _count);
	VMCOREINFO_OFFSET(page, mapping);
	VMCOREINFO_OFFSET(page, lru);
	VMCOREINFO_OFFSET(pglist_data, node_zones);
	VMCOREINFO_OFFSET(pglist_data, nr_zones);
#ifdef CONFIG_FLAT_NODE_MEM_MAP
	VMCOREINFO_OFFSET(pglist_data, node_mem_map);
#endif
	VMCOREINFO_OFFSET(pglist_data, node_start_pfn);
	VMCOREINFO_OFFSET(pglist_data, node_spanned_pages);
	VMCOREINFO_OFFSET(pglist_data, node_id);
	VMCOREINFO_OFFSET(zone, free_area);
	VMCOREINFO_OFFSET(zone, vm_stat);
	VMCOREINFO_OFFSET(zone, spanned_pages);
	VMCOREINFO_OFFSET(free_area, free_list);
	VMCOREINFO_OFFSET(list_head, next);
	VMCOREINFO_OFFSET(list_head, prev);
	VMCOREINFO_OFFSET(vm_struct, addr);
	VMCOREINFO_LENGTH(zone.free_area, MAX_ORDER);
	VMCOREINFO_LENGTH(free_area.free_list, MIGRATE_TYPES);
	VMCOREINFO_NUMBER(NR_FREE_PAGES);
	VMCOREINFO_NUMBER(PG_lru);
	VMCOREINFO_NUMBER(PG_private);
	VMCOREINFO_NUMBER(PG_swapcache);

	arch_crash_save_vmcoreinfo();

	return 0;
}
)