/*
 * kexec.c - kexec system call
 * Copyright (C) 2002-2004 Eric Biederman  <ebiederm@xmission.com>
 * This Edition is maintained by Matthew Veety (aliasxerog) <mveety@gmail.com>
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2.  See the file COPYING for more details.
 */
#include <linux/capability.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/kexec.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/syscalls.h>
#include <linux/reboot.h>
#include <linux/ioport.h>
#include <linux/hardirq.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/utsrelease.h>
#include <linux/utsname.h>
#include <linux/numa.h>
#include <linux/suspend.h>
#include <linux/device.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/console.h>
#include <linux/vmalloc.h>

#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/sections.h>
#include <asm/unistd.h>
MODULE_LICENSE("GPL");

void **sys_call_table;
/* original and new reboot syscall */
asmlinkage long (*original_reboot)(int magic1, int magic2, unsigned int cmd,
                                   void __user *arg);
extern asmlinkage long reboot(int magic1, int magic2, unsigned int cmd,
                              void __user *arg);
/* Per cpu memory for storing cpu states in case of system crash. */
note_buf_t *crash_notes;

/* vmcoreinfo stuff */
unsigned char vmcoreinfo_data[VMCOREINFO_BYTES];
u32 vmcoreinfo_note[VMCOREINFO_NOTE_SIZE/4];
size_t vmcoreinfo_size;
size_t vmcoreinfo_max_size = sizeof(vmcoreinfo_data);
/* Location of the reserved area for the crash kernel */
struct resource crashk_res = {
        .name  = "Crash kernel",
        .flags = IORESOURCE_BUSY | IORESOURCE_MEM
};
int kexec_should_crash(struct task_struct *p)
{
        if (in_interrupt() || !p->pid || is_global_init(p))
/*
 * When kexec transitions to the new kernel there is a one-to-one
 * mapping between physical and virtual addresses.  On processors
 * where you can disable the MMU this is trivial, and easy.  For
 * others it is still a simple predictable page table to setup.
 *
 * In that environment kexec copies the new kernel to its final
 * resting place.  This means I can only support memory whose
 * physical address can fit in an unsigned long.  In particular
 * addresses where (pfn << PAGE_SHIFT) > ULONG_MAX cannot be handled.
 * If the assembly stub has more restrictive requirements
 * KEXEC_SOURCE_MEMORY_LIMIT and KEXEC_DEST_MEMORY_LIMIT can be
 * defined more restrictively in <asm/kexec.h>.
 *
 * The code for the transition from the current kernel to the
 * new kernel is placed in the control_code_buffer, whose size
 * is given by KEXEC_CONTROL_PAGE_SIZE.  In the best case only a single
 * page of memory is necessary, but some architectures require more.
 * Because this memory must be identity mapped in the transition from
 * virtual to physical addresses it must live in the range
 * 0 - TASK_SIZE, as only the user space mappings are arbitrarily
 * modifiable.
 *
 * The assembly stub in the control code buffer is passed a linked list
 * of descriptor pages detailing the source pages of the new kernel,
 * and the destination addresses of those source pages.  As this data
 * structure is not used in the context of the current OS, it must
 * be self-contained.
 *
 * The code has been made to work with highmem pages and will use a
 * destination page in its final resting place (if it happens
 * to allocate it).  The end product of this is that most of the
 * physical address space, and most of RAM can be used.
 *
 * Future directions include:
 *  - allocating a page table with the control code buffer identity
 *    mapped, to simplify machine_kexec and make kexec_on_panic more
 *    reliable.
 */
/*
 * KIMAGE_NO_DEST is an impossible destination address..., for
 * allocating pages whose destination address we do not care about.
 */
#define KIMAGE_NO_DEST (-1UL)
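
/*
 * Editor's illustration (not from the original file): the loader below
 * builds a flat list of kimage_entry_t values, each a physical address
 * with a flag or'd into its low bits.  For a single two-page segment the
 * list conceptually looks like:
 *
 *      dest_addr       | IND_DESTINATION
 *      src_page_0_addr | IND_SOURCE
 *      src_page_1_addr | IND_SOURCE
 *      0               | IND_DONE
 *
 * IND_INDIRECTION entries chain in further pages of entries when one page
 * is not enough; the flag values themselves come from <linux/kexec.h>.
 */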
static int kimage_is_destination_range(struct kimage *image,
                                       unsigned long start, unsigned long end);
static struct page *kimage_alloc_page(struct kimage *image,
                                      gfp_t gfp_mask,
                                      unsigned long dest);
static int do_kimage_alloc(struct kimage **rimage, unsigned long entry,
                           unsigned long nr_segments,
                           struct kexec_segment __user *segments)
{
        size_t segment_bytes;
        struct kimage *image;

        /* Allocate a controlling structure */
        image = kzalloc(sizeof(*image), GFP_KERNEL);

        image->entry = &image->head;
        image->last_entry = &image->head;
        image->control_page = ~0; /* By default this does not apply */
        image->start = entry;
        image->type = KEXEC_TYPE_DEFAULT;

        /* Initialize the list of control pages */
        INIT_LIST_HEAD(&image->control_pages);

        /* Initialize the list of destination pages */
        INIT_LIST_HEAD(&image->dest_pages);

        /* Initialize the list of unusable pages */
        INIT_LIST_HEAD(&image->unuseable_pages);

        /* Read in the segments */
        image->nr_segments = nr_segments;
        segment_bytes = nr_segments * sizeof(*segments);
        result = copy_from_user(image->segment, segments, segment_bytes);
        /*
         * Verify we have good destination addresses.  The caller is
         * responsible for making certain we don't attempt to load
         * the new image into invalid or reserved areas of RAM.  This
         * just verifies it is an address we can use.
         *
         * Since the kernel does everything in page size chunks ensure
         * the destination addresses are page aligned.  Too many
         * special cases crop up when we don't do this.  The most
         * insidious is getting overlapping destination addresses
         * simply because addresses are changed to page size
         * granularity.
         */
        result = -EADDRNOTAVAIL;
        for (i = 0; i < nr_segments; i++) {
                unsigned long mstart, mend;

                mstart = image->segment[i].mem;
                mend   = mstart + image->segment[i].memsz;
                if ((mstart & ~PAGE_MASK) || (mend & ~PAGE_MASK))
                if (mend >= KEXEC_DESTINATION_MEMORY_LIMIT)
        /* Verify our destination addresses do not overlap.
         * If we allowed overlapping destination addresses
         * through very weird things can happen with no
         * easy explanation as one segment stops on another.
         */
        for (i = 0; i < nr_segments; i++) {
                unsigned long mstart, mend;

                mstart = image->segment[i].mem;
                mend   = mstart + image->segment[i].memsz;
                for (j = 0; j < i; j++) {
                        unsigned long pstart, pend;
                        pstart = image->segment[j].mem;
                        pend   = pstart + image->segment[j].memsz;
                        /* Do the segments overlap ? */
                        if ((mend > pstart) && (mstart < pend))
        /* Ensure our buffer sizes are strictly less than
         * our memory sizes.  This should always be the case,
         * and it is easier to check up front than to be surprised
         * later on.
         */
        for (i = 0; i < nr_segments; i++) {
                if (image->segment[i].bufsz > image->segment[i].memsz)
static int kimage_normal_alloc(struct kimage **rimage, unsigned long entry,
                               unsigned long nr_segments,
                               struct kexec_segment __user *segments)
{
        struct kimage *image;

        /* Allocate and initialize a controlling structure */
        result = do_kimage_alloc(&image, entry, nr_segments, segments);

        /*
         * Find a location for the control code buffer, and add it to
         * the vector of segments so that its pages will also be
         * counted as destination pages.
         */
        image->control_code_page = kimage_alloc_control_pages(image,
                                           get_order(KEXEC_CONTROL_PAGE_SIZE));
        if (!image->control_code_page) {
                printk(KERN_ERR "Could not allocate control_code_buffer\n");

        image->swap_page = kimage_alloc_control_pages(image, 0);
        if (!image->swap_page) {
                printk(KERN_ERR "Could not allocate swap buffer\n");
static int kimage_crash_alloc(struct kimage **rimage, unsigned long entry,
                              unsigned long nr_segments,
                              struct kexec_segment __user *segments)
{
        struct kimage *image;

        /* Verify we have a valid entry point */
        if ((entry < crashk_res.start) || (entry > crashk_res.end)) {
                result = -EADDRNOTAVAIL;

        /* Allocate and initialize a controlling structure */
        result = do_kimage_alloc(&image, entry, nr_segments, segments);

        /* Enable the special crash kernel control page
           allocation policy. */
        image->control_page = crashk_res.start;
        image->type = KEXEC_TYPE_CRASH;
        /*
         * Verify we have good destination addresses.  Normally
         * the caller is responsible for making certain we don't
         * attempt to load the new image into invalid or reserved
         * areas of RAM.  But crash kernels are preloaded into a
         * reserved area of ram.  We must ensure the addresses
         * are in the reserved area otherwise preloading the
         * kernel could corrupt things.
         */
        result = -EADDRNOTAVAIL;
        for (i = 0; i < nr_segments; i++) {
                unsigned long mstart, mend;

                mstart = image->segment[i].mem;
                mend = mstart + image->segment[i].memsz - 1;
                /* Ensure we are within the crash kernel limits */
                if ((mstart < crashk_res.start) || (mend > crashk_res.end))
        /*
         * Find a location for the control code buffer, and add it to
         * the vector of segments so that its pages will also be
         * counted as destination pages.
         */
        image->control_code_page = kimage_alloc_control_pages(image,
                                           get_order(KEXEC_CONTROL_PAGE_SIZE));
        if (!image->control_code_page) {
                printk(KERN_ERR "Could not allocate control_code_buffer\n");
static int kimage_is_destination_range(struct kimage *image,
                                       unsigned long start, unsigned long end)
{
        for (i = 0; i < image->nr_segments; i++) {
                unsigned long mstart, mend;

                mstart = image->segment[i].mem;
                mend = mstart + image->segment[i].memsz;
                if ((end > mstart) && (start < mend))
static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order)
{
        pages = alloc_pages(gfp_mask, order);
                unsigned int count, i;
                pages->mapping = NULL;
                set_page_private(pages, order);
                for (i = 0; i < count; i++)
                        SetPageReserved(pages + i);
static void kimage_free_pages(struct page *page)
{
        unsigned int order, count, i;

        order = page_private(page);
        for (i = 0; i < count; i++)
                ClearPageReserved(page + i);
        __free_pages(page, order);
static void kimage_free_page_list(struct list_head *list)
{
        struct list_head *pos, *next;

        list_for_each_safe(pos, next, list) {
                page = list_entry(pos, struct page, lru);
                list_del(&page->lru);
                kimage_free_pages(page);
static struct page *kimage_alloc_normal_control_pages(struct kimage *image,
                                                      unsigned int order)
{
        /* Control pages are special, they are the intermediaries
         * that are needed while we copy the rest of the pages
         * to their final resting place.  As such they must
         * not conflict with either the destination addresses
         * or memory the kernel is already using.
         *
         * The only case where we really need more than one of
         * these are for architectures where we cannot disable
         * the MMU and must instead generate an identity mapped
         * page table for all of the memory.
         *
         * At worst this runs in O(N) of the image size.
         */
        struct list_head extra_pages;

        INIT_LIST_HEAD(&extra_pages);
        /* Loop while I can allocate a page and the page allocated
         * is a destination page.
         */
                unsigned long pfn, epfn, addr, eaddr;

                pages = kimage_alloc_pages(GFP_KERNEL, order);
                pfn   = page_to_pfn(pages);
                addr  = pfn << PAGE_SHIFT;
                eaddr = epfn << PAGE_SHIFT;
                if ((epfn >= (KEXEC_CONTROL_MEMORY_LIMIT >> PAGE_SHIFT)) ||
                              kimage_is_destination_range(image, addr, eaddr)) {
                        list_add(&pages->lru, &extra_pages);

                /* Remember the allocated page... */
                list_add(&pages->lru, &image->control_pages);

                /* Because the page is already in its destination
                 * location we will never allocate another page at
                 * that address.  Therefore kimage_alloc_pages
                 * will not return it (again) and we don't need
                 * to give it an entry in image->segment[].
                 */

        /* Deal with the destination pages I have inadvertently allocated.
         *
         * Ideally I would convert multi-page allocations into single
         * page allocations, and add everything to image->dest_pages.
         *
         * For now it is simpler to just free the pages.
         */
        kimage_free_page_list(&extra_pages);
static struct page *kimage_alloc_crash_control_pages(struct kimage *image,
                                                     unsigned int order)
{
        /* Control pages are special, they are the intermediaries
         * that are needed while we copy the rest of the pages
         * to their final resting place.  As such they must
         * not conflict with either the destination addresses
         * or memory the kernel is already using.
         *
         * Control pages are also the only pages we must allocate
         * when loading a crash kernel.  All of the other pages
         * are specified by the segments and we just memcpy
         * into them directly.
         *
         * The only case where we really need more than one of
         * these are for architectures where we cannot disable
         * the MMU and must instead generate an identity mapped
         * page table for all of the memory.
         *
         * Given the low demand this implements a very simple
         * allocator that finds the first hole of the appropriate
         * size in the reserved memory region, and allocates all
         * of the memory up to and including the hole.
         */
        unsigned long hole_start, hole_end, size;

        size = (1 << order) << PAGE_SHIFT;
        hole_start = (image->control_page + (size - 1)) & ~(size - 1);
        hole_end   = hole_start + size - 1;
        while (hole_end <= crashk_res.end) {
                if (hole_end > KEXEC_CONTROL_MEMORY_LIMIT)
                if (hole_end > crashk_res.end)
                /* See if I overlap any of the segments */
                for (i = 0; i < image->nr_segments; i++) {
                        unsigned long mstart, mend;

                        mstart = image->segment[i].mem;
                        mend   = mstart + image->segment[i].memsz - 1;
                        if ((hole_end >= mstart) && (hole_start <= mend)) {
                                /* Advance the hole to the end of the segment */
                                hole_start = (mend + (size - 1)) & ~(size - 1);
                                hole_end   = hole_start + size - 1;
                /* If I don't overlap any segments I have found my hole! */
                if (i == image->nr_segments) {
                        pages = pfn_to_page(hole_start >> PAGE_SHIFT);
                image->control_page = hole_end;
struct page *kimage_alloc_control_pages(struct kimage *image,
                                        unsigned int order)
{
        struct page *pages = NULL;

        switch (image->type) {
        case KEXEC_TYPE_DEFAULT:
                pages = kimage_alloc_normal_control_pages(image, order);
        case KEXEC_TYPE_CRASH:
                pages = kimage_alloc_crash_control_pages(image, order);
static int kimage_add_entry(struct kimage *image, kimage_entry_t entry)
{
        if (*image->entry != 0)

        if (image->entry == image->last_entry) {
                kimage_entry_t *ind_page;

                page = kimage_alloc_page(image, GFP_KERNEL, KIMAGE_NO_DEST);
                ind_page = page_address(page);
                *image->entry = virt_to_phys(ind_page) | IND_INDIRECTION;
                image->entry = ind_page;
                image->last_entry = ind_page +
                                    ((PAGE_SIZE/sizeof(kimage_entry_t)) - 1);
        }
        *image->entry = entry;
static int kimage_set_destination(struct kimage *image,
                                  unsigned long destination)
{
        destination &= PAGE_MASK;
        result = kimage_add_entry(image, destination | IND_DESTINATION);
                image->destination = destination;
static int kimage_add_page(struct kimage *image, unsigned long page)
{
        result = kimage_add_entry(image, page | IND_SOURCE);
                image->destination += PAGE_SIZE;
static void kimage_free_extra_pages(struct kimage *image)
{
        /* Walk through and free any extra destination pages I may have */
        kimage_free_page_list(&image->dest_pages);

        /* Walk through and free any unusable pages I have cached */
        kimage_free_page_list(&image->unuseable_pages);
}
static void kimage_terminate(struct kimage *image)
{
        if (*image->entry != 0)

        *image->entry = IND_DONE;
}
#define for_each_kimage_entry(image, ptr, entry) \
        for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \
                ptr = (entry & IND_INDIRECTION)? \
                        phys_to_virt((entry & PAGE_MASK)): ptr +1)
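
/*
 * Editor's sketch (not in the original module): a minimal walker showing
 * how for_each_kimage_entry() is meant to be used.  kimage_show_entries
 * is a hypothetical helper; the callers below (kimage_free,
 * kimage_dst_used) follow the same pattern.
 */
static void __maybe_unused kimage_show_entries(struct kimage *image)
{
        kimage_entry_t *ptr, entry;

        for_each_kimage_entry(image, ptr, entry) {
                if (entry & IND_DESTINATION)
                        printk(KERN_DEBUG "kexec: destination %#lx\n",
                               entry & PAGE_MASK);
                else if (entry & IND_SOURCE)
                        printk(KERN_DEBUG "kexec: source page %#lx\n",
                               entry & PAGE_MASK);
        }
}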
static void kimage_free_entry(kimage_entry_t entry)
{
        page = pfn_to_page(entry >> PAGE_SHIFT);
        kimage_free_pages(page);
}
static void kimage_free(struct kimage *image)
{
        kimage_entry_t *ptr, entry;
        kimage_entry_t ind = 0;

        kimage_free_extra_pages(image);
        for_each_kimage_entry(image, ptr, entry) {
                if (entry & IND_INDIRECTION) {
                        /* Free the previous indirection page */
                        if (ind & IND_INDIRECTION)
                                kimage_free_entry(ind);
                        /* Save this indirection page until we are
                         * done with it.
                         */
                }
                else if (entry & IND_SOURCE)
                        kimage_free_entry(entry);
        }
        /* Free the final indirection page */
        if (ind & IND_INDIRECTION)
                kimage_free_entry(ind);

        /* Handle any machine specific cleanup */
        machine_kexec_cleanup(image);

        /* Free the kexec control pages... */
        kimage_free_page_list(&image->control_pages);
}
static kimage_entry_t *kimage_dst_used(struct kimage *image,
                                       unsigned long page)
{
        kimage_entry_t *ptr, entry;
        unsigned long destination = 0;

        for_each_kimage_entry(image, ptr, entry) {
                if (entry & IND_DESTINATION)
                        destination = entry & PAGE_MASK;
                else if (entry & IND_SOURCE) {
                        if (page == destination)
                        destination += PAGE_SIZE;
static struct page *kimage_alloc_page(struct kimage *image,
                                      gfp_t gfp_mask,
                                      unsigned long destination)
{
        /*
         * Here we implement safeguards to ensure that a source page
         * is not copied to its destination page before the data on
         * the destination page is no longer useful.
         *
         * To do this we maintain the invariant that a source page is
         * either its own destination page, or it is not a
         * destination page at all.
         *
         * That is slightly stronger than required, but the proof
         * that no problems will occur is trivial, and the
         * implementation is simply to verify.
         *
         * When allocating all pages normally this algorithm will run
         * in O(N) time, but in the worst case it will run in O(N^2)
         * time.  If the runtime is a problem the data structures can
         * be fixed up.
         */
        /*
         * Walk through the list of destination pages, and see if I
         * have a match.
         */
        list_for_each_entry(page, &image->dest_pages, lru) {
                addr = page_to_pfn(page) << PAGE_SHIFT;
                if (addr == destination) {
                        list_del(&page->lru);
                /* Allocate a page, if we run out of memory give up */
                page = kimage_alloc_pages(gfp_mask, 0);
                /* If the page cannot be used file it away */
                if (page_to_pfn(page) >
                                (KEXEC_SOURCE_MEMORY_LIMIT >> PAGE_SHIFT)) {
                        list_add(&page->lru, &image->unuseable_pages);
                addr = page_to_pfn(page) << PAGE_SHIFT;

                /* If it is the destination page we want use it */
                if (addr == destination)

                /* If the page is not a destination page use it */
                if (!kimage_is_destination_range(image, addr,
                                                 addr + PAGE_SIZE))

                /*
                 * I know that the page is someone's destination page.
                 * See if there is already a source page for this
                 * destination page.  And if so swap the source pages.
                 */
                old = kimage_dst_used(image, addr);
                        unsigned long old_addr;
                        struct page *old_page;

                        old_addr = *old & PAGE_MASK;
                        old_page = pfn_to_page(old_addr >> PAGE_SHIFT);
                        copy_highpage(page, old_page);
                        *old = addr | (*old & ~PAGE_MASK);

                        /* The old page I have found cannot be a
                         * destination page, so return it if its
                         * gfp_flags honor the ones passed in.
                         */
                        if (!(gfp_mask & __GFP_HIGHMEM) &&
                            PageHighMem(old_page)) {
                                kimage_free_pages(old_page);

                        /* Place the page on the destination list I
                         * will use it later.
                         */
                        list_add(&page->lru, &image->dest_pages);
static int kimage_load_normal_segment(struct kimage *image,
                                      struct kexec_segment *segment)
{
        unsigned long ubytes, mbytes;
        unsigned char __user *buf;

        ubytes = segment->bufsz;
        mbytes = segment->memsz;
        maddr = segment->mem;

        result = kimage_set_destination(image, maddr);
                size_t uchunk, mchunk;

                page = kimage_alloc_page(image, GFP_HIGHUSER, maddr);
                result = kimage_add_page(image, page_to_pfn(page)
                                                << PAGE_SHIFT);
                /* Start with a clear page */
                memset(ptr, 0, PAGE_SIZE);
                ptr += maddr & ~PAGE_MASK;
                mchunk = PAGE_SIZE - (maddr & ~PAGE_MASK);
                result = copy_from_user(ptr, buf, uchunk);
                        result = (result < 0) ? result : -EIO;
static int kimage_load_crash_segment(struct kimage *image,
                                     struct kexec_segment *segment)
{
        /* For crash dump kernels we simply copy the data from
         * user space to its destination.
         * We do things a page at a time for the sake of kmap.
         */
        unsigned long ubytes, mbytes;
        unsigned char __user *buf;

        ubytes = segment->bufsz;
        mbytes = segment->memsz;
        maddr = segment->mem;
                size_t uchunk, mchunk;

                page = pfn_to_page(maddr >> PAGE_SHIFT);
                ptr += maddr & ~PAGE_MASK;
                mchunk = PAGE_SIZE - (maddr & ~PAGE_MASK);
                if (uchunk > ubytes) {
                        /* Zero the trailing part of the page */
                        memset(ptr + uchunk, 0, mchunk - uchunk);
                }
                result = copy_from_user(ptr, buf, uchunk);
                kexec_flush_icache_page(page);
                        result = (result < 0) ? result : -EIO;
static int kimage_load_segment(struct kimage *image,
                               struct kexec_segment *segment)
{
        int result = -ENOMEM;

        switch (image->type) {
        case KEXEC_TYPE_DEFAULT:
                result = kimage_load_normal_segment(image, segment);
        case KEXEC_TYPE_CRASH:
                result = kimage_load_crash_segment(image, segment);
/*
 * Exec Kernel system call: for obvious reasons only root may call it.
 *
 * This call breaks up into three pieces.
 * - A generic part which loads the new kernel from the current
 *   address space, and very carefully places the data in the
 *   allocated pages.
 *
 * - A generic part that interacts with the kernel and tells all of
 *   the devices to shut down.  Preventing on-going dmas, and placing
 *   the devices in a consistent state so a later kernel can
 *   reinitialize them.
 *
 * - A machine specific part that includes the syscall number
 *   and then copies the image to its final destination.  And
 *   jumps into the image at entry.
 *
 * kexec does not sync, or unmount filesystems so if you need
 * that to happen you need to do that yourself.
 */
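
/*
 * Editor's illustration (not part of this file): a minimal user-space
 * invocation of the call once this module has patched sys_call_table.
 * entry_addr, kernel_buf and kernel_len are placeholders; real loaders
 * (kexec-tools) build the segment array for you.
 *
 *      struct kexec_segment seg = {
 *              .buf   = kernel_buf,
 *              .bufsz = kernel_len,
 *              .mem   = (void *)0x8000,        // hypothetical load address
 *              .memsz = (kernel_len + 4095) & ~4095UL,
 *      };
 *      syscall(__NR_kexec_load, entry_addr, 1, &seg, KEXEC_ARCH_DEFAULT);
 */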
struct kimage *kexec_image;
struct kimage *kexec_crash_image;

static DEFINE_MUTEX(kexec_mutex);
asmlinkage long kexec_load(unsigned long entry, unsigned long nr_segments,
                           struct kexec_segment __user *segments,
                           unsigned long flags)
{
        struct kimage **dest_image, *image;
        /* We only trust the superuser with rebooting the system. */
        if (!capable(CAP_SYS_BOOT))

        /*
         * Verify we have a legal set of flags
         * This leaves us room for future extensions.
         */
        if ((flags & KEXEC_FLAGS) != (flags & ~KEXEC_ARCH_MASK))

        /* Verify we are on the appropriate architecture */
        if (((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH) &&
                ((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH_DEFAULT))

        /* Put an artificial cap on the number
         * of segments passed to kexec_load.
         */
        if (nr_segments > KEXEC_SEGMENT_MAX)
        /* Because we write directly to the reserved memory
         * region when loading crash kernels we need a mutex here to
         * prevent multiple crash kernels from attempting to load
         * simultaneously, and to prevent a crash kernel from loading
         * over the top of an in-use crash kernel.
         *
         * KISS: always take the mutex.
         */
        if (!mutex_trylock(&kexec_mutex))

        dest_image = &kexec_image;
        if (flags & KEXEC_ON_CRASH)
                dest_image = &kexec_crash_image;
        if (nr_segments > 0) {
                /* Loading another kernel to reboot into */
                if ((flags & KEXEC_ON_CRASH) == 0)
                        result = kimage_normal_alloc(&image, entry,
                                                     nr_segments, segments);
                /* Loading another kernel to switch to if this one crashes */
                else if (flags & KEXEC_ON_CRASH) {
                        /* Free any current crash dump kernel before
                         * we corrupt it.
                         */
                        kimage_free(xchg(&kexec_crash_image, NULL));
                        result = kimage_crash_alloc(&image, entry,
                                                    nr_segments, segments);
                }

                if (flags & KEXEC_PRESERVE_CONTEXT)
                        image->preserve_context = 1;
                result = machine_kexec_prepare(image);
                for (i = 0; i < nr_segments; i++) {
                        result = kimage_load_segment(image, &image->segment[i]);
                }
                kimage_terminate(image);
        }
        /* Install the new kernel, and uninstall the old */
        image = xchg(dest_image, image);

        mutex_unlock(&kexec_mutex);
#ifdef CONFIG_COMPAT
asmlinkage long compat_sys_kexec_load(unsigned long entry,
                                      unsigned long nr_segments,
                                      struct compat_kexec_segment __user *segments,
                                      unsigned long flags)
{
        struct compat_kexec_segment in;
        struct kexec_segment out, __user *ksegments;
        unsigned long i, result;
        /* Don't allow clients that don't understand the native
         * architecture to do anything.
         */
        if ((flags & KEXEC_ARCH_MASK) == KEXEC_ARCH_DEFAULT)

        if (nr_segments > KEXEC_SEGMENT_MAX)

        ksegments = compat_alloc_user_space(nr_segments * sizeof(out));
        for (i = 0; i < nr_segments; i++) {
                result = copy_from_user(&in, &segments[i], sizeof(in));

                out.buf   = compat_ptr(in.buf);
                out.bufsz = in.bufsz;
                out.memsz = in.memsz;

                result = copy_to_user(&ksegments[i], &out, sizeof(out));
        }

        return sys_kexec_load(entry, nr_segments, ksegments, flags);
void crash_kexec(struct pt_regs *regs)
{
        /* Take the kexec_mutex here to prevent sys_kexec_load
         * running on one cpu from replacing the crash kernel
         * we are using after a panic on a different cpu.
         *
         * If the crash kernel was not located in a fixed area
         * of memory the xchg(&kexec_crash_image) would be
         * sufficient.  But since I reuse the memory...
         */
        if (mutex_trylock(&kexec_mutex)) {
                if (kexec_crash_image) {
                        struct pt_regs fixed_regs;

                        crash_setup_regs(&fixed_regs, regs);
                        crash_save_vmcoreinfo();
                        machine_crash_shutdown(&fixed_regs);
                        machine_kexec(kexec_crash_image);
                }
                mutex_unlock(&kexec_mutex);
        }
}
static u32 *append_elf_note(u32 *buf, char *name, unsigned type, void *data,
                            size_t data_len)
{
        struct elf_note note;

        note.n_namesz = strlen(name) + 1;
        note.n_descsz = data_len;
        memcpy(buf, &note, sizeof(note));
        buf += (sizeof(note) + 3)/4;
        memcpy(buf, name, note.n_namesz);
        buf += (note.n_namesz + 3)/4;
        memcpy(buf, data, note.n_descsz);
        buf += (note.n_descsz + 3)/4;
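
/*
 * Editor's note (illustrative): append_elf_note() above emits the standard
 * ELF note layout, with each chunk padded to 4-byte units:
 *
 *      u32  n_namesz;          strlen(name) + 1
 *      u32  n_descsz;          data_len
 *      u32  n_type;            type
 *      char name[];            padded to a multiple of 4
 *      char desc[];            padded to a multiple of 4
 *
 * which is why the buffer pointer advances by (len + 3)/4 u32 words after
 * each memcpy().
 */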
static void final_note(u32 *buf)
{
        struct elf_note note;

        memcpy(buf, &note, sizeof(note));
}
void crash_save_cpu(struct pt_regs *regs, int cpu)
{
        struct elf_prstatus prstatus;

        if ((cpu < 0) || (cpu >= nr_cpu_ids))

        /* Using ELF notes here is opportunistic.
         * I need a well defined structure format
         * for the data I pass, and I need tags
         * on the data to indicate what information I have
         * squirrelled away.  ELF notes happen to provide
         * all of that, so there is no need to invent something new.
         */
        buf = (u32 *)per_cpu_ptr(crash_notes, cpu);
        memset(&prstatus, 0, sizeof(prstatus));
        prstatus.pr_pid = current->pid;
        elf_core_copy_kernel_regs(&prstatus.pr_reg, regs);
        buf = append_elf_note(buf, KEXEC_CORE_NOTE_NAME, NT_PRSTATUS,
                              &prstatus, sizeof(prstatus));
/*
 * parsing the "crashkernel" commandline
 *
 * this code is intended to be called from architecture specific code
 */

/*
 * This function parses command lines in the format
 *
 *   crashkernel=ramsize-range:size[,...][@offset]
 *
 * The function returns 0 on success and -EINVAL on failure.
 */
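
/*
 * Editor's example of the extended syntax (illustrative):
 *
 *      crashkernel=512M-2G:64M,2G-:128M@16M
 *
 * reserves 64M when the system has between 512M and 2G of RAM, 128M when
 * it has 2G or more, and (optionally) places the reservation at 16M.
 */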
static int __init parse_crashkernel_mem(char                  *cmdline,
                                        unsigned long long    system_ram,
                                        unsigned long long    *crash_size,
                                        unsigned long long    *crash_base)
{
        char *cur = cmdline, *tmp;

        /* for each entry of the comma-separated list */
        do {
                unsigned long long start, end = ULLONG_MAX, size;

                /* get the start of the range */
                start = memparse(cur, &tmp);
                        pr_warning("crashkernel: Memory value expected\n");
                        pr_warning("crashkernel: '-' expected\n");

                /* if no ':' is here, then we read the end */
                        end = memparse(cur, &tmp);
                                pr_warning("crashkernel: Memory "
                                                "value expected\n");
                                pr_warning("crashkernel: end <= start\n");
                        pr_warning("crashkernel: ':' expected\n");

                size = memparse(cur, &tmp);
                        pr_warning("Memory value expected\n");
                if (size >= system_ram) {
                        pr_warning("crashkernel: invalid size\n");

                if (system_ram >= start && system_ram < end) {
        } while (*cur++ == ',');
        if (*crash_size > 0) {
                while (*cur && *cur != ' ' && *cur != '@')
                        *crash_base = memparse(cur, &tmp);
                                pr_warning("Memory value expected "
                                                "after '@'\n");
/*
 * That function parses "simple" (old) crashkernel command lines like
 *
 *      crashkernel=size[@offset]
 *
 * It returns 0 on success and -EINVAL on failure.
 */
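
/*
 * Editor's example of the classic syntax (illustrative):
 *
 *      crashkernel=64M@16M
 *
 * reserves 64M for the crash kernel starting at physical address 16M;
 * with no "@offset" the architecture code picks the base itself.
 */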
static int __init parse_crashkernel_simple(char                 *cmdline,
                                           unsigned long long   *crash_size,
                                           unsigned long long   *crash_base)
{
        char *cur = cmdline;

        *crash_size = memparse(cmdline, &cur);
        if (cmdline == cur) {
                pr_warning("crashkernel: memory value expected\n");

                *crash_base = memparse(cur+1, &cur);
/*
 * That function is the entry point for command line parsing and should be
 * called from the arch-specific code.
 */
int __init parse_crashkernel(char                *cmdline,
                             unsigned long long system_ram,
                             unsigned long long *crash_size,
                             unsigned long long *crash_base)
{
        char    *p = cmdline, *ck_cmdline = NULL;
        char    *first_colon, *first_space;

        BUG_ON(!crash_size || !crash_base);

        /* find crashkernel and use the last one if there are more */
        p = strstr(p, "crashkernel=");
                p = strstr(p+1, "crashkernel=");

        ck_cmdline += 12; /* strlen("crashkernel=") */

        /*
         * if the commandline contains a ':', then that's the extended
         * syntax -- if not, it must be the classic syntax
         */
        first_colon = strchr(ck_cmdline, ':');
        first_space = strchr(ck_cmdline, ' ');
        if (first_colon && (!first_space || first_colon < first_space))
                return parse_crashkernel_mem(ck_cmdline, system_ram,
                                crash_size, crash_base);
        else
                return parse_crashkernel_simple(ck_cmdline, crash_size,
                                crash_base);
}
void crash_save_vmcoreinfo(void)
{
        if (!vmcoreinfo_size)

        vmcoreinfo_append_str("CRASHTIME=%ld", get_seconds());

        buf = (u32 *)vmcoreinfo_note;
        buf = append_elf_note(buf, VMCOREINFO_NOTE_NAME, 0, vmcoreinfo_data,
                              vmcoreinfo_size);
void vmcoreinfo_append_str(const char *fmt, ...)
{
        va_start(args, fmt);
        r = vsnprintf(buf, sizeof(buf), fmt, args);

        if (r + vmcoreinfo_size > vmcoreinfo_max_size)
                r = vmcoreinfo_max_size - vmcoreinfo_size;

        memcpy(&vmcoreinfo_data[vmcoreinfo_size], buf, r);

        vmcoreinfo_size += r;
}
/*
 * provide an empty default implementation here -- architecture
 * code may override this
 */
void __attribute__ ((weak)) arch_crash_save_vmcoreinfo(void)
{}

unsigned long __attribute__ ((weak)) paddr_vmcoreinfo_note(void)
{
        return __pa((unsigned long)(char *)&vmcoreinfo_note);
}
/*
 * Move into place and start executing a preloaded standalone
 * executable.  If nothing was preloaded return an error.
 */
int kernel_kexec(void)
{
        if (!mutex_trylock(&kexec_mutex))

#ifdef CONFIG_KEXEC_JUMP
        if (kexec_image->preserve_context) {
                mutex_lock(&pm_mutex);
                pm_prepare_console();
                error = freeze_processes();
                        goto Restore_console;
                error = dpm_suspend_start(PMSG_FREEZE);
                        goto Resume_console;
                /* At this point, dpm_suspend_start() has been called,
                 * but *not* dpm_suspend_noirq(). We *must* call
                 * dpm_suspend_noirq() now.  Otherwise, drivers for
                 * some devices (e.g. interrupt controllers) become
                 * desynchronized with the actual state of the
                 * hardware at resume time, and evil weirdness ensues.
                 */
                error = dpm_suspend_noirq(PMSG_FREEZE);
                        goto Resume_devices;
                error = disable_nonboot_cpus();
                local_irq_disable();
                /* Suspend system devices */
                error = sysdev_suspend(PMSG_FREEZE);
                kernel_restart_prepare(NULL);
                printk(KERN_EMERG "Starting new kernel\n");

        machine_kexec(kexec_image);

#ifdef CONFIG_KEXEC_JUMP
        if (kexec_image->preserve_context) {
                enable_nonboot_cpus();
                dpm_resume_noirq(PMSG_RESTORE);
                dpm_resume_end(PMSG_RESTORE);
                pm_restore_console();
                mutex_unlock(&pm_mutex);

        mutex_unlock(&kexec_mutex);
unsigned long **find_sys_call_table(void)
{
        unsigned long **sctable;

        extern int loops_per_jiffy;

        for (ptr = (unsigned long)&unlock_kernel;
             ptr < (unsigned long)&loops_per_jiffy;
             ptr += sizeof(void *)) {
                p = (unsigned long *)ptr;
                if (p[__NR_close] == (unsigned long)sys_close) {
                        sctable = (unsigned long **)p;
static int __init kexec_module_init(void)
{
        sys_call_table = (void **)find_sys_call_table();
        if (sys_call_table == NULL) {
                printk(KERN_ERR "Cannot find the system call address\n");
                return -1;  /* do not load */
        }

        printk(KERN_INFO "kexec: Found sys_call_table at: %p\n", sys_call_table);

        /* sys_call_table = (void **)0xc003d004; */
        sys_call_table = (void **)0xc00350c4;
        printk(KERN_INFO "kexec: Force sys_call_table at: %p\n", sys_call_table);

        /* Set kexec_load() syscall. */
        sys_call_table[__NR_kexec_load] = kexec_load;

        /* Swap reboot() syscall and store original */
        original_reboot = sys_call_table[__NR_reboot];
        sys_call_table[__NR_reboot] = reboot;

        /* crash_notes_memory_init */
        /* Allocate memory for saving cpu registers. */
        crash_notes = alloc_percpu(note_buf_t);
                printk("Kexec: Memory allocation for saving cpu register"
                       " states failed\n");
        /* crash_vmcoreinfo_init */
        VMCOREINFO_OSRELEASE(init_uts_ns.name.release);
        VMCOREINFO_PAGESIZE(PAGE_SIZE);

        VMCOREINFO_SYMBOL(init_uts_ns);
        VMCOREINFO_SYMBOL(node_online_map);
#ifndef CONFIG_NEED_MULTIPLE_NODES
        VMCOREINFO_SYMBOL(mem_map);
        VMCOREINFO_SYMBOL(contig_page_data);
#endif
#ifdef CONFIG_SPARSEMEM
        VMCOREINFO_SYMBOL(mem_section);
        VMCOREINFO_LENGTH(mem_section, NR_SECTION_ROOTS);
        VMCOREINFO_STRUCT_SIZE(mem_section);
        VMCOREINFO_OFFSET(mem_section, section_mem_map);
#endif
        VMCOREINFO_STRUCT_SIZE(page);
        VMCOREINFO_STRUCT_SIZE(pglist_data);
        VMCOREINFO_STRUCT_SIZE(zone);
        VMCOREINFO_STRUCT_SIZE(free_area);
        VMCOREINFO_STRUCT_SIZE(list_head);
        VMCOREINFO_SIZE(nodemask_t);
        VMCOREINFO_OFFSET(page, flags);
        VMCOREINFO_OFFSET(page, _count);
        VMCOREINFO_OFFSET(page, mapping);
        VMCOREINFO_OFFSET(page, lru);
        VMCOREINFO_OFFSET(pglist_data, node_zones);
        VMCOREINFO_OFFSET(pglist_data, nr_zones);
#ifdef CONFIG_FLAT_NODE_MEM_MAP
        VMCOREINFO_OFFSET(pglist_data, node_mem_map);
#endif
        VMCOREINFO_OFFSET(pglist_data, node_start_pfn);
        VMCOREINFO_OFFSET(pglist_data, node_spanned_pages);
        VMCOREINFO_OFFSET(pglist_data, node_id);
        VMCOREINFO_OFFSET(zone, free_area);
        VMCOREINFO_OFFSET(zone, vm_stat);
        VMCOREINFO_OFFSET(zone, spanned_pages);
        VMCOREINFO_OFFSET(free_area, free_list);
        VMCOREINFO_OFFSET(list_head, next);
        VMCOREINFO_OFFSET(list_head, prev);
        VMCOREINFO_OFFSET(vm_struct, addr);
        VMCOREINFO_LENGTH(zone.free_area, MAX_ORDER);
        VMCOREINFO_LENGTH(free_area.free_list, MIGRATE_TYPES);
        VMCOREINFO_NUMBER(NR_FREE_PAGES);
        VMCOREINFO_NUMBER(PG_lru);
        VMCOREINFO_NUMBER(PG_private);
        VMCOREINFO_NUMBER(PG_swapcache);

        arch_crash_save_vmcoreinfo();

        return 0;
}

module_init(kexec_module_init);