/*
 * kexec.c - kexec system call
 * Copyright (C) 2002-2004 Eric Biederman <ebiederm@xmission.com>
 * This edition is maintained by Matthew Veety (aliasxerog) <mveety@gmail.com>
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2. See the file COPYING for more details.
 */

#include <linux/capability.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/kexec.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/syscalls.h>
#include <linux/reboot.h>
#include <linux/ioport.h>
#include <linux/hardirq.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/utsrelease.h>
#include <linux/utsname.h>
#include <linux/numa.h>
#include <linux/suspend.h>
#include <linux/device.h>
#include <linux/freezer.h>
#include <linux/pm.h>
#include <linux/cpu.h>
#include <linux/console.h>
#include <linux/vmalloc.h>

#include <asm/page.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/system.h>
#include <asm/sections.h>
#include <asm/unistd.h>

MODULE_LICENSE("GPL");

/* Syscall table */
void **sys_call_table;

/* original and new reboot syscall */
asmlinkage long (*original_reboot)(int magic1, int magic2, unsigned int cmd, void __user *arg);
extern asmlinkage long reboot(int magic1, int magic2, unsigned int cmd, void __user *arg);

/* Per cpu memory for storing cpu states in case of system crash. */
note_buf_t *crash_notes;

/* vmcoreinfo stuff */
unsigned char vmcoreinfo_data[VMCOREINFO_BYTES];
u32 vmcoreinfo_note[VMCOREINFO_NOTE_SIZE/4];
size_t vmcoreinfo_size;
size_t vmcoreinfo_max_size = sizeof(vmcoreinfo_data);

/* Location of the reserved area for the crash kernel */
struct resource crashk_res = {
    .name  = "Crash kernel",
    .start = 0,
    .end   = 0,
    .flags = IORESOURCE_BUSY | IORESOURCE_MEM
};

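/* Decide whether a dying context warrants switching to the crash
 * kernel: an oops in interrupt context, in the idle task (pid 0),
 * or in global init is fatal enough that a crash dump is the only
 * useful response.
 */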
int kexec_should_crash(struct task_struct *p)
{
    if (in_interrupt() || !p->pid || is_global_init(p))
        return 1;
    return 0;
}

/*
 * When kexec transitions to the new kernel there is a one-to-one
 * mapping between physical and virtual addresses. On processors
 * where you can disable the MMU this is trivial, and easy. For
 * others it is still a simple predictable page table to setup.
 *
 * In that environment kexec copies the new kernel to its final
 * resting place. This means I can only support memory whose
 * physical address can fit in an unsigned long. In particular
 * addresses where (pfn << PAGE_SHIFT) > ULONG_MAX cannot be handled.
 * If the assembly stub has more restrictive requirements
 * KEXEC_SOURCE_MEMORY_LIMIT and KEXEC_DEST_MEMORY_LIMIT can be
 * defined more restrictively in <asm/kexec.h>.
 *
 * The code for the transition from the current kernel to the
 * new kernel is placed in the control_code_buffer, whose size
 * is given by KEXEC_CONTROL_PAGE_SIZE. In the best case only a single
 * page of memory is necessary, but some architectures require more.
 * Because this memory must be identity mapped in the transition from
 * virtual to physical addresses it must live in the range
 * 0 - TASK_SIZE, as only the user space mappings are arbitrarily
 * modifiable.
 *
 * The assembly stub in the control code buffer is passed a linked list
 * of descriptor pages detailing the source pages of the new kernel,
 * and the destination addresses of those source pages. As this data
 * structure is not used in the context of the current OS, it must
 * be self-contained.
 *
 * The code has been made to work with highmem pages and will use a
 * destination page in its final resting place (if it happens
 * to allocate it). The end product of this is that most of the
 * physical address space, and most of RAM, can be used.
 *
 * Future directions include:
 *  - allocating a page table with the control code buffer identity
 *    mapped, to simplify machine_kexec and make kexec_on_panic more
 *    reliable.
 */
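
/*
 * A sketch of the descriptor encoding used by the routines below:
 * each kimage_entry_t is a page-aligned physical address with flag
 * bits in the low bits. IND_DESTINATION sets the current destination
 * address, IND_SOURCE names a source page to copy there (the implicit
 * destination then advances by PAGE_SIZE), IND_INDIRECTION chains to
 * the next page of descriptors, and IND_DONE terminates the list.
 */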

/*
 * KIMAGE_NO_DEST is an impossible destination address, for
 * allocating pages whose destination address we do not care about.
 */
#define KIMAGE_NO_DEST (-1UL)

static int kimage_is_destination_range(struct kimage *image,
                                       unsigned long start, unsigned long end);
static struct page *kimage_alloc_page(struct kimage *image,
                                      gfp_t gfp_mask,
                                      unsigned long dest);

static int do_kimage_alloc(struct kimage **rimage, unsigned long entry,
                           unsigned long nr_segments,
                           struct kexec_segment __user *segments)
{
    size_t segment_bytes;
    struct kimage *image;
    unsigned long i;
    int result;

    /* Allocate a controlling structure */
    result = -ENOMEM;
    image = kzalloc(sizeof(*image), GFP_KERNEL);
    if (!image)
        goto out;

    image->head = 0;
    image->entry = &image->head;
    image->last_entry = &image->head;
    image->control_page = ~0; /* By default this does not apply */
    image->start = entry;
    image->type = KEXEC_TYPE_DEFAULT;

    /* Initialize the list of control pages */
    INIT_LIST_HEAD(&image->control_pages);

    /* Initialize the list of destination pages */
    INIT_LIST_HEAD(&image->dest_pages);

    /* Initialize the list of unusable pages */
    INIT_LIST_HEAD(&image->unuseable_pages);

    /* Read in the segments */
    image->nr_segments = nr_segments;
    segment_bytes = nr_segments * sizeof(*segments);
    result = copy_from_user(image->segment, segments, segment_bytes);
    if (result) {
        result = -EFAULT;
        goto out;
    }

    /*
     * Verify we have good destination addresses. The caller is
     * responsible for making certain we don't attempt to load
     * the new image into invalid or reserved areas of RAM. This
     * just verifies it is an address we can use.
     *
     * Since the kernel does everything in page size chunks ensure
     * the destination addresses are page aligned. Too many
     * special cases crop up when we don't do this. The most
     * insidious is getting overlapping destination addresses
     * simply because addresses are changed to page size
     * granularity.
     */
    result = -EADDRNOTAVAIL;
    for (i = 0; i < nr_segments; i++) {
        unsigned long mstart, mend;

        mstart = image->segment[i].mem;
        mend = mstart + image->segment[i].memsz;
        if ((mstart & ~PAGE_MASK) || (mend & ~PAGE_MASK))
            goto out;
        if (mend >= KEXEC_DESTINATION_MEMORY_LIMIT)
            goto out;
    }

    /* Verify our destination addresses do not overlap.
     * If we allowed overlapping destination addresses
     * through, very weird things could happen with no
     * easy explanation as one segment stops on another.
     */
    result = -EINVAL;
    for (i = 0; i < nr_segments; i++) {
        unsigned long mstart, mend;
        unsigned long j;

        mstart = image->segment[i].mem;
        mend = mstart + image->segment[i].memsz;
        for (j = 0; j < i; j++) {
            unsigned long pstart, pend;
            pstart = image->segment[j].mem;
            pend = pstart + image->segment[j].memsz;
            /* Do the segments overlap? */
            if ((mend > pstart) && (mstart < pend))
                goto out;
        }
    }

    /* Ensure our buffer sizes are not larger than
     * our memory sizes. This should always be the case,
     * and it is easier to check up front than to be surprised
     * later on.
     */
    result = -EINVAL;
    for (i = 0; i < nr_segments; i++) {
        if (image->segment[i].bufsz > image->segment[i].memsz)
            goto out;
    }

    result = 0;
out:
    if (result == 0)
        *rimage = image;
    else
        kfree(image);

    return result;
}

static int kimage_normal_alloc(struct kimage **rimage, unsigned long entry,
                               unsigned long nr_segments,
                               struct kexec_segment __user *segments)
{
    int result;
    struct kimage *image;

    /* Allocate and initialize a controlling structure */
    image = NULL;
    result = do_kimage_alloc(&image, entry, nr_segments, segments);
    if (result)
        goto out;

    *rimage = image;

    /*
     * Find a location for the control code buffer, and add it to
     * the vector of segments so that its pages will also be
     * counted as destination pages.
     */
    result = -ENOMEM;
    image->control_code_page = kimage_alloc_control_pages(image,
                        get_order(KEXEC_CONTROL_PAGE_SIZE));
    if (!image->control_code_page) {
        printk(KERN_ERR "Could not allocate control_code_buffer\n");
        goto out;
    }

    image->swap_page = kimage_alloc_control_pages(image, 0);
    if (!image->swap_page) {
        printk(KERN_ERR "Could not allocate swap buffer\n");
        goto out;
    }

    result = 0;
out:
    if (result == 0)
        *rimage = image;
    else
        kfree(image);

    return result;
}

static int kimage_crash_alloc(struct kimage **rimage, unsigned long entry,
                              unsigned long nr_segments,
                              struct kexec_segment __user *segments)
{
    int result;
    struct kimage *image;
    unsigned long i;

    image = NULL;
    /* Verify we have a valid entry point */
    if ((entry < crashk_res.start) || (entry > crashk_res.end)) {
        result = -EADDRNOTAVAIL;
        goto out;
    }

    /* Allocate and initialize a controlling structure */
    result = do_kimage_alloc(&image, entry, nr_segments, segments);
    if (result)
        goto out;

    /* Enable the special crash kernel control page
     * allocation policy.
     */
    image->control_page = crashk_res.start;
    image->type = KEXEC_TYPE_CRASH;

    /*
     * Verify we have good destination addresses. Normally
     * the caller is responsible for making certain we don't
     * attempt to load the new image into invalid or reserved
     * areas of RAM. But crash kernels are preloaded into a
     * reserved area of RAM. We must ensure the addresses
     * are in the reserved area, otherwise preloading the
     * kernel could corrupt things.
     */
    result = -EADDRNOTAVAIL;
    for (i = 0; i < nr_segments; i++) {
        unsigned long mstart, mend;

        mstart = image->segment[i].mem;
        mend = mstart + image->segment[i].memsz - 1;
        /* Ensure we are within the crash kernel limits */
        if ((mstart < crashk_res.start) || (mend > crashk_res.end))
            goto out;
    }

    /*
     * Find a location for the control code buffer, and add it to
     * the vector of segments so that its pages will also be
     * counted as destination pages.
     */
    result = -ENOMEM;
    image->control_code_page = kimage_alloc_control_pages(image,
                        get_order(KEXEC_CONTROL_PAGE_SIZE));
    if (!image->control_code_page) {
        printk(KERN_ERR "Could not allocate control_code_buffer\n");
        goto out;
    }

    result = 0;
out:
    if (result == 0)
        *rimage = image;
    else
        kfree(image);

    return result;
}

static int kimage_is_destination_range(struct kimage *image,
                                       unsigned long start,
                                       unsigned long end)
{
    unsigned long i;

    for (i = 0; i < image->nr_segments; i++) {
        unsigned long mstart, mend;

        mstart = image->segment[i].mem;
        mend = mstart + image->segment[i].memsz;
        if ((end > mstart) && (start < mend))
            return 1;
    }

    return 0;
}

static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order)
{
    struct page *pages;

    pages = alloc_pages(gfp_mask, order);
    if (pages) {
        unsigned int count, i;
        pages->mapping = NULL;
        set_page_private(pages, order);
        count = 1 << order;
        for (i = 0; i < count; i++)
            SetPageReserved(pages + i);
    }

    return pages;
}

static void kimage_free_pages(struct page *page)
{
    unsigned int order, count, i;

    order = page_private(page);
    count = 1 << order;
    for (i = 0; i < count; i++)
        ClearPageReserved(page + i);
    __free_pages(page, order);
}

static void kimage_free_page_list(struct list_head *list)
{
    struct list_head *pos, *next;

    list_for_each_safe(pos, next, list) {
        struct page *page;

        page = list_entry(pos, struct page, lru);
        list_del(&page->lru);
        kimage_free_pages(page);
    }
}

static struct page *kimage_alloc_normal_control_pages(struct kimage *image,
                                                      unsigned int order)
{
    /* Control pages are special, they are the intermediaries
     * that are needed while we copy the rest of the pages
     * to their final resting place. As such they must
     * not conflict with either the destination addresses
     * or memory the kernel is already using.
     *
     * The only case where we really need more than one of
     * these is for architectures where we cannot disable
     * the MMU and must instead generate an identity mapped
     * page table for all of the memory.
     *
     * At worst this runs in O(N) of the image size.
     */
    struct list_head extra_pages;
    struct page *pages;
    unsigned int count;

    count = 1 << order;
    INIT_LIST_HEAD(&extra_pages);

    /* Loop while I can allocate a page and the page allocated
     * is a destination page.
     */
    do {
        unsigned long pfn, epfn, addr, eaddr;

        pages = kimage_alloc_pages(GFP_KERNEL, order);
        if (!pages)
            break;
        pfn   = page_to_pfn(pages);
        epfn  = pfn + count;
        addr  = pfn << PAGE_SHIFT;
        eaddr = epfn << PAGE_SHIFT;
        if ((epfn >= (KEXEC_CONTROL_MEMORY_LIMIT >> PAGE_SHIFT)) ||
            kimage_is_destination_range(image, addr, eaddr)) {
            list_add(&pages->lru, &extra_pages);
            pages = NULL;
        }
    } while (!pages);

    if (pages) {
        /* Remember the allocated page... */
        list_add(&pages->lru, &image->control_pages);

        /* Because the page is already in its destination
         * location we will never allocate another page at
         * that address. Therefore kimage_alloc_pages
         * will not return it (again) and we don't need
         * to give it an entry in image->segment[].
         */
    }
    /* Deal with the destination pages I have inadvertently allocated.
     *
     * Ideally I would convert multi-page allocations into single
     * page allocations, and add everything to image->dest_pages.
     *
     * For now it is simpler to just free the pages.
     */
    kimage_free_page_list(&extra_pages);

    return pages;
}

static struct page *kimage_alloc_crash_control_pages(struct kimage *image,
                                                     unsigned int order)
{
    /* Control pages are special, they are the intermediaries
     * that are needed while we copy the rest of the pages
     * to their final resting place. As such they must
     * not conflict with either the destination addresses
     * or memory the kernel is already using.
     *
     * Control pages are also the only pages we must allocate
     * when loading a crash kernel. All of the other pages
     * are specified by the segments and we just memcpy
     * into them directly.
     *
     * The only case where we really need more than one of
     * these is for architectures where we cannot disable
     * the MMU and must instead generate an identity mapped
     * page table for all of the memory.
     *
     * Given the low demand this implements a very simple
     * allocator that finds the first hole of the appropriate
     * size in the reserved memory region, and allocates all
     * of the memory up to and including the hole.
     */
    unsigned long hole_start, hole_end, size;
    struct page *pages;

    pages = NULL;
    size = (1 << order) << PAGE_SHIFT;
    hole_start = (image->control_page + (size - 1)) & ~(size - 1);
    hole_end   = hole_start + size - 1;
    while (hole_end <= crashk_res.end) {
        unsigned long i;

        if (hole_end > KEXEC_CONTROL_MEMORY_LIMIT)
            break;
        if (hole_end > crashk_res.end)
            break;
        /* See if I overlap any of the segments */
        for (i = 0; i < image->nr_segments; i++) {
            unsigned long mstart, mend;

            mstart = image->segment[i].mem;
            mend = mstart + image->segment[i].memsz - 1;
            if ((hole_end >= mstart) && (hole_start <= mend)) {
                /* Advance the hole to the end of the segment */
                hole_start = (mend + (size - 1)) & ~(size - 1);
                hole_end   = hole_start + size - 1;
                break;
            }
        }
        /* If I don't overlap any segments I have found my hole! */
        if (i == image->nr_segments) {
            pages = pfn_to_page(hole_start >> PAGE_SHIFT);
            break;
        }
    }
    if (pages)
        image->control_page = hole_end;

    return pages;
}

struct page *kimage_alloc_control_pages(struct kimage *image,
                                        unsigned int order)
{
    struct page *pages = NULL;

    switch (image->type) {
    case KEXEC_TYPE_DEFAULT:
        pages = kimage_alloc_normal_control_pages(image, order);
        break;
    case KEXEC_TYPE_CRASH:
        pages = kimage_alloc_crash_control_pages(image, order);
        break;
    }

    return pages;
}

static int kimage_add_entry(struct kimage *image, kimage_entry_t entry)
{
    if (*image->entry != 0)
        image->entry++;

    if (image->entry == image->last_entry) {
        kimage_entry_t *ind_page;
        struct page *page;

        page = kimage_alloc_page(image, GFP_KERNEL, KIMAGE_NO_DEST);
        if (!page)
            return -ENOMEM;

        ind_page = page_address(page);
        *image->entry = virt_to_phys(ind_page) | IND_INDIRECTION;
        image->entry = ind_page;
        image->last_entry = ind_page +
                    ((PAGE_SIZE/sizeof(kimage_entry_t)) - 1);
    }
    *image->entry = entry;
    image->entry++;
    *image->entry = 0;

    return 0;
}

static int kimage_set_destination(struct kimage *image,
                                  unsigned long destination)
{
    int result;

    destination &= PAGE_MASK;
    result = kimage_add_entry(image, destination | IND_DESTINATION);
    if (result == 0)
        image->destination = destination;

    return result;
}

static int kimage_add_page(struct kimage *image, unsigned long page)
{
    int result;

    page &= PAGE_MASK;
    result = kimage_add_entry(image, page | IND_SOURCE);
    if (result == 0)
        image->destination += PAGE_SIZE;

    return result;
}

static void kimage_free_extra_pages(struct kimage *image)
{
    /* Walk through and free any extra destination pages I may have */
    kimage_free_page_list(&image->dest_pages);

    /* Walk through and free any unusable pages I have cached */
    kimage_free_page_list(&image->unuseable_pages);
}

static void kimage_terminate(struct kimage *image)
{
    if (*image->entry != 0)
        image->entry++;

    *image->entry = IND_DONE;
}

#define for_each_kimage_entry(image, ptr, entry) \
    for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \
         ptr = (entry & IND_INDIRECTION) ? \
               phys_to_virt(entry & PAGE_MASK) : ptr + 1)

static void kimage_free_entry(kimage_entry_t entry)
{
    struct page *page;

    page = pfn_to_page(entry >> PAGE_SHIFT);
    kimage_free_pages(page);
}

static void kimage_free(struct kimage *image)
{
    kimage_entry_t *ptr, entry;
    kimage_entry_t ind = 0;

    if (!image)
        return;

    kimage_free_extra_pages(image);
    for_each_kimage_entry(image, ptr, entry) {
        if (entry & IND_INDIRECTION) {
            /* Free the previous indirection page */
            if (ind & IND_INDIRECTION)
                kimage_free_entry(ind);
            /* Save this indirection page until we are
             * done with it.
             */
            ind = entry;
        } else if (entry & IND_SOURCE)
            kimage_free_entry(entry);
    }
    /* Free the final indirection page */
    if (ind & IND_INDIRECTION)
        kimage_free_entry(ind);

    /* Handle any machine specific cleanup */
    machine_kexec_cleanup(image);

    /* Free the kexec control pages... */
    kimage_free_page_list(&image->control_pages);
    kfree(image);
}

static kimage_entry_t *kimage_dst_used(struct kimage *image,
                                       unsigned long page)
{
    kimage_entry_t *ptr, entry;
    unsigned long destination = 0;

    for_each_kimage_entry(image, ptr, entry) {
        if (entry & IND_DESTINATION)
            destination = entry & PAGE_MASK;
        else if (entry & IND_SOURCE) {
            if (page == destination)
                return ptr;
            destination += PAGE_SIZE;
        }
    }

    return NULL;
}

static struct page *kimage_alloc_page(struct kimage *image,
                                      gfp_t gfp_mask,
                                      unsigned long destination)
{
    /*
     * Here we implement safeguards to ensure that a source page
     * is not copied to its destination page before the data on
     * the destination page is no longer useful.
     *
     * To do this we maintain the invariant that a source page is
     * either its own destination page, or it is not a
     * destination page at all.
     *
     * That is slightly stronger than required, but the proof
     * that no problems will occur is trivial, and the
     * implementation is simple to verify.
     *
     * When allocating all pages normally this algorithm will run
     * in O(N) time, but in the worst case it will run in O(N^2)
     * time. If the runtime is a problem the data structures can
     * be fixed.
     */
    struct page *page;
    unsigned long addr;

    /*
     * Walk through the list of destination pages, and see if I
     * have a match.
     */
    list_for_each_entry(page, &image->dest_pages, lru) {
        addr = page_to_pfn(page) << PAGE_SHIFT;
        if (addr == destination) {
            list_del(&page->lru);
            return page;
        }
    }
    page = NULL;
    while (1) {
        kimage_entry_t *old;

        /* Allocate a page, if we run out of memory give up */
        page = kimage_alloc_pages(gfp_mask, 0);
        if (!page)
            return NULL;
        /* If the page cannot be used file it away */
        if (page_to_pfn(page) >
            (KEXEC_SOURCE_MEMORY_LIMIT >> PAGE_SHIFT)) {
            list_add(&page->lru, &image->unuseable_pages);
            continue;
        }
        addr = page_to_pfn(page) << PAGE_SHIFT;

        /* If it is the destination page we want, use it */
        if (addr == destination)
            break;

        /* If the page is not a destination page use it */
        if (!kimage_is_destination_range(image, addr,
                                         addr + PAGE_SIZE))
            break;

        /*
         * I know that the page is someone's destination page.
         * See if there is already a source page for this
         * destination page. And if so swap the source pages.
         */
        old = kimage_dst_used(image, addr);
        if (old) {
            /* If so move it */
            unsigned long old_addr;
            struct page *old_page;

            old_addr = *old & PAGE_MASK;
            old_page = pfn_to_page(old_addr >> PAGE_SHIFT);
            copy_highpage(page, old_page);
            *old = addr | (*old & ~PAGE_MASK);

            /* The old page I have found cannot be a
             * destination page, so return it if its
             * gfp_flags honor the ones passed in.
             */
            if (!(gfp_mask & __GFP_HIGHMEM) &&
                PageHighMem(old_page)) {
                kimage_free_pages(old_page);
                continue;
            }
            addr = old_addr;
            page = old_page;
            break;
        } else {
            /* Place the page on the destination list; I
             * will use it later.
             */
            list_add(&page->lru, &image->dest_pages);
        }
    }

    return page;
}

static int kimage_load_normal_segment(struct kimage *image,
                                      struct kexec_segment *segment)
{
    unsigned long maddr;
    unsigned long ubytes, mbytes;
    int result;
    unsigned char __user *buf;

    result = 0;
    buf = segment->buf;
    ubytes = segment->bufsz;
    mbytes = segment->memsz;
    maddr = segment->mem;

    result = kimage_set_destination(image, maddr);
    if (result < 0)
        goto out;

    while (mbytes) {
        struct page *page;
        char *ptr;
        size_t uchunk, mchunk;

        page = kimage_alloc_page(image, GFP_HIGHUSER, maddr);
        if (!page) {
            result = -ENOMEM;
            goto out;
        }
        result = kimage_add_page(image, page_to_pfn(page)
                                 << PAGE_SHIFT);
        if (result < 0)
            goto out;

        ptr = kmap(page);
        /* Start with a clear page */
        clear_page(ptr);
        ptr += maddr & ~PAGE_MASK;
        mchunk = PAGE_SIZE - (maddr & ~PAGE_MASK);
        if (mchunk > mbytes)
            mchunk = mbytes;

        uchunk = mchunk;
        if (uchunk > ubytes)
            uchunk = ubytes;

        result = copy_from_user(ptr, buf, uchunk);
        kunmap(page);
        if (result) {
            result = -EFAULT;
            goto out;
        }
        ubytes -= uchunk;
        maddr += mchunk;
        buf += mchunk;
        mbytes -= mchunk;
    }
out:
    return result;
}

static int kimage_load_crash_segment(struct kimage *image,
                                     struct kexec_segment *segment)
{
    /* For crash dump kernels we simply copy the data from
     * user space to its destination.
     * We do things a page at a time for the sake of kmap.
     */
    unsigned long maddr;
    unsigned long ubytes, mbytes;
    int result;
    unsigned char __user *buf;

    result = 0;
    buf = segment->buf;
    ubytes = segment->bufsz;
    mbytes = segment->memsz;
    maddr = segment->mem;
    while (mbytes) {
        struct page *page;
        char *ptr;
        size_t uchunk, mchunk;

        page = pfn_to_page(maddr >> PAGE_SHIFT);
        if (!page) {
            result = -ENOMEM;
            goto out;
        }
        ptr = kmap(page);
        ptr += maddr & ~PAGE_MASK;
        mchunk = PAGE_SIZE - (maddr & ~PAGE_MASK);
        if (mchunk > mbytes)
            mchunk = mbytes;

        uchunk = mchunk;
        if (uchunk > ubytes) {
            uchunk = ubytes;
            /* Zero the trailing part of the page */
            memset(ptr + uchunk, 0, mchunk - uchunk);
        }
        result = copy_from_user(ptr, buf, uchunk);
        kexec_flush_icache_page(page);
        kunmap(page);
        if (result) {
            result = -EFAULT;
            goto out;
        }
        ubytes -= uchunk;
        maddr += mchunk;
        buf += mchunk;
        mbytes -= mchunk;
    }
out:
    return result;
}

static int kimage_load_segment(struct kimage *image,
                               struct kexec_segment *segment)
{
    int result = -ENOMEM;

    switch (image->type) {
    case KEXEC_TYPE_DEFAULT:
        result = kimage_load_normal_segment(image, segment);
        break;
    case KEXEC_TYPE_CRASH:
        result = kimage_load_crash_segment(image, segment);
        break;
    }

    return result;
}

/*
 * Exec Kernel system call: for obvious reasons only root may call it.
 *
 * This call breaks up into three pieces.
 * - A generic part which loads the new kernel from the current
 *   address space, and very carefully places the data in the
 *   allocated pages.
 *
 * - A generic part that interacts with the kernel and tells all of
 *   the devices to shut down, preventing ongoing DMAs and placing
 *   the devices in a consistent state so a later kernel can
 *   reinitialize them.
 *
 * - A machine specific part that copies the image to its final
 *   destination and jumps into the image at entry.
 *
 * kexec does not sync or unmount filesystems, so if you need
 * that to happen you must do it yourself.
 */
struct kimage *kexec_image;
struct kimage *kexec_crash_image;

static DEFINE_MUTEX(kexec_mutex);

asmlinkage long kexec_load(unsigned long entry, unsigned long nr_segments,
                           struct kexec_segment __user *segments,
                           unsigned long flags)
{
    struct kimage **dest_image, *image;
    int result;

    /* We only trust the superuser with rebooting the system. */
    if (!capable(CAP_SYS_BOOT))
        return -EPERM;

    /*
     * Verify we have a legal set of flags.
     * This leaves us room for future extensions.
     */
    if ((flags & KEXEC_FLAGS) != (flags & ~KEXEC_ARCH_MASK))
        return -EINVAL;

    /* Verify we are on the appropriate architecture */
    if (((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH) &&
        ((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH_DEFAULT))
        return -EINVAL;

    /* Put an artificial cap on the number
     * of segments passed to kexec_load.
     */
    if (nr_segments > KEXEC_SEGMENT_MAX)
        return -EINVAL;

    image = NULL;
    result = 0;

    /* Because we write directly to the reserved memory
     * region when loading crash kernels we need a mutex here to
     * prevent multiple crash kernels from attempting to load
     * simultaneously, and to prevent a crash kernel from loading
     * over the top of an in-use crash kernel.
     *
     * KISS: always take the mutex.
     */
    if (!mutex_trylock(&kexec_mutex))
        return -EBUSY;

    dest_image = &kexec_image;
    if (flags & KEXEC_ON_CRASH)
        dest_image = &kexec_crash_image;
    if (nr_segments > 0) {
        unsigned long i;

        /* Loading another kernel to reboot into */
        if ((flags & KEXEC_ON_CRASH) == 0)
            result = kimage_normal_alloc(&image, entry,
                                         nr_segments, segments);
        /* Loading another kernel to switch to if this one crashes */
        else if (flags & KEXEC_ON_CRASH) {
            /* Free any current crash dump kernel before
             * we corrupt it.
             */
            kimage_free(xchg(&kexec_crash_image, NULL));
            result = kimage_crash_alloc(&image, entry,
                                        nr_segments, segments);
        }
        if (result)
            goto out;

        if (flags & KEXEC_PRESERVE_CONTEXT)
            image->preserve_context = 1;
        result = machine_kexec_prepare(image);
        if (result)
            goto out;

        for (i = 0; i < nr_segments; i++) {
            result = kimage_load_segment(image, &image->segment[i]);
            if (result)
                goto out;
        }
        kimage_terminate(image);
    }
    /* Install the new kernel, and uninstall the old */
    image = xchg(dest_image, image);

out:
    mutex_unlock(&kexec_mutex);
    kimage_free(image);

    return result;
}

#ifdef CONFIG_COMPAT
asmlinkage long compat_sys_kexec_load(unsigned long entry,
                                      unsigned long nr_segments,
                                      struct compat_kexec_segment __user *segments,
                                      unsigned long flags)
{
    struct compat_kexec_segment in;
    struct kexec_segment out, __user *ksegments;
    unsigned long i, result;

    /* Don't allow clients that don't understand the native
     * architecture to do anything.
     */
    if ((flags & KEXEC_ARCH_MASK) == KEXEC_ARCH_DEFAULT)
        return -EINVAL;

    if (nr_segments > KEXEC_SEGMENT_MAX)
        return -EINVAL;

    ksegments = compat_alloc_user_space(nr_segments * sizeof(out));
    for (i = 0; i < nr_segments; i++) {
        result = copy_from_user(&in, &segments[i], sizeof(in));
        if (result)
            return -EFAULT;

        out.buf = compat_ptr(in.buf);
        out.bufsz = in.bufsz;
        out.mem = in.mem;
        out.memsz = in.memsz;

        result = copy_to_user(&ksegments[i], &out, sizeof(out));
        if (result)
            return -EFAULT;
    }

    return kexec_load(entry, nr_segments, ksegments, flags);
}
#endif

void crash_kexec(struct pt_regs *regs)
{
    /* Take the kexec_mutex here to prevent sys_kexec_load
     * running on one cpu from replacing the crash kernel
     * we are using after a panic on a different cpu.
     *
     * If the crash kernel was not located in a fixed area
     * of memory the xchg(&kexec_crash_image) would be
     * sufficient. But since I reuse the memory...
     */
    if (mutex_trylock(&kexec_mutex)) {
        if (kexec_crash_image) {
            struct pt_regs fixed_regs;
            crash_setup_regs(&fixed_regs, regs);
            crash_save_vmcoreinfo();
            machine_crash_shutdown(&fixed_regs);
            machine_kexec(kexec_crash_image);
        }
        mutex_unlock(&kexec_mutex);
    }
}

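/* Lay out an ELF note: the elf_note header (n_namesz, n_descsz,
 * n_type) is followed by the name and then the descriptor data,
 * each padded to a 4-byte boundary -- hence buf advancing by
 * (len + 3)/4 u32 words after every copy.
 */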
static u32 *append_elf_note(u32 *buf, char *name, unsigned type, void *data,
                            size_t data_len)
{
    struct elf_note note;

    note.n_namesz = strlen(name) + 1;
    note.n_descsz = data_len;
    note.n_type = type;
    memcpy(buf, &note, sizeof(note));
    buf += (sizeof(note) + 3)/4;
    memcpy(buf, name, note.n_namesz);
    buf += (note.n_namesz + 3)/4;
    memcpy(buf, data, note.n_descsz);
    buf += (note.n_descsz + 3)/4;

    return buf;
}

static void final_note(u32 *buf)
{
    struct elf_note note;

    note.n_namesz = 0;
    note.n_descsz = 0;
    note.n_type = 0;
    memcpy(buf, &note, sizeof(note));
}

void crash_save_cpu(struct pt_regs *regs, int cpu)
{
    struct elf_prstatus prstatus;
    u32 *buf;

    if ((cpu < 0) || (cpu >= nr_cpu_ids))
        return;

    /* Using ELF notes here is opportunistic.
     * I need a well defined structure format
     * for the data I pass, and I need tags
     * on the data to indicate what information I have
     * squirrelled away. ELF notes happen to provide
     * all of that, so there is no need to invent something new.
     */
    buf = (u32 *)per_cpu_ptr(crash_notes, cpu);
    if (!buf)
        return;
    memset(&prstatus, 0, sizeof(prstatus));
    prstatus.pr_pid = current->pid;
    elf_core_copy_kernel_regs(&prstatus.pr_reg, regs);
    buf = append_elf_note(buf, KEXEC_CORE_NOTE_NAME, NT_PRSTATUS,
                          &prstatus, sizeof(prstatus));
    final_note(buf);
}

/*
 * parsing the "crashkernel" commandline
 *
 * this code is intended to be called from architecture specific code
 */


/*
 * This function parses command lines in the format
 *
 *     crashkernel=ramsize-range:size[,...][@offset]
 *
 * The function returns 0 on success and -EINVAL on failure.
 */
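/* For example, crashkernel=512M-2G:64M,2G-:128M@16M reserves 64M when
 * system RAM falls in [512M, 2G) and 128M when it is 2G or more, with
 * the reservation placed at physical offset 16M.
 */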
static int __init parse_crashkernel_mem(char *cmdline,
                                        unsigned long long system_ram,
                                        unsigned long long *crash_size,
                                        unsigned long long *crash_base)
{
    char *cur = cmdline, *tmp;

    /* for each entry of the comma-separated list */
    do {
        unsigned long long start, end = ULLONG_MAX, size;

        /* get the start of the range */
        start = memparse(cur, &tmp);
        if (cur == tmp) {
            pr_warning("crashkernel: Memory value expected\n");
            return -EINVAL;
        }
        cur = tmp;
        if (*cur != '-') {
            pr_warning("crashkernel: '-' expected\n");
            return -EINVAL;
        }
        cur++;

        /* if no ':' is here, then we read the end */
        if (*cur != ':') {
            end = memparse(cur, &tmp);
            if (cur == tmp) {
                pr_warning("crashkernel: Memory "
                           "value expected\n");
                return -EINVAL;
            }
            cur = tmp;
            if (end <= start) {
                pr_warning("crashkernel: end <= start\n");
                return -EINVAL;
            }
        }

        if (*cur != ':') {
            pr_warning("crashkernel: ':' expected\n");
            return -EINVAL;
        }
        cur++;

        size = memparse(cur, &tmp);
        if (cur == tmp) {
            pr_warning("Memory value expected\n");
            return -EINVAL;
        }
        cur = tmp;
        if (size >= system_ram) {
            pr_warning("crashkernel: invalid size\n");
            return -EINVAL;
        }

        /* match? */
        if (system_ram >= start && system_ram < end) {
            *crash_size = size;
            break;
        }
    } while (*cur++ == ',');

    if (*crash_size > 0) {
        while (*cur && *cur != ' ' && *cur != '@')
            cur++;
        if (*cur == '@') {
            cur++;
            *crash_base = memparse(cur, &tmp);
            if (cur == tmp) {
                pr_warning("Memory value expected "
                           "after '@'\n");
                return -EINVAL;
            }
        }
    }

    return 0;
}

/*
 * This function parses "simple" (old) crashkernel command lines like
 *
 *     crashkernel=size[@offset]
 *
 * It returns 0 on success and -EINVAL on failure.
 */
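/* For example, crashkernel=64M@16M reserves 64M of RAM starting at
 * physical address 16M.
 */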
static int __init parse_crashkernel_simple(char *cmdline,
                                           unsigned long long *crash_size,
                                           unsigned long long *crash_base)
{
    char *cur = cmdline;

    *crash_size = memparse(cmdline, &cur);
    if (cmdline == cur) {
        pr_warning("crashkernel: memory value expected\n");
        return -EINVAL;
    }

    if (*cur == '@')
        *crash_base = memparse(cur+1, &cur);

    return 0;
}

/*
 * This function is the entry point for command line parsing and should be
 * called from the arch-specific code.
 */
int __init parse_crashkernel(char *cmdline,
                             unsigned long long system_ram,
                             unsigned long long *crash_size,
                             unsigned long long *crash_base)
{
    char *p = cmdline, *ck_cmdline = NULL;
    char *first_colon, *first_space;

    BUG_ON(!crash_size || !crash_base);
    *crash_size = 0;
    *crash_base = 0;

    /* find crashkernel and use the last one if there are more */
    p = strstr(p, "crashkernel=");
    while (p) {
        ck_cmdline = p;
        p = strstr(p+1, "crashkernel=");
    }

    if (!ck_cmdline)
        return -EINVAL;

    ck_cmdline += 12; /* strlen("crashkernel=") */

    /*
     * if the commandline contains a ':', then that's the extended
     * syntax -- if not, it must be the classic syntax
     */
    first_colon = strchr(ck_cmdline, ':');
    first_space = strchr(ck_cmdline, ' ');
    if (first_colon && (!first_space || first_colon < first_space))
        return parse_crashkernel_mem(ck_cmdline, system_ram,
                                     crash_size, crash_base);
    else
        return parse_crashkernel_simple(ck_cmdline, crash_size,
                                        crash_base);
}

void crash_save_vmcoreinfo(void)
{
    u32 *buf;

    if (!vmcoreinfo_size)
        return;

    vmcoreinfo_append_str("CRASHTIME=%ld", get_seconds());

    buf = (u32 *)vmcoreinfo_note;

    buf = append_elf_note(buf, VMCOREINFO_NOTE_NAME, 0, vmcoreinfo_data,
                          vmcoreinfo_size);

    final_note(buf);
}

void vmcoreinfo_append_str(const char *fmt, ...)
{
    va_list args;
    char buf[0x50];
    int r;

    va_start(args, fmt);
    r = vsnprintf(buf, sizeof(buf), fmt, args);
    va_end(args);

    if (r + vmcoreinfo_size > vmcoreinfo_max_size)
        r = vmcoreinfo_max_size - vmcoreinfo_size;

    memcpy(&vmcoreinfo_data[vmcoreinfo_size], buf, r);

    vmcoreinfo_size += r;
}

/*
 * provide an empty default implementation here -- architecture
 * code may override this
 */
void __attribute__ ((weak)) arch_crash_save_vmcoreinfo(void)
{}

unsigned long __attribute__ ((weak)) paddr_vmcoreinfo_note(void)
{
    return __pa((unsigned long)(char *)&vmcoreinfo_note);
}

/*
 * Move into place and start executing a preloaded standalone
 * executable. If nothing was preloaded return an error.
 */
int kernel_kexec(void)
{
    int error = 0;

    if (!mutex_trylock(&kexec_mutex))
        return -EBUSY;
    if (!kexec_image) {
        error = -EINVAL;
        goto Unlock;
    }

#ifdef CONFIG_KEXEC_JUMP
    if (kexec_image->preserve_context) {
        mutex_lock(&pm_mutex);
        pm_prepare_console();
        error = freeze_processes();
        if (error) {
            error = -EBUSY;
            goto Restore_console;
        }
        suspend_console();
        error = dpm_suspend_start(PMSG_FREEZE);
        if (error)
            goto Resume_console;
        /* At this point, dpm_suspend_start() has been called,
         * but *not* dpm_suspend_noirq(). We *must* call
         * dpm_suspend_noirq() now. Otherwise, drivers for
         * some devices (e.g. interrupt controllers) become
         * desynchronized with the actual state of the
         * hardware at resume time, and evil weirdness ensues.
         */
        error = dpm_suspend_noirq(PMSG_FREEZE);
        if (error)
            goto Resume_devices;
        error = disable_nonboot_cpus();
        if (error)
            goto Enable_cpus;
        local_irq_disable();
        error = syscore_suspend();
        if (error)
            goto Enable_irqs;
    } else
#endif
    {
        kernel_restart_prepare(NULL);
        printk(KERN_EMERG "Starting new kernel\n");
        //machine_shutdown();
    }

    machine_kexec(kexec_image);

#ifdef CONFIG_KEXEC_JUMP
    if (kexec_image->preserve_context) {
        syscore_resume();
 Enable_irqs:
        local_irq_enable();
 Enable_cpus:
        enable_nonboot_cpus();
        dpm_resume_noirq(PMSG_RESTORE);
 Resume_devices:
        dpm_resume_end(PMSG_RESTORE);
 Resume_console:
        resume_console();
        thaw_processes();
 Restore_console:
        pm_restore_console();
        mutex_unlock(&pm_mutex);
    }
#endif

 Unlock:
    mutex_unlock(&kexec_mutex);
    return error;
}

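/* Heuristic fallback for locating the syscall table when its address
 * is not supplied at build time: scan kernel memory between two
 * exported symbols and look for a table whose __NR_close slot points
 * at sys_close. This assumes the table lies between unlock_kernel and
 * loops_per_jiffy, which holds on the kernels this module targets but
 * is not guaranteed in general.
 */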
unsigned long **find_sys_call_table(void)
{
    unsigned long **sctable;
    unsigned long ptr;
    extern int loops_per_jiffy;

    sctable = NULL;
    for (ptr = (unsigned long)&unlock_kernel;
         ptr < (unsigned long)&loops_per_jiffy;
         ptr += sizeof(void *)) {
        unsigned long *p;

        p = (unsigned long *)ptr;
        if (p[__NR_close] == (unsigned long)sys_close) {
            sctable = (unsigned long **)p;
            return &sctable[0];
        }
    }
    return NULL;
}

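/* Module entry point. Rather than registering a new syscall the
 * normal way, this patches the running kernel's syscall table in
 * place. SYS_CALL_TABLE is expected to hold the table's address,
 * presumably supplied as a compile-time define (e.g. from the module
 * Makefile); the scanning fallback above is kept under #if 0.
 */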
static int __init kexec_module_init(void)
{
#if 0
    sys_call_table = (void **)find_sys_call_table();
    if (sys_call_table == NULL) {
        printk(KERN_ERR "Cannot find the system call address\n");
        return -1; /* do not load */
    }

    printk(KERN_INFO "kexec: Found sys_call_table at: %p\n", sys_call_table);
#endif

    sys_call_table = (void **)SYS_CALL_TABLE;
    printk(KERN_INFO "kexec: Force sys_call_table at: %p\n", sys_call_table);

    /* Set kexec_load() syscall. */
    sys_call_table[__NR_kexec_load] = kexec_load;

    /* Swap reboot() syscall and store original */
    original_reboot = sys_call_table[__NR_reboot];
    sys_call_table[__NR_reboot] = reboot;

    /* crash_notes_memory_init */
    /* Allocate memory for saving cpu registers. */
    crash_notes = alloc_percpu(note_buf_t);
    if (!crash_notes) {
        printk(KERN_ERR "Kexec: Memory allocation for saving cpu register"
               " states failed\n");
        return -ENOMEM;
    }

    /* crash_vmcoreinfo_init */
    VMCOREINFO_OSRELEASE(init_uts_ns.name.release);
    VMCOREINFO_PAGESIZE(PAGE_SIZE);

    VMCOREINFO_SYMBOL(init_uts_ns);
    VMCOREINFO_SYMBOL(node_online_map);

#ifndef CONFIG_NEED_MULTIPLE_NODES
    VMCOREINFO_SYMBOL(mem_map);
    VMCOREINFO_SYMBOL(contig_page_data);
#endif
#ifdef CONFIG_SPARSEMEM
    VMCOREINFO_SYMBOL(mem_section);
    VMCOREINFO_LENGTH(mem_section, NR_SECTION_ROOTS);
    VMCOREINFO_STRUCT_SIZE(mem_section);
    VMCOREINFO_OFFSET(mem_section, section_mem_map);
#endif
    VMCOREINFO_STRUCT_SIZE(page);
    VMCOREINFO_STRUCT_SIZE(pglist_data);
    VMCOREINFO_STRUCT_SIZE(zone);
    VMCOREINFO_STRUCT_SIZE(free_area);
    VMCOREINFO_STRUCT_SIZE(list_head);
    VMCOREINFO_SIZE(nodemask_t);
    VMCOREINFO_OFFSET(page, flags);
    VMCOREINFO_OFFSET(page, _count);
    VMCOREINFO_OFFSET(page, mapping);
    VMCOREINFO_OFFSET(page, lru);
    VMCOREINFO_OFFSET(pglist_data, node_zones);
    VMCOREINFO_OFFSET(pglist_data, nr_zones);
#ifdef CONFIG_FLAT_NODE_MEM_MAP
    VMCOREINFO_OFFSET(pglist_data, node_mem_map);
#endif
    VMCOREINFO_OFFSET(pglist_data, node_start_pfn);
    VMCOREINFO_OFFSET(pglist_data, node_spanned_pages);
    VMCOREINFO_OFFSET(pglist_data, node_id);
    VMCOREINFO_OFFSET(zone, free_area);
    VMCOREINFO_OFFSET(zone, vm_stat);
    VMCOREINFO_OFFSET(zone, spanned_pages);
    VMCOREINFO_OFFSET(free_area, free_list);
    VMCOREINFO_OFFSET(list_head, next);
    VMCOREINFO_OFFSET(list_head, prev);
    VMCOREINFO_OFFSET(vm_struct, addr);
    VMCOREINFO_LENGTH(zone.free_area, MAX_ORDER);
    VMCOREINFO_LENGTH(free_area.free_list, MIGRATE_TYPES);
    VMCOREINFO_NUMBER(NR_FREE_PAGES);
    VMCOREINFO_NUMBER(PG_lru);
    VMCOREINFO_NUMBER(PG_private);
    VMCOREINFO_NUMBER(PG_swapcache);

    arch_crash_save_vmcoreinfo();

    return 0;
}

module_init(kexec_module_init);