/*
 * kexec.c - kexec system call
 * Copyright (C) 2002-2004 Eric Biederman  <ebiederm@xmission.com>
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2.  See the file COPYING for more details.
 */

#define pr_fmt(fmt)	"kexec: " fmt

#include <linux/capability.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/kexec.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/syscalls.h>
#include <linux/reboot.h>
#include <linux/ioport.h>
#include <linux/hardirq.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/utsname.h>
#include <linux/numa.h>
#include <linux/suspend.h>
#include <linux/device.h>
#include <linux/freezer.h>
#include <linux/pm.h>
#include <linux/cpu.h>
#include <linux/console.h>
#include <linux/vmalloc.h>
#include <linux/swap.h>
#include <linux/syscore_ops.h>
#include <linux/compiler.h>
#include <linux/hugetlb.h>

#include <asm/page.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/sections.h>

#include <crypto/hash.h>
#include <crypto/sha.h>

/* Per cpu memory for storing cpu states in case of system crash. */
note_buf_t __percpu *crash_notes;

/* vmcoreinfo stuff */
static unsigned char vmcoreinfo_data[VMCOREINFO_BYTES];
u32 vmcoreinfo_note[VMCOREINFO_NOTE_SIZE/4];
size_t vmcoreinfo_size;
size_t vmcoreinfo_max_size = sizeof(vmcoreinfo_data);

/* Flag to indicate we are going to kexec a new kernel */
bool kexec_in_progress = false;

/*
 * Declare these symbols weak so that if the architecture provides a
 * purgatory, these will be overridden.
 */
char __weak kexec_purgatory[0];
size_t __weak kexec_purgatory_size = 0;

#ifdef CONFIG_KEXEC_FILE
static int kexec_calculate_store_digests(struct kimage *image);
#endif

/* Location of the reserved area for the crash kernel */
struct resource crashk_res = {
	.name  = "Crash kernel",
	.start = 0,
	.end   = 0,
	.flags = IORESOURCE_BUSY | IORESOURCE_MEM
};
struct resource crashk_low_res = {
	.name  = "Crash kernel",
	.start = 0,
	.end   = 0,
	.flags = IORESOURCE_BUSY | IORESOURCE_MEM
};

int kexec_should_crash(struct task_struct *p)
{
	/*
	 * If crash_kexec_post_notifiers is enabled, don't run
	 * crash_kexec() here yet, which must be run after panic
	 * notifiers in panic().
	 */
	if (crash_kexec_post_notifiers)
		return 0;
	/*
	 * There are 4 panic() calls in the do_exit() path, one for
	 * each of these 4 conditions.
	 */
	if (in_interrupt() || !p->pid || is_global_init(p) || panic_on_oops)
		return 1;
	return 0;
}

/*
 * When kexec transitions to the new kernel there is a one-to-one
 * mapping between physical and virtual addresses.  On processors
 * where you can disable the MMU this is trivial, and easy.  For
 * others it is still a simple predictable page table to setup.
 *
 * In that environment kexec copies the new kernel to its final
 * resting place.  This means I can only support memory whose
 * physical address can fit in an unsigned long.  In particular
 * addresses where (pfn << PAGE_SHIFT) > ULONG_MAX cannot be handled.
 * If the assembly stub has more restrictive requirements
 * KEXEC_SOURCE_MEMORY_LIMIT and KEXEC_DEST_MEMORY_LIMIT can be
 * defined more restrictively in <asm/kexec.h>.
 *
 * The code for the transition from the current kernel to the
 * new kernel is placed in the control_code_buffer, whose size
 * is given by KEXEC_CONTROL_PAGE_SIZE.  In the best case only a single
 * page of memory is necessary, but some architectures require more.
 * Because this memory must be identity mapped in the transition from
 * virtual to physical addresses it must live in the range
 * 0 - TASK_SIZE, as only the user space mappings are arbitrarily
 * modifiable.
 *
 * The assembly stub in the control code buffer is passed a linked list
 * of descriptor pages detailing the source pages of the new kernel,
 * and the destination addresses of those source pages.  As this data
 * structure is not used in the context of the current OS, it must
 * be self-contained.
 *
 * The code has been made to work with highmem pages and will use a
 * destination page in its final resting place (if it happens
 * to allocate it).  The end product of this is that most of the
 * physical address space, and most of RAM can be used.
 *
 * Future directions include:
 *  - allocating a page table with the control code buffer identity
 *    mapped, to simplify machine_kexec and make kexec_on_panic more
 *    reliable.
 */
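
/*
 * Illustrative sketch (commentary only, not part of the build): the
 * descriptor list mentioned above is a flat sequence of kimage_entry_t
 * values, each a physical address ORed with a flag from the IND_*
 * family.  Assuming a small image, the stream handed to the assembly
 * stub might look like:
 *
 *	dest_paddr | IND_DESTINATION   -- start copying at dest_paddr
 *	src_paddr0 | IND_SOURCE        -- copy this page, advance dest
 *	src_paddr1 | IND_SOURCE
 *	next_paddr | IND_INDIRECTION   -- list continues in another page
 *	...
 *	0          | IND_DONE          -- end of list
 *
 * kimage_add_entry() and for_each_kimage_entry() below build and walk
 * exactly this structure.
 */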

/*
 * KIMAGE_NO_DEST is an impossible destination address, used for
 * allocating pages whose destination address we do not care about.
 */
#define KIMAGE_NO_DEST (-1UL)

static int kimage_is_destination_range(struct kimage *image,
				       unsigned long start, unsigned long end);
static struct page *kimage_alloc_page(struct kimage *image,
				       gfp_t gfp_mask,
				       unsigned long dest);

static int copy_user_segment_list(struct kimage *image,
				  unsigned long nr_segments,
				  struct kexec_segment __user *segments)
{
	int ret;
	size_t segment_bytes;

	/* Read in the segments */
	image->nr_segments = nr_segments;
	segment_bytes = nr_segments * sizeof(*segments);
	ret = copy_from_user(image->segment, segments, segment_bytes);
	if (ret)
		ret = -EFAULT;

	return ret;
}

static int sanity_check_segment_list(struct kimage *image)
{
	int result, i;
	unsigned long nr_segments = image->nr_segments;

	/*
	 * Verify we have good destination addresses.  The caller is
	 * responsible for making certain we don't attempt to load
	 * the new image into invalid or reserved areas of RAM.  This
	 * just verifies it is an address we can use.
	 *
	 * Since the kernel does everything in page size chunks ensure
	 * the destination addresses are page aligned.  Too many
	 * special cases crop up when we don't do this.  The most
	 * insidious is getting overlapping destination addresses
	 * simply because addresses are changed to page size
	 * granularity.
	 */
	result = -EADDRNOTAVAIL;
	for (i = 0; i < nr_segments; i++) {
		unsigned long mstart, mend;

		mstart = image->segment[i].mem;
		mend   = mstart + image->segment[i].memsz;
		if ((mstart & ~PAGE_MASK) || (mend & ~PAGE_MASK))
			return result;
		if (mend >= KEXEC_DESTINATION_MEMORY_LIMIT)
			return result;
	}

	/* Verify our destination addresses do not overlap.
	 * If we allowed overlapping destination addresses
	 * through, very weird things could happen with no
	 * easy explanation as one segment stops on another.
	 */
	result = -EINVAL;
	for (i = 0; i < nr_segments; i++) {
		unsigned long mstart, mend;
		unsigned long j;

		mstart = image->segment[i].mem;
		mend   = mstart + image->segment[i].memsz;
		for (j = 0; j < i; j++) {
			unsigned long pstart, pend;
			pstart = image->segment[j].mem;
			pend   = pstart + image->segment[j].memsz;
			/* Do the segments overlap ? */
			if ((mend > pstart) && (mstart < pend))
				return result;
		}
	}

	/* Ensure our buffer sizes are no greater than
	 * our memory sizes.  This should always be the case,
	 * and it is easier to check up front than to be surprised
	 * later on.
	 */
	result = -EINVAL;
	for (i = 0; i < nr_segments; i++) {
		if (image->segment[i].bufsz > image->segment[i].memsz)
			return result;
	}

	/*
	 * Verify we have good destination addresses.  Normally
	 * the caller is responsible for making certain we don't
	 * attempt to load the new image into invalid or reserved
	 * areas of RAM.  But crash kernels are preloaded into a
	 * reserved area of RAM.  We must ensure the addresses
	 * are in the reserved area otherwise preloading the
	 * kernel could corrupt things.
	 */

	if (image->type == KEXEC_TYPE_CRASH) {
		result = -EADDRNOTAVAIL;
		for (i = 0; i < nr_segments; i++) {
			unsigned long mstart, mend;

			mstart = image->segment[i].mem;
			mend = mstart + image->segment[i].memsz - 1;
			/* Ensure we are within the crash kernel limits */
			if ((mstart < crashk_res.start) ||
			    (mend > crashk_res.end))
				return result;
		}
	}

	return 0;
}
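
/*
 * A worked example of the half-open overlap test above (commentary
 * only): segments [0x1000, 0x3000) and [0x2000, 0x4000) overlap
 * because 0x3000 > 0x2000 (mend > pstart) and 0x1000 < 0x4000
 * (mstart < pend).  Segments [0x1000, 0x2000) and [0x2000, 0x3000)
 * merely abut; the first comparison (0x2000 > 0x2000) fails, so they
 * are accepted.
 */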

static struct kimage *do_kimage_alloc_init(void)
{
	struct kimage *image;

	/* Allocate a controlling structure */
	image = kzalloc(sizeof(*image), GFP_KERNEL);
	if (!image)
		return NULL;

	image->head = 0;
	image->entry = &image->head;
	image->last_entry = &image->head;
	image->control_page = ~0; /* By default this does not apply */
	image->type = KEXEC_TYPE_DEFAULT;

	/* Initialize the list of control pages */
	INIT_LIST_HEAD(&image->control_pages);

	/* Initialize the list of destination pages */
	INIT_LIST_HEAD(&image->dest_pages);

	/* Initialize the list of unusable pages */
	INIT_LIST_HEAD(&image->unusable_pages);

	return image;
}

static void kimage_free_page_list(struct list_head *list);

static int kimage_alloc_init(struct kimage **rimage, unsigned long entry,
			     unsigned long nr_segments,
			     struct kexec_segment __user *segments,
			     unsigned long flags)
{
	int ret;
	struct kimage *image;
	bool kexec_on_panic = flags & KEXEC_ON_CRASH;

	if (kexec_on_panic) {
		/* Verify we have a valid entry point */
		if ((entry < crashk_res.start) || (entry > crashk_res.end))
			return -EADDRNOTAVAIL;
	}

	/* Allocate and initialize a controlling structure */
	image = do_kimage_alloc_init();
	if (!image)
		return -ENOMEM;

	image->start = entry;

	ret = copy_user_segment_list(image, nr_segments, segments);
	if (ret)
		goto out_free_image;

	ret = sanity_check_segment_list(image);
	if (ret)
		goto out_free_image;

	/* Enable the special crash kernel control page allocation policy. */
	if (kexec_on_panic) {
		image->control_page = crashk_res.start;
		image->type = KEXEC_TYPE_CRASH;
	}

	/*
	 * Find a location for the control code buffer, and add it to
	 * the vector of segments so that its pages will also be
	 * counted as destination pages.
	 */
	ret = -ENOMEM;
	image->control_code_page = kimage_alloc_control_pages(image,
					   get_order(KEXEC_CONTROL_PAGE_SIZE));
	if (!image->control_code_page) {
		pr_err("Could not allocate control_code_buffer\n");
		goto out_free_image;
	}

	if (!kexec_on_panic) {
		image->swap_page = kimage_alloc_control_pages(image, 0);
		if (!image->swap_page) {
			pr_err("Could not allocate swap buffer\n");
			goto out_free_control_pages;
		}
	}

	*rimage = image;
	return 0;
out_free_control_pages:
	kimage_free_page_list(&image->control_pages);
out_free_image:
	kfree(image);
	return ret;
}

#ifdef CONFIG_KEXEC_FILE
static int copy_file_from_fd(int fd, void **buf, unsigned long *buf_len)
{
	struct fd f = fdget(fd);
	int ret;
	struct kstat stat;
	loff_t pos;
	ssize_t bytes = 0;

	if (!f.file)
		return -EBADF;

	ret = vfs_getattr(&f.file->f_path, &stat);
	if (ret)
		goto out;

	if (stat.size > INT_MAX) {
		ret = -EFBIG;
		goto out;
	}

	/* Don't hand 0 to vmalloc, it whines. */
	if (stat.size == 0) {
		ret = -EINVAL;
		goto out;
	}

	*buf = vmalloc(stat.size);
	if (!*buf) {
		ret = -ENOMEM;
		goto out;
	}

	pos = 0;
	while (pos < stat.size) {
		bytes = kernel_read(f.file, pos, (char *)(*buf) + pos,
				    stat.size - pos);
		if (bytes < 0) {
			vfree(*buf);
			ret = bytes;
			goto out;
		}

		if (bytes == 0)
			break;
		pos += bytes;
	}

	if (pos != stat.size) {
		ret = -EBADF;
		vfree(*buf);
		goto out;
	}

	*buf_len = pos;
out:
	fdput(f);
	return ret;
}

/* Architectures can provide this probe function */
int __weak arch_kexec_kernel_image_probe(struct kimage *image, void *buf,
					 unsigned long buf_len)
{
	return -ENOEXEC;
}

void * __weak arch_kexec_kernel_image_load(struct kimage *image)
{
	return ERR_PTR(-ENOEXEC);
}

void __weak arch_kimage_file_post_load_cleanup(struct kimage *image)
{
}

int __weak arch_kexec_kernel_verify_sig(struct kimage *image, void *buf,
					unsigned long buf_len)
{
	return -EKEYREJECTED;
}

/* Apply relocations of type RELA */
int __weak
arch_kexec_apply_relocations_add(const Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
				 unsigned int relsec)
{
	pr_err("RELA relocation unsupported.\n");
	return -ENOEXEC;
}

/* Apply relocations of type REL */
int __weak
arch_kexec_apply_relocations(const Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
			     unsigned int relsec)
{
	pr_err("REL relocation unsupported.\n");
	return -ENOEXEC;
}

/*
 * Free up memory used by the kernel, initrd, and command line. These are
 * temporary memory allocations which are not needed any more after these
 * buffers have been loaded into separate segments and have been copied
 * elsewhere.
 */
static void kimage_file_post_load_cleanup(struct kimage *image)
{
	struct purgatory_info *pi = &image->purgatory_info;

	vfree(image->kernel_buf);
	image->kernel_buf = NULL;

	vfree(image->initrd_buf);
	image->initrd_buf = NULL;

	kfree(image->cmdline_buf);
	image->cmdline_buf = NULL;

	vfree(pi->purgatory_buf);
	pi->purgatory_buf = NULL;

	vfree(pi->sechdrs);
	pi->sechdrs = NULL;

	/* See if the architecture has anything to clean up post load */
	arch_kimage_file_post_load_cleanup(image);

	/*
	 * The above call should have called into the bootloader to free up
	 * any data stored in kimage->image_loader_data. It should
	 * be ok now to free it up.
	 */
	kfree(image->image_loader_data);
	image->image_loader_data = NULL;
}

/*
 * In file mode the list of segments is prepared by the kernel. Copy the
 * relevant data from user space, do error checking, and prepare the
 * segment list.
 */
static int
kimage_file_prepare_segments(struct kimage *image, int kernel_fd, int initrd_fd,
			     const char __user *cmdline_ptr,
			     unsigned long cmdline_len, unsigned flags)
{
	int ret = 0;
	void *ldata;

	ret = copy_file_from_fd(kernel_fd, &image->kernel_buf,
				&image->kernel_buf_len);
	if (ret)
		return ret;

	/* Call arch image probe handlers */
	ret = arch_kexec_kernel_image_probe(image, image->kernel_buf,
					    image->kernel_buf_len);

	if (ret)
		goto out;

#ifdef CONFIG_KEXEC_VERIFY_SIG
	ret = arch_kexec_kernel_verify_sig(image, image->kernel_buf,
					   image->kernel_buf_len);
	if (ret) {
		pr_debug("kernel signature verification failed.\n");
		goto out;
	}
	pr_debug("kernel signature verification successful.\n");
#endif
	/* It is possible that no initramfs is being loaded */
	if (!(flags & KEXEC_FILE_NO_INITRAMFS)) {
		ret = copy_file_from_fd(initrd_fd, &image->initrd_buf,
					&image->initrd_buf_len);
		if (ret)
			goto out;
	}

	if (cmdline_len) {
		image->cmdline_buf = kzalloc(cmdline_len, GFP_KERNEL);
		if (!image->cmdline_buf) {
			ret = -ENOMEM;
			goto out;
		}

		ret = copy_from_user(image->cmdline_buf, cmdline_ptr,
				     cmdline_len);
		if (ret) {
			ret = -EFAULT;
			goto out;
		}

		image->cmdline_buf_len = cmdline_len;

		/* The command line should be a string, with the last byte null */
		if (image->cmdline_buf[cmdline_len - 1] != '\0') {
			ret = -EINVAL;
			goto out;
		}
	}

	/* Call arch image load handlers */
	ldata = arch_kexec_kernel_image_load(image);

	if (IS_ERR(ldata)) {
		ret = PTR_ERR(ldata);
		goto out;
	}

	image->image_loader_data = ldata;
out:
	/* In case of error, free up all allocated memory in this function */
	if (ret)
		kimage_file_post_load_cleanup(image);
	return ret;
}

static int
kimage_file_alloc_init(struct kimage **rimage, int kernel_fd,
		       int initrd_fd, const char __user *cmdline_ptr,
		       unsigned long cmdline_len, unsigned long flags)
{
	int ret;
	struct kimage *image;
	bool kexec_on_panic = flags & KEXEC_FILE_ON_CRASH;

	image = do_kimage_alloc_init();
	if (!image)
		return -ENOMEM;

	image->file_mode = 1;

	if (kexec_on_panic) {
		/* Enable the special crash kernel control page alloc policy. */
		image->control_page = crashk_res.start;
		image->type = KEXEC_TYPE_CRASH;
	}

	ret = kimage_file_prepare_segments(image, kernel_fd, initrd_fd,
					   cmdline_ptr, cmdline_len, flags);
	if (ret)
		goto out_free_image;

	ret = sanity_check_segment_list(image);
	if (ret)
		goto out_free_post_load_bufs;

	ret = -ENOMEM;
	image->control_code_page = kimage_alloc_control_pages(image,
					   get_order(KEXEC_CONTROL_PAGE_SIZE));
	if (!image->control_code_page) {
		pr_err("Could not allocate control_code_buffer\n");
		goto out_free_post_load_bufs;
	}

	if (!kexec_on_panic) {
		image->swap_page = kimage_alloc_control_pages(image, 0);
		if (!image->swap_page) {
			pr_err("Could not allocate swap buffer\n");
			goto out_free_control_pages;
		}
	}

	*rimage = image;
	return 0;
out_free_control_pages:
	kimage_free_page_list(&image->control_pages);
out_free_post_load_bufs:
	kimage_file_post_load_cleanup(image);
out_free_image:
	kfree(image);
	return ret;
}
#else /* CONFIG_KEXEC_FILE */
static inline void kimage_file_post_load_cleanup(struct kimage *image) { }
#endif /* CONFIG_KEXEC_FILE */

static int kimage_is_destination_range(struct kimage *image,
					unsigned long start,
					unsigned long end)
{
	unsigned long i;

	for (i = 0; i < image->nr_segments; i++) {
		unsigned long mstart, mend;

		mstart = image->segment[i].mem;
		mend = mstart + image->segment[i].memsz;
		if ((end > mstart) && (start < mend))
			return 1;
	}

	return 0;
}

static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order)
{
	struct page *pages;

	pages = alloc_pages(gfp_mask, order);
	if (pages) {
		unsigned int count, i;
		pages->mapping = NULL;
		set_page_private(pages, order);
		count = 1 << order;
		for (i = 0; i < count; i++)
			SetPageReserved(pages + i);
	}

	return pages;
}

static void kimage_free_pages(struct page *page)
{
	unsigned int order, count, i;

	order = page_private(page);
	count = 1 << order;
	for (i = 0; i < count; i++)
		ClearPageReserved(page + i);
	__free_pages(page, order);
}

static void kimage_free_page_list(struct list_head *list)
{
	struct list_head *pos, *next;

	list_for_each_safe(pos, next, list) {
		struct page *page;

		page = list_entry(pos, struct page, lru);
		list_del(&page->lru);
		kimage_free_pages(page);
	}
}

static struct page *kimage_alloc_normal_control_pages(struct kimage *image,
							unsigned int order)
{
	/* Control pages are special: they are the intermediaries
	 * that are needed while we copy the rest of the pages
	 * to their final resting place.  As such they must
	 * not conflict with either the destination addresses
	 * or memory the kernel is already using.
	 *
	 * The only case where we really need more than one of
	 * these is for architectures where we cannot disable
	 * the MMU and must instead generate an identity mapped
	 * page table for all of the memory.
	 *
	 * At worst this runs in O(N) of the image size.
	 */
	struct list_head extra_pages;
	struct page *pages;
	unsigned int count;

	count = 1 << order;
	INIT_LIST_HEAD(&extra_pages);

	/* Loop while I can allocate a page and the page allocated
	 * is a destination page.
	 */
	do {
		unsigned long pfn, epfn, addr, eaddr;

		pages = kimage_alloc_pages(KEXEC_CONTROL_MEMORY_GFP, order);
		if (!pages)
			break;
		pfn   = page_to_pfn(pages);
		epfn  = pfn + count;
		addr  = pfn << PAGE_SHIFT;
		eaddr = epfn << PAGE_SHIFT;
		if ((epfn >= (KEXEC_CONTROL_MEMORY_LIMIT >> PAGE_SHIFT)) ||
			      kimage_is_destination_range(image, addr, eaddr)) {
			list_add(&pages->lru, &extra_pages);
			pages = NULL;
		}
	} while (!pages);

	if (pages) {
		/* Remember the allocated page... */
		list_add(&pages->lru, &image->control_pages);

		/* Because the page is already in its destination
		 * location we will never allocate another page at
		 * that address.  Therefore kimage_alloc_pages
		 * will not return it (again) and we don't need
		 * to give it an entry in image->segment[].
		 */
	}
	/* Deal with the destination pages I have inadvertently allocated.
	 *
	 * Ideally I would convert multi-page allocations into single
	 * page allocations, and add everything to image->dest_pages.
	 *
	 * For now it is simpler to just free the pages.
	 */
	kimage_free_page_list(&extra_pages);

	return pages;
}

static struct page *kimage_alloc_crash_control_pages(struct kimage *image,
						      unsigned int order)
{
	/* Control pages are special: they are the intermediaries
	 * that are needed while we copy the rest of the pages
	 * to their final resting place.  As such they must
	 * not conflict with either the destination addresses
	 * or memory the kernel is already using.
	 *
	 * Control pages are also the only pages we must allocate
	 * when loading a crash kernel.  All of the other pages
	 * are specified by the segments and we just memcpy
	 * into them directly.
	 *
	 * The only case where we really need more than one of
	 * these is for architectures where we cannot disable
	 * the MMU and must instead generate an identity mapped
	 * page table for all of the memory.
	 *
	 * Given the low demand this implements a very simple
	 * allocator that finds the first hole of the appropriate
	 * size in the reserved memory region, and allocates all
	 * of the memory up to and including the hole.
	 */
	unsigned long hole_start, hole_end, size;
	struct page *pages;

	pages = NULL;
	size = (1 << order) << PAGE_SHIFT;
	hole_start = (image->control_page + (size - 1)) & ~(size - 1);
	hole_end   = hole_start + size - 1;
	while (hole_end <= crashk_res.end) {
		unsigned long i;

		if (hole_end > KEXEC_CRASH_CONTROL_MEMORY_LIMIT)
			break;
		/* See if I overlap any of the segments */
		for (i = 0; i < image->nr_segments; i++) {
			unsigned long mstart, mend;

			mstart = image->segment[i].mem;
			mend   = mstart + image->segment[i].memsz - 1;
			if ((hole_end >= mstart) && (hole_start <= mend)) {
				/* Advance the hole to the end of the segment */
				hole_start = (mend + (size - 1)) & ~(size - 1);
				hole_end   = hole_start + size - 1;
				break;
			}
		}
		/* If I don't overlap any segments I have found my hole! */
		if (i == image->nr_segments) {
			pages = pfn_to_page(hole_start >> PAGE_SHIFT);
			break;
		}
	}
	if (pages)
		image->control_page = hole_end;

	return pages;
}
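
/*
 * A worked example of the round-up arithmetic above (commentary only):
 * with order = 1 on 4 KiB pages, size = 0x2000, so
 * (addr + (size - 1)) & ~(size - 1) rounds addr up to the next 0x2000
 * boundary; e.g. control_page = 0x10001000 yields hole_start =
 * 0x10002000 and hole_end = 0x10003fff.  The mask trick requires size
 * to be a power of two, which (1 << order) << PAGE_SHIFT guarantees.
 */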

struct page *kimage_alloc_control_pages(struct kimage *image,
					 unsigned int order)
{
	struct page *pages = NULL;

	switch (image->type) {
	case KEXEC_TYPE_DEFAULT:
		pages = kimage_alloc_normal_control_pages(image, order);
		break;
	case KEXEC_TYPE_CRASH:
		pages = kimage_alloc_crash_control_pages(image, order);
		break;
	}

	return pages;
}

static int kimage_add_entry(struct kimage *image, kimage_entry_t entry)
{
	if (*image->entry != 0)
		image->entry++;

	if (image->entry == image->last_entry) {
		kimage_entry_t *ind_page;
		struct page *page;

		page = kimage_alloc_page(image, GFP_KERNEL, KIMAGE_NO_DEST);
		if (!page)
			return -ENOMEM;

		ind_page = page_address(page);
		*image->entry = virt_to_phys(ind_page) | IND_INDIRECTION;
		image->entry = ind_page;
		image->last_entry = ind_page +
				      ((PAGE_SIZE/sizeof(kimage_entry_t)) - 1);
	}
	*image->entry = entry;
	image->entry++;
	*image->entry = 0;

	return 0;
}

static int kimage_set_destination(struct kimage *image,
				   unsigned long destination)
{
	int result;

	destination &= PAGE_MASK;
	result = kimage_add_entry(image, destination | IND_DESTINATION);

	return result;
}


static int kimage_add_page(struct kimage *image, unsigned long page)
{
	int result;

	page &= PAGE_MASK;
	result = kimage_add_entry(image, page | IND_SOURCE);

	return result;
}


static void kimage_free_extra_pages(struct kimage *image)
{
	/* Walk through and free any extra destination pages I may have */
	kimage_free_page_list(&image->dest_pages);

	/* Walk through and free any unusable pages I have cached */
	kimage_free_page_list(&image->unusable_pages);
}

static void kimage_terminate(struct kimage *image)
{
	if (*image->entry != 0)
		image->entry++;

	*image->entry = IND_DONE;
}

#define for_each_kimage_entry(image, ptr, entry) \
	for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \
		ptr = (entry & IND_INDIRECTION) ? \
			phys_to_virt((entry & PAGE_MASK)) : ptr + 1)
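
/*
 * Usage sketch for the iterator above (commentary only): entry
 * receives each flagged physical address in turn, while ptr follows
 * IND_INDIRECTION links into continuation pages transparently, e.g.:
 *
 *	kimage_entry_t *ptr, entry;
 *
 *	for_each_kimage_entry(image, ptr, entry)
 *		if (entry & IND_SOURCE)
 *			pr_debug("source page at %lx\n", entry & PAGE_MASK);
 *
 * kimage_free() and kimage_dst_used() below are the in-tree users.
 */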

static void kimage_free_entry(kimage_entry_t entry)
{
	struct page *page;

	page = pfn_to_page(entry >> PAGE_SHIFT);
	kimage_free_pages(page);
}

static void kimage_free(struct kimage *image)
{
	kimage_entry_t *ptr, entry;
	kimage_entry_t ind = 0;

	if (!image)
		return;

	kimage_free_extra_pages(image);
	for_each_kimage_entry(image, ptr, entry) {
		if (entry & IND_INDIRECTION) {
			/* Free the previous indirection page */
			if (ind & IND_INDIRECTION)
				kimage_free_entry(ind);
			/* Save this indirection page until we are
			 * done with it.
			 */
			ind = entry;
		} else if (entry & IND_SOURCE)
			kimage_free_entry(entry);
	}
	/* Free the final indirection page */
	if (ind & IND_INDIRECTION)
		kimage_free_entry(ind);

	/* Handle any machine specific cleanup */
	machine_kexec_cleanup(image);

	/* Free the kexec control pages... */
	kimage_free_page_list(&image->control_pages);

	/*
	 * Free up any temporary buffers allocated. This might hit if an
	 * error occurred much later after buffer allocation.
	 */
	if (image->file_mode)
		kimage_file_post_load_cleanup(image);

	kfree(image);
}

static kimage_entry_t *kimage_dst_used(struct kimage *image,
					unsigned long page)
{
	kimage_entry_t *ptr, entry;
	unsigned long destination = 0;

	for_each_kimage_entry(image, ptr, entry) {
		if (entry & IND_DESTINATION)
			destination = entry & PAGE_MASK;
		else if (entry & IND_SOURCE) {
			if (page == destination)
				return ptr;
			destination += PAGE_SIZE;
		}
	}

	return NULL;
}

static struct page *kimage_alloc_page(struct kimage *image,
					gfp_t gfp_mask,
					unsigned long destination)
{
	/*
	 * Here we implement safeguards to ensure that a source page
	 * is not copied to its destination page before the data on
	 * the destination page is no longer useful.
	 *
	 * To do this we maintain the invariant that a source page is
	 * either its own destination page, or it is not a
	 * destination page at all.
	 *
	 * That is slightly stronger than required, but the proof
	 * that no problems will occur is trivial, and the
	 * implementation is simple to verify.
	 *
	 * When allocating all pages normally this algorithm will run
	 * in O(N) time, but in the worst case it will run in O(N^2)
	 * time.  If the runtime is a problem the data structures can
	 * be fixed.
	 */
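	/*
	 * Illustrative walk-through of the invariant (commentary only):
	 * suppose the freshly allocated page P turns out to be someone
	 * else's destination, and a source page S was already queued to
	 * be copied into P (kimage_dst_used() finds its entry).  We
	 * copy S's contents into P, which is their final resting place
	 * anyway, repoint the entry at P, and recycle S's old frame for
	 * our own request; every source page is then again either its
	 * own destination or no destination at all.
	 */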
	struct page *page;
	unsigned long addr;

	/*
	 * Walk through the list of destination pages, and see if I
	 * have a match.
	 */
	list_for_each_entry(page, &image->dest_pages, lru) {
		addr = page_to_pfn(page) << PAGE_SHIFT;
		if (addr == destination) {
			list_del(&page->lru);
			return page;
		}
	}
	page = NULL;
	while (1) {
		kimage_entry_t *old;

		/* Allocate a page, if we run out of memory give up */
		page = kimage_alloc_pages(gfp_mask, 0);
		if (!page)
			return NULL;
		/* If the page cannot be used file it away */
		if (page_to_pfn(page) >
				(KEXEC_SOURCE_MEMORY_LIMIT >> PAGE_SHIFT)) {
			list_add(&page->lru, &image->unusable_pages);
			continue;
		}
		addr = page_to_pfn(page) << PAGE_SHIFT;

		/* If it is the destination page we want, use it */
		if (addr == destination)
			break;

		/* If the page is not a destination page use it */
		if (!kimage_is_destination_range(image, addr,
						  addr + PAGE_SIZE))
			break;

		/*
		 * I know that the page is someone's destination page.
		 * See if there is already a source page for this
		 * destination page.  And if so swap the source pages.
		 */
		old = kimage_dst_used(image, addr);
		if (old) {
			/* If so move it */
			unsigned long old_addr;
			struct page *old_page;

			old_addr = *old & PAGE_MASK;
			old_page = pfn_to_page(old_addr >> PAGE_SHIFT);
			copy_highpage(page, old_page);
			*old = addr | (*old & ~PAGE_MASK);

			/* The old page I have found cannot be a
			 * destination page, so return it if its
			 * gfp_flags honor the ones passed in.
			 */
			if (!(gfp_mask & __GFP_HIGHMEM) &&
			    PageHighMem(old_page)) {
				kimage_free_pages(old_page);
				continue;
			}
			addr = old_addr;
			page = old_page;
			break;
		} else {
			/* Place the page on the destination list; I
			 * will use it later.
			 */
			list_add(&page->lru, &image->dest_pages);
		}
	}

	return page;
}

static int kimage_load_normal_segment(struct kimage *image,
					 struct kexec_segment *segment)
{
	unsigned long maddr;
	size_t ubytes, mbytes;
	int result;
	unsigned char __user *buf = NULL;
	unsigned char *kbuf = NULL;

	result = 0;
	if (image->file_mode)
		kbuf = segment->kbuf;
	else
		buf = segment->buf;
	ubytes = segment->bufsz;
	mbytes = segment->memsz;
	maddr = segment->mem;

	result = kimage_set_destination(image, maddr);
	if (result < 0)
		goto out;

	while (mbytes) {
		struct page *page;
		char *ptr;
		size_t uchunk, mchunk;

		page = kimage_alloc_page(image, GFP_HIGHUSER, maddr);
		if (!page) {
			result  = -ENOMEM;
			goto out;
		}
		result = kimage_add_page(image, page_to_pfn(page)
								<< PAGE_SHIFT);
		if (result < 0)
			goto out;

		ptr = kmap(page);
		/* Start with a clear page */
		clear_page(ptr);
		ptr += maddr & ~PAGE_MASK;
		mchunk = min_t(size_t, mbytes,
				PAGE_SIZE - (maddr & ~PAGE_MASK));
		uchunk = min(ubytes, mchunk);

		/* For file based kexec, source pages are in kernel memory */
		if (image->file_mode)
			memcpy(ptr, kbuf, uchunk);
		else
			result = copy_from_user(ptr, buf, uchunk);
		kunmap(page);
		if (result) {
			result = -EFAULT;
			goto out;
		}
		ubytes -= uchunk;
		maddr  += mchunk;
		if (image->file_mode)
			kbuf += mchunk;
		else
			buf += mchunk;
		mbytes -= mchunk;
	}
out:
	return result;
}

static int kimage_load_crash_segment(struct kimage *image,
					struct kexec_segment *segment)
{
	/* For crash dump kernels we simply copy the data from
	 * user space to its destination.
	 * We do things a page at a time for the sake of kmap.
	 */
	unsigned long maddr;
	size_t ubytes, mbytes;
	int result;
	unsigned char __user *buf = NULL;
	unsigned char *kbuf = NULL;

	result = 0;
	if (image->file_mode)
		kbuf = segment->kbuf;
	else
		buf = segment->buf;
	ubytes = segment->bufsz;
	mbytes = segment->memsz;
	maddr = segment->mem;
	while (mbytes) {
		struct page *page;
		char *ptr;
		size_t uchunk, mchunk;

		page = pfn_to_page(maddr >> PAGE_SHIFT);
		if (!page) {
			result  = -ENOMEM;
			goto out;
		}
		ptr = kmap(page);
		ptr += maddr & ~PAGE_MASK;
		mchunk = min_t(size_t, mbytes,
				PAGE_SIZE - (maddr & ~PAGE_MASK));
		uchunk = min(ubytes, mchunk);
		if (mchunk > uchunk) {
			/* Zero the trailing part of the page */
			memset(ptr + uchunk, 0, mchunk - uchunk);
		}

		/* For file based kexec, source pages are in kernel memory */
		if (image->file_mode)
			memcpy(ptr, kbuf, uchunk);
		else
			result = copy_from_user(ptr, buf, uchunk);
		kexec_flush_icache_page(page);
		kunmap(page);
		if (result) {
			result = -EFAULT;
			goto out;
		}
		ubytes -= uchunk;
		maddr  += mchunk;
		if (image->file_mode)
			kbuf += mchunk;
		else
			buf += mchunk;
		mbytes -= mchunk;
	}
out:
	return result;
}

static int kimage_load_segment(struct kimage *image,
				struct kexec_segment *segment)
{
	int result = -ENOMEM;

	switch (image->type) {
	case KEXEC_TYPE_DEFAULT:
		result = kimage_load_normal_segment(image, segment);
		break;
	case KEXEC_TYPE_CRASH:
		result = kimage_load_crash_segment(image, segment);
		break;
	}

	return result;
}

/*
 * Exec Kernel system call: for obvious reasons only root may call it.
 *
 * This call breaks up into three pieces.
 * - A generic part which loads the new kernel from the current
 *   address space, and very carefully places the data in the
 *   allocated pages.
 *
 * - A generic part that interacts with the kernel and tells all of
 *   the devices to shut down.  Preventing ongoing DMAs, and placing
 *   the devices in a consistent state so a later kernel can
 *   reinitialize them.
 *
 * - A machine specific part that includes the syscall number
 *   and then copies the image to its final destination, and
 *   jumps into the image at entry.
 *
 * kexec does not sync, or unmount filesystems, so if you need
 * that to happen you need to do that yourself.
 */
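/*
 * Hypothetical userspace usage sketch (commentary only; in practice
 * the kexec-tools package wraps this):
 *
 *	struct kexec_segment seg = {
 *		.buf   = kernel_image,		// user buffer
 *		.bufsz = kernel_image_len,
 *		.mem   = (void *)0x100000,	// page-aligned destination
 *		.memsz = round_up(kernel_image_len, 4096),
 *	};
 *
 *	syscall(__NR_kexec_load, entry_paddr, 1, &seg,
 *		KEXEC_ARCH_DEFAULT);
 *
 * followed later by reboot(LINUX_REBOOT_CMD_KEXEC) to jump into the
 * loaded image.
 */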
1244 struct kimage *kexec_image;
1245 struct kimage *kexec_crash_image;
1246 int kexec_load_disabled;
1247
1248 static DEFINE_MUTEX(kexec_mutex);
1249
1250 SYSCALL_DEFINE4(kexec_load, unsigned long, entry, unsigned long, nr_segments,
1251                 struct kexec_segment __user *, segments, unsigned long, flags)
1252 {
1253         struct kimage **dest_image, *image;
1254         int result;
1255
1256         /* We only trust the superuser with rebooting the system. */
1257         if (!capable(CAP_SYS_BOOT) || kexec_load_disabled)
1258                 return -EPERM;
1259
1260         /*
1261          * Verify we have a legal set of flags
1262          * This leaves us room for future extensions.
1263          */
1264         if ((flags & KEXEC_FLAGS) != (flags & ~KEXEC_ARCH_MASK))
1265                 return -EINVAL;
1266
1267         /* Verify we are on the appropriate architecture */
1268         if (((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH) &&
1269                 ((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH_DEFAULT))
1270                 return -EINVAL;
1271
1272         /* Put an artificial cap on the number
1273          * of segments passed to kexec_load.
1274          */
1275         if (nr_segments > KEXEC_SEGMENT_MAX)
1276                 return -EINVAL;
1277
1278         image = NULL;
1279         result = 0;
1280
1281         /* Because we write directly to the reserved memory
1282          * region when loading crash kernels we need a mutex here to
1283          * prevent multiple crash  kernels from attempting to load
1284          * simultaneously, and to prevent a crash kernel from loading
1285          * over the top of a in use crash kernel.
1286          *
1287          * KISS: always take the mutex.
1288          */
1289         if (!mutex_trylock(&kexec_mutex))
1290                 return -EBUSY;
1291
1292         dest_image = &kexec_image;
1293         if (flags & KEXEC_ON_CRASH)
1294                 dest_image = &kexec_crash_image;
1295         if (nr_segments > 0) {
1296                 unsigned long i;
1297
1298                 if (flags & KEXEC_ON_CRASH) {
1299                         /*
1300                          * Loading another kernel to switch to if this one
1301                          * crashes.  Free any current crash dump kernel before
1302                          * we corrupt it.
1303                          */
1304
1305                         kimage_free(xchg(&kexec_crash_image, NULL));
1306                         result = kimage_alloc_init(&image, entry, nr_segments,
1307                                                    segments, flags);
1308                         crash_map_reserved_pages();
1309                 } else {
1310                         /* Loading another kernel to reboot into. */
1311
1312                         result = kimage_alloc_init(&image, entry, nr_segments,
1313                                                    segments, flags);
1314                 }
1315                 if (result)
1316                         goto out;
1317
1318                 if (flags & KEXEC_PRESERVE_CONTEXT)
1319                         image->preserve_context = 1;
1320                 result = machine_kexec_prepare(image);
1321                 if (result)
1322                         goto out;
1323
1324                 for (i = 0; i < nr_segments; i++) {
1325                         result = kimage_load_segment(image, &image->segment[i]);
1326                         if (result)
1327                                 goto out;
1328                 }
1329                 kimage_terminate(image);
1330                 if (flags & KEXEC_ON_CRASH)
1331                         crash_unmap_reserved_pages();
1332         }
1333         /* Install the new kernel, and  Uninstall the old */
1334         image = xchg(dest_image, image);
1335
1336 out:
1337         mutex_unlock(&kexec_mutex);
1338         kimage_free(image);
1339
1340         return result;
1341 }
1342
1343 /*
1344  * Add and remove page tables for crashkernel memory
1345  *
1346  * Provide an empty default implementation here -- architecture
1347  * code may override this
1348  */
1349 void __weak crash_map_reserved_pages(void)
1350 {}
1351
1352 void __weak crash_unmap_reserved_pages(void)
1353 {}
1354
1355 #ifdef CONFIG_COMPAT
1356 COMPAT_SYSCALL_DEFINE4(kexec_load, compat_ulong_t, entry,
1357                        compat_ulong_t, nr_segments,
1358                        struct compat_kexec_segment __user *, segments,
1359                        compat_ulong_t, flags)
1360 {
1361         struct compat_kexec_segment in;
1362         struct kexec_segment out, __user *ksegments;
1363         unsigned long i, result;
1364
1365         /* Don't allow clients that don't understand the native
1366          * architecture to do anything.
1367          */
1368         if ((flags & KEXEC_ARCH_MASK) == KEXEC_ARCH_DEFAULT)
1369                 return -EINVAL;
1370
1371         if (nr_segments > KEXEC_SEGMENT_MAX)
1372                 return -EINVAL;
1373
1374         ksegments = compat_alloc_user_space(nr_segments * sizeof(out));
1375         for (i = 0; i < nr_segments; i++) {
1376                 result = copy_from_user(&in, &segments[i], sizeof(in));
1377                 if (result)
1378                         return -EFAULT;
1379
1380                 out.buf   = compat_ptr(in.buf);
1381                 out.bufsz = in.bufsz;
1382                 out.mem   = in.mem;
1383                 out.memsz = in.memsz;
1384
1385                 result = copy_to_user(&ksegments[i], &out, sizeof(out));
1386                 if (result)
1387                         return -EFAULT;
1388         }
1389
1390         return sys_kexec_load(entry, nr_segments, ksegments, flags);
1391 }
1392 #endif
1393
1394 #ifdef CONFIG_KEXEC_FILE
1395 SYSCALL_DEFINE5(kexec_file_load, int, kernel_fd, int, initrd_fd,
1396                 unsigned long, cmdline_len, const char __user *, cmdline_ptr,
1397                 unsigned long, flags)
1398 {
1399         int ret = 0, i;
1400         struct kimage **dest_image, *image;
1401
1402         /* We only trust the superuser with rebooting the system. */
1403         if (!capable(CAP_SYS_BOOT) || kexec_load_disabled)
1404                 return -EPERM;
1405
1406         /* Make sure we have a legal set of flags */
1407         if (flags != (flags & KEXEC_FILE_FLAGS))
1408                 return -EINVAL;
1409
1410         image = NULL;
1411
1412         if (!mutex_trylock(&kexec_mutex))
1413                 return -EBUSY;
1414
1415         dest_image = &kexec_image;
1416         if (flags & KEXEC_FILE_ON_CRASH)
1417                 dest_image = &kexec_crash_image;
1418
1419         if (flags & KEXEC_FILE_UNLOAD)
1420                 goto exchange;
1421
1422         /*
1423          * In case of crash, new kernel gets loaded in reserved region. It is
1424          * same memory where old crash kernel might be loaded. Free any
1425          * current crash dump kernel before we corrupt it.
1426          */
1427         if (flags & KEXEC_FILE_ON_CRASH)
1428                 kimage_free(xchg(&kexec_crash_image, NULL));
1429
1430         ret = kimage_file_alloc_init(&image, kernel_fd, initrd_fd, cmdline_ptr,
1431                                      cmdline_len, flags);
1432         if (ret)
1433                 goto out;
1434
1435         ret = machine_kexec_prepare(image);
1436         if (ret)
1437                 goto out;
1438
1439         ret = kexec_calculate_store_digests(image);
1440         if (ret)
1441                 goto out;
1442
1443         for (i = 0; i < image->nr_segments; i++) {
1444                 struct kexec_segment *ksegment;
1445
1446                 ksegment = &image->segment[i];
1447                 pr_debug("Loading segment %d: buf=0x%p bufsz=0x%zx mem=0x%lx memsz=0x%zx\n",
1448                          i, ksegment->buf, ksegment->bufsz, ksegment->mem,
1449                          ksegment->memsz);
1450
1451                 ret = kimage_load_segment(image, &image->segment[i]);
1452                 if (ret)
1453                         goto out;
1454         }
1455
1456         kimage_terminate(image);
1457
1458         /*
1459          * Free up any temporary buffers allocated which are not needed
1460          * after image has been loaded
1461          */
1462         kimage_file_post_load_cleanup(image);
1463 exchange:
1464         image = xchg(dest_image, image);
1465 out:
1466         mutex_unlock(&kexec_mutex);
1467         kimage_free(image);
1468         return ret;
1469 }
1470
1471 #endif /* CONFIG_KEXEC_FILE */
1472
1473 void crash_kexec(struct pt_regs *regs)
1474 {
1475         /* Take the kexec_mutex here to prevent sys_kexec_load
1476          * running on one cpu from replacing the crash kernel
1477          * we are using after a panic on a different cpu.
1478          *
1479          * If the crash kernel was not located in a fixed area
1480          * of memory the xchg(&kexec_crash_image) would be
1481          * sufficient.  But since I reuse the memory...
1482          */
1483         if (mutex_trylock(&kexec_mutex)) {
1484                 if (kexec_crash_image) {
1485                         struct pt_regs fixed_regs;
1486
1487                         crash_setup_regs(&fixed_regs, regs);
1488                         crash_save_vmcoreinfo();
1489                         machine_crash_shutdown(&fixed_regs);
1490                         machine_kexec(kexec_crash_image);
1491                 }
1492                 mutex_unlock(&kexec_mutex);
1493         }
1494 }
1495
1496 size_t crash_get_memory_size(void)
1497 {
1498         size_t size = 0;
1499         mutex_lock(&kexec_mutex);
1500         if (crashk_res.end != crashk_res.start)
1501                 size = resource_size(&crashk_res);
1502         mutex_unlock(&kexec_mutex);
1503         return size;
1504 }
1505
1506 void __weak crash_free_reserved_phys_range(unsigned long begin,
1507                                            unsigned long end)
1508 {
1509         unsigned long addr;
1510
1511         for (addr = begin; addr < end; addr += PAGE_SIZE)
1512                 free_reserved_page(pfn_to_page(addr >> PAGE_SHIFT));
1513 }
1514
1515 int crash_shrink_memory(unsigned long new_size)
1516 {
1517         int ret = 0;
1518         unsigned long start, end;
1519         unsigned long old_size;
1520         struct resource *ram_res;
1521
1522         mutex_lock(&kexec_mutex);
1523
1524         if (kexec_crash_image) {
1525                 ret = -ENOENT;
1526                 goto unlock;
1527         }
1528         start = crashk_res.start;
1529         end = crashk_res.end;
1530         old_size = (end == 0) ? 0 : end - start + 1;
1531         if (new_size >= old_size) {
1532                 ret = (new_size == old_size) ? 0 : -EINVAL;
1533                 goto unlock;
1534         }
1535
1536         ram_res = kzalloc(sizeof(*ram_res), GFP_KERNEL);
1537         if (!ram_res) {
1538                 ret = -ENOMEM;
1539                 goto unlock;
1540         }
1541
1542         start = roundup(start, KEXEC_CRASH_MEM_ALIGN);
1543         end = roundup(start + new_size, KEXEC_CRASH_MEM_ALIGN);
1544
1545         crash_map_reserved_pages();
1546         crash_free_reserved_phys_range(end, crashk_res.end);
1547
1548         if ((start == end) && (crashk_res.parent != NULL))
1549                 release_resource(&crashk_res);
1550
1551         ram_res->start = end;
1552         ram_res->end = crashk_res.end;
1553         ram_res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
1554         ram_res->name = "System RAM";
1555
1556         crashk_res.end = end - 1;
1557
1558         insert_resource(&iomem_resource, ram_res);
1559         crash_unmap_reserved_pages();
1560
1561 unlock:
1562         mutex_unlock(&kexec_mutex);
1563         return ret;
1564 }
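
/*
 * Worked example (hypothetical numbers): with crashk_res spanning
 * [0x10000000, 0x17ffffff] (128M) and new_size = 64M, assuming
 * KEXEC_CRASH_MEM_ALIGN does not change the rounding here:
 *
 *   start = 0x10000000
 *   end   = roundup(0x10000000 + 64M, KEXEC_CRASH_MEM_ALIGN) = 0x14000000
 *
 * The pages in [0x14000000, 0x17ffffff] are freed, that range is
 * re-inserted as a "System RAM" resource, and crashk_res shrinks to
 * [0x10000000, 0x13ffffff].
 */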
1565
1566 static u32 *append_elf_note(u32 *buf, char *name, unsigned type, void *data,
1567                             size_t data_len)
1568 {
1569         struct elf_note note;
1570
1571         note.n_namesz = strlen(name) + 1;
1572         note.n_descsz = data_len;
1573         note.n_type   = type;
1574         memcpy(buf, &note, sizeof(note));
1575         buf += (sizeof(note) + 3)/4;
1576         memcpy(buf, name, note.n_namesz);
1577         buf += (note.n_namesz + 3)/4;
1578         memcpy(buf, data, note.n_descsz);
1579         buf += (note.n_descsz + 3)/4;
1580
1581         return buf;
1582 }
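
/*
 * Layout example (illustrative, with assumed sizes): for a note named
 * "CORE" (KEXEC_CORE_NOTE_NAME) carrying an elf_prstatus payload,
 * append_elf_note() emits three 4-byte-aligned pieces:
 *
 *   header: n_namesz = 5 ("CORE" + NUL), n_descsz = sizeof(prstatus),
 *           n_type = NT_PRSTATUS       -> sizeof(struct elf_note) = 12
 *   name:   "CORE\0" padded up         -> (5 + 3)/4 = 2 words (8 bytes)
 *   desc:   payload padded up          -> (n_descsz + 3)/4 words
 *
 * The returned pointer is word-aligned, ready for the next note or for
 * the all-zero terminator written by final_note().
 */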
1583
1584 static void final_note(u32 *buf)
1585 {
1586         struct elf_note note;
1587
1588         note.n_namesz = 0;
1589         note.n_descsz = 0;
1590         note.n_type   = 0;
1591         memcpy(buf, &note, sizeof(note));
1592 }
1593
1594 void crash_save_cpu(struct pt_regs *regs, int cpu)
1595 {
1596         struct elf_prstatus prstatus;
1597         u32 *buf;
1598
1599         if ((cpu < 0) || (cpu >= nr_cpu_ids))
1600                 return;
1601
1602         /* Using ELF notes here is opportunistic.
1603          * I need a well defined structure format
1604          * for the data I pass, and I need tags
1605          * on the data to indicate what information I have
1606          * squirrelled away.  ELF notes happen to provide
1607          * all of that, so there is no need to invent something new.
1608          */
1609         buf = (u32 *)per_cpu_ptr(crash_notes, cpu);
1610         if (!buf)
1611                 return;
1612         memset(&prstatus, 0, sizeof(prstatus));
1613         prstatus.pr_pid = current->pid;
1614         elf_core_copy_kernel_regs(&prstatus.pr_reg, regs);
1615         buf = append_elf_note(buf, KEXEC_CORE_NOTE_NAME, NT_PRSTATUS,
1616                               &prstatus, sizeof(prstatus));
1617         final_note(buf);
1618 }
1619
1620 static int __init crash_notes_memory_init(void)
1621 {
1622         /* Allocate memory for saving cpu registers. */
1623         crash_notes = alloc_percpu(note_buf_t);
1624         if (!crash_notes) {
1625                 pr_warn("Kexec: Memory allocation for saving cpu register states failed\n");
1626                 return -ENOMEM;
1627         }
1628         return 0;
1629 }
1630 subsys_initcall(crash_notes_memory_init);
1631
1632
1633 /*
1634  * Parsing of the "crashkernel" command line.
1635  *
1636  * This code is intended to be called from architecture-specific code.
1637  */
1638
1639
1640 /*
1641  * This function parses command lines in the format
1642  *
1643  *   crashkernel=ramsize-range:size[,...][@offset]
1644  *
1645  * The function returns 0 on success and -EINVAL on failure.
1646  */
1647 static int __init parse_crashkernel_mem(char *cmdline,
1648                                         unsigned long long system_ram,
1649                                         unsigned long long *crash_size,
1650                                         unsigned long long *crash_base)
1651 {
1652         char *cur = cmdline, *tmp;
1653
1654         /* for each entry of the comma-separated list */
1655         do {
1656                 unsigned long long start, end = ULLONG_MAX, size;
1657
1658                 /* get the start of the range */
1659                 start = memparse(cur, &tmp);
1660                 if (cur == tmp) {
1661                         pr_warn("crashkernel: Memory value expected\n");
1662                         return -EINVAL;
1663                 }
1664                 cur = tmp;
1665                 if (*cur != '-') {
1666                         pr_warn("crashkernel: '-' expected\n");
1667                         return -EINVAL;
1668                 }
1669                 cur++;
1670
1671                 /* if no ':' is here, then we read the end of the range */
1672                 if (*cur != ':') {
1673                         end = memparse(cur, &tmp);
1674                         if (cur == tmp) {
1675                                 pr_warn("crashkernel: Memory value expected\n");
1676                                 return -EINVAL;
1677                         }
1678                         cur = tmp;
1679                         if (end <= start) {
1680                                 pr_warn("crashkernel: end <= start\n");
1681                                 return -EINVAL;
1682                         }
1683                 }
1684
1685                 if (*cur != ':') {
1686                         pr_warn("crashkernel: ':' expected\n");
1687                         return -EINVAL;
1688                 }
1689                 cur++;
1690
1691                 size = memparse(cur, &tmp);
1692                 if (cur == tmp) {
1693                         pr_warn("crashkernel: Memory value expected\n");
1694                         return -EINVAL;
1695                 }
1696                 cur = tmp;
1697                 if (size >= system_ram) {
1698                         pr_warn("crashkernel: invalid size\n");
1699                         return -EINVAL;
1700                 }
1701
1702                 /* does the system RAM size fall into this range? */
1703                 if (system_ram >= start && system_ram < end) {
1704                         *crash_size = size;
1705                         break;
1706                 }
1707         } while (*cur++ == ',');
1708
1709         if (*crash_size > 0) {
1710                 while (*cur && *cur != ' ' && *cur != '@')
1711                         cur++;
1712                 if (*cur == '@') {
1713                         cur++;
1714                         *crash_base = memparse(cur, &tmp);
1715                         if (cur == tmp) {
1716                         pr_warn("crashkernel: Memory value expected after '@'\n");
1717                                 return -EINVAL;
1718                         }
1719                 }
1720         }
1721
1722         return 0;
1723 }
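
/*
 * Worked example (hypothetical command line): with
 *
 *   crashkernel=512M-2G:64M,2G-:128M
 *
 * and system_ram = 4G, the first range is rejected because 4G is not
 * below 2G, while the second range (end defaults to ULLONG_MAX)
 * matches and sets *crash_size = 128M.  A trailing "@offset" after the
 * ranges would additionally set *crash_base.
 */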
1724
1725 /*
1726  * This function parses "simple" (old) crashkernel command lines like
1727  *
1728  *      crashkernel=size[@offset]
1729  *
1730  * It returns 0 on success and -EINVAL on failure.
1731  */
1732 static int __init parse_crashkernel_simple(char *cmdline,
1733                                            unsigned long long *crash_size,
1734                                            unsigned long long *crash_base)
1735 {
1736         char *cur = cmdline;
1737
1738         *crash_size = memparse(cmdline, &cur);
1739         if (cmdline == cur) {
1740                 pr_warn("crashkernel: memory value expected\n");
1741                 return -EINVAL;
1742         }
1743
1744         if (*cur == '@')
1745                 *crash_base = memparse(cur+1, &cur);
1746         else if (*cur != ' ' && *cur != '\0') {
1747                 pr_warn("crashkernel: unrecognized char\n");
1748                 return -EINVAL;
1749         }
1750
1751         return 0;
1752 }
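
/*
 * Worked example (hypothetical command line): "crashkernel=128M@16M"
 * sets *crash_size = 128M from the first memparse() and, since the
 * next character is '@', *crash_base = 16M from the second.  A plain
 * "crashkernel=128M" leaves *crash_base at the zero that
 * __parse_crashkernel() initialized it to.
 */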
1753
1754 #define SUFFIX_HIGH 0
1755 #define SUFFIX_LOW  1
1756 #define SUFFIX_NULL 2
1757 static __initdata char *suffix_tbl[] = {
1758         [SUFFIX_HIGH] = ",high",
1759         [SUFFIX_LOW]  = ",low",
1760         [SUFFIX_NULL] = NULL,
1761 };
1762
1763 /*
1764  * This function parses "suffix" crashkernel command lines like
1765  *
1766  *      crashkernel=size,[high|low]
1767  *
1768  * It returns 0 on success and -EINVAL on failure.
1769  */
1770 static int __init parse_crashkernel_suffix(char *cmdline,
1771                                            unsigned long long   *crash_size,
1772                                            const char *suffix)
1773 {
1774         char *cur = cmdline;
1775
1776         *crash_size = memparse(cmdline, &cur);
1777         if (cmdline == cur) {
1778                 pr_warn("crashkernel: memory value expected\n");
1779                 return -EINVAL;
1780         }
1781
1782         /* check with suffix */
1783         if (strncmp(cur, suffix, strlen(suffix))) {
1784                 pr_warn("crashkernel: unrecognized char\n");
1785                 return -EINVAL;
1786         }
1787         cur += strlen(suffix);
1788         if (*cur != ' ' && *cur != '\0') {
1789                 pr_warn("crashkernel: unrecognized char\n");
1790                 return -EINVAL;
1791         }
1792
1793         return 0;
1794 }
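
/*
 * Worked example (hypothetical command line): called with
 * suffix = ",high", "crashkernel=256M,high" yields *crash_size = 256M,
 * while "crashkernel=256M,low" fails the strncmp() check and returns
 * -EINVAL, as does any trailing garbage after the suffix.
 */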
1795
1796 static __init char *get_last_crashkernel(char *cmdline,
1797                              const char *name,
1798                              const char *suffix)
1799 {
1800         char *p = cmdline, *ck_cmdline = NULL;
1801
1802         /* find crashkernel and use the last one if there are more */
1803         p = strstr(p, name);
1804         while (p) {
1805                 char *end_p = strchr(p, ' ');
1806                 char *q;
1807
1808                 if (!end_p)
1809                         end_p = p + strlen(p);
1810
1811                 if (!suffix) {
1812                         int i;
1813
1814                         /* skip the one with any known suffix */
1815                         for (i = 0; suffix_tbl[i]; i++) {
1816                                 q = end_p - strlen(suffix_tbl[i]);
1817                                 if (!strncmp(q, suffix_tbl[i],
1818                                              strlen(suffix_tbl[i])))
1819                                         goto next;
1820                         }
1821                         ck_cmdline = p;
1822                 } else {
1823                         q = end_p - strlen(suffix);
1824                         if (!strncmp(q, suffix, strlen(suffix)))
1825                                 ck_cmdline = p;
1826                 }
1827 next:
1828                 p = strstr(p+1, name);
1829         }
1830
1831         if (!ck_cmdline)
1832                 return NULL;
1833
1834         return ck_cmdline;
1835 }
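
/*
 * Matching example (hypothetical command line): given
 *
 *   crashkernel=128M crashkernel=256M,high crashkernel=512M
 *
 * a lookup with suffix == NULL skips the ",high" entry and returns the
 * last plain one ("crashkernel=512M"), whereas a lookup with
 * suffix = ",high" returns the middle entry.
 */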
1836
1837 static int __init __parse_crashkernel(char *cmdline,
1838                              unsigned long long system_ram,
1839                              unsigned long long *crash_size,
1840                              unsigned long long *crash_base,
1841                              const char *name,
1842                              const char *suffix)
1843 {
1844         char    *first_colon, *first_space;
1845         char    *ck_cmdline;
1846
1847         BUG_ON(!crash_size || !crash_base);
1848         *crash_size = 0;
1849         *crash_base = 0;
1850
1851         ck_cmdline = get_last_crashkernel(cmdline, name, suffix);
1852
1853         if (!ck_cmdline)
1854                 return -EINVAL;
1855
1856         ck_cmdline += strlen(name);
1857
1858         if (suffix)
1859                 return parse_crashkernel_suffix(ck_cmdline, crash_size,
1860                                 suffix);
1861         /*
1862          * if the commandline contains a ':', then that's the extended
1863          * syntax -- if not, it must be the classic syntax
1864          */
1865         first_colon = strchr(ck_cmdline, ':');
1866         first_space = strchr(ck_cmdline, ' ');
1867         if (first_colon && (!first_space || first_colon < first_space))
1868                 return parse_crashkernel_mem(ck_cmdline, system_ram,
1869                                 crash_size, crash_base);
1870
1871         return parse_crashkernel_simple(ck_cmdline, crash_size, crash_base);
1872 }
1873
1874 /*
1875  * This function is the entry point for command line parsing and should be
1876  * called from the arch-specific code.
1877  */
1878 int __init parse_crashkernel(char *cmdline,
1879                              unsigned long long system_ram,
1880                              unsigned long long *crash_size,
1881                              unsigned long long *crash_base)
1882 {
1883         return __parse_crashkernel(cmdline, system_ram, crash_size, crash_base,
1884                                         "crashkernel=", NULL);
1885 }
1886
1887 int __init parse_crashkernel_high(char *cmdline,
1888                              unsigned long long system_ram,
1889                              unsigned long long *crash_size,
1890                              unsigned long long *crash_base)
1891 {
1892         return __parse_crashkernel(cmdline, system_ram, crash_size, crash_base,
1893                                 "crashkernel=", suffix_tbl[SUFFIX_HIGH]);
1894 }
1895
1896 int __init parse_crashkernel_low(char *cmdline,
1897                              unsigned long long system_ram,
1898                              unsigned long long *crash_size,
1899                              unsigned long long *crash_base)
1900 {
1901         return __parse_crashkernel(cmdline, system_ram, crash_size, crash_base,
1902                                 "crashkernel=", suffix_tbl[SUFFIX_LOW]);
1903 }
1904
1905 static void update_vmcoreinfo_note(void)
1906 {
1907         u32 *buf = vmcoreinfo_note;
1908
1909         if (!vmcoreinfo_size)
1910                 return;
1911         buf = append_elf_note(buf, VMCOREINFO_NOTE_NAME, 0, vmcoreinfo_data,
1912                               vmcoreinfo_size);
1913         final_note(buf);
1914 }
1915
1916 void crash_save_vmcoreinfo(void)
1917 {
1918         vmcoreinfo_append_str("CRASHTIME=%ld\n", get_seconds());
1919         update_vmcoreinfo_note();
1920 }
1921
1922 void vmcoreinfo_append_str(const char *fmt, ...)
1923 {
1924         va_list args;
1925         char buf[0x50];
1926         size_t r;
1927
1928         va_start(args, fmt);
1929         r = vscnprintf(buf, sizeof(buf), fmt, args);
1930         va_end(args);
1931
1932         r = min(r, vmcoreinfo_max_size - vmcoreinfo_size);
1933
1934         memcpy(&vmcoreinfo_data[vmcoreinfo_size], buf, r);
1935
1936         vmcoreinfo_size += r;
1937 }
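
/*
 * Usage sketch (mirrors the VMCOREINFO_* helpers in <linux/kexec.h>):
 * the macros used by crash_save_vmcoreinfo_init() below boil down to
 * calls such as
 *
 *   vmcoreinfo_append_str("SYMBOL(%s)=%lx\n", "_stext",
 *                         (unsigned long)&_stext);
 *
 * so the vmcoreinfo note ends up as newline-separated "KEY=value"
 * strings that the vmcore tools parse back out.
 */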
1938
1939 /*
1940  * provide an empty default implementation here -- architecture
1941  * code may override this
1942  */
1943 void __weak arch_crash_save_vmcoreinfo(void)
1944 {}
1945
1946 unsigned long __weak paddr_vmcoreinfo_note(void)
1947 {
1948         return __pa((unsigned long)(char *)&vmcoreinfo_note);
1949 }
1950
1951 static int __init crash_save_vmcoreinfo_init(void)
1952 {
1953         VMCOREINFO_OSRELEASE(init_uts_ns.name.release);
1954         VMCOREINFO_PAGESIZE(PAGE_SIZE);
1955
1956         VMCOREINFO_SYMBOL(init_uts_ns);
1957         VMCOREINFO_SYMBOL(node_online_map);
1958 #ifdef CONFIG_MMU
1959         VMCOREINFO_SYMBOL(swapper_pg_dir);
1960 #endif
1961         VMCOREINFO_SYMBOL(_stext);
1962         VMCOREINFO_SYMBOL(vmap_area_list);
1963
1964 #ifndef CONFIG_NEED_MULTIPLE_NODES
1965         VMCOREINFO_SYMBOL(mem_map);
1966         VMCOREINFO_SYMBOL(contig_page_data);
1967 #endif
1968 #ifdef CONFIG_SPARSEMEM
1969         VMCOREINFO_SYMBOL(mem_section);
1970         VMCOREINFO_LENGTH(mem_section, NR_SECTION_ROOTS);
1971         VMCOREINFO_STRUCT_SIZE(mem_section);
1972         VMCOREINFO_OFFSET(mem_section, section_mem_map);
1973 #endif
1974         VMCOREINFO_STRUCT_SIZE(page);
1975         VMCOREINFO_STRUCT_SIZE(pglist_data);
1976         VMCOREINFO_STRUCT_SIZE(zone);
1977         VMCOREINFO_STRUCT_SIZE(free_area);
1978         VMCOREINFO_STRUCT_SIZE(list_head);
1979         VMCOREINFO_SIZE(nodemask_t);
1980         VMCOREINFO_OFFSET(page, flags);
1981         VMCOREINFO_OFFSET(page, _count);
1982         VMCOREINFO_OFFSET(page, mapping);
1983         VMCOREINFO_OFFSET(page, lru);
1984         VMCOREINFO_OFFSET(page, _mapcount);
1985         VMCOREINFO_OFFSET(page, private);
1986         VMCOREINFO_OFFSET(pglist_data, node_zones);
1987         VMCOREINFO_OFFSET(pglist_data, nr_zones);
1988 #ifdef CONFIG_FLAT_NODE_MEM_MAP
1989         VMCOREINFO_OFFSET(pglist_data, node_mem_map);
1990 #endif
1991         VMCOREINFO_OFFSET(pglist_data, node_start_pfn);
1992         VMCOREINFO_OFFSET(pglist_data, node_spanned_pages);
1993         VMCOREINFO_OFFSET(pglist_data, node_id);
1994         VMCOREINFO_OFFSET(zone, free_area);
1995         VMCOREINFO_OFFSET(zone, vm_stat);
1996         VMCOREINFO_OFFSET(zone, spanned_pages);
1997         VMCOREINFO_OFFSET(free_area, free_list);
1998         VMCOREINFO_OFFSET(list_head, next);
1999         VMCOREINFO_OFFSET(list_head, prev);
2000         VMCOREINFO_OFFSET(vmap_area, va_start);
2001         VMCOREINFO_OFFSET(vmap_area, list);
2002         VMCOREINFO_LENGTH(zone.free_area, MAX_ORDER);
2003         log_buf_kexec_setup();
2004         VMCOREINFO_LENGTH(free_area.free_list, MIGRATE_TYPES);
2005         VMCOREINFO_NUMBER(NR_FREE_PAGES);
2006         VMCOREINFO_NUMBER(PG_lru);
2007         VMCOREINFO_NUMBER(PG_private);
2008         VMCOREINFO_NUMBER(PG_swapcache);
2009         VMCOREINFO_NUMBER(PG_slab);
2010 #ifdef CONFIG_MEMORY_FAILURE
2011         VMCOREINFO_NUMBER(PG_hwpoison);
2012 #endif
2013         VMCOREINFO_NUMBER(PG_head_mask);
2014         VMCOREINFO_NUMBER(PAGE_BUDDY_MAPCOUNT_VALUE);
2015 #ifdef CONFIG_HUGETLBFS
2016         VMCOREINFO_SYMBOL(free_huge_page);
2017 #endif
2018
2019         arch_crash_save_vmcoreinfo();
2020         update_vmcoreinfo_note();
2021
2022         return 0;
2023 }
2024
2025 subsys_initcall(crash_save_vmcoreinfo_init);
2026
2027 #ifdef CONFIG_KEXEC_FILE
2028 static int locate_mem_hole_top_down(unsigned long start, unsigned long end,
2029                                     struct kexec_buf *kbuf)
2030 {
2031         struct kimage *image = kbuf->image;
2032         unsigned long temp_start, temp_end;
2033
2034         temp_end = min(end, kbuf->buf_max);
2035         temp_start = temp_end - kbuf->memsz;
2036
2037         do {
2038                 /* align down start */
2039                 temp_start = temp_start & (~(kbuf->buf_align - 1));
2040
2041                 if (temp_start < start || temp_start < kbuf->buf_min)
2042                         return 0;
2043
2044                 temp_end = temp_start + kbuf->memsz - 1;
2045
2046                 /*
2047                  * Make sure this does not conflict with any existing
2048                  * segments.
2049                  */
2050                 if (kimage_is_destination_range(image, temp_start, temp_end)) {
2051                         temp_start = temp_start - PAGE_SIZE;
2052                         continue;
2053                 }
2054
2055                 /* We found a suitable memory range */
2056                 break;
2057         } while (1);
2058
2059         /* If we are here, we found a suitable memory range */
2060         kbuf->mem = temp_start;
2061
2062         /* Success, stop navigating through remaining System RAM ranges */
2063         return 1;
2064 }
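
/*
 * Worked example (hypothetical numbers): searching a RAM range
 * [0x1000000, 0x4000000] for memsz = 0x200000 with buf_align = 0x100000
 * and buf_max = 0x3000000:
 *
 *   temp_end   = min(0x4000000, 0x3000000) = 0x3000000
 *   temp_start = 0x3000000 - 0x200000      = 0x2e00000 (already aligned)
 *
 * If [0x2e00000, 0x2ffffff] overlaps an existing destination range, the
 * candidate slides down a page at a time (re-aligned each pass) until
 * it fits or drops below start/buf_min.
 */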
2065
2066 static int locate_mem_hole_bottom_up(unsigned long start, unsigned long end,
2067                                      struct kexec_buf *kbuf)
2068 {
2069         struct kimage *image = kbuf->image;
2070         unsigned long temp_start, temp_end;
2071
2072         temp_start = max(start, kbuf->buf_min);
2073
2074         do {
2075                 temp_start = ALIGN(temp_start, kbuf->buf_align);
2076                 temp_end = temp_start + kbuf->memsz - 1;
2077
2078                 if (temp_end > end || temp_end > kbuf->buf_max)
2079                         return 0;
2080                 /*
2081                  * Make sure this does not conflict with any existing
2082                  * segments.
2083                  */
2084                 if (kimage_is_destination_range(image, temp_start, temp_end)) {
2085                         temp_start = temp_start + PAGE_SIZE;
2086                         continue;
2087                 }
2088
2089                 /* We found a suitable memory range */
2090                 break;
2091         } while (1);
2092
2093         /* If we are here, we found a suitable memory range */
2094         kbuf->mem = temp_start;
2095
2096         /* Success, stop navigating through remaining System RAM ranges */
2097         return 1;
2098 }
2099
2100 static int locate_mem_hole_callback(u64 start, u64 end, void *arg)
2101 {
2102         struct kexec_buf *kbuf = (struct kexec_buf *)arg;
2103         unsigned long sz = end - start + 1;
2104
2105         /* Returning 0 moves on to the next memory range */
2106         if (sz < kbuf->memsz)
2107                 return 0;
2108
2109         if (end < kbuf->buf_min || start > kbuf->buf_max)
2110                 return 0;
2111
2112         /*
2113          * Allocate memory top down within the RAM range if requested;
2114          * otherwise allocate bottom up.
2115          */
2116         if (kbuf->top_down)
2117                 return locate_mem_hole_top_down(start, end, kbuf);
2118         return locate_mem_hole_bottom_up(start, end, kbuf);
2119 }
2120
2121 /*
2122  * Helper function for placing a buffer in a kexec segment. This assumes
2123  * that kexec_mutex is held.
2124  */
2125 int kexec_add_buffer(struct kimage *image, char *buffer, unsigned long bufsz,
2126                      unsigned long memsz, unsigned long buf_align,
2127                      unsigned long buf_min, unsigned long buf_max,
2128                      bool top_down, unsigned long *load_addr)
2129 {
2130
2131         struct kexec_segment *ksegment;
2132         struct kexec_buf buf, *kbuf;
2133         int ret;
2134
2135         /* Currently, adding a segment this way is allowed only in file mode */
2136         if (!image->file_mode)
2137                 return -EINVAL;
2138
2139         if (image->nr_segments >= KEXEC_SEGMENT_MAX)
2140                 return -EINVAL;
2141
2142         /*
2143          * Make sure we are not trying to add a buffer after allocating
2144          * control pages. All segments need to be placed before any
2145          * control pages are allocated, since the control page allocation
2146          * logic goes through the list of segments to make sure there are
2147          * no destination overlaps.
2148          */
2149         if (!list_empty(&image->control_pages)) {
2150                 WARN_ON(1);
2151                 return -EINVAL;
2152         }
2153
2154         memset(&buf, 0, sizeof(struct kexec_buf));
2155         kbuf = &buf;
2156         kbuf->image = image;
2157         kbuf->buffer = buffer;
2158         kbuf->bufsz = bufsz;
2159
2160         kbuf->memsz = ALIGN(memsz, PAGE_SIZE);
2161         kbuf->buf_align = max(buf_align, PAGE_SIZE);
2162         kbuf->buf_min = buf_min;
2163         kbuf->buf_max = buf_max;
2164         kbuf->top_down = top_down;
2165
2166         /* Walk the RAM ranges and allocate a suitable range for the buffer */
2167         if (image->type == KEXEC_TYPE_CRASH)
2168                 ret = walk_iomem_res("Crash kernel",
2169                                      IORESOURCE_MEM | IORESOURCE_BUSY,
2170                                      crashk_res.start, crashk_res.end, kbuf,
2171                                      locate_mem_hole_callback);
2172         else
2173                 ret = walk_system_ram_res(0, -1, kbuf,
2174                                           locate_mem_hole_callback);
2175         if (ret != 1) {
2176                 /* A suitable memory range could not be found for buffer */
2177                 return -EADDRNOTAVAIL;
2178         }
2179
2180         /* Found a suitable memory range */
2181         ksegment = &image->segment[image->nr_segments];
2182         ksegment->kbuf = kbuf->buffer;
2183         ksegment->bufsz = kbuf->bufsz;
2184         ksegment->mem = kbuf->mem;
2185         ksegment->memsz = kbuf->memsz;
2186         image->nr_segments++;
2187         *load_addr = ksegment->mem;
2188         return 0;
2189 }
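
/*
 * Usage sketch (hypothetical caller, modelled on __kexec_load_purgatory()
 * below): an arch loader that has read an image into "buf" would place
 * it with something like
 *
 *   unsigned long load_addr;
 *   ret = kexec_add_buffer(image, buf, bufsz, bufsz, PAGE_SIZE,
 *                          0, ULONG_MAX, true, &load_addr);
 *
 * On success, load_addr holds the physical address picked from the
 * walked System RAM (or crash kernel) ranges.
 */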
2190
2191 /* Calculate and store the digest of segments */
2192 static int kexec_calculate_store_digests(struct kimage *image)
2193 {
2194         struct crypto_shash *tfm;
2195         struct shash_desc *desc;
2196         int ret = 0, i, j, zero_buf_sz, sha_region_sz;
2197         size_t desc_size, nullsz;
2198         char *digest;
2199         void *zero_buf;
2200         struct kexec_sha_region *sha_regions;
2201         struct purgatory_info *pi = &image->purgatory_info;
2202
2203         zero_buf = __va(page_to_pfn(ZERO_PAGE(0)) << PAGE_SHIFT);
2204         zero_buf_sz = PAGE_SIZE;
2205
2206         tfm = crypto_alloc_shash("sha256", 0, 0);
2207         if (IS_ERR(tfm)) {
2208                 ret = PTR_ERR(tfm);
2209                 goto out;
2210         }
2211
2212         desc_size = crypto_shash_descsize(tfm) + sizeof(*desc);
2213         desc = kzalloc(desc_size, GFP_KERNEL);
2214         if (!desc) {
2215                 ret = -ENOMEM;
2216                 goto out_free_tfm;
2217         }
2218
2219         sha_region_sz = KEXEC_SEGMENT_MAX * sizeof(struct kexec_sha_region);
2220         sha_regions = vzalloc(sha_region_sz);
2221         if (!sha_regions) {
2222                 ret = -ENOMEM;
                     goto out_free_desc;
             }
2223
2224         desc->tfm   = tfm;
2225         desc->flags = 0;
2226
2227         ret = crypto_shash_init(desc);
2228         if (ret < 0)
2229                 goto out_free_sha_regions;
2230
2231         digest = kzalloc(SHA256_DIGEST_SIZE, GFP_KERNEL);
2232         if (!digest) {
2233                 ret = -ENOMEM;
2234                 goto out_free_sha_regions;
2235         }
2236
2237         for (j = i = 0; i < image->nr_segments; i++) {
2238                 struct kexec_segment *ksegment;
2239
2240                 ksegment = &image->segment[i];
2241                 /*
2242                  * Skip purgatory as it will be modified once we put digest
2243                  * info in purgatory.
2244                  */
2245                 if (ksegment->kbuf == pi->purgatory_buf)
2246                         continue;
2247
2248                 ret = crypto_shash_update(desc, ksegment->kbuf,
2249                                           ksegment->bufsz);
2250                 if (ret)
2251                         break;
2252
2253                 /*
2254                  * Assume the rest of the buffer is filled with zeroes and
2255                  * update the digest accordingly.
2256                  */
2257                 nullsz = ksegment->memsz - ksegment->bufsz;
2258                 while (nullsz) {
2259                         unsigned long bytes = nullsz;
2260
2261                         if (bytes > zero_buf_sz)
2262                                 bytes = zero_buf_sz;
2263                         ret = crypto_shash_update(desc, zero_buf, bytes);
2264                         if (ret)
2265                                 break;
2266                         nullsz -= bytes;
2267                 }
2268
2269                 if (ret)
2270                         break;
2271
2272                 sha_regions[j].start = ksegment->mem;
2273                 sha_regions[j].len = ksegment->memsz;
2274                 j++;
2275         }
2276
2277         if (!ret) {
2278                 ret = crypto_shash_final(desc, digest);
2279                 if (ret)
2280                         goto out_free_digest;
2281                 ret = kexec_purgatory_get_set_symbol(image, "sha_regions",
2282                                                 sha_regions, sha_region_sz, 0);
2283                 if (ret)
2284                         goto out_free_digest;
2285
2286                 ret = kexec_purgatory_get_set_symbol(image, "sha256_digest",
2287                                                 digest, SHA256_DIGEST_SIZE, 0);
2288                 if (ret)
2289                         goto out_free_digest;
2290         }
2291
2292 out_free_digest:
2293         kfree(digest);
2294 out_free_sha_regions:
2295         vfree(sha_regions);
2296 out_free_desc:
2297         kfree(desc);
2298 out_free_tfm:
2299         crypto_free_shash(tfm);
2300 out:
2301         return ret;
2302 }
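
/*
 * Hashing example (hypothetical segment): for a segment with
 * bufsz = 0x1800 and memsz = 0x2000, the digest covers the 0x1800 bytes
 * of file data followed by 0x800 bytes of zeroes fed from the shared
 * zero page in zero_buf_sz-sized chunks.  The purgatory segment itself
 * is skipped, because storing "sha_regions" and "sha256_digest" into it
 * afterwards would invalidate its own hash.
 */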
2303
2304 /* Actually load purgatory. A lot of this code is taken from kexec-tools */
2305 static int __kexec_load_purgatory(struct kimage *image, unsigned long min,
2306                                   unsigned long max, int top_down)
2307 {
2308         struct purgatory_info *pi = &image->purgatory_info;
2309         unsigned long align, buf_align, bss_align, buf_sz, bss_sz, bss_pad;
2310         unsigned long memsz, entry, load_addr, curr_load_addr, bss_addr, offset;
2311         unsigned char *buf_addr, *src;
2312         int i, ret = 0, entry_sidx = -1;
2313         const Elf_Shdr *sechdrs_c;
2314         Elf_Shdr *sechdrs = NULL;
2315         void *purgatory_buf = NULL;
2316
2317         /*
2318          * sechdrs_c points to the section headers in purgatory and is
2319          * read-only. No modifications allowed.
2320          */
2321         sechdrs_c = (void *)pi->ehdr + pi->ehdr->e_shoff;
2322
2323         /*
2324          * We cannot modify sechdrs_c[] and its fields; they are read-only.
2325          * Copy them over to a local copy in which we can store temporary
2326          * data and which we free at the end. We need to modify the
2327          * ->sh_addr and ->sh_offset fields to keep track of the permanent
2328          * and temporary locations of sections.
2329          */
2330         sechdrs = vzalloc(pi->ehdr->e_shnum * sizeof(Elf_Shdr));
2331         if (!sechdrs)
2332                 return -ENOMEM;
2333
2334         memcpy(sechdrs, sechdrs_c, pi->ehdr->e_shnum * sizeof(Elf_Shdr));
2335
2336         /*
2337          * There are multiple copies of the sections. The first copy is
2338          * the one embedded in the kernel in a read-only section. Some of
2339          * these sections are copied to a temporary buffer and relocated,
2340          * and finally copied to their destination at segment load time.
2341          *
2342          * Use ->sh_offset to reflect the section's address in memory. It
2343          * points to the original read-only copy if the section is not
2344          * allocatable, and otherwise to the temporary copy that will be
2345          * relocated.
2346          *
2347          * Use ->sh_addr to hold the final address of the section, where
2348          * it will go at execution time.
2349          */
2350         for (i = 0; i < pi->ehdr->e_shnum; i++) {
2351                 if (sechdrs[i].sh_type == SHT_NOBITS)
2352                         continue;
2353
2354                 sechdrs[i].sh_offset = (unsigned long)pi->ehdr +
2355                                                 sechdrs[i].sh_offset;
2356         }
2357
2358         /*
2359          * Identify entry point section and make entry relative to section
2360          * start.
2361          */
2362         entry = pi->ehdr->e_entry;
2363         for (i = 0; i < pi->ehdr->e_shnum; i++) {
2364                 if (!(sechdrs[i].sh_flags & SHF_ALLOC))
2365                         continue;
2366
2367                 if (!(sechdrs[i].sh_flags & SHF_EXECINSTR))
2368                         continue;
2369
2370                 /* Make entry section relative */
2371                 if (sechdrs[i].sh_addr <= pi->ehdr->e_entry &&
2372                     ((sechdrs[i].sh_addr + sechdrs[i].sh_size) >
2373                      pi->ehdr->e_entry)) {
2374                         entry_sidx = i;
2375                         entry -= sechdrs[i].sh_addr;
2376                         break;
2377                 }
2378         }
2379
2380         /* Determine how much memory is needed to load the relocatable object. */
2381         buf_align = 1;
2382         bss_align = 1;
2383         buf_sz = 0;
2384         bss_sz = 0;
2385
2386         for (i = 0; i < pi->ehdr->e_shnum; i++) {
2387                 if (!(sechdrs[i].sh_flags & SHF_ALLOC))
2388                         continue;
2389
2390                 align = sechdrs[i].sh_addralign;
2391                 if (sechdrs[i].sh_type != SHT_NOBITS) {
2392                         if (buf_align < align)
2393                                 buf_align = align;
2394                         buf_sz = ALIGN(buf_sz, align);
2395                         buf_sz += sechdrs[i].sh_size;
2396                 } else {
2397                         /* bss section */
2398                         if (bss_align < align)
2399                                 bss_align = align;
2400                         bss_sz = ALIGN(bss_sz, align);
2401                         bss_sz += sechdrs[i].sh_size;
2402                 }
2403         }
2404
2405         /* Determine the bss padding required to align bss properly */
2406         bss_pad = 0;
2407         if (buf_sz & (bss_align - 1))
2408                 bss_pad = bss_align - (buf_sz & (bss_align - 1));
2409
2410         memsz = buf_sz + bss_pad + bss_sz;
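
        /*
         * Sizing example (hypothetical numbers): with buf_sz = 0x3400,
         * bss_align = 0x1000 and bss_sz = 0x800, the padding becomes
         * bss_pad = 0x1000 - (0x3400 & 0xfff) = 0xc00, so that
         * memsz = 0x3400 + 0xc00 + 0x800 = 0x4800 and the bss starts
         * bss_align-aligned right after the progbits data.
         */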
2411
2412         /* Allocate buffer for purgatory */
2413         purgatory_buf = vzalloc(buf_sz);
2414         if (!purgatory_buf) {
2415                 ret = -ENOMEM;
2416                 goto out;
2417         }
2418
2419         if (buf_align < bss_align)
2420                 buf_align = bss_align;
2421
2422         /* Add buffer to segment list */
2423         ret = kexec_add_buffer(image, purgatory_buf, buf_sz, memsz,
2424                                 buf_align, min, max, top_down,
2425                                 &pi->purgatory_load_addr);
2426         if (ret)
2427                 goto out;
2428
2429         /* Load SHF_ALLOC sections */
2430         buf_addr = purgatory_buf;
2431         load_addr = curr_load_addr = pi->purgatory_load_addr;
2432         bss_addr = load_addr + buf_sz + bss_pad;
2433
2434         for (i = 0; i < pi->ehdr->e_shnum; i++) {
2435                 if (!(sechdrs[i].sh_flags & SHF_ALLOC))
2436                         continue;
2437
2438                 align = sechdrs[i].sh_addralign;
2439                 if (sechdrs[i].sh_type != SHT_NOBITS) {
2440                         curr_load_addr = ALIGN(curr_load_addr, align);
2441                         offset = curr_load_addr - load_addr;
2442                         /* We already modified ->sh_offset to hold the src addr */
2443                         src = (char *) sechdrs[i].sh_offset;
2444                         memcpy(buf_addr + offset, src, sechdrs[i].sh_size);
2445
2446                         /* Store load address and source address of section */
2447                         sechdrs[i].sh_addr = curr_load_addr;
2448
2449                         /*
2450                          * This section got copied to temporary buffer. Update
2451                          * ->sh_offset accordingly.
2452                          */
2453                         sechdrs[i].sh_offset = (unsigned long)(buf_addr + offset);
2454
2455                         /* Advance to the next address */
2456                         curr_load_addr += sechdrs[i].sh_size;
2457                 } else {
2458                         bss_addr = ALIGN(bss_addr, align);
2459                         sechdrs[i].sh_addr = bss_addr;
2460                         bss_addr += sechdrs[i].sh_size;
2461                 }
2462         }
2463
2464         /* Update entry point based on load address of text section */
2465         if (entry_sidx >= 0)
2466                 entry += sechdrs[entry_sidx].sh_addr;
2467
2468         /* Make kernel jump to purgatory after shutdown */
2469         image->start = entry;
2470
2471         /* Used later to get/set symbol values */
2472         pi->sechdrs = sechdrs;
2473
2474         /*
2475          * Used later to identify which section is purgatory and skip it
2476          * from checksumming.
2477          */
2478         pi->purgatory_buf = purgatory_buf;
2479         return ret;
2480 out:
2481         vfree(sechdrs);
2482         vfree(purgatory_buf);
2483         return ret;
2484 }
2485
2486 static int kexec_apply_relocations(struct kimage *image)
2487 {
2488         int i, ret;
2489         struct purgatory_info *pi = &image->purgatory_info;
2490         Elf_Shdr *sechdrs = pi->sechdrs;
2491
2492         /* Apply relocations */
2493         for (i = 0; i < pi->ehdr->e_shnum; i++) {
2494                 Elf_Shdr *section, *symtab;
2495
2496                 if (sechdrs[i].sh_type != SHT_RELA &&
2497                     sechdrs[i].sh_type != SHT_REL)
2498                         continue;
2499
2500                 /*
2501                  * For sections of type SHT_RELA/SHT_REL, ->sh_link
2502                  * contains the section header index of the associated
2503                  * symbol table, and ->sh_info contains the section header
2504                  * index of the section to which the relocations apply.
2505                  */
2506                 if (sechdrs[i].sh_info >= pi->ehdr->e_shnum ||
2507                     sechdrs[i].sh_link >= pi->ehdr->e_shnum)
2508                         return -ENOEXEC;
2509
2510                 section = &sechdrs[sechdrs[i].sh_info];
2511                 symtab = &sechdrs[sechdrs[i].sh_link];
2512
2513                 if (!(section->sh_flags & SHF_ALLOC))
2514                         continue;
2515
2516                 /*
2517                  * symtab->sh_link contains the section header index of the
2518                  * associated string table.
2519                  */
2520                 if (symtab->sh_link >= pi->ehdr->e_shnum)
2521                         /* Invalid section number? */
2522                         continue;
2523
2524                 /*
2525                  * The respective architecture needs to provide support for
2526                  * applying relocations of type SHT_RELA/SHT_REL.
2527                  */
2528                 if (sechdrs[i].sh_type == SHT_RELA)
2529                         ret = arch_kexec_apply_relocations_add(pi->ehdr,
2530                                                                sechdrs, i);
2531                 else if (sechdrs[i].sh_type == SHT_REL)
2532                         ret = arch_kexec_apply_relocations(pi->ehdr,
2533                                                            sechdrs, i);
2534                 if (ret)
2535                         return ret;
2536         }
2537
2538         return 0;
2539 }
2540
2541 /* Load relocatable purgatory object and relocate it appropriately */
2542 int kexec_load_purgatory(struct kimage *image, unsigned long min,
2543                          unsigned long max, int top_down,
2544                          unsigned long *load_addr)
2545 {
2546         struct purgatory_info *pi = &image->purgatory_info;
2547         int ret;
2548
2549         if (kexec_purgatory_size <= 0)
2550                 return -EINVAL;
2551
2552         if (kexec_purgatory_size < sizeof(Elf_Ehdr))
2553                 return -ENOEXEC;
2554
2555         pi->ehdr = (Elf_Ehdr *)kexec_purgatory;
2556
2557         if (memcmp(pi->ehdr->e_ident, ELFMAG, SELFMAG) != 0
2558             || pi->ehdr->e_type != ET_REL
2559             || !elf_check_arch(pi->ehdr)
2560             || pi->ehdr->e_shentsize != sizeof(Elf_Shdr))
2561                 return -ENOEXEC;
2562
2563         if (pi->ehdr->e_shoff >= kexec_purgatory_size
2564             || (pi->ehdr->e_shnum * sizeof(Elf_Shdr) >
2565             kexec_purgatory_size - pi->ehdr->e_shoff))
2566                 return -ENOEXEC;
2567
2568         ret = __kexec_load_purgatory(image, min, max, top_down);
2569         if (ret)
2570                 return ret;
2571
2572         ret = kexec_apply_relocations(image);
2573         if (ret)
2574                 goto out;
2575
2576         *load_addr = pi->purgatory_load_addr;
2577         return 0;
2578 out:
2579         vfree(pi->sechdrs);
2580         vfree(pi->purgatory_buf);
2581         return ret;
2582 }
2583
2584 static Elf_Sym *kexec_purgatory_find_symbol(struct purgatory_info *pi,
2585                                             const char *name)
2586 {
2587         Elf_Sym *syms;
2588         Elf_Shdr *sechdrs;
2589         Elf_Ehdr *ehdr;
2590         int i, k;
2591         const char *strtab;
2592
2593         if (!pi->sechdrs || !pi->ehdr)
2594                 return NULL;
2595
2596         sechdrs = pi->sechdrs;
2597         ehdr = pi->ehdr;
2598
2599         for (i = 0; i < ehdr->e_shnum; i++) {
2600                 if (sechdrs[i].sh_type != SHT_SYMTAB)
2601                         continue;
2602
2603                 if (sechdrs[i].sh_link >= ehdr->e_shnum)
2604                         /* Invalid strtab section number */
2605                         continue;
2606                 strtab = (char *)sechdrs[sechdrs[i].sh_link].sh_offset;
2607                 syms = (Elf_Sym *)sechdrs[i].sh_offset;
2608
2609                 /* Go through symbols for a match */
2610                 for (k = 0; k < sechdrs[i].sh_size/sizeof(Elf_Sym); k++) {
2611                         if (ELF_ST_BIND(syms[k].st_info) != STB_GLOBAL)
2612                                 continue;
2613
2614                         if (strcmp(strtab + syms[k].st_name, name) != 0)
2615                                 continue;
2616
2617                         if (syms[k].st_shndx == SHN_UNDEF ||
2618                             syms[k].st_shndx >= ehdr->e_shnum) {
2619                                 pr_debug("Symbol: %s has bad section index %d.\n",
2620                                                 name, syms[k].st_shndx);
2621                                 return NULL;
2622                         }
2623
2624                         /* Found the symbol we are looking for */
2625                         return &syms[k];
2626                 }
2627         }
2628
2629         return NULL;
2630 }
2631
2632 void *kexec_purgatory_get_symbol_addr(struct kimage *image, const char *name)
2633 {
2634         struct purgatory_info *pi = &image->purgatory_info;
2635         Elf_Sym *sym;
2636         Elf_Shdr *sechdr;
2637
2638         sym = kexec_purgatory_find_symbol(pi, name);
2639         if (!sym)
2640                 return ERR_PTR(-EINVAL);
2641
2642         sechdr = &pi->sechdrs[sym->st_shndx];
2643
2644         /*
2645          * Returns the address where the symbol will finally be loaded
2646          * after kexec_load_segment().
2647          */
2648         return (void *)(sechdr->sh_addr + sym->st_value);
2649 }
2650
2651 /*
2652  * Get or set the value of a symbol. If "get_value" is true, the symbol's
2653  * value is returned in buf; otherwise it is set from the value in buf.
2654  */
2655 int kexec_purgatory_get_set_symbol(struct kimage *image, const char *name,
2656                                    void *buf, unsigned int size, bool get_value)
2657 {
2658         Elf_Sym *sym;
2659         Elf_Shdr *sechdrs;
2660         struct purgatory_info *pi = &image->purgatory_info;
2661         char *sym_buf;
2662
2663         sym = kexec_purgatory_find_symbol(pi, name);
2664         if (!sym)
2665                 return -EINVAL;
2666
2667         if (sym->st_size != size) {
2668                 pr_err("symbol %s size mismatch: expected %lu actual %u\n",
2669                        name, (unsigned long)sym->st_size, size);
2670                 return -EINVAL;
2671         }
2672
2673         sechdrs = pi->sechdrs;
2674
2675         if (sechdrs[sym->st_shndx].sh_type == SHT_NOBITS) {
2676                 pr_err("symbol %s is in a bss section. Cannot %s\n", name,
2677                        get_value ? "get" : "set");
2678                 return -EINVAL;
2679         }
2680
2681         sym_buf = (unsigned char *)sechdrs[sym->st_shndx].sh_offset +
2682                                         sym->st_value;
2683
2684         if (get_value)
2685                 memcpy((void *)buf, sym_buf, size);
2686         else
2687                 memcpy((void *)sym_buf, buf, size);
2688
2689         return 0;
2690 }
2691 #endif /* CONFIG_KEXEC_FILE */
2692
2693 /*
2694  * Move into place and start executing a preloaded standalone
2695  * executable.  If nothing was preloaded return an error.
2696  */
2697 int kernel_kexec(void)
2698 {
2699         int error = 0;
2700
2701         if (!mutex_trylock(&kexec_mutex))
2702                 return -EBUSY;
2703         if (!kexec_image) {
2704                 error = -EINVAL;
2705                 goto Unlock;
2706         }
2707
2708 #ifdef CONFIG_KEXEC_JUMP
2709         if (kexec_image->preserve_context) {
2710                 lock_system_sleep();
2711                 pm_prepare_console();
2712                 error = freeze_processes();
2713                 if (error) {
2714                         error = -EBUSY;
2715                         goto Restore_console;
2716                 }
2717                 suspend_console();
2718                 error = dpm_suspend_start(PMSG_FREEZE);
2719                 if (error)
2720                         goto Resume_console;
2721                 /* At this point, dpm_suspend_start() has been called,
2722                  * but *not* dpm_suspend_end(). We *must* call
2723                  * dpm_suspend_end() now.  Otherwise, drivers for
2724                  * some devices (e.g. interrupt controllers) become
2725                  * desynchronized with the actual state of the
2726                  * hardware at resume time, and evil weirdness ensues.
2727                  */
2728                 error = dpm_suspend_end(PMSG_FREEZE);
2729                 if (error)
2730                         goto Resume_devices;
2731                 error = disable_nonboot_cpus();
2732                 if (error)
2733                         goto Enable_cpus;
2734                 local_irq_disable();
2735                 error = syscore_suspend();
2736                 if (error)
2737                         goto Enable_irqs;
2738         } else
2739 #endif
2740         {
2741                 kexec_in_progress = true;
2742                 kernel_restart_prepare(NULL);
2743                 migrate_to_reboot_cpu();
2744
2745                 /*
2746                  * migrate_to_reboot_cpu() disables CPU hotplug assuming that
2747                  * no further code needs to use CPU hotplug (which is true in
2748                  * the reboot case). However, the kexec path depends on using
2749                  * CPU hotplug again; so re-enable it here.
2750                  */
2751                 cpu_hotplug_enable();
2752                 pr_emerg("Starting new kernel\n");
2753                 machine_shutdown();
2754         }
2755
2756         machine_kexec(kexec_image);
2757
2758 #ifdef CONFIG_KEXEC_JUMP
2759         if (kexec_image->preserve_context) {
2760                 syscore_resume();
2761  Enable_irqs:
2762                 local_irq_enable();
2763  Enable_cpus:
2764                 enable_nonboot_cpus();
2765                 dpm_resume_start(PMSG_RESTORE);
2766  Resume_devices:
2767                 dpm_resume_end(PMSG_RESTORE);
2768  Resume_console:
2769                 resume_console();
2770                 thaw_processes();
2771  Restore_console:
2772                 pm_restore_console();
2773                 unlock_system_sleep();
2774         }
2775 #endif
2776
2777  Unlock:
2778         mutex_unlock(&kexec_mutex);
2779         return error;
2780 }