/*
 * Machine specific setup for xen
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/pm.h>
#include <linux/memblock.h>
#include <linux/cpuidle.h>
#include <linux/cpufreq.h>

#include <asm/elf.h>
#include <asm/vdso.h>
#include <asm/e820.h>
#include <asm/setup.h>
#include <asm/acpi.h>
#include <asm/numa.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include <xen/xen.h>
#include <xen/page.h>
#include <xen/interface/callback.h>
#include <xen/interface/memory.h>
#include <xen/interface/physdev.h>
#include <xen/features.h>
#include <xen/hvc-console.h>
#include "xen-ops.h"
#include "vdso.h"
#include "mmu.h"
/* Amount of extra memory space we add to the e820 ranges */
struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS] __initdata;

/* Number of pages released from the initial allocation. */
unsigned long xen_released_pages;

/* E820 map used during setting up memory. */
static struct e820entry xen_e820_map[E820MAX] __initdata;
static u32 xen_e820_map_entries __initdata;

/*
 * Buffer used to remap identity mapped pages. We only need the virtual space.
 * The physical page behind this address is remapped as needed to different
 * buffer pages.
 */
#define REMAP_SIZE	(P2M_PER_PAGE - 3)
static struct {
	unsigned long	next_area_mfn;
	unsigned long	target_pfn;
	unsigned long	size;
	unsigned long	mfns[REMAP_SIZE];
} xen_remap_buf __initdata __aligned(PAGE_SIZE);
static unsigned long xen_remap_mfn __initdata = INVALID_P2M_ENTRY;
/*
 * The maximum amount of extra memory compared to the base size.  The
 * main scaling factor is the size of struct page.  At extreme ratios
 * of base:extra, all the base memory can be filled with page
 * structures for the extra memory, leaving no space for anything
 * else.
 *
 * 10x seems like a reasonable balance between scaling flexibility and
 * leaving a practically usable system.
 */
#define EXTRA_MEM_RATIO		(10)
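
/*
 * Example (illustrative): a domain started with a 1 GiB initial allocation
 * is allowed at most 10 GiB of extra memory regions on top of that; any
 * further RAM in the supplied map is marked E820_UNUSABLE in
 * xen_memory_setup().
 */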
static void __init xen_add_extra_mem(phys_addr_t start, phys_addr_t size)
{
	int i;

	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
		/* Add new region. */
		if (xen_extra_mem[i].size == 0) {
			xen_extra_mem[i].start = start;
			xen_extra_mem[i].size = size;
			break;
		}
		/* Append to existing region. */
		if (xen_extra_mem[i].start + xen_extra_mem[i].size == start) {
			xen_extra_mem[i].size += size;
			break;
		}
	}
	if (i == XEN_EXTRA_MEM_MAX_REGIONS)
		printk(KERN_WARNING "Warning: not enough extra memory regions\n");

	memblock_reserve(start, size);
}
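
/*
 * Note: the regions added above are placeholders only; they are kept out of
 * the early allocator via memblock_reserve() and their p2m entries are
 * invalidated in xen_inv_extra_mem(), so nothing touches them until memory
 * is actually populated there (typically by the balloon driver).
 */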
static void __init xen_del_extra_mem(phys_addr_t start, phys_addr_t size)
{
	int i;
	phys_addr_t start_r, size_r;

	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
		start_r = xen_extra_mem[i].start;
		size_r = xen_extra_mem[i].size;

		/* Start of region. */
		if (start_r == start) {
			BUG_ON(size > size_r);
			xen_extra_mem[i].start += size;
			xen_extra_mem[i].size -= size;
			break;
		}
		/* End of region. */
		if (start_r + size_r == start + size) {
			BUG_ON(size > size_r);
			xen_extra_mem[i].size -= size;
			break;
		}
		/* Mid of region. */
		if (start > start_r && start < start_r + size_r) {
			BUG_ON(start + size > start_r + size_r);
			xen_extra_mem[i].size = start - start_r;
			/* Calling memblock_reserve() again is okay. */
			xen_add_extra_mem(start + size, start_r + size_r -
					  (start + size));
			break;
		}
	}
	memblock_free(start, size);
}
/*
 * Called during boot before the p2m list can take entries beyond the
 * hypervisor supplied p2m list. Entries in extra mem are to be regarded as
 * invalid.
 */
unsigned long __ref xen_chk_extra_mem(unsigned long pfn)
{
	int i;
	phys_addr_t addr = PFN_PHYS(pfn);

	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
		if (addr >= xen_extra_mem[i].start &&
		    addr < xen_extra_mem[i].start + xen_extra_mem[i].size)
			return INVALID_P2M_ENTRY;
	}

	return IDENTITY_FRAME(pfn);
}
/*
 * Mark all pfns of extra mem as invalid in p2m list.
 */
void __init xen_inv_extra_mem(void)
{
	unsigned long pfn, pfn_s, pfn_e;
	int i;

	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
		if (!xen_extra_mem[i].size)
			continue;
		pfn_s = PFN_DOWN(xen_extra_mem[i].start);
		pfn_e = PFN_UP(xen_extra_mem[i].start + xen_extra_mem[i].size);
		for (pfn = pfn_s; pfn < pfn_e; pfn++)
			set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
	}
}
/*
 * Finds the next RAM pfn available in the E820 map after min_pfn.
 * This function updates min_pfn with the pfn found and returns
 * the size of that range or zero if not found.
 */
static unsigned long __init xen_find_pfn_range(unsigned long *min_pfn)
{
	const struct e820entry *entry = xen_e820_map;
	unsigned int i;
	unsigned long done = 0;

	for (i = 0; i < xen_e820_map_entries; i++, entry++) {
		unsigned long s_pfn;
		unsigned long e_pfn;

		if (entry->type != E820_RAM)
			continue;

		e_pfn = PFN_DOWN(entry->addr + entry->size);

		/* We only care about E820 after this */
		if (e_pfn < *min_pfn)
			continue;

		s_pfn = PFN_UP(entry->addr);

		/* If min_pfn falls within the E820 entry, we want to start
		 * at the min_pfn PFN.
		 */
		if (s_pfn <= *min_pfn) {
			done = e_pfn - *min_pfn;
		} else {
			done = e_pfn - s_pfn;
			*min_pfn = s_pfn;
		}
		break;
	}

	return done;
}
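
/*
 * Hand a single machine frame back to the hypervisor. Returns the number of
 * extents released, i.e. 1 on success.
 */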
static int __init xen_free_mfn(unsigned long mfn)
{
	struct xen_memory_reservation reservation = {
		.address_bits = 0,
		.extent_order = 0,
		.domid        = DOMID_SELF
	};

	set_xen_guest_handle(reservation.extent_start, &mfn);
	reservation.nr_extents = 1;

	return HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
}
/*
 * This releases a chunk of memory and then does the identity map. It's used
 * as a fallback if the remapping fails.
 */
static void __init xen_set_identity_and_release_chunk(unsigned long start_pfn,
			unsigned long end_pfn, unsigned long nr_pages)
{
	unsigned long pfn, end;
	int ret;

	WARN_ON(start_pfn > end_pfn);

	/* Release pages first. */
	end = min(end_pfn, nr_pages);
	for (pfn = start_pfn; pfn < end; pfn++) {
		unsigned long mfn = pfn_to_mfn(pfn);

		/* Make sure pfn exists to start with */
		if (mfn == INVALID_P2M_ENTRY || mfn_to_pfn(mfn) != pfn)
			continue;

		ret = xen_free_mfn(mfn);
		WARN(ret != 1, "Failed to release pfn %lx err=%d\n", pfn, ret);

		if (ret == 1) {
			xen_released_pages++;
			if (!__set_phys_to_machine(pfn, INVALID_P2M_ENTRY))
				break;
		} else
			break;
	}

	set_phys_range_identity(start_pfn, end_pfn);
}
/*
 * Helper function to update the p2m and m2p tables and kernel mapping.
 */
static void __init xen_update_mem_tables(unsigned long pfn, unsigned long mfn)
{
	struct mmu_update update = {
		.ptr = ((uint64_t)mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE,
		.val = pfn
	};

	/* Update p2m */
	if (!set_phys_to_machine(pfn, mfn)) {
		WARN(1, "Failed to set p2m mapping for pfn=%ld mfn=%ld\n",
		     pfn, mfn);
		BUG();
	}

	/* Update m2p */
	if (HYPERVISOR_mmu_update(&update, 1, NULL, DOMID_SELF) < 0) {
		WARN(1, "Failed to set m2p mapping for mfn=%ld pfn=%ld\n",
		     mfn, pfn);
		BUG();
	}

	/* Update kernel mapping, but not for highmem. */
	if (pfn >= PFN_UP(__pa(high_memory - 1)))
		return;

	if (HYPERVISOR_update_va_mapping((unsigned long)__va(pfn << PAGE_SHIFT),
					 mfn_pte(mfn, PAGE_KERNEL), 0)) {
		WARN(1, "Failed to update kernel mapping for mfn=%ld pfn=%ld\n",
		     mfn, pfn);
		BUG();
	}
}
/*
 * This function updates the p2m and m2p tables with an identity map from
 * start_pfn to start_pfn+size and prepares remapping the underlying RAM of the
 * original allocation at remap_pfn. The information needed for remapping is
 * saved in the memory itself to avoid the need for allocating buffers. The
 * complete remap information is contained in a list of MFNs each containing
 * up to REMAP_SIZE MFNs and the start target PFN for doing the remap.
 * This enables us to preserve the original mfn sequence while doing the
 * remapping at a time when the memory management is capable of allocating
 * virtual and physical memory in arbitrary amounts, see 'xen_remap_memory' and
 * its callers.
 */
static void __init xen_do_set_identity_and_remap_chunk(
	unsigned long start_pfn, unsigned long size, unsigned long remap_pfn)
{
	unsigned long buf = (unsigned long)&xen_remap_buf;
	unsigned long mfn_save, mfn;
	unsigned long ident_pfn_iter, remap_pfn_iter;
	unsigned long ident_end_pfn = start_pfn + size;
	unsigned long left = size;
	unsigned int i, chunk;

	WARN_ON(size == 0);

	BUG_ON(xen_feature(XENFEAT_auto_translated_physmap));

	mfn_save = virt_to_mfn(buf);

	for (ident_pfn_iter = start_pfn, remap_pfn_iter = remap_pfn;
	     ident_pfn_iter < ident_end_pfn;
	     ident_pfn_iter += REMAP_SIZE, remap_pfn_iter += REMAP_SIZE) {
		chunk = (left < REMAP_SIZE) ? left : REMAP_SIZE;

		/* Map first pfn to xen_remap_buf */
		mfn = pfn_to_mfn(ident_pfn_iter);
		set_pte_mfn(buf, mfn, PAGE_KERNEL);

		/* Save mapping information in page */
		xen_remap_buf.next_area_mfn = xen_remap_mfn;
		xen_remap_buf.target_pfn = remap_pfn_iter;
		xen_remap_buf.size = chunk;
		for (i = 0; i < chunk; i++)
			xen_remap_buf.mfns[i] = pfn_to_mfn(ident_pfn_iter + i);

		/* Put remap buf into list. */
		xen_remap_mfn = mfn;

		/* Set identity map */
		set_phys_range_identity(ident_pfn_iter, ident_pfn_iter + chunk);

		left -= chunk;
	}

	/* Restore old xen_remap_buf mapping */
	set_pte_mfn(buf, mfn_save, PAGE_KERNEL);
}
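
/*
 * Illustrative layout of one remap info page as built above:
 *
 *	next_area_mfn	-> previously built info page (or INVALID_P2M_ENTRY)
 *	target_pfn	-> pfn where this chunk will reappear
 *	size		-> number of valid entries in mfns[]
 *	mfns[0..size)	-> the original machine frames to be remapped
 *
 * The info page itself is the first frame of the chunk and is linked into
 * the list via xen_remap_mfn.
 */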
/*
 * This function takes a contiguous pfn range that needs to be identity mapped
 * and:
 *
 *  1) Finds a new range of pfns to use to remap based on E820 and remap_pfn.
 *  2) Calls the do_ function to actually do the mapping/remapping work.
 *
 * The goal is to not allocate additional memory but to remap the existing
 * pages. In the case of an error the underlying memory is simply released back
 * to Xen and not remapped.
 */
static unsigned long __init xen_set_identity_and_remap_chunk(
	unsigned long start_pfn, unsigned long end_pfn, unsigned long nr_pages,
	unsigned long remap_pfn)
{
	unsigned long pfn;
	unsigned long i = 0;
	unsigned long n = end_pfn - start_pfn;

	while (i < n) {
		unsigned long cur_pfn = start_pfn + i;
		unsigned long left = n - i;
		unsigned long size = left;
		unsigned long remap_range_size;

		/* Do not remap pages beyond the current allocation */
		if (cur_pfn >= nr_pages) {
			/* Identity map remaining pages */
			set_phys_range_identity(cur_pfn, cur_pfn + size);
			break;
		}
		if (cur_pfn + size > nr_pages)
			size = nr_pages - cur_pfn;

		remap_range_size = xen_find_pfn_range(&remap_pfn);
		if (!remap_range_size) {
			pr_warning("Unable to find available pfn range, not remapping identity pages\n");
			xen_set_identity_and_release_chunk(cur_pfn,
						cur_pfn + left, nr_pages);
			break;
		}
		/* Adjust size to fit in current e820 RAM region */
		if (size > remap_range_size)
			size = remap_range_size;

		xen_do_set_identity_and_remap_chunk(cur_pfn, size, remap_pfn);

		/* Update variables to reflect new mappings. */
		i += size;
		remap_pfn += size;
	}

	/*
	 * If the PFNs are currently mapped, the VA mapping also needs
	 * to be updated to be 1:1.
	 */
	for (pfn = start_pfn; pfn <= max_pfn_mapped && pfn < end_pfn; pfn++)
		(void)HYPERVISOR_update_va_mapping(
			(unsigned long)__va(pfn << PAGE_SHIFT),
			mfn_pte(pfn, PAGE_KERNEL_IO), 0);

	return remap_pfn;
}
static void __init xen_set_identity_and_remap(unsigned long nr_pages)
{
	phys_addr_t start = 0;
	unsigned long last_pfn = nr_pages;
	const struct e820entry *entry = xen_e820_map;
	int i;

	/*
	 * Combine non-RAM regions and gaps until a RAM region (or the
	 * end of the map) is reached, then set the 1:1 map and
	 * remap the memory in those non-RAM regions.
	 *
	 * The combined non-RAM regions are rounded to a whole number
	 * of pages so any partial pages are accessible via the 1:1
	 * mapping.  This is needed for some BIOSes that put (for
	 * example) the DMI tables in a reserved region that begins on
	 * a non-page boundary.
	 */
	for (i = 0; i < xen_e820_map_entries; i++, entry++) {
		phys_addr_t end = entry->addr + entry->size;
		if (entry->type == E820_RAM || i == xen_e820_map_entries - 1) {
			unsigned long start_pfn = PFN_DOWN(start);
			unsigned long end_pfn = PFN_UP(end);

			if (entry->type == E820_RAM)
				end_pfn = PFN_UP(entry->addr);

			if (start_pfn < end_pfn)
				last_pfn = xen_set_identity_and_remap_chunk(
						start_pfn, end_pfn, nr_pages,
						last_pfn);
			start = end;
		}
	}

	pr_info("Released %ld page(s)\n", xen_released_pages);
}
/*
 * Remap the memory prepared in xen_do_set_identity_and_remap_chunk().
 * The remap information (which mfn remap to which pfn) is contained in the
 * to be remapped memory itself in a linked list anchored at xen_remap_mfn.
 * This scheme allows the different chunks to be remapped in arbitrary order
 * while the resulting mapping is independent of that order.
 */
void __init xen_remap_memory(void)
{
	unsigned long buf = (unsigned long)&xen_remap_buf;
	unsigned long mfn_save, mfn, pfn;
	unsigned long remapped = 0;
	unsigned int i;
	unsigned long pfn_s = ~0UL;
	unsigned long len = 0;

	mfn_save = virt_to_mfn(buf);

	while (xen_remap_mfn != INVALID_P2M_ENTRY) {
		/* Map the remap information */
		set_pte_mfn(buf, xen_remap_mfn, PAGE_KERNEL);

		BUG_ON(xen_remap_mfn != xen_remap_buf.mfns[0]);

		pfn = xen_remap_buf.target_pfn;
		for (i = 0; i < xen_remap_buf.size; i++) {
			mfn = xen_remap_buf.mfns[i];
			xen_update_mem_tables(pfn, mfn);
			remapped++;
			pfn++;
		}
		if (pfn_s == ~0UL || pfn == pfn_s) {
			pfn_s = xen_remap_buf.target_pfn;
			len += xen_remap_buf.size;
		} else if (pfn_s + len == xen_remap_buf.target_pfn) {
			len += xen_remap_buf.size;
		} else {
			xen_del_extra_mem(PFN_PHYS(pfn_s), PFN_PHYS(len));
			pfn_s = xen_remap_buf.target_pfn;
			len = xen_remap_buf.size;
		}

		mfn = xen_remap_mfn;
		xen_remap_mfn = xen_remap_buf.next_area_mfn;
	}

	if (pfn_s != ~0UL && len)
		xen_del_extra_mem(PFN_PHYS(pfn_s), PFN_PHYS(len));

	set_pte_mfn(buf, mfn_save, PAGE_KERNEL);

	pr_info("Remapped %ld page(s)\n", remapped);
}
static unsigned long __init xen_get_max_pages(void)
{
	unsigned long max_pages = MAX_DOMAIN_PAGES;
	domid_t domid = DOMID_SELF;
	int ret;

	/*
	 * For the initial domain we use the maximum reservation as
	 * the maximum page.
	 *
	 * For guest domains the current maximum reservation reflects
	 * the current maximum rather than the static maximum. In this
	 * case the e820 map provided to us will cover the static
	 * maximum region.
	 */
	if (xen_initial_domain()) {
		ret = HYPERVISOR_memory_op(XENMEM_maximum_reservation, &domid);
		if (ret > 0)
			max_pages = ret;
	}

	return min(max_pages, MAX_DOMAIN_PAGES);
}
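
/*
 * Add a region to the kernel e820 map. RAM regions are trimmed inward to
 * whole pages (start rounded up, end rounded down) so partially usable
 * pages are never reported as RAM; e.g. RAM at [0x1234, 0x5000) is added
 * as [0x2000, 0x5000) with 4 KiB pages. Non-RAM regions are passed through
 * unchanged.
 */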
static void __init xen_align_and_add_e820_region(phys_addr_t start,
						 phys_addr_t size, int type)
{
	phys_addr_t end = start + size;

	/* Align RAM regions to page boundaries. */
	if (type == E820_RAM) {
		start = PAGE_ALIGN(start);
		end &= ~((phys_addr_t)PAGE_SIZE - 1);
	}

	e820_add_region(start, end - start, type);
}
static void __init xen_ignore_unusable(void)
{
	struct e820entry *entry = xen_e820_map;
	unsigned int i;

	for (i = 0; i < xen_e820_map_entries; i++, entry++) {
		if (entry->type == E820_UNUSABLE)
			entry->type = E820_RAM;
	}
}
static unsigned long __init xen_count_remap_pages(unsigned long max_pfn)
{
	unsigned long extra = 0;
	const struct e820entry *entry = xen_e820_map;
	int i;

	for (i = 0; i < xen_e820_map_entries; i++, entry++) {
		unsigned long start_pfn = PFN_DOWN(entry->addr);
		unsigned long end_pfn = PFN_UP(entry->addr + entry->size);

		if (start_pfn >= max_pfn)
			break;
		if (entry->type == E820_RAM)
			continue;
		if (end_pfn >= max_pfn)
			end_pfn = max_pfn;
		extra += end_pfn - start_pfn;
	}

	return extra;
}
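
/*
 * Check whether the physical range [start, start + size) is fully covered
 * by a single RAM entry of the Xen supplied E820 map. Returns true if it
 * is not, i.e. if at least part of the range lies in a reserved or
 * unlisted area.
 */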
bool __init xen_is_e820_reserved(phys_addr_t start, phys_addr_t size)
{
	struct e820entry *entry;
	unsigned mapcnt;
	phys_addr_t end;

	if (!size)
		return false;

	end = start + size;
	entry = xen_e820_map;

	for (mapcnt = 0; mapcnt < xen_e820_map_entries; mapcnt++) {
		if (entry->type == E820_RAM && entry->addr <= start &&
		    (entry->addr + entry->size) >= end)
			return false;

		entry++;
	}

	return true;
}
/*
 * Find a free area in physical memory not yet reserved and compliant with
 * E820 map.
 * Used to relocate pre-allocated areas like initrd or p2m list which are in
 * conflict with the E820 map to be used.
 * In case no area is found, return 0. Otherwise return the physical address
 * of the area, which is already reserved for convenience.
 */
phys_addr_t __init xen_find_free_area(phys_addr_t size)
{
	unsigned mapcnt;
	phys_addr_t addr, start;
	struct e820entry *entry = xen_e820_map;

	for (mapcnt = 0; mapcnt < xen_e820_map_entries; mapcnt++, entry++) {
		if (entry->type != E820_RAM || entry->size < size)
			continue;
		start = entry->addr;
		for (addr = start; addr < start + size; addr += PAGE_SIZE) {
			if (!memblock_is_reserved(addr))
				continue;
			start = addr + PAGE_SIZE;
			if (start + size > entry->addr + entry->size)
				break;
		}
		if (addr >= start + size) {
			memblock_reserve(start, size);
			return start;
		}
	}

	return 0;
}
/*
 * Like memcpy, but with physical addresses for dest and src.
 */
static void __init xen_phys_memcpy(phys_addr_t dest, phys_addr_t src,
				   phys_addr_t n)
{
	phys_addr_t dest_off, src_off, dest_len, src_len, len;
	void *from, *to;

	while (n) {
		dest_off = dest & ~PAGE_MASK;
		src_off = src & ~PAGE_MASK;
		dest_len = n;
		if (dest_len > (NR_FIX_BTMAPS << PAGE_SHIFT) - dest_off)
			dest_len = (NR_FIX_BTMAPS << PAGE_SHIFT) - dest_off;
		src_len = n;
		if (src_len > (NR_FIX_BTMAPS << PAGE_SHIFT) - src_off)
			src_len = (NR_FIX_BTMAPS << PAGE_SHIFT) - src_off;
		len = min(dest_len, src_len);
		to = early_memremap(dest - dest_off, dest_len + dest_off);
		from = early_memremap(src - src_off, src_len + src_off);
		memcpy(to, from, len);
		early_memunmap(to, dest_len + dest_off);
		early_memunmap(from, src_len + src_off);
		n -= len;
		dest += len;
		src += len;
	}
}
/*
 * Reserve Xen mfn_list.
 * See comment above "struct start_info" in <xen/interface/xen.h>
 * We tried to make the memblock_reserve more selective so
 * that it would be clear what region is reserved. Sadly we ran
 * into the problem wherein on a 64-bit hypervisor with a 32-bit
 * initial domain, the pt_base has the cr3 value which is not
 * necessarily where the pagetable starts! As Jan put it: "
 * Actually, the adjustment turns out to be correct: The page
 * tables for a 32-on-64 dom0 get allocated in the order "first L1",
 * "first L2", "first L3", so the offset to the page table base is
 * indeed 2. When reading xen/include/public/xen.h's comment
 * very strictly, this is not a violation (since there nothing is said
 * that the first thing in the page table space is pointed to by
 * pt_base; I admit that this seems to be implied though, namely
 * do I think that it is implied that the page table space is the
 * range [pt_base, pt_base + nr_pt_frames), whereas that
 * range here indeed is [pt_base - 2, pt_base - 2 + nr_pt_frames),
 * which - without a priori knowledge - the kernel would have
 * difficulty to figure out)." - so let's just fall back to the
 * easy way and reserve the whole region.
 */
static void __init xen_reserve_xen_mfnlist(void)
{
	if (xen_start_info->mfn_list >= __START_KERNEL_map) {
		memblock_reserve(__pa(xen_start_info->mfn_list),
				 xen_start_info->pt_base -
				 xen_start_info->mfn_list);
		return;
	}

	memblock_reserve(PFN_PHYS(xen_start_info->first_p2m_pfn),
			 PFN_PHYS(xen_start_info->nr_p2m_frames));
}
/**
 * machine_specific_memory_setup - Hook for machine specific memory setup.
 **/
char * __init xen_memory_setup(void)
{
	unsigned long max_pfn = xen_start_info->nr_pages;
	phys_addr_t mem_end, addr, size, chunk_size;
	u32 type;
	int rc;
	struct xen_memory_map memmap;
	unsigned long max_pages;
	unsigned long extra_pages = 0;
	int i;
	int op;

	max_pfn = min(MAX_DOMAIN_PAGES, max_pfn);
	mem_end = PFN_PHYS(max_pfn);

	memmap.nr_entries = E820MAX;
	set_xen_guest_handle(memmap.buffer, xen_e820_map);

	op = xen_initial_domain() ?
		XENMEM_machine_memory_map :
		XENMEM_memory_map;
	rc = HYPERVISOR_memory_op(op, &memmap);
	if (rc == -ENOSYS) {
		BUG_ON(xen_initial_domain());
		memmap.nr_entries = 1;
		xen_e820_map[0].addr = 0ULL;
		xen_e820_map[0].size = mem_end;
		/* 8MB slack (to balance backend allocations). */
		xen_e820_map[0].size += 8ULL << 20;
		xen_e820_map[0].type = E820_RAM;
		rc = 0;
	}
	BUG_ON(rc);
	BUG_ON(memmap.nr_entries == 0);
	xen_e820_map_entries = memmap.nr_entries;

	/*
	 * Xen won't allow a 1:1 mapping to be created to UNUSABLE
	 * regions, so if we're using the machine memory map leave the
	 * region as RAM as it is in the pseudo-physical map.
	 *
	 * UNUSABLE regions in domUs are not handled and will need
	 * a patch in the future.
	 */
	if (xen_initial_domain())
		xen_ignore_unusable();

	/* Make sure the Xen-supplied memory map is well-ordered. */
	sanitize_e820_map(xen_e820_map, xen_e820_map_entries,
			  &xen_e820_map_entries);

	max_pages = xen_get_max_pages();
	if (max_pages > max_pfn)
		extra_pages += max_pages - max_pfn;

	/* How many extra pages do we need due to remapping? */
	extra_pages += xen_count_remap_pages(max_pfn);

	/*
	 * Clamp the amount of extra memory to an EXTRA_MEM_RATIO
	 * factor of the base size.  On non-highmem systems, the base
	 * size is the full initial memory allocation; on highmem it
	 * is limited to the max size of lowmem, so that it doesn't
	 * get completely filled.
	 *
	 * In principle there could be a problem in lowmem systems if
	 * the initial memory is also very large with respect to
	 * lowmem, but we won't try to deal with that here.
	 */
	extra_pages = min(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
			  extra_pages);
	i = 0;
	addr = xen_e820_map[0].addr;
	size = xen_e820_map[0].size;
	while (i < xen_e820_map_entries) {
		chunk_size = size;
		type = xen_e820_map[i].type;

		if (type == E820_RAM) {
			if (addr < mem_end) {
				chunk_size = min(size, mem_end - addr);
			} else if (extra_pages) {
				chunk_size = min(size, PFN_PHYS(extra_pages));
				extra_pages -= PFN_DOWN(chunk_size);
				xen_add_extra_mem(addr, chunk_size);
				xen_max_p2m_pfn = PFN_DOWN(addr + chunk_size);
			} else
				type = E820_UNUSABLE;
		}

		xen_align_and_add_e820_region(addr, chunk_size, type);

		addr += chunk_size;
		size -= chunk_size;
		if (size == 0) {
			i++;
			if (i < xen_e820_map_entries) {
				addr = xen_e820_map[i].addr;
				size = xen_e820_map[i].size;
			}
		}
	}

	/*
	 * Set the rest as identity mapped, in case PCI BARs are
	 * located here.
	 *
	 * PFNs above MAX_P2M_PFN are considered identity mapped as
	 * well.
	 */
	set_phys_range_identity(addr / PAGE_SIZE, ~0ul);

	/*
	 * In domU, the ISA region is normal, usable memory, but we
	 * reserve ISA memory anyway because too many things poke
	 * about in there.
	 */
	e820_add_region(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS,
			E820_RESERVED);

	sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);

	/*
	 * Check whether the kernel itself conflicts with the target E820 map.
	 * Failing now is better than running into weird problems later due
	 * to relocating (and even reusing) pages with kernel text or data.
	 */
	if (xen_is_e820_reserved(__pa_symbol(_text),
			__pa_symbol(__bss_stop) - __pa_symbol(_text))) {
		xen_raw_console_write("Xen hypervisor allocated kernel memory conflicts with E820 map\n");
		BUG();
	}

	/*
	 * Check for a conflict of the hypervisor supplied page tables with
	 * the target E820 map.
	 */
	xen_pt_check_e820();

	xen_reserve_xen_mfnlist();

	/* Check for a conflict of the initrd with the target E820 map. */
	if (xen_is_e820_reserved(boot_params.hdr.ramdisk_image,
				 boot_params.hdr.ramdisk_size)) {
		phys_addr_t new_area, start, size;

		new_area = xen_find_free_area(boot_params.hdr.ramdisk_size);
		if (!new_area) {
			xen_raw_console_write("Can't find new memory area for initrd needed due to E820 map conflict\n");
			BUG();
		}

		start = boot_params.hdr.ramdisk_image;
		size = boot_params.hdr.ramdisk_size;
		xen_phys_memcpy(new_area, start, size);
		pr_info("initrd moved from [mem %#010llx-%#010llx] to [mem %#010llx-%#010llx]\n",
			start, start + size, new_area, new_area + size);
		memblock_free(start, size);
		boot_params.hdr.ramdisk_image = new_area;
		boot_params.ext_ramdisk_image = new_area >> 32;
	}

	/*
	 * Set identity map on non-RAM pages and prepare remapping the
	 * underlying RAM.
	 */
	xen_set_identity_and_remap(max_pfn);

	return "Xen";
}
/*
 * Machine specific memory setup for auto-translated guests.
 */
char * __init xen_auto_xlated_memory_setup(void)
{
	struct xen_memory_map memmap;
	int i;
	int rc;

	memmap.nr_entries = E820MAX;
	set_xen_guest_handle(memmap.buffer, xen_e820_map);

	rc = HYPERVISOR_memory_op(XENMEM_memory_map, &memmap);
	if (rc < 0)
		panic("No memory map (%d)\n", rc);

	xen_e820_map_entries = memmap.nr_entries;

	sanitize_e820_map(xen_e820_map, ARRAY_SIZE(xen_e820_map),
			  &xen_e820_map_entries);

	for (i = 0; i < xen_e820_map_entries; i++)
		e820_add_region(xen_e820_map[i].addr, xen_e820_map[i].size,
				xen_e820_map[i].type);

	xen_reserve_xen_mfnlist();

	return "Xen";
}
/*
 * Set the bit indicating "nosegneg" library variants should be used.
 * We only need to bother in pure 32-bit mode; compat 32-bit processes
 * can have un-truncated segments, so wrapping around is allowed.
 */
static void __init fiddle_vdso(void)
{
#ifdef CONFIG_X86_32
	/*
	 * This could be called before selected_vdso32 is initialized, so
	 * just fiddle with both possible images.  vdso_image_32_syscall
	 * can't be selected, since it only exists on 64-bit systems.
	 */
	u32 *mask;
	mask = vdso_image_32_int80.data +
		vdso_image_32_int80.sym_VDSO32_NOTE_MASK;
	*mask |= 1 << VDSO_NOTE_NONEGSEG_BIT;
	mask = vdso_image_32_sysenter.data +
		vdso_image_32_sysenter.sym_VDSO32_NOTE_MASK;
	*mask |= 1 << VDSO_NOTE_NONEGSEG_BIT;
#endif
}
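
/*
 * Register one of the guest's entry points (event, failsafe, syscall,
 * sysenter, ...) with the hypervisor; event delivery is masked while the
 * callback runs.
 */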
static int register_callback(unsigned type, const void *func)
{
	struct callback_register callback = {
		.type = type,
		.address = XEN_CALLBACK(__KERNEL_CS, func),
		.flags = CALLBACKF_mask_events,
	};

	return HYPERVISOR_callback_op(CALLBACKOP_register, &callback);
}
void xen_enable_sysenter(void)
{
	int ret;
	unsigned sysenter_feature;

#ifdef CONFIG_X86_32
	sysenter_feature = X86_FEATURE_SEP;
#else
	sysenter_feature = X86_FEATURE_SYSENTER32;
#endif

	if (!boot_cpu_has(sysenter_feature))
		return;

	ret = register_callback(CALLBACKTYPE_sysenter, xen_sysenter_target);
	if (ret != 0)
		setup_clear_cpu_cap(sysenter_feature);
}
void xen_enable_syscall(void)
{
#ifdef CONFIG_X86_64
	int ret;

	ret = register_callback(CALLBACKTYPE_syscall, xen_syscall_target);
	if (ret != 0) {
		printk(KERN_ERR "Failed to set syscall callback: %d\n", ret);
		/* Pretty fatal; 64-bit userspace has no other
		   mechanism for syscalls. */
	}

	if (boot_cpu_has(X86_FEATURE_SYSCALL32)) {
		ret = register_callback(CALLBACKTYPE_syscall32,
					xen_syscall32_target);
		if (ret != 0)
			setup_clear_cpu_cap(X86_FEATURE_SYSCALL32);
	}
#endif /* CONFIG_X86_64 */
}
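
/*
 * PV MMU specific setup: enable the vm_assists the PV MMU code relies on
 * and register the event and failsafe entry points with the hypervisor.
 */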
void __init xen_pvmmu_arch_setup(void)
{
	HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_4gb_segments);
	HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_writable_pagetables);

	HYPERVISOR_vm_assist(VMASST_CMD_enable,
			     VMASST_TYPE_pae_extended_cr3);

	if (register_callback(CALLBACKTYPE_event, xen_hypervisor_callback) ||
	    register_callback(CALLBACKTYPE_failsafe, xen_failsafe_callback))
		BUG();

	xen_enable_sysenter();
	xen_enable_syscall();
}
/* This function is not called for HVM domains */
void __init xen_arch_setup(void)
{
	xen_panic_handler_init();
	if (!xen_feature(XENFEAT_auto_translated_physmap))
		xen_pvmmu_arch_setup();

#ifdef CONFIG_ACPI
	if (!(xen_start_info->flags & SIF_INITDOMAIN)) {
		printk(KERN_INFO "ACPI in unprivileged domain disabled\n");
		disable_acpi();
	}
#endif

	memcpy(boot_command_line, xen_start_info->cmd_line,
	       MAX_GUEST_CMDLINE > COMMAND_LINE_SIZE ?
	       COMMAND_LINE_SIZE : MAX_GUEST_CMDLINE);

	/* Set up idle, making sure it calls safe_halt() pvop */
	disable_cpuidle();
	disable_cpufreq();
	WARN_ON(xen_set_default_idle());