git.kernelconcepts.de Git - karo-tx-linux.git/commitdiff
xen: save linear p2m list address in shared info structure
authorJuergen Gross <jgross@suse.com>
Fri, 17 Jul 2015 04:51:23 +0000 (06:51 +0200)
committerDavid Vrabel <david.vrabel@citrix.com>
Thu, 20 Aug 2015 11:24:17 +0000 (12:24 +0100)
The virtual address of the linear p2m list should be stored in the
shared info structure read by the Xen tools to be able to support
64 bit pv-domains larger than 512 GB. Additionally the linear p2m
list interface includes a generation count which is incremented prior
to and after each mapping change of the p2m list. By reading the
generation count the Xen tools can detect changes of the mappings
and re-read the p2m list if necessary.

Signed-off-by: Juergen Gross <jgross@suse.com>
Reviewed-by: David Vrabel <david.vrabel@citrix.com>
Acked-by: Konrad Rzeszutek Wilk <Konrad.wilk@oracle.com>
Signed-off-by: David Vrabel <david.vrabel@citrix.com>
arch/x86/xen/p2m.c

index 8b7f18e200aa4a453d8ae60d02b85b4141abc677..b89983e9656f527acb4702b4520f6fa270794527 100644 (file)
@@ -263,6 +263,10 @@ void xen_setup_mfn_list_list(void)
        HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
                virt_to_mfn(p2m_top_mfn);
        HYPERVISOR_shared_info->arch.max_pfn = xen_max_p2m_pfn;
+       HYPERVISOR_shared_info->arch.p2m_generation = 0;
+       HYPERVISOR_shared_info->arch.p2m_vaddr = (unsigned long)xen_p2m_addr;
+       HYPERVISOR_shared_info->arch.p2m_cr3 =
+               xen_pfn_to_cr3(virt_to_mfn(swapper_pg_dir));
 }
 
 /* Set up p2m_top to point to the domain-builder provided p2m pages */
@@ -478,8 +482,12 @@ static pte_t *alloc_p2m_pmd(unsigned long addr, pte_t *pte_pg)
 
                ptechk = lookup_address(vaddr, &level);
                if (ptechk == pte_pg) {
+                       HYPERVISOR_shared_info->arch.p2m_generation++;
+                       wmb(); /* Tools are synchronizing via p2m_generation. */
                        set_pmd(pmdp,
                                __pmd(__pa(pte_newpg[i]) | _KERNPG_TABLE));
+                       wmb(); /* Tools are synchronizing via p2m_generation. */
+                       HYPERVISOR_shared_info->arch.p2m_generation++;
                        pte_newpg[i] = NULL;
                }
 
@@ -577,8 +585,12 @@ static bool alloc_p2m(unsigned long pfn)
                spin_lock_irqsave(&p2m_update_lock, flags);
 
                if (pte_pfn(*ptep) == p2m_pfn) {
+                       HYPERVISOR_shared_info->arch.p2m_generation++;
+                       wmb(); /* Tools are synchronizing via p2m_generation. */
                        set_pte(ptep,
                                pfn_pte(PFN_DOWN(__pa(p2m)), PAGE_KERNEL));
+                       wmb(); /* Tools are synchronizing via p2m_generation. */
+                       HYPERVISOR_shared_info->arch.p2m_generation++;
                        if (mid_mfn)
                                mid_mfn[mididx] = virt_to_mfn(p2m);
                        p2m = NULL;
@@ -630,6 +642,11 @@ bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
                return true;
        }
 
+       /*
+        * The interface requires atomic updates on p2m elements.
+        * xen_safe_write_ulong() is using __put_user which does an atomic
+        * store via asm().
+        */
        if (likely(!xen_safe_write_ulong(xen_p2m_addr + pfn, mfn)))
                return true;