KVM: MMU: Let is_rsvd_bits_set take mmu context instead of vcpu
author Joerg Roedel <joerg.roedel@amd.com>
Fri, 10 Sep 2010 15:30:45 +0000 (17:30 +0200)
committer Avi Kivity <avi@redhat.com>
Sun, 24 Oct 2010 08:52:32 +0000 (10:52 +0200)
This patch changes the is_rsvd_bits_set() function prototype to
take only a kvm_mmu context instead of a full vcpu.

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
arch/x86/kvm/mmu.c
arch/x86/kvm/paging_tmpl.h
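
For illustration, a minimal sketch of the calling convention before and
after the change (simplified stand-in types, not the kernel headers; the
helper name gpte_invalid() is hypothetical, and the rsvd_bits_mask
indexing follows the hunks below):

/*
 * Illustration only: simplified stand-in types, not the kernel headers.
 * Shows why the reserved-bits check needs nothing beyond the mmu context.
 */
#include <stdbool.h>
#include <stdint.h>

typedef uint64_t u64;

struct kvm_mmu {
        u64 rsvd_bits_mask[2][4];       /* indexed [bit7][level-1] */
};

struct kvm_vcpu {
        struct { struct kvm_mmu mmu; } arch;
};

/* New prototype: only the mmu context is consulted. */
static bool is_rsvd_bits_set(struct kvm_mmu *mmu, u64 gpte, int level)
{
        int bit7 = (gpte >> 7) & 1;

        return (gpte & mmu->rsvd_bits_mask[bit7][level - 1]) != 0;
}

/* Callers that hold only a vcpu now name the context explicitly. */
static bool gpte_invalid(struct kvm_vcpu *vcpu, u64 gpte, int level)
{
        return is_rsvd_bits_set(&vcpu->arch.mmu, gpte, level);
}

Taking the mmu context directly means the helper no longer assumes which
of a vcpu's paging contexts it is validating against; this is why the
call sites below spell out &vcpu->arch.mmu, or cache it in a local mmu
variable as in FNAME(pte_prefetch).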

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 9e48a774fceb0afc7d3dfe313ea7528438b9d317..86f7557cf3fbf622ec53ae74996b06f3856c7037 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2578,12 +2578,12 @@ static void paging_free(struct kvm_vcpu *vcpu)
        nonpaging_free(vcpu);
 }
 
-static bool is_rsvd_bits_set(struct kvm_vcpu *vcpu, u64 gpte, int level)
+static bool is_rsvd_bits_set(struct kvm_mmu *mmu, u64 gpte, int level)
 {
        int bit7;
 
        bit7 = (gpte >> 7) & 1;
-       return (gpte & vcpu->arch.mmu.rsvd_bits_mask[bit7][level-1]) != 0;
+       return (gpte & mmu->rsvd_bits_mask[bit7][level-1]) != 0;
 }
 
 #define PTTYPE 64
@@ -2859,7 +2859,7 @@ static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
                return;
         }
 
-       if (is_rsvd_bits_set(vcpu, *(u64 *)new, PT_PAGE_TABLE_LEVEL))
+       if (is_rsvd_bits_set(&vcpu->arch.mmu, *(u64 *)new, PT_PAGE_TABLE_LEVEL))
                return;
 
        ++vcpu->kvm->stat.mmu_pte_updated;
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 13d0c06b1bc8bad3fae4e8cff22a35065fa46a96..68ee1b7fa89fedad52cb65ce69bcb46ff8dea917 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -168,7 +168,7 @@ walk:
                        break;
                }
 
-               if (is_rsvd_bits_set(vcpu, pte, walker->level)) {
+               if (is_rsvd_bits_set(&vcpu->arch.mmu, pte, walker->level)) {
                        rsvd_fault = true;
                        break;
                }
@@ -327,6 +327,7 @@ static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw,
                                u64 *sptep)
 {
        struct kvm_mmu_page *sp;
+       struct kvm_mmu *mmu = &vcpu->arch.mmu;
        pt_element_t *gptep = gw->prefetch_ptes;
        u64 *spte;
        int i;
@@ -358,7 +359,7 @@ static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw,
                gpte = gptep[i];
 
                if (!is_present_gpte(gpte) ||
-                     is_rsvd_bits_set(vcpu, gpte, PT_PAGE_TABLE_LEVEL)) {
+                     is_rsvd_bits_set(mmu, gpte, PT_PAGE_TABLE_LEVEL)) {
                        if (!sp->unsync)
                                __set_spte(spte, shadow_notrap_nonpresent_pte);
                        continue;
@@ -713,7 +714,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
                        return -EINVAL;
 
                gfn = gpte_to_gfn(gpte);
-               if (is_rsvd_bits_set(vcpu, gpte, PT_PAGE_TABLE_LEVEL)
+               if (is_rsvd_bits_set(&vcpu->arch.mmu, gpte, PT_PAGE_TABLE_LEVEL)
                      || gfn != sp->gfns[i] || !is_present_gpte(gpte)
                      || !(gpte & PT_ACCESSED_MASK)) {
                        u64 nonpresent;