git.kernelconcepts.de Git - karo-tx-linux.git/blobdiff - arch/s390/kvm/kvm-s390.c
Merge remote-tracking branch 'kvm/linux-next'
[karo-tx-linux.git] / arch / s390 / kvm / kvm-s390.c
index c6b4063fce295b648966e791d6114a610d9d407a..8fe2f1c722dcabe22b3b3731bdce5e22092703db 100644 (file)
@@ -514,35 +514,20 @@ static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
 
        if (gtod_high != 0)
                return -EINVAL;
-       VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x\n", gtod_high);
+       VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x", gtod_high);
 
        return 0;
 }
 
 static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
 {
-       struct kvm_vcpu *cur_vcpu;
-       unsigned int vcpu_idx;
-       u64 host_tod, gtod;
-       int r;
+       u64 gtod;
 
        if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
                return -EFAULT;
 
-       r = store_tod_clock(&host_tod);
-       if (r)
-               return r;
-
-       mutex_lock(&kvm->lock);
-       preempt_disable();
-       kvm->arch.epoch = gtod - host_tod;
-       kvm_s390_vcpu_block_all(kvm);
-       kvm_for_each_vcpu(vcpu_idx, cur_vcpu, kvm)
-               cur_vcpu->arch.sie_block->epoch = kvm->arch.epoch;
-       kvm_s390_vcpu_unblock_all(kvm);
-       preempt_enable();
-       mutex_unlock(&kvm->lock);
-       VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx\n", gtod);
+       kvm_s390_set_tod_clock(kvm, gtod);
+       VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod);
        return 0;
 }
 
@@ -574,26 +559,19 @@ static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
        if (copy_to_user((void __user *)attr->addr, &gtod_high,
                                         sizeof(gtod_high)))
                return -EFAULT;
-       VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x\n", gtod_high);
+       VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x", gtod_high);
 
        return 0;
 }
 
 static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
 {
-       u64 host_tod, gtod;
-       int r;
+       u64 gtod;
 
-       r = store_tod_clock(&host_tod);
-       if (r)
-               return r;
-
-       preempt_disable();
-       gtod = host_tod + kvm->arch.epoch;
-       preempt_enable();
+       gtod = kvm_s390_get_tod_clock_fast(kvm);
        if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
                return -EFAULT;
-       VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx\n", gtod);
+       VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx", gtod);
 
        return 0;
 }
@@ -1120,7 +1098,9 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
        if (!kvm->arch.sca)
                goto out_err;
        spin_lock(&kvm_lock);
-       sca_offset = (sca_offset + 16) & 0x7f0;
+       sca_offset += 16;
+       if (sca_offset + sizeof(struct sca_block) > PAGE_SIZE)
+               sca_offset = 0;
        kvm->arch.sca = (struct sca_block *) ((char *) kvm->arch.sca + sca_offset);
        spin_unlock(&kvm_lock);
 
@@ -1911,6 +1891,22 @@ retry:
        return 0;
 }
 
+void kvm_s390_set_tod_clock(struct kvm *kvm, u64 tod)
+{
+       struct kvm_vcpu *vcpu;
+       int i;
+
+       mutex_lock(&kvm->lock);
+       preempt_disable();
+       kvm->arch.epoch = tod - get_tod_clock();
+       kvm_s390_vcpu_block_all(kvm);
+       kvm_for_each_vcpu(i, vcpu, kvm)
+               vcpu->arch.sie_block->epoch = kvm->arch.epoch;
+       kvm_s390_vcpu_unblock_all(kvm);
+       preempt_enable();
+       mutex_unlock(&kvm->lock);
+}
+
 /**
  * kvm_arch_fault_in_page - fault-in guest page if necessary
  * @vcpu: The corresponding virtual cpu