]> git.kernelconcepts.de Git - karo-tx-linux.git/blob - arch/x86/kvm/x86.c
KVM: x86: allow TSC deadline timer on all hosts
[karo-tx-linux.git] / arch / x86 / kvm / x86.c
1 /*
2  * Kernel-based Virtual Machine driver for Linux
3  *
4  * derived from drivers/kvm/kvm_main.c
5  *
6  * Copyright (C) 2006 Qumranet, Inc.
7  * Copyright (C) 2008 Qumranet, Inc.
8  * Copyright IBM Corporation, 2008
9  * Copyright 2010 Red Hat, Inc. and/or its affiliates.
10  *
11  * Authors:
12  *   Avi Kivity   <avi@qumranet.com>
13  *   Yaniv Kamay  <yaniv@qumranet.com>
14  *   Amit Shah    <amit.shah@qumranet.com>
15  *   Ben-Ami Yassour <benami@il.ibm.com>
16  *
17  * This work is licensed under the terms of the GNU GPL, version 2.  See
18  * the COPYING file in the top-level directory.
19  *
20  */
21
22 #include <linux/kvm_host.h>
23 #include "irq.h"
24 #include "mmu.h"
25 #include "i8254.h"
26 #include "tss.h"
27 #include "kvm_cache_regs.h"
28 #include "x86.h"
29 #include "cpuid.h"
30 #include "assigned-dev.h"
31
32 #include <linux/clocksource.h>
33 #include <linux/interrupt.h>
34 #include <linux/kvm.h>
35 #include <linux/fs.h>
36 #include <linux/vmalloc.h>
37 #include <linux/module.h>
38 #include <linux/mman.h>
39 #include <linux/highmem.h>
40 #include <linux/iommu.h>
41 #include <linux/intel-iommu.h>
42 #include <linux/cpufreq.h>
43 #include <linux/user-return-notifier.h>
44 #include <linux/srcu.h>
45 #include <linux/slab.h>
46 #include <linux/perf_event.h>
47 #include <linux/uaccess.h>
48 #include <linux/hash.h>
49 #include <linux/pci.h>
50 #include <linux/timekeeper_internal.h>
51 #include <linux/pvclock_gtod.h>
52 #include <trace/events/kvm.h>
53
54 #define CREATE_TRACE_POINTS
55 #include "trace.h"
56
57 #include <asm/debugreg.h>
58 #include <asm/msr.h>
59 #include <asm/desc.h>
60 #include <asm/mtrr.h>
61 #include <asm/mce.h>
62 #include <asm/i387.h>
63 #include <asm/fpu-internal.h> /* Ugh! */
64 #include <asm/xcr.h>
65 #include <asm/pvclock.h>
66 #include <asm/div64.h>
67
68 #define MAX_IO_MSRS 256
69 #define KVM_MAX_MCE_BANKS 32
70 #define KVM_MCE_CAP_SUPPORTED (MCG_CTL_P | MCG_SER_P)
71
72 #define emul_to_vcpu(ctxt) \
73         container_of(ctxt, struct kvm_vcpu, arch.emulate_ctxt)
74
75 /* EFER defaults:
76  * - enable syscall per default because its emulated by KVM
77  * - enable LME and LMA per default on 64 bit KVM
78  */
79 #ifdef CONFIG_X86_64
80 static
81 u64 __read_mostly efer_reserved_bits = ~((u64)(EFER_SCE | EFER_LME | EFER_LMA));
82 #else
83 static u64 __read_mostly efer_reserved_bits = ~((u64)EFER_SCE);
84 #endif
85
86 #define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
87 #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
88
89 static void update_cr8_intercept(struct kvm_vcpu *vcpu);
90 static void process_nmi(struct kvm_vcpu *vcpu);
91 static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
92
93 struct kvm_x86_ops *kvm_x86_ops;
94 EXPORT_SYMBOL_GPL(kvm_x86_ops);
95
96 static bool ignore_msrs = 0;
97 module_param(ignore_msrs, bool, S_IRUGO | S_IWUSR);
98
99 unsigned int min_timer_period_us = 500;
100 module_param(min_timer_period_us, uint, S_IRUGO | S_IWUSR);
101
102 bool kvm_has_tsc_control;
103 EXPORT_SYMBOL_GPL(kvm_has_tsc_control);
104 u32  kvm_max_guest_tsc_khz;
105 EXPORT_SYMBOL_GPL(kvm_max_guest_tsc_khz);
106
107 /* tsc tolerance in parts per million - default to 1/2 of the NTP threshold */
108 static u32 tsc_tolerance_ppm = 250;
109 module_param(tsc_tolerance_ppm, uint, S_IRUGO | S_IWUSR);
110
111 /* lapic timer advance (tscdeadline mode only) in nanoseconds */
112 unsigned int lapic_timer_advance_ns = 0;
113 module_param(lapic_timer_advance_ns, uint, S_IRUGO | S_IWUSR);
114
115 static bool backwards_tsc_observed = false;
116
117 #define KVM_NR_SHARED_MSRS 16
118
119 struct kvm_shared_msrs_global {
120         int nr;
121         u32 msrs[KVM_NR_SHARED_MSRS];
122 };
123
124 struct kvm_shared_msrs {
125         struct user_return_notifier urn;
126         bool registered;
127         struct kvm_shared_msr_values {
128                 u64 host;
129                 u64 curr;
130         } values[KVM_NR_SHARED_MSRS];
131 };
132
133 static struct kvm_shared_msrs_global __read_mostly shared_msrs_global;
134 static struct kvm_shared_msrs __percpu *shared_msrs;
135
136 struct kvm_stats_debugfs_item debugfs_entries[] = {
137         { "pf_fixed", VCPU_STAT(pf_fixed) },
138         { "pf_guest", VCPU_STAT(pf_guest) },
139         { "tlb_flush", VCPU_STAT(tlb_flush) },
140         { "invlpg", VCPU_STAT(invlpg) },
141         { "exits", VCPU_STAT(exits) },
142         { "io_exits", VCPU_STAT(io_exits) },
143         { "mmio_exits", VCPU_STAT(mmio_exits) },
144         { "signal_exits", VCPU_STAT(signal_exits) },
145         { "irq_window", VCPU_STAT(irq_window_exits) },
146         { "nmi_window", VCPU_STAT(nmi_window_exits) },
147         { "halt_exits", VCPU_STAT(halt_exits) },
148         { "halt_wakeup", VCPU_STAT(halt_wakeup) },
149         { "hypercalls", VCPU_STAT(hypercalls) },
150         { "request_irq", VCPU_STAT(request_irq_exits) },
151         { "irq_exits", VCPU_STAT(irq_exits) },
152         { "host_state_reload", VCPU_STAT(host_state_reload) },
153         { "efer_reload", VCPU_STAT(efer_reload) },
154         { "fpu_reload", VCPU_STAT(fpu_reload) },
155         { "insn_emulation", VCPU_STAT(insn_emulation) },
156         { "insn_emulation_fail", VCPU_STAT(insn_emulation_fail) },
157         { "irq_injections", VCPU_STAT(irq_injections) },
158         { "nmi_injections", VCPU_STAT(nmi_injections) },
159         { "mmu_shadow_zapped", VM_STAT(mmu_shadow_zapped) },
160         { "mmu_pte_write", VM_STAT(mmu_pte_write) },
161         { "mmu_pte_updated", VM_STAT(mmu_pte_updated) },
162         { "mmu_pde_zapped", VM_STAT(mmu_pde_zapped) },
163         { "mmu_flooded", VM_STAT(mmu_flooded) },
164         { "mmu_recycled", VM_STAT(mmu_recycled) },
165         { "mmu_cache_miss", VM_STAT(mmu_cache_miss) },
166         { "mmu_unsync", VM_STAT(mmu_unsync) },
167         { "remote_tlb_flush", VM_STAT(remote_tlb_flush) },
168         { "largepages", VM_STAT(lpages) },
169         { NULL }
170 };
171
172 u64 __read_mostly host_xcr0;
173
174 static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt);
175
176 static inline void kvm_async_pf_hash_reset(struct kvm_vcpu *vcpu)
177 {
178         int i;
179         for (i = 0; i < roundup_pow_of_two(ASYNC_PF_PER_VCPU); i++)
180                 vcpu->arch.apf.gfns[i] = ~0;
181 }
182
183 static void kvm_on_user_return(struct user_return_notifier *urn)
184 {
185         unsigned slot;
186         struct kvm_shared_msrs *locals
187                 = container_of(urn, struct kvm_shared_msrs, urn);
188         struct kvm_shared_msr_values *values;
189
190         for (slot = 0; slot < shared_msrs_global.nr; ++slot) {
191                 values = &locals->values[slot];
192                 if (values->host != values->curr) {
193                         wrmsrl(shared_msrs_global.msrs[slot], values->host);
194                         values->curr = values->host;
195                 }
196         }
197         locals->registered = false;
198         user_return_notifier_unregister(urn);
199 }
200
201 static void shared_msr_update(unsigned slot, u32 msr)
202 {
203         u64 value;
204         unsigned int cpu = smp_processor_id();
205         struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu);
206
207         /* only read, and nobody should modify it at this time,
208          * so don't need lock */
209         if (slot >= shared_msrs_global.nr) {
210                 printk(KERN_ERR "kvm: invalid MSR slot!");
211                 return;
212         }
213         rdmsrl_safe(msr, &value);
214         smsr->values[slot].host = value;
215         smsr->values[slot].curr = value;
216 }
217
218 void kvm_define_shared_msr(unsigned slot, u32 msr)
219 {
220         BUG_ON(slot >= KVM_NR_SHARED_MSRS);
221         if (slot >= shared_msrs_global.nr)
222                 shared_msrs_global.nr = slot + 1;
223         shared_msrs_global.msrs[slot] = msr;
224         /* we need ensured the shared_msr_global have been updated */
225         smp_wmb();
226 }
227 EXPORT_SYMBOL_GPL(kvm_define_shared_msr);
228
229 static void kvm_shared_msr_cpu_online(void)
230 {
231         unsigned i;
232
233         for (i = 0; i < shared_msrs_global.nr; ++i)
234                 shared_msr_update(i, shared_msrs_global.msrs[i]);
235 }
236
237 int kvm_set_shared_msr(unsigned slot, u64 value, u64 mask)
238 {
239         unsigned int cpu = smp_processor_id();
240         struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu);
241         int err;
242
243         if (((value ^ smsr->values[slot].curr) & mask) == 0)
244                 return 0;
245         smsr->values[slot].curr = value;
246         err = wrmsrl_safe(shared_msrs_global.msrs[slot], value);
247         if (err)
248                 return 1;
249
250         if (!smsr->registered) {
251                 smsr->urn.on_user_return = kvm_on_user_return;
252                 user_return_notifier_register(&smsr->urn);
253                 smsr->registered = true;
254         }
255         return 0;
256 }
257 EXPORT_SYMBOL_GPL(kvm_set_shared_msr);
258
259 static void drop_user_return_notifiers(void)
260 {
261         unsigned int cpu = smp_processor_id();
262         struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu);
263
264         if (smsr->registered)
265                 kvm_on_user_return(&smsr->urn);
266 }
267
268 u64 kvm_get_apic_base(struct kvm_vcpu *vcpu)
269 {
270         return vcpu->arch.apic_base;
271 }
272 EXPORT_SYMBOL_GPL(kvm_get_apic_base);
273
274 int kvm_set_apic_base(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
275 {
276         u64 old_state = vcpu->arch.apic_base &
277                 (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE);
278         u64 new_state = msr_info->data &
279                 (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE);
280         u64 reserved_bits = ((~0ULL) << cpuid_maxphyaddr(vcpu)) |
281                 0x2ff | (guest_cpuid_has_x2apic(vcpu) ? 0 : X2APIC_ENABLE);
282
283         if (!msr_info->host_initiated &&
284             ((msr_info->data & reserved_bits) != 0 ||
285              new_state == X2APIC_ENABLE ||
286              (new_state == MSR_IA32_APICBASE_ENABLE &&
287               old_state == (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE)) ||
288              (new_state == (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE) &&
289               old_state == 0)))
290                 return 1;
291
292         kvm_lapic_set_base(vcpu, msr_info->data);
293         return 0;
294 }
295 EXPORT_SYMBOL_GPL(kvm_set_apic_base);
296
297 asmlinkage __visible void kvm_spurious_fault(void)
298 {
299         /* Fault while not rebooting.  We want the trace. */
300         BUG();
301 }
302 EXPORT_SYMBOL_GPL(kvm_spurious_fault);
303
304 #define EXCPT_BENIGN            0
305 #define EXCPT_CONTRIBUTORY      1
306 #define EXCPT_PF                2
307
308 static int exception_class(int vector)
309 {
310         switch (vector) {
311         case PF_VECTOR:
312                 return EXCPT_PF;
313         case DE_VECTOR:
314         case TS_VECTOR:
315         case NP_VECTOR:
316         case SS_VECTOR:
317         case GP_VECTOR:
318                 return EXCPT_CONTRIBUTORY;
319         default:
320                 break;
321         }
322         return EXCPT_BENIGN;
323 }
324
325 #define EXCPT_FAULT             0
326 #define EXCPT_TRAP              1
327 #define EXCPT_ABORT             2
328 #define EXCPT_INTERRUPT         3
329
330 static int exception_type(int vector)
331 {
332         unsigned int mask;
333
334         if (WARN_ON(vector > 31 || vector == NMI_VECTOR))
335                 return EXCPT_INTERRUPT;
336
337         mask = 1 << vector;
338
339         /* #DB is trap, as instruction watchpoints are handled elsewhere */
340         if (mask & ((1 << DB_VECTOR) | (1 << BP_VECTOR) | (1 << OF_VECTOR)))
341                 return EXCPT_TRAP;
342
343         if (mask & ((1 << DF_VECTOR) | (1 << MC_VECTOR)))
344                 return EXCPT_ABORT;
345
346         /* Reserved exceptions will result in fault */
347         return EXCPT_FAULT;
348 }
349
350 static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
351                 unsigned nr, bool has_error, u32 error_code,
352                 bool reinject)
353 {
354         u32 prev_nr;
355         int class1, class2;
356
357         kvm_make_request(KVM_REQ_EVENT, vcpu);
358
359         if (!vcpu->arch.exception.pending) {
360         queue:
361                 if (has_error && !is_protmode(vcpu))
362                         has_error = false;
363                 vcpu->arch.exception.pending = true;
364                 vcpu->arch.exception.has_error_code = has_error;
365                 vcpu->arch.exception.nr = nr;
366                 vcpu->arch.exception.error_code = error_code;
367                 vcpu->arch.exception.reinject = reinject;
368                 return;
369         }
370
371         /* to check exception */
372         prev_nr = vcpu->arch.exception.nr;
373         if (prev_nr == DF_VECTOR) {
374                 /* triple fault -> shutdown */
375                 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
376                 return;
377         }
378         class1 = exception_class(prev_nr);
379         class2 = exception_class(nr);
380         if ((class1 == EXCPT_CONTRIBUTORY && class2 == EXCPT_CONTRIBUTORY)
381                 || (class1 == EXCPT_PF && class2 != EXCPT_BENIGN)) {
382                 /* generate double fault per SDM Table 5-5 */
383                 vcpu->arch.exception.pending = true;
384                 vcpu->arch.exception.has_error_code = true;
385                 vcpu->arch.exception.nr = DF_VECTOR;
386                 vcpu->arch.exception.error_code = 0;
387         } else
388                 /* replace previous exception with a new one in a hope
389                    that instruction re-execution will regenerate lost
390                    exception */
391                 goto queue;
392 }
393
394 void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr)
395 {
396         kvm_multiple_exception(vcpu, nr, false, 0, false);
397 }
398 EXPORT_SYMBOL_GPL(kvm_queue_exception);
399
400 void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr)
401 {
402         kvm_multiple_exception(vcpu, nr, false, 0, true);
403 }
404 EXPORT_SYMBOL_GPL(kvm_requeue_exception);
405
406 void kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err)
407 {
408         if (err)
409                 kvm_inject_gp(vcpu, 0);
410         else
411                 kvm_x86_ops->skip_emulated_instruction(vcpu);
412 }
413 EXPORT_SYMBOL_GPL(kvm_complete_insn_gp);
414
415 void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
416 {
417         ++vcpu->stat.pf_guest;
418         vcpu->arch.cr2 = fault->address;
419         kvm_queue_exception_e(vcpu, PF_VECTOR, fault->error_code);
420 }
421 EXPORT_SYMBOL_GPL(kvm_inject_page_fault);
422
423 static bool kvm_propagate_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
424 {
425         if (mmu_is_nested(vcpu) && !fault->nested_page_fault)
426                 vcpu->arch.nested_mmu.inject_page_fault(vcpu, fault);
427         else
428                 vcpu->arch.mmu.inject_page_fault(vcpu, fault);
429
430         return fault->nested_page_fault;
431 }
432
433 void kvm_inject_nmi(struct kvm_vcpu *vcpu)
434 {
435         atomic_inc(&vcpu->arch.nmi_queued);
436         kvm_make_request(KVM_REQ_NMI, vcpu);
437 }
438 EXPORT_SYMBOL_GPL(kvm_inject_nmi);
439
440 void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
441 {
442         kvm_multiple_exception(vcpu, nr, true, error_code, false);
443 }
444 EXPORT_SYMBOL_GPL(kvm_queue_exception_e);
445
446 void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
447 {
448         kvm_multiple_exception(vcpu, nr, true, error_code, true);
449 }
450 EXPORT_SYMBOL_GPL(kvm_requeue_exception_e);
451
452 /*
453  * Checks if cpl <= required_cpl; if true, return true.  Otherwise queue
454  * a #GP and return false.
455  */
456 bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl)
457 {
458         if (kvm_x86_ops->get_cpl(vcpu) <= required_cpl)
459                 return true;
460         kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
461         return false;
462 }
463 EXPORT_SYMBOL_GPL(kvm_require_cpl);
464
465 bool kvm_require_dr(struct kvm_vcpu *vcpu, int dr)
466 {
467         if ((dr != 4 && dr != 5) || !kvm_read_cr4_bits(vcpu, X86_CR4_DE))
468                 return true;
469
470         kvm_queue_exception(vcpu, UD_VECTOR);
471         return false;
472 }
473 EXPORT_SYMBOL_GPL(kvm_require_dr);
474
475 /*
476  * This function will be used to read from the physical memory of the currently
477  * running guest. The difference to kvm_read_guest_page is that this function
478  * can read from guest physical or from the guest's guest physical memory.
479  */
480 int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
481                             gfn_t ngfn, void *data, int offset, int len,
482                             u32 access)
483 {
484         struct x86_exception exception;
485         gfn_t real_gfn;
486         gpa_t ngpa;
487
488         ngpa     = gfn_to_gpa(ngfn);
489         real_gfn = mmu->translate_gpa(vcpu, ngpa, access, &exception);
490         if (real_gfn == UNMAPPED_GVA)
491                 return -EFAULT;
492
493         real_gfn = gpa_to_gfn(real_gfn);
494
495         return kvm_read_guest_page(vcpu->kvm, real_gfn, data, offset, len);
496 }
497 EXPORT_SYMBOL_GPL(kvm_read_guest_page_mmu);
498
499 int kvm_read_nested_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn,
500                                void *data, int offset, int len, u32 access)
501 {
502         return kvm_read_guest_page_mmu(vcpu, vcpu->arch.walk_mmu, gfn,
503                                        data, offset, len, access);
504 }
505
506 /*
507  * Load the pae pdptrs.  Return true is they are all valid.
508  */
509 int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3)
510 {
511         gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT;
512         unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2;
513         int i;
514         int ret;
515         u64 pdpte[ARRAY_SIZE(mmu->pdptrs)];
516
517         ret = kvm_read_guest_page_mmu(vcpu, mmu, pdpt_gfn, pdpte,
518                                       offset * sizeof(u64), sizeof(pdpte),
519                                       PFERR_USER_MASK|PFERR_WRITE_MASK);
520         if (ret < 0) {
521                 ret = 0;
522                 goto out;
523         }
524         for (i = 0; i < ARRAY_SIZE(pdpte); ++i) {
525                 if (is_present_gpte(pdpte[i]) &&
526                     (pdpte[i] & vcpu->arch.mmu.rsvd_bits_mask[0][2])) {
527                         ret = 0;
528                         goto out;
529                 }
530         }
531         ret = 1;
532
533         memcpy(mmu->pdptrs, pdpte, sizeof(mmu->pdptrs));
534         __set_bit(VCPU_EXREG_PDPTR,
535                   (unsigned long *)&vcpu->arch.regs_avail);
536         __set_bit(VCPU_EXREG_PDPTR,
537                   (unsigned long *)&vcpu->arch.regs_dirty);
538 out:
539
540         return ret;
541 }
542 EXPORT_SYMBOL_GPL(load_pdptrs);
543
544 static bool pdptrs_changed(struct kvm_vcpu *vcpu)
545 {
546         u64 pdpte[ARRAY_SIZE(vcpu->arch.walk_mmu->pdptrs)];
547         bool changed = true;
548         int offset;
549         gfn_t gfn;
550         int r;
551
552         if (is_long_mode(vcpu) || !is_pae(vcpu))
553                 return false;
554
555         if (!test_bit(VCPU_EXREG_PDPTR,
556                       (unsigned long *)&vcpu->arch.regs_avail))
557                 return true;
558
559         gfn = (kvm_read_cr3(vcpu) & ~31u) >> PAGE_SHIFT;
560         offset = (kvm_read_cr3(vcpu) & ~31u) & (PAGE_SIZE - 1);
561         r = kvm_read_nested_guest_page(vcpu, gfn, pdpte, offset, sizeof(pdpte),
562                                        PFERR_USER_MASK | PFERR_WRITE_MASK);
563         if (r < 0)
564                 goto out;
565         changed = memcmp(pdpte, vcpu->arch.walk_mmu->pdptrs, sizeof(pdpte)) != 0;
566 out:
567
568         return changed;
569 }
570
571 int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
572 {
573         unsigned long old_cr0 = kvm_read_cr0(vcpu);
574         unsigned long update_bits = X86_CR0_PG | X86_CR0_WP |
575                                     X86_CR0_CD | X86_CR0_NW;
576
577         cr0 |= X86_CR0_ET;
578
579 #ifdef CONFIG_X86_64
580         if (cr0 & 0xffffffff00000000UL)
581                 return 1;
582 #endif
583
584         cr0 &= ~CR0_RESERVED_BITS;
585
586         if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD))
587                 return 1;
588
589         if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE))
590                 return 1;
591
592         if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
593 #ifdef CONFIG_X86_64
594                 if ((vcpu->arch.efer & EFER_LME)) {
595                         int cs_db, cs_l;
596
597                         if (!is_pae(vcpu))
598                                 return 1;
599                         kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
600                         if (cs_l)
601                                 return 1;
602                 } else
603 #endif
604                 if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.walk_mmu,
605                                                  kvm_read_cr3(vcpu)))
606                         return 1;
607         }
608
609         if (!(cr0 & X86_CR0_PG) && kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE))
610                 return 1;
611
612         kvm_x86_ops->set_cr0(vcpu, cr0);
613
614         if ((cr0 ^ old_cr0) & X86_CR0_PG) {
615                 kvm_clear_async_pf_completion_queue(vcpu);
616                 kvm_async_pf_hash_reset(vcpu);
617         }
618
619         if ((cr0 ^ old_cr0) & update_bits)
620                 kvm_mmu_reset_context(vcpu);
621         return 0;
622 }
623 EXPORT_SYMBOL_GPL(kvm_set_cr0);
624
625 void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
626 {
627         (void)kvm_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~0x0eul) | (msw & 0x0f));
628 }
629 EXPORT_SYMBOL_GPL(kvm_lmsw);
630
631 static void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu)
632 {
633         if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE) &&
634                         !vcpu->guest_xcr0_loaded) {
635                 /* kvm_set_xcr() also depends on this */
636                 xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0);
637                 vcpu->guest_xcr0_loaded = 1;
638         }
639 }
640
641 static void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu)
642 {
643         if (vcpu->guest_xcr0_loaded) {
644                 if (vcpu->arch.xcr0 != host_xcr0)
645                         xsetbv(XCR_XFEATURE_ENABLED_MASK, host_xcr0);
646                 vcpu->guest_xcr0_loaded = 0;
647         }
648 }
649
650 int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
651 {
652         u64 xcr0 = xcr;
653         u64 old_xcr0 = vcpu->arch.xcr0;
654         u64 valid_bits;
655
656         /* Only support XCR_XFEATURE_ENABLED_MASK(xcr0) now  */
657         if (index != XCR_XFEATURE_ENABLED_MASK)
658                 return 1;
659         if (!(xcr0 & XSTATE_FP))
660                 return 1;
661         if ((xcr0 & XSTATE_YMM) && !(xcr0 & XSTATE_SSE))
662                 return 1;
663
664         /*
665          * Do not allow the guest to set bits that we do not support
666          * saving.  However, xcr0 bit 0 is always set, even if the
667          * emulated CPU does not support XSAVE (see fx_init).
668          */
669         valid_bits = vcpu->arch.guest_supported_xcr0 | XSTATE_FP;
670         if (xcr0 & ~valid_bits)
671                 return 1;
672
673         if ((!(xcr0 & XSTATE_BNDREGS)) != (!(xcr0 & XSTATE_BNDCSR)))
674                 return 1;
675
676         if (xcr0 & XSTATE_AVX512) {
677                 if (!(xcr0 & XSTATE_YMM))
678                         return 1;
679                 if ((xcr0 & XSTATE_AVX512) != XSTATE_AVX512)
680                         return 1;
681         }
682         kvm_put_guest_xcr0(vcpu);
683         vcpu->arch.xcr0 = xcr0;
684
685         if ((xcr0 ^ old_xcr0) & XSTATE_EXTEND_MASK)
686                 kvm_update_cpuid(vcpu);
687         return 0;
688 }
689
690 int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
691 {
692         if (kvm_x86_ops->get_cpl(vcpu) != 0 ||
693             __kvm_set_xcr(vcpu, index, xcr)) {
694                 kvm_inject_gp(vcpu, 0);
695                 return 1;
696         }
697         return 0;
698 }
699 EXPORT_SYMBOL_GPL(kvm_set_xcr);
700
701 int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
702 {
703         unsigned long old_cr4 = kvm_read_cr4(vcpu);
704         unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE |
705                                    X86_CR4_PAE | X86_CR4_SMEP;
706         if (cr4 & CR4_RESERVED_BITS)
707                 return 1;
708
709         if (!guest_cpuid_has_xsave(vcpu) && (cr4 & X86_CR4_OSXSAVE))
710                 return 1;
711
712         if (!guest_cpuid_has_smep(vcpu) && (cr4 & X86_CR4_SMEP))
713                 return 1;
714
715         if (!guest_cpuid_has_smap(vcpu) && (cr4 & X86_CR4_SMAP))
716                 return 1;
717
718         if (!guest_cpuid_has_fsgsbase(vcpu) && (cr4 & X86_CR4_FSGSBASE))
719                 return 1;
720
721         if (is_long_mode(vcpu)) {
722                 if (!(cr4 & X86_CR4_PAE))
723                         return 1;
724         } else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
725                    && ((cr4 ^ old_cr4) & pdptr_bits)
726                    && !load_pdptrs(vcpu, vcpu->arch.walk_mmu,
727                                    kvm_read_cr3(vcpu)))
728                 return 1;
729
730         if ((cr4 & X86_CR4_PCIDE) && !(old_cr4 & X86_CR4_PCIDE)) {
731                 if (!guest_cpuid_has_pcid(vcpu))
732                         return 1;
733
734                 /* PCID can not be enabled when cr3[11:0]!=000H or EFER.LMA=0 */
735                 if ((kvm_read_cr3(vcpu) & X86_CR3_PCID_MASK) || !is_long_mode(vcpu))
736                         return 1;
737         }
738
739         if (kvm_x86_ops->set_cr4(vcpu, cr4))
740                 return 1;
741
742         if (((cr4 ^ old_cr4) & pdptr_bits) ||
743             (!(cr4 & X86_CR4_PCIDE) && (old_cr4 & X86_CR4_PCIDE)))
744                 kvm_mmu_reset_context(vcpu);
745
746         if ((cr4 ^ old_cr4) & X86_CR4_SMAP)
747                 update_permission_bitmask(vcpu, vcpu->arch.walk_mmu, false);
748
749         if ((cr4 ^ old_cr4) & X86_CR4_OSXSAVE)
750                 kvm_update_cpuid(vcpu);
751
752         return 0;
753 }
754 EXPORT_SYMBOL_GPL(kvm_set_cr4);
755
756 int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
757 {
758 #ifdef CONFIG_X86_64
759         cr3 &= ~CR3_PCID_INVD;
760 #endif
761
762         if (cr3 == kvm_read_cr3(vcpu) && !pdptrs_changed(vcpu)) {
763                 kvm_mmu_sync_roots(vcpu);
764                 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
765                 return 0;
766         }
767
768         if (is_long_mode(vcpu)) {
769                 if (cr3 & CR3_L_MODE_RESERVED_BITS)
770                         return 1;
771         } else if (is_pae(vcpu) && is_paging(vcpu) &&
772                    !load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))
773                 return 1;
774
775         vcpu->arch.cr3 = cr3;
776         __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
777         kvm_mmu_new_cr3(vcpu);
778         return 0;
779 }
780 EXPORT_SYMBOL_GPL(kvm_set_cr3);
781
782 int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
783 {
784         if (cr8 & CR8_RESERVED_BITS)
785                 return 1;
786         if (irqchip_in_kernel(vcpu->kvm))
787                 kvm_lapic_set_tpr(vcpu, cr8);
788         else
789                 vcpu->arch.cr8 = cr8;
790         return 0;
791 }
792 EXPORT_SYMBOL_GPL(kvm_set_cr8);
793
794 unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu)
795 {
796         if (irqchip_in_kernel(vcpu->kvm))
797                 return kvm_lapic_get_cr8(vcpu);
798         else
799                 return vcpu->arch.cr8;
800 }
801 EXPORT_SYMBOL_GPL(kvm_get_cr8);
802
803 static void kvm_update_dr6(struct kvm_vcpu *vcpu)
804 {
805         if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
806                 kvm_x86_ops->set_dr6(vcpu, vcpu->arch.dr6);
807 }
808
809 static void kvm_update_dr7(struct kvm_vcpu *vcpu)
810 {
811         unsigned long dr7;
812
813         if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
814                 dr7 = vcpu->arch.guest_debug_dr7;
815         else
816                 dr7 = vcpu->arch.dr7;
817         kvm_x86_ops->set_dr7(vcpu, dr7);
818         vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_BP_ENABLED;
819         if (dr7 & DR7_BP_EN_MASK)
820                 vcpu->arch.switch_db_regs |= KVM_DEBUGREG_BP_ENABLED;
821 }
822
823 static u64 kvm_dr6_fixed(struct kvm_vcpu *vcpu)
824 {
825         u64 fixed = DR6_FIXED_1;
826
827         if (!guest_cpuid_has_rtm(vcpu))
828                 fixed |= DR6_RTM;
829         return fixed;
830 }
831
832 static int __kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
833 {
834         switch (dr) {
835         case 0 ... 3:
836                 vcpu->arch.db[dr] = val;
837                 if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
838                         vcpu->arch.eff_db[dr] = val;
839                 break;
840         case 4:
841                 /* fall through */
842         case 6:
843                 if (val & 0xffffffff00000000ULL)
844                         return -1; /* #GP */
845                 vcpu->arch.dr6 = (val & DR6_VOLATILE) | kvm_dr6_fixed(vcpu);
846                 kvm_update_dr6(vcpu);
847                 break;
848         case 5:
849                 /* fall through */
850         default: /* 7 */
851                 if (val & 0xffffffff00000000ULL)
852                         return -1; /* #GP */
853                 vcpu->arch.dr7 = (val & DR7_VOLATILE) | DR7_FIXED_1;
854                 kvm_update_dr7(vcpu);
855                 break;
856         }
857
858         return 0;
859 }
860
861 int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
862 {
863         if (__kvm_set_dr(vcpu, dr, val)) {
864                 kvm_inject_gp(vcpu, 0);
865                 return 1;
866         }
867         return 0;
868 }
869 EXPORT_SYMBOL_GPL(kvm_set_dr);
870
871 int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
872 {
873         switch (dr) {
874         case 0 ... 3:
875                 *val = vcpu->arch.db[dr];
876                 break;
877         case 4:
878                 /* fall through */
879         case 6:
880                 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
881                         *val = vcpu->arch.dr6;
882                 else
883                         *val = kvm_x86_ops->get_dr6(vcpu);
884                 break;
885         case 5:
886                 /* fall through */
887         default: /* 7 */
888                 *val = vcpu->arch.dr7;
889                 break;
890         }
891         return 0;
892 }
893 EXPORT_SYMBOL_GPL(kvm_get_dr);
894
895 bool kvm_rdpmc(struct kvm_vcpu *vcpu)
896 {
897         u32 ecx = kvm_register_read(vcpu, VCPU_REGS_RCX);
898         u64 data;
899         int err;
900
901         err = kvm_pmu_read_pmc(vcpu, ecx, &data);
902         if (err)
903                 return err;
904         kvm_register_write(vcpu, VCPU_REGS_RAX, (u32)data);
905         kvm_register_write(vcpu, VCPU_REGS_RDX, data >> 32);
906         return err;
907 }
908 EXPORT_SYMBOL_GPL(kvm_rdpmc);
909
910 /*
911  * List of msr numbers which we expose to userspace through KVM_GET_MSRS
912  * and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST.
913  *
914  * This list is modified at module load time to reflect the
915  * capabilities of the host cpu. This capabilities test skips MSRs that are
916  * kvm-specific. Those are put in the beginning of the list.
917  */
918
919 #define KVM_SAVE_MSRS_BEGIN     12
920 static u32 msrs_to_save[] = {
921         MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
922         MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW,
923         HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL,
924         HV_X64_MSR_TIME_REF_COUNT, HV_X64_MSR_REFERENCE_TSC,
925         HV_X64_MSR_APIC_ASSIST_PAGE, MSR_KVM_ASYNC_PF_EN, MSR_KVM_STEAL_TIME,
926         MSR_KVM_PV_EOI_EN,
927         MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
928         MSR_STAR,
929 #ifdef CONFIG_X86_64
930         MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
931 #endif
932         MSR_IA32_TSC, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA,
933         MSR_IA32_FEATURE_CONTROL, MSR_IA32_BNDCFGS
934 };
935
936 static unsigned num_msrs_to_save;
937
938 static const u32 emulated_msrs[] = {
939         MSR_IA32_TSC_ADJUST,
940         MSR_IA32_TSCDEADLINE,
941         MSR_IA32_MISC_ENABLE,
942         MSR_IA32_MCG_STATUS,
943         MSR_IA32_MCG_CTL,
944 };
945
946 bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
947 {
948         if (efer & efer_reserved_bits)
949                 return false;
950
951         if (efer & EFER_FFXSR) {
952                 struct kvm_cpuid_entry2 *feat;
953
954                 feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
955                 if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT)))
956                         return false;
957         }
958
959         if (efer & EFER_SVME) {
960                 struct kvm_cpuid_entry2 *feat;
961
962                 feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
963                 if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM)))
964                         return false;
965         }
966
967         return true;
968 }
969 EXPORT_SYMBOL_GPL(kvm_valid_efer);
970
971 static int set_efer(struct kvm_vcpu *vcpu, u64 efer)
972 {
973         u64 old_efer = vcpu->arch.efer;
974
975         if (!kvm_valid_efer(vcpu, efer))
976                 return 1;
977
978         if (is_paging(vcpu)
979             && (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME))
980                 return 1;
981
982         efer &= ~EFER_LMA;
983         efer |= vcpu->arch.efer & EFER_LMA;
984
985         kvm_x86_ops->set_efer(vcpu, efer);
986
987         /* Update reserved bits */
988         if ((efer ^ old_efer) & EFER_NX)
989                 kvm_mmu_reset_context(vcpu);
990
991         return 0;
992 }
993
994 void kvm_enable_efer_bits(u64 mask)
995 {
996        efer_reserved_bits &= ~mask;
997 }
998 EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);
999
1000 /*
1001  * Writes msr value into into the appropriate "register".
1002  * Returns 0 on success, non-0 otherwise.
1003  * Assumes vcpu_load() was already called.
1004  */
1005 int kvm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
1006 {
1007         switch (msr->index) {
1008         case MSR_FS_BASE:
1009         case MSR_GS_BASE:
1010         case MSR_KERNEL_GS_BASE:
1011         case MSR_CSTAR:
1012         case MSR_LSTAR:
1013                 if (is_noncanonical_address(msr->data))
1014                         return 1;
1015                 break;
1016         case MSR_IA32_SYSENTER_EIP:
1017         case MSR_IA32_SYSENTER_ESP:
1018                 /*
1019                  * IA32_SYSENTER_ESP and IA32_SYSENTER_EIP cause #GP if
1020                  * non-canonical address is written on Intel but not on
1021                  * AMD (which ignores the top 32-bits, because it does
1022                  * not implement 64-bit SYSENTER).
1023                  *
1024                  * 64-bit code should hence be able to write a non-canonical
1025                  * value on AMD.  Making the address canonical ensures that
1026                  * vmentry does not fail on Intel after writing a non-canonical
1027                  * value, and that something deterministic happens if the guest
1028                  * invokes 64-bit SYSENTER.
1029                  */
1030                 msr->data = get_canonical(msr->data);
1031         }
1032         return kvm_x86_ops->set_msr(vcpu, msr);
1033 }
1034 EXPORT_SYMBOL_GPL(kvm_set_msr);
1035
1036 /*
1037  * Adapt set_msr() to msr_io()'s calling convention
1038  */
1039 static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
1040 {
1041         struct msr_data msr;
1042
1043         msr.data = *data;
1044         msr.index = index;
1045         msr.host_initiated = true;
1046         return kvm_set_msr(vcpu, &msr);
1047 }
1048
1049 #ifdef CONFIG_X86_64
1050 struct pvclock_gtod_data {
1051         seqcount_t      seq;
1052
1053         struct { /* extract of a clocksource struct */
1054                 int vclock_mode;
1055                 cycle_t cycle_last;
1056                 cycle_t mask;
1057                 u32     mult;
1058                 u32     shift;
1059         } clock;
1060
1061         u64             boot_ns;
1062         u64             nsec_base;
1063 };
1064
1065 static struct pvclock_gtod_data pvclock_gtod_data;
1066
1067 static void update_pvclock_gtod(struct timekeeper *tk)
1068 {
1069         struct pvclock_gtod_data *vdata = &pvclock_gtod_data;
1070         u64 boot_ns;
1071
1072         boot_ns = ktime_to_ns(ktime_add(tk->tkr.base_mono, tk->offs_boot));
1073
1074         write_seqcount_begin(&vdata->seq);
1075
1076         /* copy pvclock gtod data */
1077         vdata->clock.vclock_mode        = tk->tkr.clock->archdata.vclock_mode;
1078         vdata->clock.cycle_last         = tk->tkr.cycle_last;
1079         vdata->clock.mask               = tk->tkr.mask;
1080         vdata->clock.mult               = tk->tkr.mult;
1081         vdata->clock.shift              = tk->tkr.shift;
1082
1083         vdata->boot_ns                  = boot_ns;
1084         vdata->nsec_base                = tk->tkr.xtime_nsec;
1085
1086         write_seqcount_end(&vdata->seq);
1087 }
1088 #endif
1089
1090 void kvm_set_pending_timer(struct kvm_vcpu *vcpu)
1091 {
1092         /*
1093          * Note: KVM_REQ_PENDING_TIMER is implicitly checked in
1094          * vcpu_enter_guest.  This function is only called from
1095          * the physical CPU that is running vcpu.
1096          */
1097         kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
1098 }
1099
1100 static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
1101 {
1102         int version;
1103         int r;
1104         struct pvclock_wall_clock wc;
1105         struct timespec boot;
1106
1107         if (!wall_clock)
1108                 return;
1109
1110         r = kvm_read_guest(kvm, wall_clock, &version, sizeof(version));
1111         if (r)
1112                 return;
1113
1114         if (version & 1)
1115                 ++version;  /* first time write, random junk */
1116
1117         ++version;
1118
1119         kvm_write_guest(kvm, wall_clock, &version, sizeof(version));
1120
1121         /*
1122          * The guest calculates current wall clock time by adding
1123          * system time (updated by kvm_guest_time_update below) to the
1124          * wall clock specified here.  guest system time equals host
1125          * system time for us, thus we must fill in host boot time here.
1126          */
1127         getboottime(&boot);
1128
1129         if (kvm->arch.kvmclock_offset) {
1130                 struct timespec ts = ns_to_timespec(kvm->arch.kvmclock_offset);
1131                 boot = timespec_sub(boot, ts);
1132         }
1133         wc.sec = boot.tv_sec;
1134         wc.nsec = boot.tv_nsec;
1135         wc.version = version;
1136
1137         kvm_write_guest(kvm, wall_clock, &wc, sizeof(wc));
1138
1139         version++;
1140         kvm_write_guest(kvm, wall_clock, &version, sizeof(version));
1141 }
1142
1143 static uint32_t div_frac(uint32_t dividend, uint32_t divisor)
1144 {
1145         uint32_t quotient, remainder;
1146
1147         /* Don't try to replace with do_div(), this one calculates
1148          * "(dividend << 32) / divisor" */
1149         __asm__ ( "divl %4"
1150                   : "=a" (quotient), "=d" (remainder)
1151                   : "0" (0), "1" (dividend), "r" (divisor) );
1152         return quotient;
1153 }
1154
1155 static void kvm_get_time_scale(uint32_t scaled_khz, uint32_t base_khz,
1156                                s8 *pshift, u32 *pmultiplier)
1157 {
1158         uint64_t scaled64;
1159         int32_t  shift = 0;
1160         uint64_t tps64;
1161         uint32_t tps32;
1162
1163         tps64 = base_khz * 1000LL;
1164         scaled64 = scaled_khz * 1000LL;
1165         while (tps64 > scaled64*2 || tps64 & 0xffffffff00000000ULL) {
1166                 tps64 >>= 1;
1167                 shift--;
1168         }
1169
1170         tps32 = (uint32_t)tps64;
1171         while (tps32 <= scaled64 || scaled64 & 0xffffffff00000000ULL) {
1172                 if (scaled64 & 0xffffffff00000000ULL || tps32 & 0x80000000)
1173                         scaled64 >>= 1;
1174                 else
1175                         tps32 <<= 1;
1176                 shift++;
1177         }
1178
1179         *pshift = shift;
1180         *pmultiplier = div_frac(scaled64, tps32);
1181
1182         pr_debug("%s: base_khz %u => %u, shift %d, mul %u\n",
1183                  __func__, base_khz, scaled_khz, shift, *pmultiplier);
1184 }
1185
1186 static inline u64 get_kernel_ns(void)
1187 {
1188         return ktime_get_boot_ns();
1189 }
1190
1191 #ifdef CONFIG_X86_64
1192 static atomic_t kvm_guest_has_master_clock = ATOMIC_INIT(0);
1193 #endif
1194
1195 static DEFINE_PER_CPU(unsigned long, cpu_tsc_khz);
1196 unsigned long max_tsc_khz;
1197
1198 static inline u64 nsec_to_cycles(struct kvm_vcpu *vcpu, u64 nsec)
1199 {
1200         return pvclock_scale_delta(nsec, vcpu->arch.virtual_tsc_mult,
1201                                    vcpu->arch.virtual_tsc_shift);
1202 }
1203
1204 static u32 adjust_tsc_khz(u32 khz, s32 ppm)
1205 {
1206         u64 v = (u64)khz * (1000000 + ppm);
1207         do_div(v, 1000000);
1208         return v;
1209 }
1210
1211 static void kvm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 this_tsc_khz)
1212 {
1213         u32 thresh_lo, thresh_hi;
1214         int use_scaling = 0;
1215
1216         /* tsc_khz can be zero if TSC calibration fails */
1217         if (this_tsc_khz == 0)
1218                 return;
1219
1220         /* Compute a scale to convert nanoseconds in TSC cycles */
1221         kvm_get_time_scale(this_tsc_khz, NSEC_PER_SEC / 1000,
1222                            &vcpu->arch.virtual_tsc_shift,
1223                            &vcpu->arch.virtual_tsc_mult);
1224         vcpu->arch.virtual_tsc_khz = this_tsc_khz;
1225
1226         /*
1227          * Compute the variation in TSC rate which is acceptable
1228          * within the range of tolerance and decide if the
1229          * rate being applied is within that bounds of the hardware
1230          * rate.  If so, no scaling or compensation need be done.
1231          */
1232         thresh_lo = adjust_tsc_khz(tsc_khz, -tsc_tolerance_ppm);
1233         thresh_hi = adjust_tsc_khz(tsc_khz, tsc_tolerance_ppm);
1234         if (this_tsc_khz < thresh_lo || this_tsc_khz > thresh_hi) {
1235                 pr_debug("kvm: requested TSC rate %u falls outside tolerance [%u,%u]\n", this_tsc_khz, thresh_lo, thresh_hi);
1236                 use_scaling = 1;
1237         }
1238         kvm_x86_ops->set_tsc_khz(vcpu, this_tsc_khz, use_scaling);
1239 }
1240
1241 static u64 compute_guest_tsc(struct kvm_vcpu *vcpu, s64 kernel_ns)
1242 {
1243         u64 tsc = pvclock_scale_delta(kernel_ns-vcpu->arch.this_tsc_nsec,
1244                                       vcpu->arch.virtual_tsc_mult,
1245                                       vcpu->arch.virtual_tsc_shift);
1246         tsc += vcpu->arch.this_tsc_write;
1247         return tsc;
1248 }
1249
1250 void kvm_track_tsc_matching(struct kvm_vcpu *vcpu)
1251 {
1252 #ifdef CONFIG_X86_64
1253         bool vcpus_matched;
1254         struct kvm_arch *ka = &vcpu->kvm->arch;
1255         struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
1256
1257         vcpus_matched = (ka->nr_vcpus_matched_tsc + 1 ==
1258                          atomic_read(&vcpu->kvm->online_vcpus));
1259
1260         /*
1261          * Once the masterclock is enabled, always perform request in
1262          * order to update it.
1263          *
1264          * In order to enable masterclock, the host clocksource must be TSC
1265          * and the vcpus need to have matched TSCs.  When that happens,
1266          * perform request to enable masterclock.
1267          */
1268         if (ka->use_master_clock ||
1269             (gtod->clock.vclock_mode == VCLOCK_TSC && vcpus_matched))
1270                 kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
1271
1272         trace_kvm_track_tsc(vcpu->vcpu_id, ka->nr_vcpus_matched_tsc,
1273                             atomic_read(&vcpu->kvm->online_vcpus),
1274                             ka->use_master_clock, gtod->clock.vclock_mode);
1275 #endif
1276 }
1277
1278 static void update_ia32_tsc_adjust_msr(struct kvm_vcpu *vcpu, s64 offset)
1279 {
1280         u64 curr_offset = kvm_x86_ops->read_tsc_offset(vcpu);
1281         vcpu->arch.ia32_tsc_adjust_msr += offset - curr_offset;
1282 }
1283
1284 void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr)
1285 {
1286         struct kvm *kvm = vcpu->kvm;
1287         u64 offset, ns, elapsed;
1288         unsigned long flags;
1289         s64 usdiff;
1290         bool matched;
1291         bool already_matched;
1292         u64 data = msr->data;
1293
1294         raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
1295         offset = kvm_x86_ops->compute_tsc_offset(vcpu, data);
1296         ns = get_kernel_ns();
1297         elapsed = ns - kvm->arch.last_tsc_nsec;
1298
1299         if (vcpu->arch.virtual_tsc_khz) {
1300                 int faulted = 0;
1301
1302                 /* n.b - signed multiplication and division required */
1303                 usdiff = data - kvm->arch.last_tsc_write;
1304 #ifdef CONFIG_X86_64
1305                 usdiff = (usdiff * 1000) / vcpu->arch.virtual_tsc_khz;
1306 #else
1307                 /* do_div() only does unsigned */
1308                 asm("1: idivl %[divisor]\n"
1309                     "2: xor %%edx, %%edx\n"
1310                     "   movl $0, %[faulted]\n"
1311                     "3:\n"
1312                     ".section .fixup,\"ax\"\n"
1313                     "4: movl $1, %[faulted]\n"
1314                     "   jmp  3b\n"
1315                     ".previous\n"
1316
1317                 _ASM_EXTABLE(1b, 4b)
1318
1319                 : "=A"(usdiff), [faulted] "=r" (faulted)
1320                 : "A"(usdiff * 1000), [divisor] "rm"(vcpu->arch.virtual_tsc_khz));
1321
1322 #endif
1323                 do_div(elapsed, 1000);
1324                 usdiff -= elapsed;
1325                 if (usdiff < 0)
1326                         usdiff = -usdiff;
1327
1328                 /* idivl overflow => difference is larger than USEC_PER_SEC */
1329                 if (faulted)
1330                         usdiff = USEC_PER_SEC;
1331         } else
1332                 usdiff = USEC_PER_SEC; /* disable TSC match window below */
1333
1334         /*
1335          * Special case: TSC write with a small delta (1 second) of virtual
1336          * cycle time against real time is interpreted as an attempt to
1337          * synchronize the CPU.
1338          *
1339          * For a reliable TSC, we can match TSC offsets, and for an unstable
1340          * TSC, we add elapsed time in this computation.  We could let the
1341          * compensation code attempt to catch up if we fall behind, but
1342          * it's better to try to match offsets from the beginning.
1343          */
1344         if (usdiff < USEC_PER_SEC &&
1345             vcpu->arch.virtual_tsc_khz == kvm->arch.last_tsc_khz) {
1346                 if (!check_tsc_unstable()) {
1347                         offset = kvm->arch.cur_tsc_offset;
1348                         pr_debug("kvm: matched tsc offset for %llu\n", data);
1349                 } else {
1350                         u64 delta = nsec_to_cycles(vcpu, elapsed);
1351                         data += delta;
1352                         offset = kvm_x86_ops->compute_tsc_offset(vcpu, data);
1353                         pr_debug("kvm: adjusted tsc offset by %llu\n", delta);
1354                 }
1355                 matched = true;
1356                 already_matched = (vcpu->arch.this_tsc_generation == kvm->arch.cur_tsc_generation);
1357         } else {
1358                 /*
1359                  * We split periods of matched TSC writes into generations.
1360                  * For each generation, we track the original measured
1361                  * nanosecond time, offset, and write, so if TSCs are in
1362                  * sync, we can match exact offset, and if not, we can match
1363                  * exact software computation in compute_guest_tsc()
1364                  *
1365                  * These values are tracked in kvm->arch.cur_xxx variables.
1366                  */
1367                 kvm->arch.cur_tsc_generation++;
1368                 kvm->arch.cur_tsc_nsec = ns;
1369                 kvm->arch.cur_tsc_write = data;
1370                 kvm->arch.cur_tsc_offset = offset;
1371                 matched = false;
1372                 pr_debug("kvm: new tsc generation %llu, clock %llu\n",
1373                          kvm->arch.cur_tsc_generation, data);
1374         }
1375
1376         /*
1377          * We also track th most recent recorded KHZ, write and time to
1378          * allow the matching interval to be extended at each write.
1379          */
1380         kvm->arch.last_tsc_nsec = ns;
1381         kvm->arch.last_tsc_write = data;
1382         kvm->arch.last_tsc_khz = vcpu->arch.virtual_tsc_khz;
1383
1384         vcpu->arch.last_guest_tsc = data;
1385
1386         /* Keep track of which generation this VCPU has synchronized to */
1387         vcpu->arch.this_tsc_generation = kvm->arch.cur_tsc_generation;
1388         vcpu->arch.this_tsc_nsec = kvm->arch.cur_tsc_nsec;
1389         vcpu->arch.this_tsc_write = kvm->arch.cur_tsc_write;
1390
1391         if (guest_cpuid_has_tsc_adjust(vcpu) && !msr->host_initiated)
1392                 update_ia32_tsc_adjust_msr(vcpu, offset);
1393         kvm_x86_ops->write_tsc_offset(vcpu, offset);
1394         raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);
1395
1396         spin_lock(&kvm->arch.pvclock_gtod_sync_lock);
1397         if (!matched) {
1398                 kvm->arch.nr_vcpus_matched_tsc = 0;
1399         } else if (!already_matched) {
1400                 kvm->arch.nr_vcpus_matched_tsc++;
1401         }
1402
1403         kvm_track_tsc_matching(vcpu);
1404         spin_unlock(&kvm->arch.pvclock_gtod_sync_lock);
1405 }
1406
1407 EXPORT_SYMBOL_GPL(kvm_write_tsc);
1408
1409 #ifdef CONFIG_X86_64
1410
1411 static cycle_t read_tsc(void)
1412 {
1413         cycle_t ret;
1414         u64 last;
1415
1416         /*
1417          * Empirically, a fence (of type that depends on the CPU)
1418          * before rdtsc is enough to ensure that rdtsc is ordered
1419          * with respect to loads.  The various CPU manuals are unclear
1420          * as to whether rdtsc can be reordered with later loads,
1421          * but no one has ever seen it happen.
1422          */
1423         rdtsc_barrier();
1424         ret = (cycle_t)vget_cycles();
1425
1426         last = pvclock_gtod_data.clock.cycle_last;
1427
1428         if (likely(ret >= last))
1429                 return ret;
1430
1431         /*
1432          * GCC likes to generate cmov here, but this branch is extremely
1433          * predictable (it's just a funciton of time and the likely is
1434          * very likely) and there's a data dependence, so force GCC
1435          * to generate a branch instead.  I don't barrier() because
1436          * we don't actually need a barrier, and if this function
1437          * ever gets inlined it will generate worse code.
1438          */
1439         asm volatile ("");
1440         return last;
1441 }
1442
1443 static inline u64 vgettsc(cycle_t *cycle_now)
1444 {
1445         long v;
1446         struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
1447
1448         *cycle_now = read_tsc();
1449
1450         v = (*cycle_now - gtod->clock.cycle_last) & gtod->clock.mask;
1451         return v * gtod->clock.mult;
1452 }
1453
1454 static int do_monotonic_boot(s64 *t, cycle_t *cycle_now)
1455 {
1456         struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
1457         unsigned long seq;
1458         int mode;
1459         u64 ns;
1460
1461         do {
1462                 seq = read_seqcount_begin(&gtod->seq);
1463                 mode = gtod->clock.vclock_mode;
1464                 ns = gtod->nsec_base;
1465                 ns += vgettsc(cycle_now);
1466                 ns >>= gtod->clock.shift;
1467                 ns += gtod->boot_ns;
1468         } while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
1469         *t = ns;
1470
1471         return mode;
1472 }
1473
1474 /* returns true if host is using tsc clocksource */
1475 static bool kvm_get_time_and_clockread(s64 *kernel_ns, cycle_t *cycle_now)
1476 {
1477         /* checked again under seqlock below */
1478         if (pvclock_gtod_data.clock.vclock_mode != VCLOCK_TSC)
1479                 return false;
1480
1481         return do_monotonic_boot(kernel_ns, cycle_now) == VCLOCK_TSC;
1482 }
1483 #endif
1484
1485 /*
1486  *
1487  * Assuming a stable TSC across physical CPUS, and a stable TSC
1488  * across virtual CPUs, the following condition is possible.
1489  * Each numbered line represents an event visible to both
1490  * CPUs at the next numbered event.
1491  *
1492  * "timespecX" represents host monotonic time. "tscX" represents
1493  * RDTSC value.
1494  *
1495  *              VCPU0 on CPU0           |       VCPU1 on CPU1
1496  *
1497  * 1.  read timespec0,tsc0
1498  * 2.                                   | timespec1 = timespec0 + N
1499  *                                      | tsc1 = tsc0 + M
1500  * 3. transition to guest               | transition to guest
1501  * 4. ret0 = timespec0 + (rdtsc - tsc0) |
1502  * 5.                                   | ret1 = timespec1 + (rdtsc - tsc1)
1503  *                                      | ret1 = timespec0 + N + (rdtsc - (tsc0 + M))
1504  *
1505  * Since ret0 update is visible to VCPU1 at time 5, to obey monotonicity:
1506  *
1507  *      - ret0 < ret1
1508  *      - timespec0 + (rdtsc - tsc0) < timespec0 + N + (rdtsc - (tsc0 + M))
1509  *              ...
1510  *      - 0 < N - M => M < N
1511  *
1512  * That is, when timespec0 != timespec1, M < N. Unfortunately that is not
1513  * always the case (the difference between two distinct xtime instances
1514  * might be smaller then the difference between corresponding TSC reads,
1515  * when updating guest vcpus pvclock areas).
1516  *
1517  * To avoid that problem, do not allow visibility of distinct
1518  * system_timestamp/tsc_timestamp values simultaneously: use a master
1519  * copy of host monotonic time values. Update that master copy
1520  * in lockstep.
1521  *
1522  * Rely on synchronization of host TSCs and guest TSCs for monotonicity.
1523  *
1524  */
1525
1526 static void pvclock_update_vm_gtod_copy(struct kvm *kvm)
1527 {
1528 #ifdef CONFIG_X86_64
1529         struct kvm_arch *ka = &kvm->arch;
1530         int vclock_mode;
1531         bool host_tsc_clocksource, vcpus_matched;
1532
1533         vcpus_matched = (ka->nr_vcpus_matched_tsc + 1 ==
1534                         atomic_read(&kvm->online_vcpus));
1535
1536         /*
1537          * If the host uses TSC clock, then passthrough TSC as stable
1538          * to the guest.
1539          */
1540         host_tsc_clocksource = kvm_get_time_and_clockread(
1541                                         &ka->master_kernel_ns,
1542                                         &ka->master_cycle_now);
1543
1544         ka->use_master_clock = host_tsc_clocksource && vcpus_matched
1545                                 && !backwards_tsc_observed;
1546
1547         if (ka->use_master_clock)
1548                 atomic_set(&kvm_guest_has_master_clock, 1);
1549
1550         vclock_mode = pvclock_gtod_data.clock.vclock_mode;
1551         trace_kvm_update_master_clock(ka->use_master_clock, vclock_mode,
1552                                         vcpus_matched);
1553 #endif
1554 }
1555
1556 static void kvm_gen_update_masterclock(struct kvm *kvm)
1557 {
1558 #ifdef CONFIG_X86_64
1559         int i;
1560         struct kvm_vcpu *vcpu;
1561         struct kvm_arch *ka = &kvm->arch;
1562
1563         spin_lock(&ka->pvclock_gtod_sync_lock);
1564         kvm_make_mclock_inprogress_request(kvm);
1565         /* no guest entries from this point */
1566         pvclock_update_vm_gtod_copy(kvm);
1567
1568         kvm_for_each_vcpu(i, vcpu, kvm)
1569                 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
1570
1571         /* guest entries allowed */
1572         kvm_for_each_vcpu(i, vcpu, kvm)
1573                 clear_bit(KVM_REQ_MCLOCK_INPROGRESS, &vcpu->requests);
1574
1575         spin_unlock(&ka->pvclock_gtod_sync_lock);
1576 #endif
1577 }
1578
1579 static int kvm_guest_time_update(struct kvm_vcpu *v)
1580 {
1581         unsigned long flags, this_tsc_khz;
1582         struct kvm_vcpu_arch *vcpu = &v->arch;
1583         struct kvm_arch *ka = &v->kvm->arch;
1584         s64 kernel_ns;
1585         u64 tsc_timestamp, host_tsc;
1586         struct pvclock_vcpu_time_info guest_hv_clock;
1587         u8 pvclock_flags;
1588         bool use_master_clock;
1589
1590         kernel_ns = 0;
1591         host_tsc = 0;
1592
1593         /*
1594          * If the host uses TSC clock, then passthrough TSC as stable
1595          * to the guest.
1596          */
1597         spin_lock(&ka->pvclock_gtod_sync_lock);
1598         use_master_clock = ka->use_master_clock;
1599         if (use_master_clock) {
1600                 host_tsc = ka->master_cycle_now;
1601                 kernel_ns = ka->master_kernel_ns;
1602         }
1603         spin_unlock(&ka->pvclock_gtod_sync_lock);
1604
1605         /* Keep irq disabled to prevent changes to the clock */
1606         local_irq_save(flags);
1607         this_tsc_khz = __this_cpu_read(cpu_tsc_khz);
1608         if (unlikely(this_tsc_khz == 0)) {
1609                 local_irq_restore(flags);
1610                 kvm_make_request(KVM_REQ_CLOCK_UPDATE, v);
1611                 return 1;
1612         }
1613         if (!use_master_clock) {
1614                 host_tsc = native_read_tsc();
1615                 kernel_ns = get_kernel_ns();
1616         }
1617
1618         tsc_timestamp = kvm_x86_ops->read_l1_tsc(v, host_tsc);
1619
1620         /*
1621          * We may have to catch up the TSC to match elapsed wall clock
1622          * time for two reasons, even if kvmclock is used.
1623          *   1) CPU could have been running below the maximum TSC rate
1624          *   2) Broken TSC compensation resets the base at each VCPU
1625          *      entry to avoid unknown leaps of TSC even when running
1626          *      again on the same CPU.  This may cause apparent elapsed
1627          *      time to disappear, and the guest to stand still or run
1628          *      very slowly.
1629          */
1630         if (vcpu->tsc_catchup) {
1631                 u64 tsc = compute_guest_tsc(v, kernel_ns);
1632                 if (tsc > tsc_timestamp) {
1633                         adjust_tsc_offset_guest(v, tsc - tsc_timestamp);
1634                         tsc_timestamp = tsc;
1635                 }
1636         }
1637
1638         local_irq_restore(flags);
1639
1640         if (!vcpu->pv_time_enabled)
1641                 return 0;
1642
1643         if (unlikely(vcpu->hw_tsc_khz != this_tsc_khz)) {
1644                 kvm_get_time_scale(NSEC_PER_SEC / 1000, this_tsc_khz,
1645                                    &vcpu->hv_clock.tsc_shift,
1646                                    &vcpu->hv_clock.tsc_to_system_mul);
1647                 vcpu->hw_tsc_khz = this_tsc_khz;
1648         }
1649
1650         /* With all the info we got, fill in the values */
1651         vcpu->hv_clock.tsc_timestamp = tsc_timestamp;
1652         vcpu->hv_clock.system_time = kernel_ns + v->kvm->arch.kvmclock_offset;
1653         vcpu->last_guest_tsc = tsc_timestamp;
1654
1655         if (unlikely(kvm_read_guest_cached(v->kvm, &vcpu->pv_time,
1656                 &guest_hv_clock, sizeof(guest_hv_clock))))
1657                 return 0;
1658
1659         /*
1660          * The interface expects us to write an even number signaling that the
1661          * update is finished. Since the guest won't see the intermediate
1662          * state, we just increase by 2 at the end.
1663          */
1664         vcpu->hv_clock.version = guest_hv_clock.version + 2;
1665
1666         /* retain PVCLOCK_GUEST_STOPPED if set in guest copy */
1667         pvclock_flags = (guest_hv_clock.flags & PVCLOCK_GUEST_STOPPED);
1668
1669         if (vcpu->pvclock_set_guest_stopped_request) {
1670                 pvclock_flags |= PVCLOCK_GUEST_STOPPED;
1671                 vcpu->pvclock_set_guest_stopped_request = false;
1672         }
1673
1674         /* If the host uses TSC clocksource, then it is stable */
1675         if (use_master_clock)
1676                 pvclock_flags |= PVCLOCK_TSC_STABLE_BIT;
1677
1678         vcpu->hv_clock.flags = pvclock_flags;
1679
1680         trace_kvm_pvclock_update(v->vcpu_id, &vcpu->hv_clock);
1681
1682         kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
1683                                 &vcpu->hv_clock,
1684                                 sizeof(vcpu->hv_clock));
1685         return 0;
1686 }
1687
1688 /*
1689  * kvmclock updates which are isolated to a given vcpu, such as
1690  * vcpu->cpu migration, should not allow system_timestamp from
1691  * the rest of the vcpus to remain static. Otherwise ntp frequency
1692  * correction applies to one vcpu's system_timestamp but not
1693  * the others.
1694  *
1695  * So in those cases, request a kvmclock update for all vcpus.
1696  * We need to rate-limit these requests though, as they can
1697  * considerably slow guests that have a large number of vcpus.
1698  * The time for a remote vcpu to update its kvmclock is bound
1699  * by the delay we use to rate-limit the updates.
1700  */
1701
1702 #define KVMCLOCK_UPDATE_DELAY msecs_to_jiffies(100)
1703
1704 static void kvmclock_update_fn(struct work_struct *work)
1705 {
1706         int i;
1707         struct delayed_work *dwork = to_delayed_work(work);
1708         struct kvm_arch *ka = container_of(dwork, struct kvm_arch,
1709                                            kvmclock_update_work);
1710         struct kvm *kvm = container_of(ka, struct kvm, arch);
1711         struct kvm_vcpu *vcpu;
1712
1713         kvm_for_each_vcpu(i, vcpu, kvm) {
1714                 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
1715                 kvm_vcpu_kick(vcpu);
1716         }
1717 }
1718
1719 static void kvm_gen_kvmclock_update(struct kvm_vcpu *v)
1720 {
1721         struct kvm *kvm = v->kvm;
1722
1723         kvm_make_request(KVM_REQ_CLOCK_UPDATE, v);
1724         schedule_delayed_work(&kvm->arch.kvmclock_update_work,
1725                                         KVMCLOCK_UPDATE_DELAY);
1726 }
1727
1728 #define KVMCLOCK_SYNC_PERIOD (300 * HZ)
1729
1730 static void kvmclock_sync_fn(struct work_struct *work)
1731 {
1732         struct delayed_work *dwork = to_delayed_work(work);
1733         struct kvm_arch *ka = container_of(dwork, struct kvm_arch,
1734                                            kvmclock_sync_work);
1735         struct kvm *kvm = container_of(ka, struct kvm, arch);
1736
1737         schedule_delayed_work(&kvm->arch.kvmclock_update_work, 0);
1738         schedule_delayed_work(&kvm->arch.kvmclock_sync_work,
1739                                         KVMCLOCK_SYNC_PERIOD);
1740 }
1741
1742 static bool msr_mtrr_valid(unsigned msr)
1743 {
1744         switch (msr) {
1745         case 0x200 ... 0x200 + 2 * KVM_NR_VAR_MTRR - 1:
1746         case MSR_MTRRfix64K_00000:
1747         case MSR_MTRRfix16K_80000:
1748         case MSR_MTRRfix16K_A0000:
1749         case MSR_MTRRfix4K_C0000:
1750         case MSR_MTRRfix4K_C8000:
1751         case MSR_MTRRfix4K_D0000:
1752         case MSR_MTRRfix4K_D8000:
1753         case MSR_MTRRfix4K_E0000:
1754         case MSR_MTRRfix4K_E8000:
1755         case MSR_MTRRfix4K_F0000:
1756         case MSR_MTRRfix4K_F8000:
1757         case MSR_MTRRdefType:
1758         case MSR_IA32_CR_PAT:
1759                 return true;
1760         case 0x2f8:
1761                 return true;
1762         }
1763         return false;
1764 }
1765
1766 static bool valid_pat_type(unsigned t)
1767 {
1768         return t < 8 && (1 << t) & 0xf3; /* 0, 1, 4, 5, 6, 7 */
1769 }
1770
1771 static bool valid_mtrr_type(unsigned t)
1772 {
1773         return t < 8 && (1 << t) & 0x73; /* 0, 1, 4, 5, 6 */
1774 }
1775
1776 bool kvm_mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data)
1777 {
1778         int i;
1779         u64 mask;
1780
1781         if (!msr_mtrr_valid(msr))
1782                 return false;
1783
1784         if (msr == MSR_IA32_CR_PAT) {
1785                 for (i = 0; i < 8; i++)
1786                         if (!valid_pat_type((data >> (i * 8)) & 0xff))
1787                                 return false;
1788                 return true;
1789         } else if (msr == MSR_MTRRdefType) {
1790                 if (data & ~0xcff)
1791                         return false;
1792                 return valid_mtrr_type(data & 0xff);
1793         } else if (msr >= MSR_MTRRfix64K_00000 && msr <= MSR_MTRRfix4K_F8000) {
1794                 for (i = 0; i < 8 ; i++)
1795                         if (!valid_mtrr_type((data >> (i * 8)) & 0xff))
1796                                 return false;
1797                 return true;
1798         }
1799
1800         /* variable MTRRs */
1801         WARN_ON(!(msr >= 0x200 && msr < 0x200 + 2 * KVM_NR_VAR_MTRR));
1802
1803         mask = (~0ULL) << cpuid_maxphyaddr(vcpu);
1804         if ((msr & 1) == 0) {
1805                 /* MTRR base */
1806                 if (!valid_mtrr_type(data & 0xff))
1807                         return false;
1808                 mask |= 0xf00;
1809         } else
1810                 /* MTRR mask */
1811                 mask |= 0x7ff;
1812         if (data & mask) {
1813                 kvm_inject_gp(vcpu, 0);
1814                 return false;
1815         }
1816
1817         return true;
1818 }
1819 EXPORT_SYMBOL_GPL(kvm_mtrr_valid);
1820
1821 static int set_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
1822 {
1823         u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges;
1824
1825         if (!kvm_mtrr_valid(vcpu, msr, data))
1826                 return 1;
1827
1828         if (msr == MSR_MTRRdefType) {
1829                 vcpu->arch.mtrr_state.def_type = data;
1830                 vcpu->arch.mtrr_state.enabled = (data & 0xc00) >> 10;
1831         } else if (msr == MSR_MTRRfix64K_00000)
1832                 p[0] = data;
1833         else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000)
1834                 p[1 + msr - MSR_MTRRfix16K_80000] = data;
1835         else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000)
1836                 p[3 + msr - MSR_MTRRfix4K_C0000] = data;
1837         else if (msr == MSR_IA32_CR_PAT)
1838                 vcpu->arch.pat = data;
1839         else {  /* Variable MTRRs */
1840                 int idx, is_mtrr_mask;
1841                 u64 *pt;
1842
1843                 idx = (msr - 0x200) / 2;
1844                 is_mtrr_mask = msr - 0x200 - 2 * idx;
1845                 if (!is_mtrr_mask)
1846                         pt =
1847                           (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo;
1848                 else
1849                         pt =
1850                           (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo;
1851                 *pt = data;
1852         }
1853
1854         kvm_mmu_reset_context(vcpu);
1855         return 0;
1856 }
1857
1858 static int set_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 data)
1859 {
1860         u64 mcg_cap = vcpu->arch.mcg_cap;
1861         unsigned bank_num = mcg_cap & 0xff;
1862
1863         switch (msr) {
1864         case MSR_IA32_MCG_STATUS:
1865                 vcpu->arch.mcg_status = data;
1866                 break;
1867         case MSR_IA32_MCG_CTL:
1868                 if (!(mcg_cap & MCG_CTL_P))
1869                         return 1;
1870                 if (data != 0 && data != ~(u64)0)
1871                         return -1;
1872                 vcpu->arch.mcg_ctl = data;
1873                 break;
1874         default:
1875                 if (msr >= MSR_IA32_MC0_CTL &&
1876                     msr < MSR_IA32_MCx_CTL(bank_num)) {
1877                         u32 offset = msr - MSR_IA32_MC0_CTL;
1878                         /* only 0 or all 1s can be written to IA32_MCi_CTL
1879                          * some Linux kernels though clear bit 10 in bank 4 to
1880                          * workaround a BIOS/GART TBL issue on AMD K8s, ignore
1881                          * this to avoid an uncatched #GP in the guest
1882                          */
1883                         if ((offset & 0x3) == 0 &&
1884                             data != 0 && (data | (1 << 10)) != ~(u64)0)
1885                                 return -1;
1886                         vcpu->arch.mce_banks[offset] = data;
1887                         break;
1888                 }
1889                 return 1;
1890         }
1891         return 0;
1892 }
1893
1894 static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
1895 {
1896         struct kvm *kvm = vcpu->kvm;
1897         int lm = is_long_mode(vcpu);
1898         u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
1899                 : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
1900         u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
1901                 : kvm->arch.xen_hvm_config.blob_size_32;
1902         u32 page_num = data & ~PAGE_MASK;
1903         u64 page_addr = data & PAGE_MASK;
1904         u8 *page;
1905         int r;
1906
1907         r = -E2BIG;
1908         if (page_num >= blob_size)
1909                 goto out;
1910         r = -ENOMEM;
1911         page = memdup_user(blob_addr + (page_num * PAGE_SIZE), PAGE_SIZE);
1912         if (IS_ERR(page)) {
1913                 r = PTR_ERR(page);
1914                 goto out;
1915         }
1916         if (kvm_write_guest(kvm, page_addr, page, PAGE_SIZE))
1917                 goto out_free;
1918         r = 0;
1919 out_free:
1920         kfree(page);
1921 out:
1922         return r;
1923 }
1924
1925 static bool kvm_hv_hypercall_enabled(struct kvm *kvm)
1926 {
1927         return kvm->arch.hv_hypercall & HV_X64_MSR_HYPERCALL_ENABLE;
1928 }
1929
1930 static bool kvm_hv_msr_partition_wide(u32 msr)
1931 {
1932         bool r = false;
1933         switch (msr) {
1934         case HV_X64_MSR_GUEST_OS_ID:
1935         case HV_X64_MSR_HYPERCALL:
1936         case HV_X64_MSR_REFERENCE_TSC:
1937         case HV_X64_MSR_TIME_REF_COUNT:
1938                 r = true;
1939                 break;
1940         }
1941
1942         return r;
1943 }
1944
1945 static int set_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data)
1946 {
1947         struct kvm *kvm = vcpu->kvm;
1948
1949         switch (msr) {
1950         case HV_X64_MSR_GUEST_OS_ID:
1951                 kvm->arch.hv_guest_os_id = data;
1952                 /* setting guest os id to zero disables hypercall page */
1953                 if (!kvm->arch.hv_guest_os_id)
1954                         kvm->arch.hv_hypercall &= ~HV_X64_MSR_HYPERCALL_ENABLE;
1955                 break;
1956         case HV_X64_MSR_HYPERCALL: {
1957                 u64 gfn;
1958                 unsigned long addr;
1959                 u8 instructions[4];
1960
1961                 /* if guest os id is not set hypercall should remain disabled */
1962                 if (!kvm->arch.hv_guest_os_id)
1963                         break;
1964                 if (!(data & HV_X64_MSR_HYPERCALL_ENABLE)) {
1965                         kvm->arch.hv_hypercall = data;
1966                         break;
1967                 }
1968                 gfn = data >> HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT;
1969                 addr = gfn_to_hva(kvm, gfn);
1970                 if (kvm_is_error_hva(addr))
1971                         return 1;
1972                 kvm_x86_ops->patch_hypercall(vcpu, instructions);
1973                 ((unsigned char *)instructions)[3] = 0xc3; /* ret */
1974                 if (__copy_to_user((void __user *)addr, instructions, 4))
1975                         return 1;
1976                 kvm->arch.hv_hypercall = data;
1977                 mark_page_dirty(kvm, gfn);
1978                 break;
1979         }
1980         case HV_X64_MSR_REFERENCE_TSC: {
1981                 u64 gfn;
1982                 HV_REFERENCE_TSC_PAGE tsc_ref;
1983                 memset(&tsc_ref, 0, sizeof(tsc_ref));
1984                 kvm->arch.hv_tsc_page = data;
1985                 if (!(data & HV_X64_MSR_TSC_REFERENCE_ENABLE))
1986                         break;
1987                 gfn = data >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT;
1988                 if (kvm_write_guest(kvm, gfn << HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT,
1989                         &tsc_ref, sizeof(tsc_ref)))
1990                         return 1;
1991                 mark_page_dirty(kvm, gfn);
1992                 break;
1993         }
1994         default:
1995                 vcpu_unimpl(vcpu, "HYPER-V unimplemented wrmsr: 0x%x "
1996                             "data 0x%llx\n", msr, data);
1997                 return 1;
1998         }
1999         return 0;
2000 }
2001
2002 static int set_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 data)
2003 {
2004         switch (msr) {
2005         case HV_X64_MSR_APIC_ASSIST_PAGE: {
2006                 u64 gfn;
2007                 unsigned long addr;
2008
2009                 if (!(data & HV_X64_MSR_APIC_ASSIST_PAGE_ENABLE)) {
2010                         vcpu->arch.hv_vapic = data;
2011                         if (kvm_lapic_enable_pv_eoi(vcpu, 0))
2012                                 return 1;
2013                         break;
2014                 }
2015                 gfn = data >> HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT;
2016                 addr = gfn_to_hva(vcpu->kvm, gfn);
2017                 if (kvm_is_error_hva(addr))
2018                         return 1;
2019                 if (__clear_user((void __user *)addr, PAGE_SIZE))
2020                         return 1;
2021                 vcpu->arch.hv_vapic = data;
2022                 mark_page_dirty(vcpu->kvm, gfn);
2023                 if (kvm_lapic_enable_pv_eoi(vcpu, gfn_to_gpa(gfn) | KVM_MSR_ENABLED))
2024                         return 1;
2025                 break;
2026         }
2027         case HV_X64_MSR_EOI:
2028                 return kvm_hv_vapic_msr_write(vcpu, APIC_EOI, data);
2029         case HV_X64_MSR_ICR:
2030                 return kvm_hv_vapic_msr_write(vcpu, APIC_ICR, data);
2031         case HV_X64_MSR_TPR:
2032                 return kvm_hv_vapic_msr_write(vcpu, APIC_TASKPRI, data);
2033         default:
2034                 vcpu_unimpl(vcpu, "HYPER-V unimplemented wrmsr: 0x%x "
2035                             "data 0x%llx\n", msr, data);
2036                 return 1;
2037         }
2038
2039         return 0;
2040 }
2041
2042 static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
2043 {
2044         gpa_t gpa = data & ~0x3f;
2045
2046         /* Bits 2:5 are reserved, Should be zero */
2047         if (data & 0x3c)
2048                 return 1;
2049
2050         vcpu->arch.apf.msr_val = data;
2051
2052         if (!(data & KVM_ASYNC_PF_ENABLED)) {
2053                 kvm_clear_async_pf_completion_queue(vcpu);
2054                 kvm_async_pf_hash_reset(vcpu);
2055                 return 0;
2056         }
2057
2058         if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa,
2059                                         sizeof(u32)))
2060                 return 1;
2061
2062         vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS);
2063         kvm_async_pf_wakeup_all(vcpu);
2064         return 0;
2065 }
2066
2067 static void kvmclock_reset(struct kvm_vcpu *vcpu)
2068 {
2069         vcpu->arch.pv_time_enabled = false;
2070 }
2071
2072 static void accumulate_steal_time(struct kvm_vcpu *vcpu)
2073 {
2074         u64 delta;
2075
2076         if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
2077                 return;
2078
2079         delta = current->sched_info.run_delay - vcpu->arch.st.last_steal;
2080         vcpu->arch.st.last_steal = current->sched_info.run_delay;
2081         vcpu->arch.st.accum_steal = delta;
2082 }
2083
2084 static void record_steal_time(struct kvm_vcpu *vcpu)
2085 {
2086         if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
2087                 return;
2088
2089         if (unlikely(kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
2090                 &vcpu->arch.st.steal, sizeof(struct kvm_steal_time))))
2091                 return;
2092
2093         vcpu->arch.st.steal.steal += vcpu->arch.st.accum_steal;
2094         vcpu->arch.st.steal.version += 2;
2095         vcpu->arch.st.accum_steal = 0;
2096
2097         kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
2098                 &vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
2099 }
2100
2101 int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
2102 {
2103         bool pr = false;
2104         u32 msr = msr_info->index;
2105         u64 data = msr_info->data;
2106
2107         switch (msr) {
2108         case MSR_AMD64_NB_CFG:
2109         case MSR_IA32_UCODE_REV:
2110         case MSR_IA32_UCODE_WRITE:
2111         case MSR_VM_HSAVE_PA:
2112         case MSR_AMD64_PATCH_LOADER:
2113         case MSR_AMD64_BU_CFG2:
2114                 break;
2115
2116         case MSR_EFER:
2117                 return set_efer(vcpu, data);
2118         case MSR_K7_HWCR:
2119                 data &= ~(u64)0x40;     /* ignore flush filter disable */
2120                 data &= ~(u64)0x100;    /* ignore ignne emulation enable */
2121                 data &= ~(u64)0x8;      /* ignore TLB cache disable */
2122                 data &= ~(u64)0x40000;  /* ignore Mc status write enable */
2123                 if (data != 0) {
2124                         vcpu_unimpl(vcpu, "unimplemented HWCR wrmsr: 0x%llx\n",
2125                                     data);
2126                         return 1;
2127                 }
2128                 break;
2129         case MSR_FAM10H_MMIO_CONF_BASE:
2130                 if (data != 0) {
2131                         vcpu_unimpl(vcpu, "unimplemented MMIO_CONF_BASE wrmsr: "
2132                                     "0x%llx\n", data);
2133                         return 1;
2134                 }
2135                 break;
2136         case MSR_IA32_DEBUGCTLMSR:
2137                 if (!data) {
2138                         /* We support the non-activated case already */
2139                         break;
2140                 } else if (data & ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_BTF)) {
2141                         /* Values other than LBR and BTF are vendor-specific,
2142                            thus reserved and should throw a #GP */
2143                         return 1;
2144                 }
2145                 vcpu_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTLMSR 0x%llx, nop\n",
2146                             __func__, data);
2147                 break;
2148         case 0x200 ... 0x2ff:
2149                 return set_msr_mtrr(vcpu, msr, data);
2150         case MSR_IA32_APICBASE:
2151                 return kvm_set_apic_base(vcpu, msr_info);
2152         case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
2153                 return kvm_x2apic_msr_write(vcpu, msr, data);
2154         case MSR_IA32_TSCDEADLINE:
2155                 kvm_set_lapic_tscdeadline_msr(vcpu, data);
2156                 break;
2157         case MSR_IA32_TSC_ADJUST:
2158                 if (guest_cpuid_has_tsc_adjust(vcpu)) {
2159                         if (!msr_info->host_initiated) {
2160                                 s64 adj = data - vcpu->arch.ia32_tsc_adjust_msr;
2161                                 kvm_x86_ops->adjust_tsc_offset(vcpu, adj, true);
2162                         }
2163                         vcpu->arch.ia32_tsc_adjust_msr = data;
2164                 }
2165                 break;
2166         case MSR_IA32_MISC_ENABLE:
2167                 vcpu->arch.ia32_misc_enable_msr = data;
2168                 break;
2169         case MSR_KVM_WALL_CLOCK_NEW:
2170         case MSR_KVM_WALL_CLOCK:
2171                 vcpu->kvm->arch.wall_clock = data;
2172                 kvm_write_wall_clock(vcpu->kvm, data);
2173                 break;
2174         case MSR_KVM_SYSTEM_TIME_NEW:
2175         case MSR_KVM_SYSTEM_TIME: {
2176                 u64 gpa_offset;
2177                 kvmclock_reset(vcpu);
2178
2179                 vcpu->arch.time = data;
2180                 kvm_make_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu);
2181
2182                 /* we verify if the enable bit is set... */
2183                 if (!(data & 1))
2184                         break;
2185
2186                 gpa_offset = data & ~(PAGE_MASK | 1);
2187
2188                 if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
2189                      &vcpu->arch.pv_time, data & ~1ULL,
2190                      sizeof(struct pvclock_vcpu_time_info)))
2191                         vcpu->arch.pv_time_enabled = false;
2192                 else
2193                         vcpu->arch.pv_time_enabled = true;
2194
2195                 break;
2196         }
2197         case MSR_KVM_ASYNC_PF_EN:
2198                 if (kvm_pv_enable_async_pf(vcpu, data))
2199                         return 1;
2200                 break;
2201         case MSR_KVM_STEAL_TIME:
2202
2203                 if (unlikely(!sched_info_on()))
2204                         return 1;
2205
2206                 if (data & KVM_STEAL_RESERVED_MASK)
2207                         return 1;
2208
2209                 if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.st.stime,
2210                                                 data & KVM_STEAL_VALID_BITS,
2211                                                 sizeof(struct kvm_steal_time)))
2212                         return 1;
2213
2214                 vcpu->arch.st.msr_val = data;
2215
2216                 if (!(data & KVM_MSR_ENABLED))
2217                         break;
2218
2219                 vcpu->arch.st.last_steal = current->sched_info.run_delay;
2220
2221                 preempt_disable();
2222                 accumulate_steal_time(vcpu);
2223                 preempt_enable();
2224
2225                 kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
2226
2227                 break;
2228         case MSR_KVM_PV_EOI_EN:
2229                 if (kvm_lapic_enable_pv_eoi(vcpu, data))
2230                         return 1;
2231                 break;
2232
2233         case MSR_IA32_MCG_CTL:
2234         case MSR_IA32_MCG_STATUS:
2235         case MSR_IA32_MC0_CTL ... MSR_IA32_MCx_CTL(KVM_MAX_MCE_BANKS) - 1:
2236                 return set_msr_mce(vcpu, msr, data);
2237
2238         /* Performance counters are not protected by a CPUID bit,
2239          * so we should check all of them in the generic path for the sake of
2240          * cross vendor migration.
2241          * Writing a zero into the event select MSRs disables them,
2242          * which we perfectly emulate ;-). Any other value should be at least
2243          * reported, some guests depend on them.
2244          */
2245         case MSR_K7_EVNTSEL0:
2246         case MSR_K7_EVNTSEL1:
2247         case MSR_K7_EVNTSEL2:
2248         case MSR_K7_EVNTSEL3:
2249                 if (data != 0)
2250                         vcpu_unimpl(vcpu, "unimplemented perfctr wrmsr: "
2251                                     "0x%x data 0x%llx\n", msr, data);
2252                 break;
2253         /* at least RHEL 4 unconditionally writes to the perfctr registers,
2254          * so we ignore writes to make it happy.
2255          */
2256         case MSR_K7_PERFCTR0:
2257         case MSR_K7_PERFCTR1:
2258         case MSR_K7_PERFCTR2:
2259         case MSR_K7_PERFCTR3:
2260                 vcpu_unimpl(vcpu, "unimplemented perfctr wrmsr: "
2261                             "0x%x data 0x%llx\n", msr, data);
2262                 break;
2263         case MSR_P6_PERFCTR0:
2264         case MSR_P6_PERFCTR1:
2265                 pr = true;
2266         case MSR_P6_EVNTSEL0:
2267         case MSR_P6_EVNTSEL1:
2268                 if (kvm_pmu_msr(vcpu, msr))
2269                         return kvm_pmu_set_msr(vcpu, msr_info);
2270
2271                 if (pr || data != 0)
2272                         vcpu_unimpl(vcpu, "disabled perfctr wrmsr: "
2273                                     "0x%x data 0x%llx\n", msr, data);
2274                 break;
2275         case MSR_K7_CLK_CTL:
2276                 /*
2277                  * Ignore all writes to this no longer documented MSR.
2278                  * Writes are only relevant for old K7 processors,
2279                  * all pre-dating SVM, but a recommended workaround from
2280                  * AMD for these chips. It is possible to specify the
2281                  * affected processor models on the command line, hence
2282                  * the need to ignore the workaround.
2283                  */
2284                 break;
2285         case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
2286                 if (kvm_hv_msr_partition_wide(msr)) {
2287                         int r;
2288                         mutex_lock(&vcpu->kvm->lock);
2289                         r = set_msr_hyperv_pw(vcpu, msr, data);
2290                         mutex_unlock(&vcpu->kvm->lock);
2291                         return r;
2292                 } else
2293                         return set_msr_hyperv(vcpu, msr, data);
2294                 break;
2295         case MSR_IA32_BBL_CR_CTL3:
2296                 /* Drop writes to this legacy MSR -- see rdmsr
2297                  * counterpart for further detail.
2298                  */
2299                 vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data %llx\n", msr, data);
2300                 break;
2301         case MSR_AMD64_OSVW_ID_LENGTH:
2302                 if (!guest_cpuid_has_osvw(vcpu))
2303                         return 1;
2304                 vcpu->arch.osvw.length = data;
2305                 break;
2306         case MSR_AMD64_OSVW_STATUS:
2307                 if (!guest_cpuid_has_osvw(vcpu))
2308                         return 1;
2309                 vcpu->arch.osvw.status = data;
2310                 break;
2311         default:
2312                 if (msr && (msr == vcpu->kvm->arch.xen_hvm_config.msr))
2313                         return xen_hvm_config(vcpu, data);
2314                 if (kvm_pmu_msr(vcpu, msr))
2315                         return kvm_pmu_set_msr(vcpu, msr_info);
2316                 if (!ignore_msrs) {
2317                         vcpu_unimpl(vcpu, "unhandled wrmsr: 0x%x data %llx\n",
2318                                     msr, data);
2319                         return 1;
2320                 } else {
2321                         vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data %llx\n",
2322                                     msr, data);
2323                         break;
2324                 }
2325         }
2326         return 0;
2327 }
2328 EXPORT_SYMBOL_GPL(kvm_set_msr_common);
2329
2330
2331 /*
2332  * Reads an msr value (of 'msr_index') into 'pdata'.
2333  * Returns 0 on success, non-0 otherwise.
2334  * Assumes vcpu_load() was already called.
2335  */
2336 int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
2337 {
2338         return kvm_x86_ops->get_msr(vcpu, msr_index, pdata);
2339 }
2340 EXPORT_SYMBOL_GPL(kvm_get_msr);
2341
2342 static int get_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
2343 {
2344         u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges;
2345
2346         if (!msr_mtrr_valid(msr))
2347                 return 1;
2348
2349         if (msr == MSR_MTRRdefType)
2350                 *pdata = vcpu->arch.mtrr_state.def_type +
2351                          (vcpu->arch.mtrr_state.enabled << 10);
2352         else if (msr == MSR_MTRRfix64K_00000)
2353                 *pdata = p[0];
2354         else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000)
2355                 *pdata = p[1 + msr - MSR_MTRRfix16K_80000];
2356         else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000)
2357                 *pdata = p[3 + msr - MSR_MTRRfix4K_C0000];
2358         else if (msr == MSR_IA32_CR_PAT)
2359                 *pdata = vcpu->arch.pat;
2360         else {  /* Variable MTRRs */
2361                 int idx, is_mtrr_mask;
2362                 u64 *pt;
2363
2364                 idx = (msr - 0x200) / 2;
2365                 is_mtrr_mask = msr - 0x200 - 2 * idx;
2366                 if (!is_mtrr_mask)
2367                         pt =
2368                           (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo;
2369                 else
2370                         pt =
2371                           (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo;
2372                 *pdata = *pt;
2373         }
2374
2375         return 0;
2376 }
2377
2378 static int get_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
2379 {
2380         u64 data;
2381         u64 mcg_cap = vcpu->arch.mcg_cap;
2382         unsigned bank_num = mcg_cap & 0xff;
2383
2384         switch (msr) {
2385         case MSR_IA32_P5_MC_ADDR:
2386         case MSR_IA32_P5_MC_TYPE:
2387                 data = 0;
2388                 break;
2389         case MSR_IA32_MCG_CAP:
2390                 data = vcpu->arch.mcg_cap;
2391                 break;
2392         case MSR_IA32_MCG_CTL:
2393                 if (!(mcg_cap & MCG_CTL_P))
2394                         return 1;
2395                 data = vcpu->arch.mcg_ctl;
2396                 break;
2397         case MSR_IA32_MCG_STATUS:
2398                 data = vcpu->arch.mcg_status;
2399                 break;
2400         default:
2401                 if (msr >= MSR_IA32_MC0_CTL &&
2402                     msr < MSR_IA32_MCx_CTL(bank_num)) {
2403                         u32 offset = msr - MSR_IA32_MC0_CTL;
2404                         data = vcpu->arch.mce_banks[offset];
2405                         break;
2406                 }
2407                 return 1;
2408         }
2409         *pdata = data;
2410         return 0;
2411 }
2412
2413 static int get_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
2414 {
2415         u64 data = 0;
2416         struct kvm *kvm = vcpu->kvm;
2417
2418         switch (msr) {
2419         case HV_X64_MSR_GUEST_OS_ID:
2420                 data = kvm->arch.hv_guest_os_id;
2421                 break;
2422         case HV_X64_MSR_HYPERCALL:
2423                 data = kvm->arch.hv_hypercall;
2424                 break;
2425         case HV_X64_MSR_TIME_REF_COUNT: {
2426                 data =
2427                      div_u64(get_kernel_ns() + kvm->arch.kvmclock_offset, 100);
2428                 break;
2429         }
2430         case HV_X64_MSR_REFERENCE_TSC:
2431                 data = kvm->arch.hv_tsc_page;
2432                 break;
2433         default:
2434                 vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
2435                 return 1;
2436         }
2437
2438         *pdata = data;
2439         return 0;
2440 }
2441
2442 static int get_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
2443 {
2444         u64 data = 0;
2445
2446         switch (msr) {
2447         case HV_X64_MSR_VP_INDEX: {
2448                 int r;
2449                 struct kvm_vcpu *v;
2450                 kvm_for_each_vcpu(r, v, vcpu->kvm) {
2451                         if (v == vcpu) {
2452                                 data = r;
2453                                 break;
2454                         }
2455                 }
2456                 break;
2457         }
2458         case HV_X64_MSR_EOI:
2459                 return kvm_hv_vapic_msr_read(vcpu, APIC_EOI, pdata);
2460         case HV_X64_MSR_ICR:
2461                 return kvm_hv_vapic_msr_read(vcpu, APIC_ICR, pdata);
2462         case HV_X64_MSR_TPR:
2463                 return kvm_hv_vapic_msr_read(vcpu, APIC_TASKPRI, pdata);
2464         case HV_X64_MSR_APIC_ASSIST_PAGE:
2465                 data = vcpu->arch.hv_vapic;
2466                 break;
2467         default:
2468                 vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
2469                 return 1;
2470         }
2471         *pdata = data;
2472         return 0;
2473 }
2474
2475 int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
2476 {
2477         u64 data;
2478
2479         switch (msr) {
2480         case MSR_IA32_PLATFORM_ID:
2481         case MSR_IA32_EBL_CR_POWERON:
2482         case MSR_IA32_DEBUGCTLMSR:
2483         case MSR_IA32_LASTBRANCHFROMIP:
2484         case MSR_IA32_LASTBRANCHTOIP:
2485         case MSR_IA32_LASTINTFROMIP:
2486         case MSR_IA32_LASTINTTOIP:
2487         case MSR_K8_SYSCFG:
2488         case MSR_K7_HWCR:
2489         case MSR_VM_HSAVE_PA:
2490         case MSR_K7_EVNTSEL0:
2491         case MSR_K7_EVNTSEL1:
2492         case MSR_K7_EVNTSEL2:
2493         case MSR_K7_EVNTSEL3:
2494         case MSR_K7_PERFCTR0:
2495         case MSR_K7_PERFCTR1:
2496         case MSR_K7_PERFCTR2:
2497         case MSR_K7_PERFCTR3:
2498         case MSR_K8_INT_PENDING_MSG:
2499         case MSR_AMD64_NB_CFG:
2500         case MSR_FAM10H_MMIO_CONF_BASE:
2501         case MSR_AMD64_BU_CFG2:
2502                 data = 0;
2503                 break;
2504         case MSR_P6_PERFCTR0:
2505         case MSR_P6_PERFCTR1:
2506         case MSR_P6_EVNTSEL0:
2507         case MSR_P6_EVNTSEL1:
2508                 if (kvm_pmu_msr(vcpu, msr))
2509                         return kvm_pmu_get_msr(vcpu, msr, pdata);
2510                 data = 0;
2511                 break;
2512         case MSR_IA32_UCODE_REV:
2513                 data = 0x100000000ULL;
2514                 break;
2515         case MSR_MTRRcap:
2516                 data = 0x500 | KVM_NR_VAR_MTRR;
2517                 break;
2518         case 0x200 ... 0x2ff:
2519                 return get_msr_mtrr(vcpu, msr, pdata);
2520         case 0xcd: /* fsb frequency */
2521                 data = 3;
2522                 break;
2523                 /*
2524                  * MSR_EBC_FREQUENCY_ID
2525                  * Conservative value valid for even the basic CPU models.
2526                  * Models 0,1: 000 in bits 23:21 indicating a bus speed of
2527                  * 100MHz, model 2 000 in bits 18:16 indicating 100MHz,
2528                  * and 266MHz for model 3, or 4. Set Core Clock
2529                  * Frequency to System Bus Frequency Ratio to 1 (bits
2530                  * 31:24) even though these are only valid for CPU
2531                  * models > 2, however guests may end up dividing or
2532                  * multiplying by zero otherwise.
2533                  */
2534         case MSR_EBC_FREQUENCY_ID:
2535                 data = 1 << 24;
2536                 break;
2537         case MSR_IA32_APICBASE:
2538                 data = kvm_get_apic_base(vcpu);
2539                 break;
2540         case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
2541                 return kvm_x2apic_msr_read(vcpu, msr, pdata);
2542                 break;
2543         case MSR_IA32_TSCDEADLINE:
2544                 data = kvm_get_lapic_tscdeadline_msr(vcpu);
2545                 break;
2546         case MSR_IA32_TSC_ADJUST:
2547                 data = (u64)vcpu->arch.ia32_tsc_adjust_msr;
2548                 break;
2549         case MSR_IA32_MISC_ENABLE:
2550                 data = vcpu->arch.ia32_misc_enable_msr;
2551                 break;
2552         case MSR_IA32_PERF_STATUS:
2553                 /* TSC increment by tick */
2554                 data = 1000ULL;
2555                 /* CPU multiplier */
2556                 data |= (((uint64_t)4ULL) << 40);
2557                 break;
2558         case MSR_EFER:
2559                 data = vcpu->arch.efer;
2560                 break;
2561         case MSR_KVM_WALL_CLOCK:
2562         case MSR_KVM_WALL_CLOCK_NEW:
2563                 data = vcpu->kvm->arch.wall_clock;
2564                 break;
2565         case MSR_KVM_SYSTEM_TIME:
2566         case MSR_KVM_SYSTEM_TIME_NEW:
2567                 data = vcpu->arch.time;
2568                 break;
2569         case MSR_KVM_ASYNC_PF_EN:
2570                 data = vcpu->arch.apf.msr_val;
2571                 break;
2572         case MSR_KVM_STEAL_TIME:
2573                 data = vcpu->arch.st.msr_val;
2574                 break;
2575         case MSR_KVM_PV_EOI_EN:
2576                 data = vcpu->arch.pv_eoi.msr_val;
2577                 break;
2578         case MSR_IA32_P5_MC_ADDR:
2579         case MSR_IA32_P5_MC_TYPE:
2580         case MSR_IA32_MCG_CAP:
2581         case MSR_IA32_MCG_CTL:
2582         case MSR_IA32_MCG_STATUS:
2583         case MSR_IA32_MC0_CTL ... MSR_IA32_MCx_CTL(KVM_MAX_MCE_BANKS) - 1:
2584                 return get_msr_mce(vcpu, msr, pdata);
2585         case MSR_K7_CLK_CTL:
2586                 /*
2587                  * Provide expected ramp-up count for K7. All other
2588                  * are set to zero, indicating minimum divisors for
2589                  * every field.
2590                  *
2591                  * This prevents guest kernels on AMD host with CPU
2592                  * type 6, model 8 and higher from exploding due to
2593                  * the rdmsr failing.
2594                  */
2595                 data = 0x20000000;
2596                 break;
2597         case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
2598                 if (kvm_hv_msr_partition_wide(msr)) {
2599                         int r;
2600                         mutex_lock(&vcpu->kvm->lock);
2601                         r = get_msr_hyperv_pw(vcpu, msr, pdata);
2602                         mutex_unlock(&vcpu->kvm->lock);
2603                         return r;
2604                 } else
2605                         return get_msr_hyperv(vcpu, msr, pdata);
2606                 break;
2607         case MSR_IA32_BBL_CR_CTL3:
2608                 /* This legacy MSR exists but isn't fully documented in current
2609                  * silicon.  It is however accessed by winxp in very narrow
2610                  * scenarios where it sets bit #19, itself documented as
2611                  * a "reserved" bit.  Best effort attempt to source coherent
2612                  * read data here should the balance of the register be
2613                  * interpreted by the guest:
2614                  *
2615                  * L2 cache control register 3: 64GB range, 256KB size,
2616                  * enabled, latency 0x1, configured
2617                  */
2618                 data = 0xbe702111;
2619                 break;
2620         case MSR_AMD64_OSVW_ID_LENGTH:
2621                 if (!guest_cpuid_has_osvw(vcpu))
2622                         return 1;
2623                 data = vcpu->arch.osvw.length;
2624                 break;
2625         case MSR_AMD64_OSVW_STATUS:
2626                 if (!guest_cpuid_has_osvw(vcpu))
2627                         return 1;
2628                 data = vcpu->arch.osvw.status;
2629                 break;
2630         default:
2631                 if (kvm_pmu_msr(vcpu, msr))
2632                         return kvm_pmu_get_msr(vcpu, msr, pdata);
2633                 if (!ignore_msrs) {
2634                         vcpu_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr);
2635                         return 1;
2636                 } else {
2637                         vcpu_unimpl(vcpu, "ignored rdmsr: 0x%x\n", msr);
2638                         data = 0;
2639                 }
2640                 break;
2641         }
2642         *pdata = data;
2643         return 0;
2644 }
2645 EXPORT_SYMBOL_GPL(kvm_get_msr_common);
2646
2647 /*
2648  * Read or write a bunch of msrs. All parameters are kernel addresses.
2649  *
2650  * @return number of msrs set successfully.
2651  */
2652 static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs,
2653                     struct kvm_msr_entry *entries,
2654                     int (*do_msr)(struct kvm_vcpu *vcpu,
2655                                   unsigned index, u64 *data))
2656 {
2657         int i, idx;
2658
2659         idx = srcu_read_lock(&vcpu->kvm->srcu);
2660         for (i = 0; i < msrs->nmsrs; ++i)
2661                 if (do_msr(vcpu, entries[i].index, &entries[i].data))
2662                         break;
2663         srcu_read_unlock(&vcpu->kvm->srcu, idx);
2664
2665         return i;
2666 }
2667
2668 /*
2669  * Read or write a bunch of msrs. Parameters are user addresses.
2670  *
2671  * @return number of msrs set successfully.
2672  */
2673 static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs,
2674                   int (*do_msr)(struct kvm_vcpu *vcpu,
2675                                 unsigned index, u64 *data),
2676                   int writeback)
2677 {
2678         struct kvm_msrs msrs;
2679         struct kvm_msr_entry *entries;
2680         int r, n;
2681         unsigned size;
2682
2683         r = -EFAULT;
2684         if (copy_from_user(&msrs, user_msrs, sizeof msrs))
2685                 goto out;
2686
2687         r = -E2BIG;
2688         if (msrs.nmsrs >= MAX_IO_MSRS)
2689                 goto out;
2690
2691         size = sizeof(struct kvm_msr_entry) * msrs.nmsrs;
2692         entries = memdup_user(user_msrs->entries, size);
2693         if (IS_ERR(entries)) {
2694                 r = PTR_ERR(entries);
2695                 goto out;
2696         }
2697
2698         r = n = __msr_io(vcpu, &msrs, entries, do_msr);
2699         if (r < 0)
2700                 goto out_free;
2701
2702         r = -EFAULT;
2703         if (writeback && copy_to_user(user_msrs->entries, entries, size))
2704                 goto out_free;
2705
2706         r = n;
2707
2708 out_free:
2709         kfree(entries);
2710 out:
2711         return r;
2712 }
2713
2714 int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
2715 {
2716         int r;
2717
2718         switch (ext) {
2719         case KVM_CAP_IRQCHIP:
2720         case KVM_CAP_HLT:
2721         case KVM_CAP_MMU_SHADOW_CACHE_CONTROL:
2722         case KVM_CAP_SET_TSS_ADDR:
2723         case KVM_CAP_EXT_CPUID:
2724         case KVM_CAP_EXT_EMUL_CPUID:
2725         case KVM_CAP_CLOCKSOURCE:
2726         case KVM_CAP_PIT:
2727         case KVM_CAP_NOP_IO_DELAY:
2728         case KVM_CAP_MP_STATE:
2729         case KVM_CAP_SYNC_MMU:
2730         case KVM_CAP_USER_NMI:
2731         case KVM_CAP_REINJECT_CONTROL:
2732         case KVM_CAP_IRQ_INJECT_STATUS:
2733         case KVM_CAP_IRQFD:
2734         case KVM_CAP_IOEVENTFD:
2735         case KVM_CAP_IOEVENTFD_NO_LENGTH:
2736         case KVM_CAP_PIT2:
2737         case KVM_CAP_PIT_STATE2:
2738         case KVM_CAP_SET_IDENTITY_MAP_ADDR:
2739         case KVM_CAP_XEN_HVM:
2740         case KVM_CAP_ADJUST_CLOCK:
2741         case KVM_CAP_VCPU_EVENTS:
2742         case KVM_CAP_HYPERV:
2743         case KVM_CAP_HYPERV_VAPIC:
2744         case KVM_CAP_HYPERV_SPIN:
2745         case KVM_CAP_PCI_SEGMENT:
2746         case KVM_CAP_DEBUGREGS:
2747         case KVM_CAP_X86_ROBUST_SINGLESTEP:
2748         case KVM_CAP_XSAVE:
2749         case KVM_CAP_ASYNC_PF:
2750         case KVM_CAP_GET_TSC_KHZ:
2751         case KVM_CAP_KVMCLOCK_CTRL:
2752         case KVM_CAP_READONLY_MEM:
2753         case KVM_CAP_HYPERV_TIME:
2754         case KVM_CAP_IOAPIC_POLARITY_IGNORED:
2755         case KVM_CAP_TSC_DEADLINE_TIMER:
2756 #ifdef CONFIG_KVM_DEVICE_ASSIGNMENT
2757         case KVM_CAP_ASSIGN_DEV_IRQ:
2758         case KVM_CAP_PCI_2_3:
2759 #endif
2760                 r = 1;
2761                 break;
2762         case KVM_CAP_COALESCED_MMIO:
2763                 r = KVM_COALESCED_MMIO_PAGE_OFFSET;
2764                 break;
2765         case KVM_CAP_VAPIC:
2766                 r = !kvm_x86_ops->cpu_has_accelerated_tpr();
2767                 break;
2768         case KVM_CAP_NR_VCPUS:
2769                 r = KVM_SOFT_MAX_VCPUS;
2770                 break;
2771         case KVM_CAP_MAX_VCPUS:
2772                 r = KVM_MAX_VCPUS;
2773                 break;
2774         case KVM_CAP_NR_MEMSLOTS:
2775                 r = KVM_USER_MEM_SLOTS;
2776                 break;
2777         case KVM_CAP_PV_MMU:    /* obsolete */
2778                 r = 0;
2779                 break;
2780 #ifdef CONFIG_KVM_DEVICE_ASSIGNMENT
2781         case KVM_CAP_IOMMU:
2782                 r = iommu_present(&pci_bus_type);
2783                 break;
2784 #endif
2785         case KVM_CAP_MCE:
2786                 r = KVM_MAX_MCE_BANKS;
2787                 break;
2788         case KVM_CAP_XCRS:
2789                 r = cpu_has_xsave;
2790                 break;
2791         case KVM_CAP_TSC_CONTROL:
2792                 r = kvm_has_tsc_control;
2793                 break;
2794         default:
2795                 r = 0;
2796                 break;
2797         }
2798         return r;
2799
2800 }
2801
2802 long kvm_arch_dev_ioctl(struct file *filp,
2803                         unsigned int ioctl, unsigned long arg)
2804 {
2805         void __user *argp = (void __user *)arg;
2806         long r;
2807
2808         switch (ioctl) {
2809         case KVM_GET_MSR_INDEX_LIST: {
2810                 struct kvm_msr_list __user *user_msr_list = argp;
2811                 struct kvm_msr_list msr_list;
2812                 unsigned n;
2813
2814                 r = -EFAULT;
2815                 if (copy_from_user(&msr_list, user_msr_list, sizeof msr_list))
2816                         goto out;
2817                 n = msr_list.nmsrs;
2818                 msr_list.nmsrs = num_msrs_to_save + ARRAY_SIZE(emulated_msrs);
2819                 if (copy_to_user(user_msr_list, &msr_list, sizeof msr_list))
2820                         goto out;
2821                 r = -E2BIG;
2822                 if (n < msr_list.nmsrs)
2823                         goto out;
2824                 r = -EFAULT;
2825                 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
2826                                  num_msrs_to_save * sizeof(u32)))
2827                         goto out;
2828                 if (copy_to_user(user_msr_list->indices + num_msrs_to_save,
2829                                  &emulated_msrs,
2830                                  ARRAY_SIZE(emulated_msrs) * sizeof(u32)))
2831                         goto out;
2832                 r = 0;
2833                 break;
2834         }
2835         case KVM_GET_SUPPORTED_CPUID:
2836         case KVM_GET_EMULATED_CPUID: {
2837                 struct kvm_cpuid2 __user *cpuid_arg = argp;
2838                 struct kvm_cpuid2 cpuid;
2839
2840                 r = -EFAULT;
2841                 if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
2842                         goto out;
2843
2844                 r = kvm_dev_ioctl_get_cpuid(&cpuid, cpuid_arg->entries,
2845                                             ioctl);
2846                 if (r)
2847                         goto out;
2848
2849                 r = -EFAULT;
2850                 if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
2851                         goto out;
2852                 r = 0;
2853                 break;
2854         }
2855         case KVM_X86_GET_MCE_CAP_SUPPORTED: {
2856                 u64 mce_cap;
2857
2858                 mce_cap = KVM_MCE_CAP_SUPPORTED;
2859                 r = -EFAULT;
2860                 if (copy_to_user(argp, &mce_cap, sizeof mce_cap))
2861                         goto out;
2862                 r = 0;
2863                 break;
2864         }
2865         default:
2866                 r = -EINVAL;
2867         }
2868 out:
2869         return r;
2870 }
2871
2872 static void wbinvd_ipi(void *garbage)
2873 {
2874         wbinvd();
2875 }
2876
2877 static bool need_emulate_wbinvd(struct kvm_vcpu *vcpu)
2878 {
2879         return kvm_arch_has_noncoherent_dma(vcpu->kvm);
2880 }
2881
2882 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
2883 {
2884         /* Address WBINVD may be executed by guest */
2885         if (need_emulate_wbinvd(vcpu)) {
2886                 if (kvm_x86_ops->has_wbinvd_exit())
2887                         cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask);
2888                 else if (vcpu->cpu != -1 && vcpu->cpu != cpu)
2889                         smp_call_function_single(vcpu->cpu,
2890                                         wbinvd_ipi, NULL, 1);
2891         }
2892
2893         kvm_x86_ops->vcpu_load(vcpu, cpu);
2894
2895         /* Apply any externally detected TSC adjustments (due to suspend) */
2896         if (unlikely(vcpu->arch.tsc_offset_adjustment)) {
2897                 adjust_tsc_offset_host(vcpu, vcpu->arch.tsc_offset_adjustment);
2898                 vcpu->arch.tsc_offset_adjustment = 0;
2899                 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
2900         }
2901
2902         if (unlikely(vcpu->cpu != cpu) || check_tsc_unstable()) {
2903                 s64 tsc_delta = !vcpu->arch.last_host_tsc ? 0 :
2904                                 native_read_tsc() - vcpu->arch.last_host_tsc;
2905                 if (tsc_delta < 0)
2906                         mark_tsc_unstable("KVM discovered backwards TSC");
2907                 if (check_tsc_unstable()) {
2908                         u64 offset = kvm_x86_ops->compute_tsc_offset(vcpu,
2909                                                 vcpu->arch.last_guest_tsc);
2910                         kvm_x86_ops->write_tsc_offset(vcpu, offset);
2911                         vcpu->arch.tsc_catchup = 1;
2912                 }
2913                 /*
2914                  * On a host with synchronized TSC, there is no need to update
2915                  * kvmclock on vcpu->cpu migration
2916                  */
2917                 if (!vcpu->kvm->arch.use_master_clock || vcpu->cpu == -1)
2918                         kvm_make_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu);
2919                 if (vcpu->cpu != cpu)
2920                         kvm_migrate_timers(vcpu);
2921                 vcpu->cpu = cpu;
2922         }
2923
2924         accumulate_steal_time(vcpu);
2925         kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
2926 }
2927
2928 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
2929 {
2930         kvm_x86_ops->vcpu_put(vcpu);
2931         kvm_put_guest_fpu(vcpu);
2932         vcpu->arch.last_host_tsc = native_read_tsc();
2933 }
2934
2935 static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
2936                                     struct kvm_lapic_state *s)
2937 {
2938         kvm_x86_ops->sync_pir_to_irr(vcpu);
2939         memcpy(s->regs, vcpu->arch.apic->regs, sizeof *s);
2940
2941         return 0;
2942 }
2943
2944 static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
2945                                     struct kvm_lapic_state *s)
2946 {
2947         kvm_apic_post_state_restore(vcpu, s);
2948         update_cr8_intercept(vcpu);
2949
2950         return 0;
2951 }
2952
2953 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
2954                                     struct kvm_interrupt *irq)
2955 {
2956         if (irq->irq >= KVM_NR_INTERRUPTS)
2957                 return -EINVAL;
2958         if (irqchip_in_kernel(vcpu->kvm))
2959                 return -ENXIO;
2960
2961         kvm_queue_interrupt(vcpu, irq->irq, false);
2962         kvm_make_request(KVM_REQ_EVENT, vcpu);
2963
2964         return 0;
2965 }
2966
2967 static int kvm_vcpu_ioctl_nmi(struct kvm_vcpu *vcpu)
2968 {
2969         kvm_inject_nmi(vcpu);
2970
2971         return 0;
2972 }
2973
2974 static int vcpu_ioctl_tpr_access_reporting(struct kvm_vcpu *vcpu,
2975                                            struct kvm_tpr_access_ctl *tac)
2976 {
2977         if (tac->flags)
2978                 return -EINVAL;
2979         vcpu->arch.tpr_access_reporting = !!tac->enabled;
2980         return 0;
2981 }
2982
2983 static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu,
2984                                         u64 mcg_cap)
2985 {
2986         int r;
2987         unsigned bank_num = mcg_cap & 0xff, bank;
2988
2989         r = -EINVAL;
2990         if (!bank_num || bank_num >= KVM_MAX_MCE_BANKS)
2991                 goto out;
2992         if (mcg_cap & ~(KVM_MCE_CAP_SUPPORTED | 0xff | 0xff0000))
2993                 goto out;
2994         r = 0;
2995         vcpu->arch.mcg_cap = mcg_cap;
2996         /* Init IA32_MCG_CTL to all 1s */
2997         if (mcg_cap & MCG_CTL_P)
2998                 vcpu->arch.mcg_ctl = ~(u64)0;
2999         /* Init IA32_MCi_CTL to all 1s */
3000         for (bank = 0; bank < bank_num; bank++)
3001                 vcpu->arch.mce_banks[bank*4] = ~(u64)0;
3002 out:
3003         return r;
3004 }
3005
3006 static int kvm_vcpu_ioctl_x86_set_mce(struct kvm_vcpu *vcpu,
3007                                       struct kvm_x86_mce *mce)
3008 {
3009         u64 mcg_cap = vcpu->arch.mcg_cap;
3010         unsigned bank_num = mcg_cap & 0xff;
3011         u64 *banks = vcpu->arch.mce_banks;
3012
3013         if (mce->bank >= bank_num || !(mce->status & MCI_STATUS_VAL))
3014                 return -EINVAL;
3015         /*
3016          * if IA32_MCG_CTL is not all 1s, the uncorrected error
3017          * reporting is disabled
3018          */
3019         if ((mce->status & MCI_STATUS_UC) && (mcg_cap & MCG_CTL_P) &&
3020             vcpu->arch.mcg_ctl != ~(u64)0)
3021                 return 0;
3022         banks += 4 * mce->bank;
3023         /*
3024          * if IA32_MCi_CTL is not all 1s, the uncorrected error
3025          * reporting is disabled for the bank
3026          */
3027         if ((mce->status & MCI_STATUS_UC) && banks[0] != ~(u64)0)
3028                 return 0;
3029         if (mce->status & MCI_STATUS_UC) {
3030                 if ((vcpu->arch.mcg_status & MCG_STATUS_MCIP) ||
3031                     !kvm_read_cr4_bits(vcpu, X86_CR4_MCE)) {
3032                         kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
3033                         return 0;
3034                 }
3035                 if (banks[1] & MCI_STATUS_VAL)
3036                         mce->status |= MCI_STATUS_OVER;
3037                 banks[2] = mce->addr;
3038                 banks[3] = mce->misc;
3039                 vcpu->arch.mcg_status = mce->mcg_status;
3040                 banks[1] = mce->status;
3041                 kvm_queue_exception(vcpu, MC_VECTOR);
3042         } else if (!(banks[1] & MCI_STATUS_VAL)
3043                    || !(banks[1] & MCI_STATUS_UC)) {
3044                 if (banks[1] & MCI_STATUS_VAL)
3045                         mce->status |= MCI_STATUS_OVER;
3046                 banks[2] = mce->addr;
3047                 banks[3] = mce->misc;
3048                 banks[1] = mce->status;
3049         } else
3050                 banks[1] |= MCI_STATUS_OVER;
3051         return 0;
3052 }
3053
3054 static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
3055                                                struct kvm_vcpu_events *events)
3056 {
3057         process_nmi(vcpu);
3058         events->exception.injected =
3059                 vcpu->arch.exception.pending &&
3060                 !kvm_exception_is_soft(vcpu->arch.exception.nr);
3061         events->exception.nr = vcpu->arch.exception.nr;
3062         events->exception.has_error_code = vcpu->arch.exception.has_error_code;
3063         events->exception.pad = 0;
3064         events->exception.error_code = vcpu->arch.exception.error_code;
3065
3066         events->interrupt.injected =
3067                 vcpu->arch.interrupt.pending && !vcpu->arch.interrupt.soft;
3068         events->interrupt.nr = vcpu->arch.interrupt.nr;
3069         events->interrupt.soft = 0;
3070         events->interrupt.shadow = kvm_x86_ops->get_interrupt_shadow(vcpu);
3071
3072         events->nmi.injected = vcpu->arch.nmi_injected;
3073         events->nmi.pending = vcpu->arch.nmi_pending != 0;
3074         events->nmi.masked = kvm_x86_ops->get_nmi_mask(vcpu);
3075         events->nmi.pad = 0;
3076
3077         events->sipi_vector = 0; /* never valid when reporting to user space */
3078
3079         events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING
3080                          | KVM_VCPUEVENT_VALID_SHADOW);
3081         memset(&events->reserved, 0, sizeof(events->reserved));
3082 }
3083
3084 static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
3085                                               struct kvm_vcpu_events *events)
3086 {
3087         if (events->flags & ~(KVM_VCPUEVENT_VALID_NMI_PENDING
3088                               | KVM_VCPUEVENT_VALID_SIPI_VECTOR
3089                               | KVM_VCPUEVENT_VALID_SHADOW))
3090                 return -EINVAL;
3091
3092         process_nmi(vcpu);
3093         vcpu->arch.exception.pending = events->exception.injected;
3094         vcpu->arch.exception.nr = events->exception.nr;
3095         vcpu->arch.exception.has_error_code = events->exception.has_error_code;
3096         vcpu->arch.exception.error_code = events->exception.error_code;
3097
3098         vcpu->arch.interrupt.pending = events->interrupt.injected;
3099         vcpu->arch.interrupt.nr = events->interrupt.nr;
3100         vcpu->arch.interrupt.soft = events->interrupt.soft;
3101         if (events->flags & KVM_VCPUEVENT_VALID_SHADOW)
3102                 kvm_x86_ops->set_interrupt_shadow(vcpu,
3103                                                   events->interrupt.shadow);
3104
3105         vcpu->arch.nmi_injected = events->nmi.injected;
3106         if (events->flags & KVM_VCPUEVENT_VALID_NMI_PENDING)
3107                 vcpu->arch.nmi_pending = events->nmi.pending;
3108         kvm_x86_ops->set_nmi_mask(vcpu, events->nmi.masked);
3109
3110         if (events->flags & KVM_VCPUEVENT_VALID_SIPI_VECTOR &&
3111             kvm_vcpu_has_lapic(vcpu))
3112                 vcpu->arch.apic->sipi_vector = events->sipi_vector;
3113
3114         kvm_make_request(KVM_REQ_EVENT, vcpu);
3115
3116         return 0;
3117 }
3118
3119 static void kvm_vcpu_ioctl_x86_get_debugregs(struct kvm_vcpu *vcpu,
3120                                              struct kvm_debugregs *dbgregs)
3121 {
3122         unsigned long val;
3123
3124         memcpy(dbgregs->db, vcpu->arch.db, sizeof(vcpu->arch.db));
3125         kvm_get_dr(vcpu, 6, &val);
3126         dbgregs->dr6 = val;
3127         dbgregs->dr7 = vcpu->arch.dr7;
3128         dbgregs->flags = 0;
3129         memset(&dbgregs->reserved, 0, sizeof(dbgregs->reserved));
3130 }
3131
3132 static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
3133                                             struct kvm_debugregs *dbgregs)
3134 {
3135         if (dbgregs->flags)
3136                 return -EINVAL;
3137
3138         memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db));
3139         vcpu->arch.dr6 = dbgregs->dr6;
3140         kvm_update_dr6(vcpu);
3141         vcpu->arch.dr7 = dbgregs->dr7;
3142         kvm_update_dr7(vcpu);
3143
3144         return 0;
3145 }
3146
3147 #define XSTATE_COMPACTION_ENABLED (1ULL << 63)
3148
3149 static void fill_xsave(u8 *dest, struct kvm_vcpu *vcpu)
3150 {
3151         struct xsave_struct *xsave = &vcpu->arch.guest_fpu.state->xsave;
3152         u64 xstate_bv = xsave->xsave_hdr.xstate_bv;
3153         u64 valid;
3154
3155         /*
3156          * Copy legacy XSAVE area, to avoid complications with CPUID
3157          * leaves 0 and 1 in the loop below.
3158          */
3159         memcpy(dest, xsave, XSAVE_HDR_OFFSET);
3160
3161         /* Set XSTATE_BV */
3162         *(u64 *)(dest + XSAVE_HDR_OFFSET) = xstate_bv;
3163
3164         /*
3165          * Copy each region from the possibly compacted offset to the
3166          * non-compacted offset.
3167          */
3168         valid = xstate_bv & ~XSTATE_FPSSE;
3169         while (valid) {
3170                 u64 feature = valid & -valid;
3171                 int index = fls64(feature) - 1;
3172                 void *src = get_xsave_addr(xsave, feature);
3173
3174                 if (src) {
3175                         u32 size, offset, ecx, edx;
3176                         cpuid_count(XSTATE_CPUID, index,
3177                                     &size, &offset, &ecx, &edx);
3178                         memcpy(dest + offset, src, size);
3179                 }
3180
3181                 valid -= feature;
3182         }
3183 }
3184
3185 static void load_xsave(struct kvm_vcpu *vcpu, u8 *src)
3186 {
3187         struct xsave_struct *xsave = &vcpu->arch.guest_fpu.state->xsave;
3188         u64 xstate_bv = *(u64 *)(src + XSAVE_HDR_OFFSET);
3189         u64 valid;
3190
3191         /*
3192          * Copy legacy XSAVE area, to avoid complications with CPUID
3193          * leaves 0 and 1 in the loop below.
3194          */
3195         memcpy(xsave, src, XSAVE_HDR_OFFSET);
3196
3197         /* Set XSTATE_BV and possibly XCOMP_BV.  */
3198         xsave->xsave_hdr.xstate_bv = xstate_bv;
3199         if (cpu_has_xsaves)
3200                 xsave->xsave_hdr.xcomp_bv = host_xcr0 | XSTATE_COMPACTION_ENABLED;
3201
3202         /*
3203          * Copy each region from the non-compacted offset to the
3204          * possibly compacted offset.
3205          */
3206         valid = xstate_bv & ~XSTATE_FPSSE;
3207         while (valid) {
3208                 u64 feature = valid & -valid;
3209                 int index = fls64(feature) - 1;
3210                 void *dest = get_xsave_addr(xsave, feature);
3211
3212                 if (dest) {
3213                         u32 size, offset, ecx, edx;
3214                         cpuid_count(XSTATE_CPUID, index,
3215                                     &size, &offset, &ecx, &edx);
3216                         memcpy(dest, src + offset, size);
3217                 } else
3218                         WARN_ON_ONCE(1);
3219
3220                 valid -= feature;
3221         }
3222 }
3223
3224 static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu,
3225                                          struct kvm_xsave *guest_xsave)
3226 {
3227         if (cpu_has_xsave) {
3228                 memset(guest_xsave, 0, sizeof(struct kvm_xsave));
3229                 fill_xsave((u8 *) guest_xsave->region, vcpu);
3230         } else {
3231                 memcpy(guest_xsave->region,
3232                         &vcpu->arch.guest_fpu.state->fxsave,
3233                         sizeof(struct i387_fxsave_struct));
3234                 *(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)] =
3235                         XSTATE_FPSSE;
3236         }
3237 }
3238
3239 static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu,
3240                                         struct kvm_xsave *guest_xsave)
3241 {
3242         u64 xstate_bv =
3243                 *(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)];
3244
3245         if (cpu_has_xsave) {
3246                 /*
3247                  * Here we allow setting states that are not present in
3248                  * CPUID leaf 0xD, index 0, EDX:EAX.  This is for compatibility
3249                  * with old userspace.
3250                  */
3251                 if (xstate_bv & ~kvm_supported_xcr0())
3252                         return -EINVAL;
3253                 load_xsave(vcpu, (u8 *)guest_xsave->region);
3254         } else {
3255                 if (xstate_bv & ~XSTATE_FPSSE)
3256                         return -EINVAL;
3257                 memcpy(&vcpu->arch.guest_fpu.state->fxsave,
3258                         guest_xsave->region, sizeof(struct i387_fxsave_struct));
3259         }
3260         return 0;
3261 }
3262
3263 static void kvm_vcpu_ioctl_x86_get_xcrs(struct kvm_vcpu *vcpu,
3264                                         struct kvm_xcrs *guest_xcrs)
3265 {
3266         if (!cpu_has_xsave) {
3267                 guest_xcrs->nr_xcrs = 0;
3268                 return;
3269         }
3270
3271         guest_xcrs->nr_xcrs = 1;
3272         guest_xcrs->flags = 0;
3273         guest_xcrs->xcrs[0].xcr = XCR_XFEATURE_ENABLED_MASK;
3274         guest_xcrs->xcrs[0].value = vcpu->arch.xcr0;
3275 }
3276
3277 static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu,
3278                                        struct kvm_xcrs *guest_xcrs)
3279 {
3280         int i, r = 0;
3281
3282         if (!cpu_has_xsave)
3283                 return -EINVAL;
3284
3285         if (guest_xcrs->nr_xcrs > KVM_MAX_XCRS || guest_xcrs->flags)
3286                 return -EINVAL;
3287
3288         for (i = 0; i < guest_xcrs->nr_xcrs; i++)
3289                 /* Only support XCR0 currently */
3290                 if (guest_xcrs->xcrs[i].xcr == XCR_XFEATURE_ENABLED_MASK) {
3291                         r = __kvm_set_xcr(vcpu, XCR_XFEATURE_ENABLED_MASK,
3292                                 guest_xcrs->xcrs[i].value);
3293                         break;
3294                 }
3295         if (r)
3296                 r = -EINVAL;
3297         return r;
3298 }
3299
3300 /*
3301  * kvm_set_guest_paused() indicates to the guest kernel that it has been
3302  * stopped by the hypervisor.  This function will be called from the host only.
3303  * EINVAL is returned when the host attempts to set the flag for a guest that
3304  * does not support pv clocks.
3305  */
3306 static int kvm_set_guest_paused(struct kvm_vcpu *vcpu)
3307 {
3308         if (!vcpu->arch.pv_time_enabled)
3309                 return -EINVAL;
3310         vcpu->arch.pvclock_set_guest_stopped_request = true;
3311         kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
3312         return 0;
3313 }
3314
3315 long kvm_arch_vcpu_ioctl(struct file *filp,
3316                          unsigned int ioctl, unsigned long arg)
3317 {
3318         struct kvm_vcpu *vcpu = filp->private_data;
3319         void __user *argp = (void __user *)arg;
3320         int r;
3321         union {
3322                 struct kvm_lapic_state *lapic;
3323                 struct kvm_xsave *xsave;
3324                 struct kvm_xcrs *xcrs;
3325                 void *buffer;
3326         } u;
3327
3328         u.buffer = NULL;
3329         switch (ioctl) {
3330         case KVM_GET_LAPIC: {
3331                 r = -EINVAL;
3332                 if (!vcpu->arch.apic)
3333                         goto out;
3334                 u.lapic = kzalloc(sizeof(struct kvm_lapic_state), GFP_KERNEL);
3335
3336                 r = -ENOMEM;
3337                 if (!u.lapic)
3338                         goto out;
3339                 r = kvm_vcpu_ioctl_get_lapic(vcpu, u.lapic);
3340                 if (r)
3341                         goto out;
3342                 r = -EFAULT;
3343                 if (copy_to_user(argp, u.lapic, sizeof(struct kvm_lapic_state)))
3344                         goto out;
3345                 r = 0;
3346                 break;
3347         }
3348         case KVM_SET_LAPIC: {
3349                 r = -EINVAL;
3350                 if (!vcpu->arch.apic)
3351                         goto out;
3352                 u.lapic = memdup_user(argp, sizeof(*u.lapic));
3353                 if (IS_ERR(u.lapic))
3354                         return PTR_ERR(u.lapic);
3355
3356                 r = kvm_vcpu_ioctl_set_lapic(vcpu, u.lapic);
3357                 break;
3358         }
3359         case KVM_INTERRUPT: {
3360                 struct kvm_interrupt irq;
3361
3362                 r = -EFAULT;
3363                 if (copy_from_user(&irq, argp, sizeof irq))
3364                         goto out;
3365                 r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
3366                 break;
3367         }
3368         case KVM_NMI: {
3369                 r = kvm_vcpu_ioctl_nmi(vcpu);
3370                 break;
3371         }
3372         case KVM_SET_CPUID: {
3373                 struct kvm_cpuid __user *cpuid_arg = argp;
3374                 struct kvm_cpuid cpuid;
3375
3376                 r = -EFAULT;
3377                 if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
3378                         goto out;
3379                 r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries);
3380                 break;
3381         }
3382         case KVM_SET_CPUID2: {
3383                 struct kvm_cpuid2 __user *cpuid_arg = argp;
3384                 struct kvm_cpuid2 cpuid;
3385
3386                 r = -EFAULT;
3387                 if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
3388                         goto out;
3389                 r = kvm_vcpu_ioctl_set_cpuid2(vcpu, &cpuid,
3390                                               cpuid_arg->entries);
3391                 break;
3392         }
3393         case KVM_GET_CPUID2: {
3394                 struct kvm_cpuid2 __user *cpuid_arg = argp;
3395                 struct kvm_cpuid2 cpuid;
3396
3397                 r = -EFAULT;
3398                 if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
3399                         goto out;
3400                 r = kvm_vcpu_ioctl_get_cpuid2(vcpu, &cpuid,
3401                                               cpuid_arg->entries);
3402                 if (r)
3403                         goto out;
3404                 r = -EFAULT;
3405                 if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
3406                         goto out;
3407                 r = 0;
3408                 break;
3409         }
3410         case KVM_GET_MSRS:
3411                 r = msr_io(vcpu, argp, kvm_get_msr, 1);
3412                 break;
3413         case KVM_SET_MSRS:
3414                 r = msr_io(vcpu, argp, do_set_msr, 0);
3415                 break;
3416         case KVM_TPR_ACCESS_REPORTING: {
3417                 struct kvm_tpr_access_ctl tac;
3418
3419                 r = -EFAULT;
3420                 if (copy_from_user(&tac, argp, sizeof tac))
3421                         goto out;
3422                 r = vcpu_ioctl_tpr_access_reporting(vcpu, &tac);
3423                 if (r)
3424                         goto out;
3425                 r = -EFAULT;
3426                 if (copy_to_user(argp, &tac, sizeof tac))
3427                         goto out;
3428                 r = 0;
3429                 break;
3430         };
3431         case KVM_SET_VAPIC_ADDR: {
3432                 struct kvm_vapic_addr va;
3433
3434                 r = -EINVAL;
3435                 if (!irqchip_in_kernel(vcpu->kvm))
3436                         goto out;
3437                 r = -EFAULT;
3438                 if (copy_from_user(&va, argp, sizeof va))
3439                         goto out;
3440                 r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr);
3441                 break;
3442         }
3443         case KVM_X86_SETUP_MCE: {
3444                 u64 mcg_cap;
3445
3446                 r = -EFAULT;
3447                 if (copy_from_user(&mcg_cap, argp, sizeof mcg_cap))
3448                         goto out;
3449                 r = kvm_vcpu_ioctl_x86_setup_mce(vcpu, mcg_cap);
3450                 break;
3451         }
3452         case KVM_X86_SET_MCE: {
3453                 struct kvm_x86_mce mce;
3454
3455                 r = -EFAULT;
3456                 if (copy_from_user(&mce, argp, sizeof mce))
3457                         goto out;
3458                 r = kvm_vcpu_ioctl_x86_set_mce(vcpu, &mce);
3459                 break;
3460         }
3461         case KVM_GET_VCPU_EVENTS: {
3462                 struct kvm_vcpu_events events;
3463
3464                 kvm_vcpu_ioctl_x86_get_vcpu_events(vcpu, &events);
3465
3466                 r = -EFAULT;
3467                 if (copy_to_user(argp, &events, sizeof(struct kvm_vcpu_events)))
3468                         break;
3469                 r = 0;
3470                 break;
3471         }
3472         case KVM_SET_VCPU_EVENTS: {
3473                 struct kvm_vcpu_events events;
3474
3475                 r = -EFAULT;
3476                 if (copy_from_user(&events, argp, sizeof(struct kvm_vcpu_events)))
3477                         break;
3478
3479                 r = kvm_vcpu_ioctl_x86_set_vcpu_events(vcpu, &events);
3480                 break;
3481         }
3482         case KVM_GET_DEBUGREGS: {
3483                 struct kvm_debugregs dbgregs;
3484
3485                 kvm_vcpu_ioctl_x86_get_debugregs(vcpu, &dbgregs);
3486
3487                 r = -EFAULT;
3488                 if (copy_to_user(argp, &dbgregs,
3489                                  sizeof(struct kvm_debugregs)))
3490                         break;
3491                 r = 0;
3492                 break;
3493         }
3494         case KVM_SET_DEBUGREGS: {
3495                 struct kvm_debugregs dbgregs;
3496
3497                 r = -EFAULT;
3498                 if (copy_from_user(&dbgregs, argp,
3499                                    sizeof(struct kvm_debugregs)))
3500                         break;
3501
3502                 r = kvm_vcpu_ioctl_x86_set_debugregs(vcpu, &dbgregs);
3503                 break;
3504         }
3505         case KVM_GET_XSAVE: {
3506                 u.xsave = kzalloc(sizeof(struct kvm_xsave), GFP_KERNEL);
3507                 r = -ENOMEM;
3508                 if (!u.xsave)
3509                         break;
3510
3511                 kvm_vcpu_ioctl_x86_get_xsave(vcpu, u.xsave);
3512
3513                 r = -EFAULT;
3514                 if (copy_to_user(argp, u.xsave, sizeof(struct kvm_xsave)))
3515                         break;
3516                 r = 0;
3517                 break;
3518         }
3519         case KVM_SET_XSAVE: {
3520                 u.xsave = memdup_user(argp, sizeof(*u.xsave));
3521                 if (IS_ERR(u.xsave))
3522                         return PTR_ERR(u.xsave);
3523
3524                 r = kvm_vcpu_ioctl_x86_set_xsave(vcpu, u.xsave);
3525                 break;
3526         }
3527         case KVM_GET_XCRS: {
3528                 u.xcrs = kzalloc(sizeof(struct kvm_xcrs), GFP_KERNEL);
3529                 r = -ENOMEM;
3530                 if (!u.xcrs)
3531                         break;
3532
3533                 kvm_vcpu_ioctl_x86_get_xcrs(vcpu, u.xcrs);
3534
3535                 r = -EFAULT;
3536                 if (copy_to_user(argp, u.xcrs,
3537                                  sizeof(struct kvm_xcrs)))
3538                         break;
3539                 r = 0;
3540                 break;
3541         }
3542         case KVM_SET_XCRS: {
3543                 u.xcrs = memdup_user(argp, sizeof(*u.xcrs));
3544                 if (IS_ERR(u.xcrs))
3545                         return PTR_ERR(u.xcrs);
3546
3547                 r = kvm_vcpu_ioctl_x86_set_xcrs(vcpu, u.xcrs);
3548                 break;
3549         }
3550         case KVM_SET_TSC_KHZ: {
3551                 u32 user_tsc_khz;
3552
3553                 r = -EINVAL;
3554                 user_tsc_khz = (u32)arg;
3555
3556                 if (user_tsc_khz >= kvm_max_guest_tsc_khz)
3557                         goto out;
3558
3559                 if (user_tsc_khz == 0)
3560                         user_tsc_khz = tsc_khz;
3561
3562                 kvm_set_tsc_khz(vcpu, user_tsc_khz);
3563
3564                 r = 0;
3565                 goto out;
3566         }
3567         case KVM_GET_TSC_KHZ: {
3568                 r = vcpu->arch.virtual_tsc_khz;
3569                 goto out;
3570         }
3571         case KVM_KVMCLOCK_CTRL: {
3572                 r = kvm_set_guest_paused(vcpu);
3573                 goto out;
3574         }
3575         default:
3576                 r = -EINVAL;
3577         }
3578 out:
3579         kfree(u.buffer);
3580         return r;
3581 }
3582
3583 int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
3584 {
3585         return VM_FAULT_SIGBUS;
3586 }
3587
3588 static int kvm_vm_ioctl_set_tss_addr(struct kvm *kvm, unsigned long addr)
3589 {
3590         int ret;
3591
3592         if (addr > (unsigned int)(-3 * PAGE_SIZE))
3593                 return -EINVAL;
3594         ret = kvm_x86_ops->set_tss_addr(kvm, addr);
3595         return ret;
3596 }
3597
3598 static int kvm_vm_ioctl_set_identity_map_addr(struct kvm *kvm,
3599                                               u64 ident_addr)
3600 {
3601         kvm->arch.ept_identity_map_addr = ident_addr;
3602         return 0;
3603 }
3604
3605 static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
3606                                           u32 kvm_nr_mmu_pages)
3607 {
3608         if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES)
3609                 return -EINVAL;
3610
3611         mutex_lock(&kvm->slots_lock);
3612
3613         kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages);
3614         kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages;
3615
3616         mutex_unlock(&kvm->slots_lock);
3617         return 0;
3618 }
3619
3620 static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
3621 {
3622         return kvm->arch.n_max_mmu_pages;
3623 }
3624
3625 static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
3626 {
3627         int r;
3628
3629         r = 0;
3630         switch (chip->chip_id) {
3631         case KVM_IRQCHIP_PIC_MASTER:
3632                 memcpy(&chip->chip.pic,
3633                         &pic_irqchip(kvm)->pics[0],
3634                         sizeof(struct kvm_pic_state));
3635                 break;
3636         case KVM_IRQCHIP_PIC_SLAVE:
3637                 memcpy(&chip->chip.pic,
3638                         &pic_irqchip(kvm)->pics[1],
3639                         sizeof(struct kvm_pic_state));
3640                 break;
3641         case KVM_IRQCHIP_IOAPIC:
3642                 r = kvm_get_ioapic(kvm, &chip->chip.ioapic);
3643                 break;
3644         default:
3645                 r = -EINVAL;
3646                 break;
3647         }
3648         return r;
3649 }
3650
3651 static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
3652 {
3653         int r;
3654
3655         r = 0;
3656         switch (chip->chip_id) {
3657         case KVM_IRQCHIP_PIC_MASTER:
3658                 spin_lock(&pic_irqchip(kvm)->lock);
3659                 memcpy(&pic_irqchip(kvm)->pics[0],
3660                         &chip->chip.pic,
3661                         sizeof(struct kvm_pic_state));
3662                 spin_unlock(&pic_irqchip(kvm)->lock);
3663                 break;
3664         case KVM_IRQCHIP_PIC_SLAVE:
3665                 spin_lock(&pic_irqchip(kvm)->lock);
3666                 memcpy(&pic_irqchip(kvm)->pics[1],
3667                         &chip->chip.pic,
3668                         sizeof(struct kvm_pic_state));
3669                 spin_unlock(&pic_irqchip(kvm)->lock);
3670                 break;
3671         case KVM_IRQCHIP_IOAPIC:
3672                 r = kvm_set_ioapic(kvm, &chip->chip.ioapic);
3673                 break;
3674         default:
3675                 r = -EINVAL;
3676                 break;
3677         }
3678         kvm_pic_update_irq(pic_irqchip(kvm));
3679         return r;
3680 }
3681
3682 static int kvm_vm_ioctl_get_pit(struct kvm *kvm, struct kvm_pit_state *ps)
3683 {
3684         int r = 0;
3685
3686         mutex_lock(&kvm->arch.vpit->pit_state.lock);
3687         memcpy(ps, &kvm->arch.vpit->pit_state, sizeof(struct kvm_pit_state));
3688         mutex_unlock(&kvm->arch.vpit->pit_state.lock);
3689         return r;
3690 }
3691
3692 static int kvm_vm_ioctl_set_pit(struct kvm *kvm, struct kvm_pit_state *ps)
3693 {
3694         int r = 0;
3695
3696         mutex_lock(&kvm->arch.vpit->pit_state.lock);
3697         memcpy(&kvm->arch.vpit->pit_state, ps, sizeof(struct kvm_pit_state));
3698         kvm_pit_load_count(kvm, 0, ps->channels[0].count, 0);
3699         mutex_unlock(&kvm->arch.vpit->pit_state.lock);
3700         return r;
3701 }
3702
3703 static int kvm_vm_ioctl_get_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
3704 {
3705         int r = 0;
3706
3707         mutex_lock(&kvm->arch.vpit->pit_state.lock);
3708         memcpy(ps->channels, &kvm->arch.vpit->pit_state.channels,
3709                 sizeof(ps->channels));
3710         ps->flags = kvm->arch.vpit->pit_state.flags;
3711         mutex_unlock(&kvm->arch.vpit->pit_state.lock);
3712         memset(&ps->reserved, 0, sizeof(ps->reserved));
3713         return r;
3714 }
3715
3716 static int kvm_vm_ioctl_set_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
3717 {
3718         int r = 0, start = 0;
3719         u32 prev_legacy, cur_legacy;
3720         mutex_lock(&kvm->arch.vpit->pit_state.lock);
3721         prev_legacy = kvm->arch.vpit->pit_state.flags & KVM_PIT_FLAGS_HPET_LEGACY;
3722         cur_legacy = ps->flags & KVM_PIT_FLAGS_HPET_LEGACY;
3723         if (!prev_legacy && cur_legacy)
3724                 start = 1;
3725         memcpy(&kvm->arch.vpit->pit_state.channels, &ps->channels,
3726                sizeof(kvm->arch.vpit->pit_state.channels));
3727         kvm->arch.vpit->pit_state.flags = ps->flags;
3728         kvm_pit_load_count(kvm, 0, kvm->arch.vpit->pit_state.channels[0].count, start);
3729         mutex_unlock(&kvm->arch.vpit->pit_state.lock);
3730         return r;
3731 }
3732
3733 static int kvm_vm_ioctl_reinject(struct kvm *kvm,
3734                                  struct kvm_reinject_control *control)
3735 {
3736         if (!kvm->arch.vpit)
3737                 return -ENXIO;
3738         mutex_lock(&kvm->arch.vpit->pit_state.lock);
3739         kvm->arch.vpit->pit_state.reinject = control->pit_reinject;
3740         mutex_unlock(&kvm->arch.vpit->pit_state.lock);
3741         return 0;
3742 }
3743
3744 /**
3745  * kvm_vm_ioctl_get_dirty_log - get and clear the log of dirty pages in a slot
3746  * @kvm: kvm instance
3747  * @log: slot id and address to which we copy the log
3748  *
3749  * We need to keep it in mind that VCPU threads can write to the bitmap
3750  * concurrently.  So, to avoid losing data, we keep the following order for
3751  * each bit:
3752  *
3753  *   1. Take a snapshot of the bit and clear it if needed.
3754  *   2. Write protect the corresponding page.
3755  *   3. Flush TLB's if needed.
3756  *   4. Copy the snapshot to the userspace.
3757  *
3758  * Between 2 and 3, the guest may write to the page using the remaining TLB
3759  * entry.  This is not a problem because the page will be reported dirty at
3760  * step 4 using the snapshot taken before and step 3 ensures that successive
3761  * writes will be logged for the next call.
3762  */
3763 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
3764 {
3765         int r;
3766         struct kvm_memory_slot *memslot;
3767         unsigned long n, i;
3768         unsigned long *dirty_bitmap;
3769         unsigned long *dirty_bitmap_buffer;
3770         bool is_dirty = false;
3771
3772         mutex_lock(&kvm->slots_lock);
3773
3774         r = -EINVAL;
3775         if (log->slot >= KVM_USER_MEM_SLOTS)
3776                 goto out;
3777
3778         memslot = id_to_memslot(kvm->memslots, log->slot);
3779
3780         dirty_bitmap = memslot->dirty_bitmap;
3781         r = -ENOENT;
3782         if (!dirty_bitmap)
3783                 goto out;
3784
3785         n = kvm_dirty_bitmap_bytes(memslot);
3786
3787         dirty_bitmap_buffer = dirty_bitmap + n / sizeof(long);
3788         memset(dirty_bitmap_buffer, 0, n);
3789
3790         spin_lock(&kvm->mmu_lock);
3791
3792         for (i = 0; i < n / sizeof(long); i++) {
3793                 unsigned long mask;
3794                 gfn_t offset;
3795
3796                 if (!dirty_bitmap[i])
3797                         continue;
3798
3799                 is_dirty = true;
3800
3801                 mask = xchg(&dirty_bitmap[i], 0);
3802                 dirty_bitmap_buffer[i] = mask;
3803
3804                 offset = i * BITS_PER_LONG;
3805                 kvm_mmu_write_protect_pt_masked(kvm, memslot, offset, mask);
3806         }
3807
3808         spin_unlock(&kvm->mmu_lock);
3809
3810         /* See the comments in kvm_mmu_slot_remove_write_access(). */
3811         lockdep_assert_held(&kvm->slots_lock);
3812
3813         /*
3814          * All the TLBs can be flushed out of mmu lock, see the comments in
3815          * kvm_mmu_slot_remove_write_access().
3816          */
3817         if (is_dirty)
3818                 kvm_flush_remote_tlbs(kvm);
3819
3820         r = -EFAULT;
3821         if (copy_to_user(log->dirty_bitmap, dirty_bitmap_buffer, n))
3822                 goto out;
3823
3824         r = 0;
3825 out:
3826         mutex_unlock(&kvm->slots_lock);
3827         return r;
3828 }
3829
3830 int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event,
3831                         bool line_status)
3832 {
3833         if (!irqchip_in_kernel(kvm))
3834                 return -ENXIO;
3835
3836         irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
3837                                         irq_event->irq, irq_event->level,
3838                                         line_status);
3839         return 0;
3840 }
3841
3842 long kvm_arch_vm_ioctl(struct file *filp,
3843                        unsigned int ioctl, unsigned long arg)
3844 {
3845         struct kvm *kvm = filp->private_data;
3846         void __user *argp = (void __user *)arg;
3847         int r = -ENOTTY;
3848         /*
3849          * This union makes it completely explicit to gcc-3.x
3850          * that these two variables' stack usage should be
3851          * combined, not added together.
3852          */
3853         union {
3854                 struct kvm_pit_state ps;
3855                 struct kvm_pit_state2 ps2;
3856                 struct kvm_pit_config pit_config;
3857         } u;
3858
3859         switch (ioctl) {
3860         case KVM_SET_TSS_ADDR:
3861                 r = kvm_vm_ioctl_set_tss_addr(kvm, arg);
3862                 break;
3863         case KVM_SET_IDENTITY_MAP_ADDR: {
3864                 u64 ident_addr;
3865
3866                 r = -EFAULT;
3867                 if (copy_from_user(&ident_addr, argp, sizeof ident_addr))
3868                         goto out;
3869                 r = kvm_vm_ioctl_set_identity_map_addr(kvm, ident_addr);
3870                 break;
3871         }
3872         case KVM_SET_NR_MMU_PAGES:
3873                 r = kvm_vm_ioctl_set_nr_mmu_pages(kvm, arg);
3874                 break;
3875         case KVM_GET_NR_MMU_PAGES:
3876                 r = kvm_vm_ioctl_get_nr_mmu_pages(kvm);
3877                 break;
3878         case KVM_CREATE_IRQCHIP: {
3879                 struct kvm_pic *vpic;
3880
3881                 mutex_lock(&kvm->lock);
3882                 r = -EEXIST;
3883                 if (kvm->arch.vpic)
3884                         goto create_irqchip_unlock;
3885                 r = -EINVAL;
3886                 if (atomic_read(&kvm->online_vcpus))
3887                         goto create_irqchip_unlock;
3888                 r = -ENOMEM;
3889                 vpic = kvm_create_pic(kvm);
3890                 if (vpic) {
3891                         r = kvm_ioapic_init(kvm);
3892                         if (r) {
3893                                 mutex_lock(&kvm->slots_lock);
3894                                 kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS,
3895                                                           &vpic->dev_master);
3896                                 kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS,
3897                                                           &vpic->dev_slave);
3898                                 kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS,
3899                                                           &vpic->dev_eclr);
3900                                 mutex_unlock(&kvm->slots_lock);
3901                                 kfree(vpic);
3902                                 goto create_irqchip_unlock;
3903                         }
3904                 } else
3905                         goto create_irqchip_unlock;
3906                 smp_wmb();
3907                 kvm->arch.vpic = vpic;
3908                 smp_wmb();
3909                 r = kvm_setup_default_irq_routing(kvm);
3910                 if (r) {
3911                         mutex_lock(&kvm->slots_lock);
3912                         mutex_lock(&kvm->irq_lock);
3913                         kvm_ioapic_destroy(kvm);
3914                         kvm_destroy_pic(kvm);
3915                         mutex_unlock(&kvm->irq_lock);
3916                         mutex_unlock(&kvm->slots_lock);
3917                 }
3918         create_irqchip_unlock:
3919                 mutex_unlock(&kvm->lock);
3920                 break;
3921         }
3922         case KVM_CREATE_PIT:
3923                 u.pit_config.flags = KVM_PIT_SPEAKER_DUMMY;
3924                 goto create_pit;
3925         case KVM_CREATE_PIT2:
3926                 r = -EFAULT;
3927                 if (copy_from_user(&u.pit_config, argp,
3928                                    sizeof(struct kvm_pit_config)))
3929                         goto out;
3930         create_pit:
3931                 mutex_lock(&kvm->slots_lock);
3932                 r = -EEXIST;
3933                 if (kvm->arch.vpit)
3934                         goto create_pit_unlock;
3935                 r = -ENOMEM;
3936                 kvm->arch.vpit = kvm_create_pit(kvm, u.pit_config.flags);
3937                 if (kvm->arch.vpit)
3938                         r = 0;
3939         create_pit_unlock:
3940                 mutex_unlock(&kvm->slots_lock);
3941                 break;
3942         case KVM_GET_IRQCHIP: {
3943                 /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
3944                 struct kvm_irqchip *chip;
3945
3946                 chip = memdup_user(argp, sizeof(*chip));
3947                 if (IS_ERR(chip)) {
3948                         r = PTR_ERR(chip);
3949                         goto out;
3950                 }
3951
3952                 r = -ENXIO;
3953                 if (!irqchip_in_kernel(kvm))
3954                         goto get_irqchip_out;
3955                 r = kvm_vm_ioctl_get_irqchip(kvm, chip);
3956                 if (r)
3957                         goto get_irqchip_out;
3958                 r = -EFAULT;
3959                 if (copy_to_user(argp, chip, sizeof *chip))
3960                         goto get_irqchip_out;
3961                 r = 0;
3962         get_irqchip_out:
3963                 kfree(chip);
3964                 break;
3965         }
3966         case KVM_SET_IRQCHIP: {
3967                 /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
3968                 struct kvm_irqchip *chip;
3969
3970                 chip = memdup_user(argp, sizeof(*chip));
3971                 if (IS_ERR(chip)) {
3972                         r = PTR_ERR(chip);
3973                         goto out;
3974                 }
3975
3976                 r = -ENXIO;
3977                 if (!irqchip_in_kernel(kvm))
3978                         goto set_irqchip_out;
3979                 r = kvm_vm_ioctl_set_irqchip(kvm, chip);
3980                 if (r)
3981                         goto set_irqchip_out;
3982                 r = 0;
3983         set_irqchip_out:
3984                 kfree(chip);
3985                 break;
3986         }
3987         case KVM_GET_PIT: {
3988                 r = -EFAULT;
3989                 if (copy_from_user(&u.ps, argp, sizeof(struct kvm_pit_state)))
3990                         goto out;
3991                 r = -ENXIO;
3992                 if (!kvm->arch.vpit)
3993                         goto out;
3994                 r = kvm_vm_ioctl_get_pit(kvm, &u.ps);
3995                 if (r)
3996                         goto out;
3997                 r = -EFAULT;
3998                 if (copy_to_user(argp, &u.ps, sizeof(struct kvm_pit_state)))
3999                         goto out;
4000                 r = 0;
4001                 break;
4002         }
4003         case KVM_SET_PIT: {
4004                 r = -EFAULT;
4005                 if (copy_from_user(&u.ps, argp, sizeof u.ps))
4006                         goto out;
4007                 r = -ENXIO;
4008                 if (!kvm->arch.vpit)
4009                         goto out;
4010                 r = kvm_vm_ioctl_set_pit(kvm, &u.ps);
4011                 break;
4012         }
4013         case KVM_GET_PIT2: {
4014                 r = -ENXIO;
4015                 if (!kvm->arch.vpit)
4016                         goto out;
4017                 r = kvm_vm_ioctl_get_pit2(kvm, &u.ps2);
4018                 if (r)
4019                         goto out;
4020                 r = -EFAULT;
4021                 if (copy_to_user(argp, &u.ps2, sizeof(u.ps2)))
4022                         goto out;
4023                 r = 0;
4024                 break;
4025         }
4026         case KVM_SET_PIT2: {
4027                 r = -EFAULT;
4028                 if (copy_from_user(&u.ps2, argp, sizeof(u.ps2)))
4029                         goto out;
4030                 r = -ENXIO;
4031                 if (!kvm->arch.vpit)
4032                         goto out;
4033                 r = kvm_vm_ioctl_set_pit2(kvm, &u.ps2);
4034                 break;
4035         }
4036         case KVM_REINJECT_CONTROL: {
4037                 struct kvm_reinject_control control;
4038                 r =  -EFAULT;
4039                 if (copy_from_user(&control, argp, sizeof(control)))
4040                         goto out;
4041                 r = kvm_vm_ioctl_reinject(kvm, &control);
4042                 break;
4043         }
4044         case KVM_XEN_HVM_CONFIG: {
4045                 r = -EFAULT;
4046                 if (copy_from_user(&kvm->arch.xen_hvm_config, argp,
4047                                    sizeof(struct kvm_xen_hvm_config)))
4048                         goto out;
4049                 r = -EINVAL;
4050                 if (kvm->arch.xen_hvm_config.flags)
4051                         goto out;
4052                 r = 0;
4053                 break;
4054         }
4055         case KVM_SET_CLOCK: {
4056                 struct kvm_clock_data user_ns;
4057                 u64 now_ns;
4058                 s64 delta;
4059
4060                 r = -EFAULT;
4061                 if (copy_from_user(&user_ns, argp, sizeof(user_ns)))
4062                         goto out;
4063
4064                 r = -EINVAL;
4065                 if (user_ns.flags)
4066                         goto out;
4067
4068                 r = 0;
4069                 local_irq_disable();
4070                 now_ns = get_kernel_ns();
4071                 delta = user_ns.clock - now_ns;
4072                 local_irq_enable();
4073                 kvm->arch.kvmclock_offset = delta;
4074                 kvm_gen_update_masterclock(kvm);
4075                 break;
4076         }
4077         case KVM_GET_CLOCK: {
4078                 struct kvm_clock_data user_ns;
4079                 u64 now_ns;
4080
4081                 local_irq_disable();
4082                 now_ns = get_kernel_ns();
4083                 user_ns.clock = kvm->arch.kvmclock_offset + now_ns;
4084                 local_irq_enable();
4085                 user_ns.flags = 0;
4086                 memset(&user_ns.pad, 0, sizeof(user_ns.pad));
4087
4088                 r = -EFAULT;
4089                 if (copy_to_user(argp, &user_ns, sizeof(user_ns)))
4090                         goto out;
4091                 r = 0;
4092                 break;
4093         }
4094
4095         default:
4096                 r = kvm_vm_ioctl_assigned_device(kvm, ioctl, arg);
4097         }
4098 out:
4099         return r;
4100 }
4101
4102 static void kvm_init_msr_list(void)
4103 {
4104         u32 dummy[2];
4105         unsigned i, j;
4106
4107         /* skip the first msrs in the list. KVM-specific */
4108         for (i = j = KVM_SAVE_MSRS_BEGIN; i < ARRAY_SIZE(msrs_to_save); i++) {
4109                 if (rdmsr_safe(msrs_to_save[i], &dummy[0], &dummy[1]) < 0)
4110                         continue;
4111
4112                 /*
4113                  * Even MSRs that are valid in the host may not be exposed
4114                  * to the guests in some cases.  We could work around this
4115                  * in VMX with the generic MSR save/load machinery, but it
4116                  * is not really worthwhile since it will really only
4117                  * happen with nested virtualization.
4118                  */
4119                 switch (msrs_to_save[i]) {
4120                 case MSR_IA32_BNDCFGS:
4121                         if (!kvm_x86_ops->mpx_supported())
4122                                 continue;
4123                         break;
4124                 default:
4125                         break;
4126                 }
4127
4128                 if (j < i)
4129                         msrs_to_save[j] = msrs_to_save[i];
4130                 j++;
4131         }
4132         num_msrs_to_save = j;
4133 }
4134
4135 static int vcpu_mmio_write(struct kvm_vcpu *vcpu, gpa_t addr, int len,
4136                            const void *v)
4137 {
4138         int handled = 0;
4139         int n;
4140
4141         do {
4142                 n = min(len, 8);
4143                 if (!(vcpu->arch.apic &&
4144                       !kvm_iodevice_write(&vcpu->arch.apic->dev, addr, n, v))
4145                     && kvm_io_bus_write(vcpu->kvm, KVM_MMIO_BUS, addr, n, v))
4146                         break;
4147                 handled += n;
4148                 addr += n;
4149                 len -= n;
4150                 v += n;
4151         } while (len);
4152
4153         return handled;
4154 }
4155
4156 static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v)
4157 {
4158         int handled = 0;
4159         int n;
4160
4161         do {
4162                 n = min(len, 8);
4163                 if (!(vcpu->arch.apic &&
4164                       !kvm_iodevice_read(&vcpu->arch.apic->dev, addr, n, v))
4165                     && kvm_io_bus_read(vcpu->kvm, KVM_MMIO_BUS, addr, n, v))
4166                         break;
4167                 trace_kvm_mmio(KVM_TRACE_MMIO_READ, n, addr, *(u64 *)v);
4168                 handled += n;
4169                 addr += n;
4170                 len -= n;
4171                 v += n;
4172         } while (len);
4173
4174         return handled;
4175 }
4176
4177 static void kvm_set_segment(struct kvm_vcpu *vcpu,
4178                         struct kvm_segment *var, int seg)
4179 {
4180         kvm_x86_ops->set_segment(vcpu, var, seg);
4181 }
4182
4183 void kvm_get_segment(struct kvm_vcpu *vcpu,
4184                      struct kvm_segment *var, int seg)
4185 {
4186         kvm_x86_ops->get_segment(vcpu, var, seg);
4187 }
4188
4189 gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
4190                            struct x86_exception *exception)
4191 {
4192         gpa_t t_gpa;
4193
4194         BUG_ON(!mmu_is_nested(vcpu));
4195
4196         /* NPT walks are always user-walks */
4197         access |= PFERR_USER_MASK;
4198         t_gpa  = vcpu->arch.mmu.gva_to_gpa(vcpu, gpa, access, exception);
4199
4200         return t_gpa;
4201 }
4202
4203 gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
4204                               struct x86_exception *exception)
4205 {
4206         u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
4207         return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
4208 }
4209
4210  gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva,
4211                                 struct x86_exception *exception)
4212 {
4213         u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
4214         access |= PFERR_FETCH_MASK;
4215         return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
4216 }
4217
4218 gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva,
4219                                struct x86_exception *exception)
4220 {
4221         u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
4222         access |= PFERR_WRITE_MASK;
4223         return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
4224 }
4225
4226 /* uses this to access any guest's mapped memory without checking CPL */
4227 gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
4228                                 struct x86_exception *exception)
4229 {
4230         return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, 0, exception);
4231 }
4232
4233 static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
4234                                       struct kvm_vcpu *vcpu, u32 access,
4235                                       struct x86_exception *exception)
4236 {
4237         void *data = val;
4238         int r = X86EMUL_CONTINUE;
4239
4240         while (bytes) {
4241                 gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, access,
4242                                                             exception);
4243                 unsigned offset = addr & (PAGE_SIZE-1);
4244                 unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset);
4245                 int ret;
4246
4247                 if (gpa == UNMAPPED_GVA)
4248                         return X86EMUL_PROPAGATE_FAULT;
4249                 ret = kvm_read_guest_page(vcpu->kvm, gpa >> PAGE_SHIFT, data,
4250                                           offset, toread);
4251                 if (ret < 0) {
4252                         r = X86EMUL_IO_NEEDED;
4253                         goto out;
4254                 }
4255
4256                 bytes -= toread;
4257                 data += toread;
4258                 addr += toread;
4259         }
4260 out:
4261         return r;
4262 }
4263
4264 /* used for instruction fetching */
4265 static int kvm_fetch_guest_virt(struct x86_emulate_ctxt *ctxt,
4266                                 gva_t addr, void *val, unsigned int bytes,
4267                                 struct x86_exception *exception)
4268 {
4269         struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
4270         u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
4271         unsigned offset;
4272         int ret;
4273
4274         /* Inline kvm_read_guest_virt_helper for speed.  */
4275         gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, access|PFERR_FETCH_MASK,
4276                                                     exception);
4277         if (unlikely(gpa == UNMAPPED_GVA))
4278                 return X86EMUL_PROPAGATE_FAULT;
4279
4280         offset = addr & (PAGE_SIZE-1);
4281         if (WARN_ON(offset + bytes > PAGE_SIZE))
4282                 bytes = (unsigned)PAGE_SIZE - offset;
4283         ret = kvm_read_guest_page(vcpu->kvm, gpa >> PAGE_SHIFT, val,
4284                                   offset, bytes);
4285         if (unlikely(ret < 0))
4286                 return X86EMUL_IO_NEEDED;
4287
4288         return X86EMUL_CONTINUE;
4289 }
4290
4291 int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt,
4292                                gva_t addr, void *val, unsigned int bytes,
4293                                struct x86_exception *exception)
4294 {
4295         struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
4296         u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
4297
4298         return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access,
4299                                           exception);
4300 }
4301 EXPORT_SYMBOL_GPL(kvm_read_guest_virt);
4302
4303 static int kvm_read_guest_virt_system(struct x86_emulate_ctxt *ctxt,
4304                                       gva_t addr, void *val, unsigned int bytes,
4305                                       struct x86_exception *exception)
4306 {
4307         struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
4308         return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, 0, exception);
4309 }
4310
4311 int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt,
4312                                        gva_t addr, void *val,
4313                                        unsigned int bytes,
4314                                        struct x86_exception *exception)
4315 {
4316         struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
4317         void *data = val;
4318         int r = X86EMUL_CONTINUE;
4319
4320         while (bytes) {
4321                 gpa_t gpa =  vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr,
4322                                                              PFERR_WRITE_MASK,
4323                                                              exception);
4324                 unsigned offset = addr & (PAGE_SIZE-1);
4325                 unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset);
4326                 int ret;
4327
4328                 if (gpa == UNMAPPED_GVA)
4329                         return X86EMUL_PROPAGATE_FAULT;
4330                 ret = kvm_write_guest(vcpu->kvm, gpa, data, towrite);
4331                 if (ret < 0) {
4332                         r = X86EMUL_IO_NEEDED;
4333                         goto out;
4334                 }
4335
4336                 bytes -= towrite;
4337                 data += towrite;
4338                 addr += towrite;
4339         }
4340 out:
4341         return r;
4342 }
4343 EXPORT_SYMBOL_GPL(kvm_write_guest_virt_system);
4344
4345 static int vcpu_mmio_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
4346                                 gpa_t *gpa, struct x86_exception *exception,
4347                                 bool write)
4348 {
4349         u32 access = ((kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0)
4350                 | (write ? PFERR_WRITE_MASK : 0);
4351
4352         if (vcpu_match_mmio_gva(vcpu, gva)
4353             && !permission_fault(vcpu, vcpu->arch.walk_mmu,
4354                                  vcpu->arch.access, access)) {
4355                 *gpa = vcpu->arch.mmio_gfn << PAGE_SHIFT |
4356                                         (gva & (PAGE_SIZE - 1));
4357                 trace_vcpu_match_mmio(gva, *gpa, write, false);
4358                 return 1;
4359         }
4360
4361         *gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
4362
4363         if (*gpa == UNMAPPED_GVA)
4364                 return -1;
4365
4366         /* For APIC access vmexit */
4367         if ((*gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
4368                 return 1;
4369
4370         if (vcpu_match_mmio_gpa(vcpu, *gpa)) {
4371                 trace_vcpu_match_mmio(gva, *gpa, write, true);
4372                 return 1;
4373         }
4374
4375         return 0;
4376 }
4377
4378 int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
4379                         const void *val, int bytes)
4380 {
4381         int ret;
4382
4383         ret = kvm_write_guest(vcpu->kvm, gpa, val, bytes);
4384         if (ret < 0)
4385                 return 0;
4386         kvm_mmu_pte_write(vcpu, gpa, val, bytes);
4387         return 1;
4388 }
4389
4390 struct read_write_emulator_ops {
4391         int (*read_write_prepare)(struct kvm_vcpu *vcpu, void *val,
4392                                   int bytes);
4393         int (*read_write_emulate)(struct kvm_vcpu *vcpu, gpa_t gpa,
4394                                   void *val, int bytes);
4395         int (*read_write_mmio)(struct kvm_vcpu *vcpu, gpa_t gpa,
4396                                int bytes, void *val);
4397         int (*read_write_exit_mmio)(struct kvm_vcpu *vcpu, gpa_t gpa,
4398                                     void *val, int bytes);
4399         bool write;
4400 };
4401
4402 static int read_prepare(struct kvm_vcpu *vcpu, void *val, int bytes)
4403 {
4404         if (vcpu->mmio_read_completed) {
4405                 trace_kvm_mmio(KVM_TRACE_MMIO_READ, bytes,
4406                                vcpu->mmio_fragments[0].gpa, *(u64 *)val);
4407                 vcpu->mmio_read_completed = 0;
4408                 return 1;
4409         }
4410
4411         return 0;
4412 }
4413
4414 static int read_emulate(struct kvm_vcpu *vcpu, gpa_t gpa,
4415                         void *val, int bytes)
4416 {
4417         return !kvm_read_guest(vcpu->kvm, gpa, val, bytes);
4418 }
4419
4420 static int write_emulate(struct kvm_vcpu *vcpu, gpa_t gpa,
4421                          void *val, int bytes)
4422 {
4423         return emulator_write_phys(vcpu, gpa, val, bytes);
4424 }
4425
4426 static int write_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes, void *val)
4427 {
4428         trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, bytes, gpa, *(u64 *)val);
4429         return vcpu_mmio_write(vcpu, gpa, bytes, val);
4430 }
4431
4432 static int read_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa,
4433                           void *val, int bytes)
4434 {
4435         trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, bytes, gpa, 0);
4436         return X86EMUL_IO_NEEDED;
4437 }
4438
4439 static int write_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa,
4440                            void *val, int bytes)
4441 {
4442         struct kvm_mmio_fragment *frag = &vcpu->mmio_fragments[0];
4443
4444         memcpy(vcpu->run->mmio.data, frag->data, min(8u, frag->len));
4445         return X86EMUL_CONTINUE;
4446 }
4447
4448 static const struct read_write_emulator_ops read_emultor = {
4449         .read_write_prepare = read_prepare,
4450         .read_write_emulate = read_emulate,
4451         .read_write_mmio = vcpu_mmio_read,
4452         .read_write_exit_mmio = read_exit_mmio,
4453 };
4454
4455 static const struct read_write_emulator_ops write_emultor = {
4456         .read_write_emulate = write_emulate,
4457         .read_write_mmio = write_mmio,
4458         .read_write_exit_mmio = write_exit_mmio,
4459         .write = true,
4460 };
4461
4462 static int emulator_read_write_onepage(unsigned long addr, void *val,
4463                                        unsigned int bytes,
4464                                        struct x86_exception *exception,
4465                                        struct kvm_vcpu *vcpu,
4466                                        const struct read_write_emulator_ops *ops)
4467 {
4468         gpa_t gpa;
4469         int handled, ret;
4470         bool write = ops->write;
4471         struct kvm_mmio_fragment *frag;
4472
4473         ret = vcpu_mmio_gva_to_gpa(vcpu, addr, &gpa, exception, write);
4474
4475         if (ret < 0)
4476                 return X86EMUL_PROPAGATE_FAULT;
4477
4478         /* For APIC access vmexit */
4479         if (ret)
4480                 goto mmio;
4481
4482         if (ops->read_write_emulate(vcpu, gpa, val, bytes))
4483                 return X86EMUL_CONTINUE;
4484
4485 mmio:
4486         /*
4487          * Is this MMIO handled locally?
4488          */
4489         handled = ops->read_write_mmio(vcpu, gpa, bytes, val);
4490         if (handled == bytes)
4491                 return X86EMUL_CONTINUE;
4492
4493         gpa += handled;
4494         bytes -= handled;
4495         val += handled;
4496
4497         WARN_ON(vcpu->mmio_nr_fragments >= KVM_MAX_MMIO_FRAGMENTS);
4498         frag = &vcpu->mmio_fragments[vcpu->mmio_nr_fragments++];
4499         frag->gpa = gpa;
4500         frag->data = val;
4501         frag->len = bytes;
4502         return X86EMUL_CONTINUE;
4503 }
4504
4505 int emulator_read_write(struct x86_emulate_ctxt *ctxt, unsigned long addr,
4506                         void *val, unsigned int bytes,
4507                         struct x86_exception *exception,
4508                         const struct read_write_emulator_ops *ops)
4509 {
4510         struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
4511         gpa_t gpa;
4512         int rc;
4513
4514         if (ops->read_write_prepare &&
4515                   ops->read_write_prepare(vcpu, val, bytes))
4516                 return X86EMUL_CONTINUE;
4517
4518         vcpu->mmio_nr_fragments = 0;
4519
4520         /* Crossing a page boundary? */
4521         if (((addr + bytes - 1) ^ addr) & PAGE_MASK) {
4522                 int now;
4523
4524                 now = -addr & ~PAGE_MASK;
4525                 rc = emulator_read_write_onepage(addr, val, now, exception,
4526                                                  vcpu, ops);
4527
4528                 if (rc != X86EMUL_CONTINUE)
4529                         return rc;
4530                 addr += now;
4531                 val += now;
4532                 bytes -= now;
4533         }
4534
4535         rc = emulator_read_write_onepage(addr, val, bytes, exception,
4536                                          vcpu, ops);
4537         if (rc != X86EMUL_CONTINUE)
4538                 return rc;
4539
4540         if (!vcpu->mmio_nr_fragments)
4541                 return rc;
4542
4543         gpa = vcpu->mmio_fragments[0].gpa;
4544
4545         vcpu->mmio_needed = 1;
4546         vcpu->mmio_cur_fragment = 0;
4547
4548         vcpu->run->mmio.len = min(8u, vcpu->mmio_fragments[0].len);
4549         vcpu->run->mmio.is_write = vcpu->mmio_is_write = ops->write;
4550         vcpu->run->exit_reason = KVM_EXIT_MMIO;
4551         vcpu->run->mmio.phys_addr = gpa;
4552
4553         return ops->read_write_exit_mmio(vcpu, gpa, val, bytes);
4554 }
4555
4556 static int emulator_read_emulated(struct x86_emulate_ctxt *ctxt,
4557                                   unsigned long addr,
4558                                   void *val,
4559                                   unsigned int bytes,
4560                                   struct x86_exception *exception)
4561 {
4562         return emulator_read_write(ctxt, addr, val, bytes,
4563                                    exception, &read_emultor);
4564 }
4565
4566 int emulator_write_emulated(struct x86_emulate_ctxt *ctxt,
4567                             unsigned long addr,
4568                             const void *val,
4569                             unsigned int bytes,
4570                             struct x86_exception *exception)
4571 {
4572         return emulator_read_write(ctxt, addr, (void *)val, bytes,
4573                                    exception, &write_emultor);
4574 }
4575
4576 #define CMPXCHG_TYPE(t, ptr, old, new) \
4577         (cmpxchg((t *)(ptr), *(t *)(old), *(t *)(new)) == *(t *)(old))
4578
4579 #ifdef CONFIG_X86_64
4580 #  define CMPXCHG64(ptr, old, new) CMPXCHG_TYPE(u64, ptr, old, new)
4581 #else
4582 #  define CMPXCHG64(ptr, old, new) \
4583         (cmpxchg64((u64 *)(ptr), *(u64 *)(old), *(u64 *)(new)) == *(u64 *)(old))
4584 #endif
4585
4586 static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt,
4587                                      unsigned long addr,
4588                                      const void *old,
4589                                      const void *new,
4590                                      unsigned int bytes,
4591                                      struct x86_exception *exception)
4592 {
4593         struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
4594         gpa_t gpa;
4595         struct page *page;
4596         char *kaddr;
4597         bool exchanged;
4598
4599         /* guests cmpxchg8b have to be emulated atomically */
4600         if (bytes > 8 || (bytes & (bytes - 1)))
4601                 goto emul_write;
4602
4603         gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, NULL);
4604
4605         if (gpa == UNMAPPED_GVA ||
4606             (gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
4607                 goto emul_write;
4608
4609         if (((gpa + bytes - 1) & PAGE_MASK) != (gpa & PAGE_MASK))
4610                 goto emul_write;
4611
4612         page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
4613         if (is_error_page(page))
4614                 goto emul_write;
4615
4616         kaddr = kmap_atomic(page);
4617         kaddr += offset_in_page(gpa);
4618         switch (bytes) {
4619         case 1:
4620                 exchanged = CMPXCHG_TYPE(u8, kaddr, old, new);
4621                 break;
4622         case 2:
4623                 exchanged = CMPXCHG_TYPE(u16, kaddr, old, new);
4624                 break;
4625         case 4:
4626                 exchanged = CMPXCHG_TYPE(u32, kaddr, old, new);
4627                 break;
4628         case 8:
4629                 exchanged = CMPXCHG64(kaddr, old, new);
4630                 break;
4631         default:
4632                 BUG();
4633         }
4634         kunmap_atomic(kaddr);
4635         kvm_release_page_dirty(page);
4636
4637         if (!exchanged)
4638                 return X86EMUL_CMPXCHG_FAILED;
4639
4640         mark_page_dirty(vcpu->kvm, gpa >> PAGE_SHIFT);
4641         kvm_mmu_pte_write(vcpu, gpa, new, bytes);
4642
4643         return X86EMUL_CONTINUE;
4644
4645 emul_write:
4646         printk_once(KERN_WARNING "kvm: emulating exchange as write\n");
4647
4648         return emulator_write_emulated(ctxt, addr, new, bytes, exception);
4649 }
4650
4651 static int kernel_pio(struct kvm_vcpu *vcpu, void *pd)
4652 {
4653         /* TODO: String I/O for in kernel device */
4654         int r;
4655
4656         if (vcpu->arch.pio.in)
4657                 r = kvm_io_bus_read(vcpu->kvm, KVM_PIO_BUS, vcpu->arch.pio.port,
4658                                     vcpu->arch.pio.size, pd);
4659         else
4660                 r = kvm_io_bus_write(vcpu->kvm, KVM_PIO_BUS,
4661                                      vcpu->arch.pio.port, vcpu->arch.pio.size,
4662                                      pd);
4663         return r;
4664 }
4665
4666 static int emulator_pio_in_out(struct kvm_vcpu *vcpu, int size,
4667                                unsigned short port, void *val,
4668                                unsigned int count, bool in)
4669 {
4670         vcpu->arch.pio.port = port;
4671         vcpu->arch.pio.in = in;
4672         vcpu->arch.pio.count  = count;
4673         vcpu->arch.pio.size = size;
4674
4675         if (!kernel_pio(vcpu, vcpu->arch.pio_data)) {
4676                 vcpu->arch.pio.count = 0;
4677                 return 1;
4678         }
4679
4680         vcpu->run->exit_reason = KVM_EXIT_IO;
4681         vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
4682         vcpu->run->io.size = size;
4683         vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
4684         vcpu->run->io.count = count;
4685         vcpu->run->io.port = port;
4686
4687         return 0;
4688 }
4689
4690 static int emulator_pio_in_emulated(struct x86_emulate_ctxt *ctxt,
4691                                     int size, unsigned short port, void *val,
4692                                     unsigned int count)
4693 {
4694         struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
4695         int ret;
4696
4697         if (vcpu->arch.pio.count)
4698                 goto data_avail;
4699
4700         ret = emulator_pio_in_out(vcpu, size, port, val, count, true);
4701         if (ret) {
4702 data_avail:
4703                 memcpy(val, vcpu->arch.pio_data, size * count);
4704                 trace_kvm_pio(KVM_PIO_IN, port, size, count, vcpu->arch.pio_data);
4705                 vcpu->arch.pio.count = 0;
4706                 return 1;
4707         }
4708
4709         return 0;
4710 }
4711
4712 static int emulator_pio_out_emulated(struct x86_emulate_ctxt *ctxt,
4713                                      int size, unsigned short port,
4714                                      const void *val, unsigned int count)
4715 {
4716         struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
4717
4718         memcpy(vcpu->arch.pio_data, val, size * count);
4719         trace_kvm_pio(KVM_PIO_OUT, port, size, count, vcpu->arch.pio_data);
4720         return emulator_pio_in_out(vcpu, size, port, (void *)val, count, false);
4721 }
4722
4723 static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg)
4724 {
4725         return kvm_x86_ops->get_segment_base(vcpu, seg);
4726 }
4727
4728 static void emulator_invlpg(struct x86_emulate_ctxt *ctxt, ulong address)
4729 {
4730         kvm_mmu_invlpg(emul_to_vcpu(ctxt), address);
4731 }
4732
4733 int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu)
4734 {
4735         if (!need_emulate_wbinvd(vcpu))
4736                 return X86EMUL_CONTINUE;
4737
4738         if (kvm_x86_ops->has_wbinvd_exit()) {
4739                 int cpu = get_cpu();
4740
4741                 cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask);
4742                 smp_call_function_many(vcpu->arch.wbinvd_dirty_mask,
4743                                 wbinvd_ipi, NULL, 1);
4744                 put_cpu();
4745                 cpumask_clear(vcpu->arch.wbinvd_dirty_mask);
4746         } else
4747                 wbinvd();
4748         return X86EMUL_CONTINUE;
4749 }
4750 EXPORT_SYMBOL_GPL(kvm_emulate_wbinvd);
4751
4752 static void emulator_wbinvd(struct x86_emulate_ctxt *ctxt)
4753 {
4754         kvm_emulate_wbinvd(emul_to_vcpu(ctxt));
4755 }
4756
4757 int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long *dest)
4758 {
4759         return kvm_get_dr(emul_to_vcpu(ctxt), dr, dest);
4760 }
4761
4762 int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value)
4763 {
4764
4765         return __kvm_set_dr(emul_to_vcpu(ctxt), dr, value);
4766 }
4767
4768 static u64 mk_cr_64(u64 curr_cr, u32 new_val)
4769 {
4770         return (curr_cr & ~((1ULL << 32) - 1)) | new_val;
4771 }
4772
4773 static unsigned long emulator_get_cr(struct x86_emulate_ctxt *ctxt, int cr)
4774 {
4775         struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
4776         unsigned long value;
4777
4778         switch (cr) {
4779         case 0:
4780                 value = kvm_read_cr0(vcpu);
4781                 break;
4782         case 2:
4783                 value = vcpu->arch.cr2;
4784                 break;
4785         case 3:
4786                 value = kvm_read_cr3(vcpu);
4787                 break;
4788         case 4:
4789                 value = kvm_read_cr4(vcpu);
4790                 break;
4791         case 8:
4792                 value = kvm_get_cr8(vcpu);
4793                 break;
4794         default:
4795                 kvm_err("%s: unexpected cr %u\n", __func__, cr);
4796                 return 0;
4797         }
4798
4799         return value;
4800 }
4801
4802 static int emulator_set_cr(struct x86_emulate_ctxt *ctxt, int cr, ulong val)
4803 {
4804         struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
4805         int res = 0;
4806
4807         switch (cr) {
4808         case 0:
4809                 res = kvm_set_cr0(vcpu, mk_cr_64(kvm_read_cr0(vcpu), val));
4810                 break;
4811         case 2:
4812                 vcpu->arch.cr2 = val;
4813                 break;
4814         case 3:
4815                 res = kvm_set_cr3(vcpu, val);
4816                 break;
4817         case 4:
4818                 res = kvm_set_cr4(vcpu, mk_cr_64(kvm_read_cr4(vcpu), val));
4819                 break;
4820         case 8:
4821                 res = kvm_set_cr8(vcpu, val);
4822                 break;
4823         default:
4824                 kvm_err("%s: unexpected cr %u\n", __func__, cr);
4825                 res = -1;
4826         }
4827
4828         return res;
4829 }
4830
4831 static int emulator_get_cpl(struct x86_emulate_ctxt *ctxt)
4832 {
4833         return kvm_x86_ops->get_cpl(emul_to_vcpu(ctxt));
4834 }
4835
4836 static void emulator_get_gdt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
4837 {
4838         kvm_x86_ops->get_gdt(emul_to_vcpu(ctxt), dt);
4839 }
4840
4841 static void emulator_get_idt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
4842 {
4843         kvm_x86_ops->get_idt(emul_to_vcpu(ctxt), dt);
4844 }
4845
4846 static void emulator_set_gdt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
4847 {
4848         kvm_x86_ops->set_gdt(emul_to_vcpu(ctxt), dt);
4849 }
4850
4851 static void emulator_set_idt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
4852 {
4853         kvm_x86_ops->set_idt(emul_to_vcpu(ctxt), dt);
4854 }
4855
4856 static unsigned long emulator_get_cached_segment_base(
4857         struct x86_emulate_ctxt *ctxt, int seg)
4858 {
4859         return get_segment_base(emul_to_vcpu(ctxt), seg);
4860 }
4861
4862 static bool emulator_get_segment(struct x86_emulate_ctxt *ctxt, u16 *selector,
4863                                  struct desc_struct *desc, u32 *base3,
4864                                  int seg)
4865 {
4866         struct kvm_segment var;
4867
4868         kvm_get_segment(emul_to_vcpu(ctxt), &var, seg);
4869         *selector = var.selector;
4870
4871         if (var.unusable) {
4872                 memset(desc, 0, sizeof(*desc));
4873                 return false;
4874         }
4875
4876         if (var.g)
4877                 var.limit >>= 12;
4878         set_desc_limit(desc, var.limit);
4879         set_desc_base(desc, (unsigned long)var.base);
4880 #ifdef CONFIG_X86_64
4881         if (base3)
4882                 *base3 = var.base >> 32;
4883 #endif
4884         desc->type = var.type;
4885         desc->s = var.s;
4886         desc->dpl = var.dpl;
4887         desc->p = var.present;
4888         desc->avl = var.avl;
4889         desc->l = var.l;
4890         desc->d = var.db;
4891         desc->g = var.g;
4892
4893         return true;
4894 }
4895
4896 static void emulator_set_segment(struct x86_emulate_ctxt *ctxt, u16 selector,
4897                                  struct desc_struct *desc, u32 base3,
4898                                  int seg)
4899 {
4900         struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
4901         struct kvm_segment var;
4902
4903         var.selector = selector;
4904         var.base = get_desc_base(desc);
4905 #ifdef CONFIG_X86_64
4906         var.base |= ((u64)base3) << 32;
4907 #endif
4908         var.limit = get_desc_limit(desc);
4909         if (desc->g)
4910                 var.limit = (var.limit << 12) | 0xfff;
4911         var.type = desc->type;
4912         var.dpl = desc->dpl;
4913         var.db = desc->d;
4914         var.s = desc->s;
4915         var.l = desc->l;
4916         var.g = desc->g;
4917         var.avl = desc->avl;
4918         var.present = desc->p;
4919         var.unusable = !var.present;
4920         var.padding = 0;
4921
4922         kvm_set_segment(vcpu, &var, seg);
4923         return;
4924 }
4925
4926 static int emulator_get_msr(struct x86_emulate_ctxt *ctxt,
4927                             u32 msr_index, u64 *pdata)
4928 {
4929         return kvm_get_msr(emul_to_vcpu(ctxt), msr_index, pdata);
4930 }
4931
4932 static int emulator_set_msr(struct x86_emulate_ctxt *ctxt,
4933                             u32 msr_index, u64 data)
4934 {
4935         struct msr_data msr;
4936
4937         msr.data = data;
4938         msr.index = msr_index;
4939         msr.host_initiated = false;
4940         return kvm_set_msr(emul_to_vcpu(ctxt), &msr);
4941 }
4942
4943 static int emulator_check_pmc(struct x86_emulate_ctxt *ctxt,
4944                               u32 pmc)
4945 {
4946         return kvm_pmu_check_pmc(emul_to_vcpu(ctxt), pmc);
4947 }
4948
4949 static int emulator_read_pmc(struct x86_emulate_ctxt *ctxt,
4950                              u32 pmc, u64 *pdata)
4951 {
4952         return kvm_pmu_read_pmc(emul_to_vcpu(ctxt), pmc, pdata);
4953 }
4954
4955 static void emulator_halt(struct x86_emulate_ctxt *ctxt)
4956 {
4957         emul_to_vcpu(ctxt)->arch.halt_request = 1;
4958 }
4959
4960 static void emulator_get_fpu(struct x86_emulate_ctxt *ctxt)
4961 {
4962         preempt_disable();
4963         kvm_load_guest_fpu(emul_to_vcpu(ctxt));
4964         /*
4965          * CR0.TS may reference the host fpu state, not the guest fpu state,
4966          * so it may be clear at this point.
4967          */
4968         clts();
4969 }
4970
4971 static void emulator_put_fpu(struct x86_emulate_ctxt *ctxt)
4972 {
4973         preempt_enable();
4974 }
4975
4976 static int emulator_intercept(struct x86_emulate_ctxt *ctxt,
4977                               struct x86_instruction_info *info,
4978                               enum x86_intercept_stage stage)
4979 {
4980         return kvm_x86_ops->check_intercept(emul_to_vcpu(ctxt), info, stage);
4981 }
4982
4983 static void emulator_get_cpuid(struct x86_emulate_ctxt *ctxt,
4984                                u32 *eax, u32 *ebx, u32 *ecx, u32 *edx)
4985 {
4986         kvm_cpuid(emul_to_vcpu(ctxt), eax, ebx, ecx, edx);
4987 }
4988
4989 static ulong emulator_read_gpr(struct x86_emulate_ctxt *ctxt, unsigned reg)
4990 {
4991         return kvm_register_read(emul_to_vcpu(ctxt), reg);
4992 }
4993
4994 static void emulator_write_gpr(struct x86_emulate_ctxt *ctxt, unsigned reg, ulong val)
4995 {
4996         kvm_register_write(emul_to_vcpu(ctxt), reg, val);
4997 }
4998
4999 static const struct x86_emulate_ops emulate_ops = {
5000         .read_gpr            = emulator_read_gpr,
5001         .write_gpr           = emulator_write_gpr,
5002         .read_std            = kvm_read_guest_virt_system,
5003         .write_std           = kvm_write_guest_virt_system,
5004         .fetch               = kvm_fetch_guest_virt,
5005         .read_emulated       = emulator_read_emulated,
5006         .write_emulated      = emulator_write_emulated,
5007         .cmpxchg_emulated    = emulator_cmpxchg_emulated,
5008         .invlpg              = emulator_invlpg,
5009         .pio_in_emulated     = emulator_pio_in_emulated,
5010         .pio_out_emulated    = emulator_pio_out_emulated,
5011         .get_segment         = emulator_get_segment,
5012         .set_segment         = emulator_set_segment,
5013         .get_cached_segment_base = emulator_get_cached_segment_base,
5014         .get_gdt             = emulator_get_gdt,
5015         .get_idt             = emulator_get_idt,
5016         .set_gdt             = emulator_set_gdt,
5017         .set_idt             = emulator_set_idt,
5018         .get_cr              = emulator_get_cr,
5019         .set_cr              = emulator_set_cr,
5020         .cpl                 = emulator_get_cpl,
5021         .get_dr              = emulator_get_dr,
5022         .set_dr              = emulator_set_dr,
5023         .set_msr             = emulator_set_msr,
5024         .get_msr             = emulator_get_msr,
5025         .check_pmc           = emulator_check_pmc,
5026         .read_pmc            = emulator_read_pmc,
5027         .halt                = emulator_halt,
5028         .wbinvd              = emulator_wbinvd,
5029         .fix_hypercall       = emulator_fix_hypercall,
5030         .get_fpu             = emulator_get_fpu,
5031         .put_fpu             = emulator_put_fpu,
5032         .intercept           = emulator_intercept,
5033         .get_cpuid           = emulator_get_cpuid,
5034 };
5035
5036 static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask)
5037 {
5038         u32 int_shadow = kvm_x86_ops->get_interrupt_shadow(vcpu);
5039         /*
5040          * an sti; sti; sequence only disable interrupts for the first
5041          * instruction. So, if the last instruction, be it emulated or
5042          * not, left the system with the INT_STI flag enabled, it
5043          * means that the last instruction is an sti. We should not
5044          * leave the flag on in this case. The same goes for mov ss
5045          */
5046         if (int_shadow & mask)
5047                 mask = 0;
5048         if (unlikely(int_shadow || mask)) {
5049                 kvm_x86_ops->set_interrupt_shadow(vcpu, mask);
5050                 if (!mask)
5051                         kvm_make_request(KVM_REQ_EVENT, vcpu);
5052         }
5053 }
5054
5055 static bool inject_emulated_exception(struct kvm_vcpu *vcpu)
5056 {
5057         struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
5058         if (ctxt->exception.vector == PF_VECTOR)
5059                 return kvm_propagate_fault(vcpu, &ctxt->exception);
5060
5061         if (ctxt->exception.error_code_valid)
5062                 kvm_queue_exception_e(vcpu, ctxt->exception.vector,
5063                                       ctxt->exception.error_code);
5064         else
5065                 kvm_queue_exception(vcpu, ctxt->exception.vector);
5066         return false;
5067 }
5068
5069 static void init_emulate_ctxt(struct kvm_vcpu *vcpu)
5070 {
5071         struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
5072         int cs_db, cs_l;
5073
5074         kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
5075
5076         ctxt->eflags = kvm_get_rflags(vcpu);
5077         ctxt->eip = kvm_rip_read(vcpu);
5078         ctxt->mode = (!is_protmode(vcpu))               ? X86EMUL_MODE_REAL :
5079                      (ctxt->eflags & X86_EFLAGS_VM)     ? X86EMUL_MODE_VM86 :
5080                      (cs_l && is_long_mode(vcpu))       ? X86EMUL_MODE_PROT64 :
5081                      cs_db                              ? X86EMUL_MODE_PROT32 :
5082                                                           X86EMUL_MODE_PROT16;
5083         ctxt->guest_mode = is_guest_mode(vcpu);
5084
5085         init_decode_cache(ctxt);
5086         vcpu->arch.emulate_regs_need_sync_from_vcpu = false;
5087 }
5088
5089 int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip)
5090 {
5091         struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
5092         int ret;
5093
5094         init_emulate_ctxt(vcpu);
5095
5096         ctxt->op_bytes = 2;
5097         ctxt->ad_bytes = 2;
5098         ctxt->_eip = ctxt->eip + inc_eip;
5099         ret = emulate_int_real(ctxt, irq);
5100
5101         if (ret != X86EMUL_CONTINUE)
5102                 return EMULATE_FAIL;
5103
5104         ctxt->eip = ctxt->_eip;
5105         kvm_rip_write(vcpu, ctxt->eip);
5106         kvm_set_rflags(vcpu, ctxt->eflags);
5107
5108         if (irq == NMI_VECTOR)
5109                 vcpu->arch.nmi_pending = 0;
5110         else
5111                 vcpu->arch.interrupt.pending = false;
5112
5113         return EMULATE_DONE;
5114 }
5115 EXPORT_SYMBOL_GPL(kvm_inject_realmode_interrupt);
5116
5117 static int handle_emulation_failure(struct kvm_vcpu *vcpu)
5118 {
5119         int r = EMULATE_DONE;
5120
5121         ++vcpu->stat.insn_emulation_fail;
5122         trace_kvm_emulate_insn_failed(vcpu);
5123         if (!is_guest_mode(vcpu) && kvm_x86_ops->get_cpl(vcpu) == 0) {
5124                 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
5125                 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
5126                 vcpu->run->internal.ndata = 0;
5127                 r = EMULATE_FAIL;
5128         }
5129         kvm_queue_exception(vcpu, UD_VECTOR);
5130
5131         return r;
5132 }
5133
5134 static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t cr2,
5135                                   bool write_fault_to_shadow_pgtable,
5136                                   int emulation_type)
5137 {
5138         gpa_t gpa = cr2;
5139         pfn_t pfn;
5140
5141         if (emulation_type & EMULTYPE_NO_REEXECUTE)
5142                 return false;
5143
5144         if (!vcpu->arch.mmu.direct_map) {
5145                 /*
5146                  * Write permission should be allowed since only
5147                  * write access need to be emulated.
5148                  */
5149                 gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2, NULL);
5150
5151                 /*
5152                  * If the mapping is invalid in guest, let cpu retry
5153                  * it to generate fault.
5154                  */
5155                 if (gpa == UNMAPPED_GVA)
5156                         return true;
5157         }
5158
5159         /*
5160          * Do not retry the unhandleable instruction if it faults on the
5161          * readonly host memory, otherwise it will goto a infinite loop:
5162          * retry instruction -> write #PF -> emulation fail -> retry
5163          * instruction -> ...
5164          */
5165         pfn = gfn_to_pfn(vcpu->kvm, gpa_to_gfn(gpa));
5166
5167         /*
5168          * If the instruction failed on the error pfn, it can not be fixed,
5169          * report the error to userspace.
5170          */
5171         if (is_error_noslot_pfn(pfn))
5172                 return false;
5173
5174         kvm_release_pfn_clean(pfn);
5175
5176         /* The instructions are well-emulated on direct mmu. */
5177         if (vcpu->arch.mmu.direct_map) {
5178                 unsigned int indirect_shadow_pages;
5179
5180                 spin_lock(&vcpu->kvm->mmu_lock);
5181                 indirect_shadow_pages = vcpu->kvm->arch.indirect_shadow_pages;
5182                 spin_unlock(&vcpu->kvm->mmu_lock);
5183
5184                 if (indirect_shadow_pages)
5185                         kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));
5186
5187                 return true;
5188         }
5189
5190         /*
5191          * if emulation was due to access to shadowed page table
5192          * and it failed try to unshadow page and re-enter the
5193          * guest to let CPU execute the instruction.
5194          */
5195         kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));
5196
5197         /*
5198          * If the access faults on its page table, it can not
5199          * be fixed by unprotecting shadow page and it should
5200          * be reported to userspace.
5201          */
5202         return !write_fault_to_shadow_pgtable;
5203 }
5204
5205 static bool retry_instruction(struct x86_emulate_ctxt *ctxt,
5206                               unsigned long cr2,  int emulation_type)
5207 {
5208         struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
5209         unsigned long last_retry_eip, last_retry_addr, gpa = cr2;
5210
5211         last_retry_eip = vcpu->arch.last_retry_eip;
5212         last_retry_addr = vcpu->arch.last_retry_addr;
5213
5214         /*
5215          * If the emulation is caused by #PF and it is non-page_table
5216          * writing instruction, it means the VM-EXIT is caused by shadow
5217          * page protected, we can zap the shadow page and retry this
5218          * instruction directly.
5219          *
5220          * Note: if the guest uses a non-page-table modifying instruction
5221          * on the PDE that points to the instruction, then we will unmap
5222          * the instruction and go to an infinite loop. So, we cache the
5223          * last retried eip and the last fault address, if we meet the eip
5224          * and the address again, we can break out of the potential infinite
5225          * loop.
5226          */
5227         vcpu->arch.last_retry_eip = vcpu->arch.last_retry_addr = 0;
5228
5229         if (!(emulation_type & EMULTYPE_RETRY))
5230                 return false;
5231
5232         if (x86_page_table_writing_insn(ctxt))
5233                 return false;
5234
5235         if (ctxt->eip == last_retry_eip && last_retry_addr == cr2)
5236                 return false;
5237
5238         vcpu->arch.last_retry_eip = ctxt->eip;
5239         vcpu->arch.last_retry_addr = cr2;
5240
5241         if (!vcpu->arch.mmu.direct_map)
5242                 gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2, NULL);
5243
5244         kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));
5245
5246         return true;
5247 }
5248
5249 static int complete_emulated_mmio(struct kvm_vcpu *vcpu);
5250 static int complete_emulated_pio(struct kvm_vcpu *vcpu);
5251
5252 static int kvm_vcpu_check_hw_bp(unsigned long addr, u32 type, u32 dr7,
5253                                 unsigned long *db)
5254 {
5255         u32 dr6 = 0;
5256         int i;
5257         u32 enable, rwlen;
5258
5259         enable = dr7;
5260         rwlen = dr7 >> 16;
5261         for (i = 0; i < 4; i++, enable >>= 2, rwlen >>= 4)
5262                 if ((enable & 3) && (rwlen & 15) == type && db[i] == addr)
5263                         dr6 |= (1 << i);
5264         return dr6;
5265 }
5266
5267 static void kvm_vcpu_check_singlestep(struct kvm_vcpu *vcpu, unsigned long rflags, int *r)
5268 {
5269         struct kvm_run *kvm_run = vcpu->run;
5270
5271         /*
5272          * rflags is the old, "raw" value of the flags.  The new value has
5273          * not been saved yet.
5274          *
5275          * This is correct even for TF set by the guest, because "the
5276          * processor will not generate this exception after the instruction
5277          * that sets the TF flag".
5278          */
5279         if (unlikely(rflags & X86_EFLAGS_TF)) {
5280                 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
5281                         kvm_run->debug.arch.dr6 = DR6_BS | DR6_FIXED_1 |
5282                                                   DR6_RTM;
5283                         kvm_run->debug.arch.pc = vcpu->arch.singlestep_rip;
5284                         kvm_run->debug.arch.exception = DB_VECTOR;
5285                         kvm_run->exit_reason = KVM_EXIT_DEBUG;
5286                         *r = EMULATE_USER_EXIT;
5287                 } else {
5288                         vcpu->arch.emulate_ctxt.eflags &= ~X86_EFLAGS_TF;
5289                         /*
5290                          * "Certain debug exceptions may clear bit 0-3.  The
5291                          * remaining contents of the DR6 register are never
5292                          * cleared by the processor".
5293                          */
5294                         vcpu->arch.dr6 &= ~15;
5295                         vcpu->arch.dr6 |= DR6_BS | DR6_RTM;
5296                         kvm_queue_exception(vcpu, DB_VECTOR);
5297                 }
5298         }
5299 }
5300
5301 static bool kvm_vcpu_check_breakpoint(struct kvm_vcpu *vcpu, int *r)
5302 {
5303         if (unlikely(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) &&
5304             (vcpu->arch.guest_debug_dr7 & DR7_BP_EN_MASK)) {
5305                 struct kvm_run *kvm_run = vcpu->run;
5306                 unsigned long eip = kvm_get_linear_rip(vcpu);
5307                 u32 dr6 = kvm_vcpu_check_hw_bp(eip, 0,
5308                                            vcpu->arch.guest_debug_dr7,
5309                                            vcpu->arch.eff_db);
5310
5311                 if (dr6 != 0) {
5312                         kvm_run->debug.arch.dr6 = dr6 | DR6_FIXED_1 | DR6_RTM;
5313                         kvm_run->debug.arch.pc = eip;
5314                         kvm_run->debug.arch.exception = DB_VECTOR;
5315                         kvm_run->exit_reason = KVM_EXIT_DEBUG;
5316                         *r = EMULATE_USER_EXIT;
5317                         return true;
5318                 }
5319         }
5320
5321         if (unlikely(vcpu->arch.dr7 & DR7_BP_EN_MASK) &&
5322             !(kvm_get_rflags(vcpu) & X86_EFLAGS_RF)) {
5323                 unsigned long eip = kvm_get_linear_rip(vcpu);
5324                 u32 dr6 = kvm_vcpu_check_hw_bp(eip, 0,
5325                                            vcpu->arch.dr7,
5326                                            vcpu->arch.db);
5327
5328                 if (dr6 != 0) {
5329                         vcpu->arch.dr6 &= ~15;
5330                         vcpu->arch.dr6 |= dr6 | DR6_RTM;
5331                         kvm_queue_exception(vcpu, DB_VECTOR);
5332                         *r = EMULATE_DONE;
5333                         return true;
5334                 }
5335         }
5336
5337         return false;
5338 }
5339
5340 int x86_emulate_instruction(struct kvm_vcpu *vcpu,
5341                             unsigned long cr2,
5342                             int emulation_type,
5343                             void *insn,
5344                             int insn_len)
5345 {
5346         int r;
5347         struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
5348         bool writeback = true;
5349         bool write_fault_to_spt = vcpu->arch.write_fault_to_shadow_pgtable;
5350
5351         /*
5352          * Clear write_fault_to_shadow_pgtable here to ensure it is
5353          * never reused.
5354          */
5355         vcpu->arch.write_fault_to_shadow_pgtable = false;
5356         kvm_clear_exception_queue(vcpu);
5357
5358         if (!(emulation_type & EMULTYPE_NO_DECODE)) {
5359                 init_emulate_ctxt(vcpu);
5360
5361                 /*
5362                  * We will reenter on the same instruction since
5363                  * we do not set complete_userspace_io.  This does not
5364                  * handle watchpoints yet, those would be handled in
5365                  * the emulate_ops.
5366                  */
5367                 if (kvm_vcpu_check_breakpoint(vcpu, &r))
5368                         return r;
5369
5370                 ctxt->interruptibility = 0;
5371                 ctxt->have_exception = false;
5372                 ctxt->exception.vector = -1;
5373                 ctxt->perm_ok = false;
5374
5375                 ctxt->ud = emulation_type & EMULTYPE_TRAP_UD;
5376
5377                 r = x86_decode_insn(ctxt, insn, insn_len);
5378
5379                 trace_kvm_emulate_insn_start(vcpu);
5380                 ++vcpu->stat.insn_emulation;
5381                 if (r != EMULATION_OK)  {
5382                         if (emulation_type & EMULTYPE_TRAP_UD)
5383                                 return EMULATE_FAIL;
5384                         if (reexecute_instruction(vcpu, cr2, write_fault_to_spt,
5385                                                 emulation_type))
5386                                 return EMULATE_DONE;
5387                         if (emulation_type & EMULTYPE_SKIP)
5388                                 return EMULATE_FAIL;
5389                         return handle_emulation_failure(vcpu);
5390                 }
5391         }
5392
5393         if (emulation_type & EMULTYPE_SKIP) {
5394                 kvm_rip_write(vcpu, ctxt->_eip);
5395                 if (ctxt->eflags & X86_EFLAGS_RF)
5396                         kvm_set_rflags(vcpu, ctxt->eflags & ~X86_EFLAGS_RF);
5397                 return EMULATE_DONE;
5398         }
5399
5400         if (retry_instruction(ctxt, cr2, emulation_type))
5401                 return EMULATE_DONE;
5402
5403         /* this is needed for vmware backdoor interface to work since it
5404            changes registers values  during IO operation */
5405         if (vcpu->arch.emulate_regs_need_sync_from_vcpu) {
5406                 vcpu->arch.emulate_regs_need_sync_from_vcpu = false;
5407                 emulator_invalidate_register_cache(ctxt);
5408         }
5409
5410 restart:
5411         r = x86_emulate_insn(ctxt);
5412
5413         if (r == EMULATION_INTERCEPTED)
5414                 return EMULATE_DONE;
5415
5416         if (r == EMULATION_FAILED) {
5417                 if (reexecute_instruction(vcpu, cr2, write_fault_to_spt,
5418                                         emulation_type))
5419                         return EMULATE_DONE;
5420
5421                 return handle_emulation_failure(vcpu);
5422         }
5423
5424         if (ctxt->have_exception) {
5425                 r = EMULATE_DONE;
5426                 if (inject_emulated_exception(vcpu))
5427                         return r;
5428         } else if (vcpu->arch.pio.count) {
5429                 if (!vcpu->arch.pio.in) {
5430                         /* FIXME: return into emulator if single-stepping.  */
5431                         vcpu->arch.pio.count = 0;
5432                 } else {
5433                         writeback = false;
5434                         vcpu->arch.complete_userspace_io = complete_emulated_pio;
5435                 }
5436                 r = EMULATE_USER_EXIT;
5437         } else if (vcpu->mmio_needed) {
5438                 if (!vcpu->mmio_is_write)
5439                         writeback = false;
5440                 r = EMULATE_USER_EXIT;
5441                 vcpu->arch.complete_userspace_io = complete_emulated_mmio;
5442         } else if (r == EMULATION_RESTART)
5443                 goto restart;
5444         else
5445                 r = EMULATE_DONE;
5446
5447         if (writeback) {
5448                 unsigned long rflags = kvm_x86_ops->get_rflags(vcpu);
5449                 toggle_interruptibility(vcpu, ctxt->interruptibility);
5450                 vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
5451                 kvm_rip_write(vcpu, ctxt->eip);
5452                 if (r == EMULATE_DONE)
5453                         kvm_vcpu_check_singlestep(vcpu, rflags, &r);
5454                 if (!ctxt->have_exception ||
5455                     exception_type(ctxt->exception.vector) == EXCPT_TRAP)
5456                         __kvm_set_rflags(vcpu, ctxt->eflags);
5457
5458                 /*
5459                  * For STI, interrupts are shadowed; so KVM_REQ_EVENT will
5460                  * do nothing, and it will be requested again as soon as
5461                  * the shadow expires.  But we still need to check here,
5462                  * because POPF has no interrupt shadow.
5463                  */
5464                 if (unlikely((ctxt->eflags & ~rflags) & X86_EFLAGS_IF))
5465                         kvm_make_request(KVM_REQ_EVENT, vcpu);
5466         } else
5467                 vcpu->arch.emulate_regs_need_sync_to_vcpu = true;
5468
5469         return r;
5470 }
5471 EXPORT_SYMBOL_GPL(x86_emulate_instruction);
5472
5473 int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, unsigned short port)
5474 {
5475         unsigned long val = kvm_register_read(vcpu, VCPU_REGS_RAX);
5476         int ret = emulator_pio_out_emulated(&vcpu->arch.emulate_ctxt,
5477                                             size, port, &val, 1);
5478         /* do not return to emulator after return from userspace */
5479         vcpu->arch.pio.count = 0;
5480         return ret;
5481 }
5482 EXPORT_SYMBOL_GPL(kvm_fast_pio_out);
5483
5484 static void tsc_bad(void *info)
5485 {
5486         __this_cpu_write(cpu_tsc_khz, 0);
5487 }
5488
5489 static void tsc_khz_changed(void *data)
5490 {
5491         struct cpufreq_freqs *freq = data;
5492         unsigned long khz = 0;
5493
5494         if (data)
5495                 khz = freq->new;
5496         else if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
5497                 khz = cpufreq_quick_get(raw_smp_processor_id());
5498         if (!khz)
5499                 khz = tsc_khz;
5500         __this_cpu_write(cpu_tsc_khz, khz);
5501 }
5502
5503 static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
5504                                      void *data)
5505 {
5506         struct cpufreq_freqs *freq = data;
5507         struct kvm *kvm;
5508         struct kvm_vcpu *vcpu;
5509         int i, send_ipi = 0;
5510
5511         /*
5512          * We allow guests to temporarily run on slowing clocks,
5513          * provided we notify them after, or to run on accelerating
5514          * clocks, provided we notify them before.  Thus time never
5515          * goes backwards.
5516          *
5517          * However, we have a problem.  We can't atomically update
5518          * the frequency of a given CPU from this function; it is
5519          * merely a notifier, which can be called from any CPU.
5520          * Changing the TSC frequency at arbitrary points in time
5521          * requires a recomputation of local variables related to
5522          * the TSC for each VCPU.  We must flag these local variables
5523          * to be updated and be sure the update takes place with the
5524          * new frequency before any guests proceed.
5525          *
5526          * Unfortunately, the combination of hotplug CPU and frequency
5527          * change creates an intractable locking scenario; the order
5528          * of when these callouts happen is undefined with respect to
5529          * CPU hotplug, and they can race with each other.  As such,
5530          * merely setting per_cpu(cpu_tsc_khz) = X during a hotadd is
5531          * undefined; you can actually have a CPU frequency change take
5532          * place in between the computation of X and the setting of the
5533          * variable.  To protect against this problem, all updates of
5534          * the per_cpu tsc_khz variable are done in an interrupt
5535          * protected IPI, and all callers wishing to update the value
5536          * must wait for a synchronous IPI to complete (which is trivial
5537          * if the caller is on the CPU already).  This establishes the
5538          * necessary total order on variable updates.
5539          *
5540          * Note that because a guest time update may take place
5541          * anytime after the setting of the VCPU's request bit, the
5542          * correct TSC value must be set before the request.  However,
5543          * to ensure the update actually makes it to any guest which
5544          * starts running in hardware virtualization between the set
5545          * and the acquisition of the spinlock, we must also ping the
5546          * CPU after setting the request bit.
5547          *
5548          */
5549
5550         if (val == CPUFREQ_PRECHANGE && freq->old > freq->new)
5551                 return 0;
5552         if (val == CPUFREQ_POSTCHANGE && freq->old < freq->new)
5553                 return 0;
5554
5555         smp_call_function_single(freq->cpu, tsc_khz_changed, freq, 1);
5556
5557         spin_lock(&kvm_lock);
5558         list_for_each_entry(kvm, &vm_list, vm_list) {
5559                 kvm_for_each_vcpu(i, vcpu, kvm) {
5560                         if (vcpu->cpu != freq->cpu)
5561                                 continue;
5562                         kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
5563                         if (vcpu->cpu != smp_processor_id())
5564                                 send_ipi = 1;
5565                 }
5566         }
5567         spin_unlock(&kvm_lock);
5568
5569         if (freq->old < freq->new && send_ipi) {
5570                 /*
5571                  * We upscale the frequency.  Must make the guest
5572                  * doesn't see old kvmclock values while running with
5573                  * the new frequency, otherwise we risk the guest sees
5574                  * time go backwards.
5575                  *
5576                  * In case we update the frequency for another cpu
5577                  * (which might be in guest context) send an interrupt
5578                  * to kick the cpu out of guest context.  Next time
5579                  * guest context is entered kvmclock will be updated,
5580                  * so the guest will not see stale values.
5581                  */
5582                 smp_call_function_single(freq->cpu, tsc_khz_changed, freq, 1);
5583         }
5584         return 0;
5585 }
5586
5587 static struct notifier_block kvmclock_cpufreq_notifier_block = {
5588         .notifier_call  = kvmclock_cpufreq_notifier
5589 };
5590
5591 static int kvmclock_cpu_notifier(struct notifier_block *nfb,
5592                                         unsigned long action, void *hcpu)
5593 {
5594         unsigned int cpu = (unsigned long)hcpu;
5595
5596         switch (action) {
5597                 case CPU_ONLINE:
5598                 case CPU_DOWN_FAILED:
5599                         smp_call_function_single(cpu, tsc_khz_changed, NULL, 1);
5600                         break;
5601                 case CPU_DOWN_PREPARE:
5602                         smp_call_function_single(cpu, tsc_bad, NULL, 1);
5603                         break;
5604         }
5605         return NOTIFY_OK;
5606 }
5607
5608 static struct notifier_block kvmclock_cpu_notifier_block = {
5609         .notifier_call  = kvmclock_cpu_notifier,
5610         .priority = -INT_MAX
5611 };
5612
5613 static void kvm_timer_init(void)
5614 {
5615         int cpu;
5616
5617         max_tsc_khz = tsc_khz;
5618
5619         cpu_notifier_register_begin();
5620         if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
5621 #ifdef CONFIG_CPU_FREQ
5622                 struct cpufreq_policy policy;
5623                 memset(&policy, 0, sizeof(policy));
5624                 cpu = get_cpu();
5625                 cpufreq_get_policy(&policy, cpu);
5626                 if (policy.cpuinfo.max_freq)
5627                         max_tsc_khz = policy.cpuinfo.max_freq;
5628                 put_cpu();
5629 #endif
5630                 cpufreq_register_notifier(&kvmclock_cpufreq_notifier_block,
5631                                           CPUFREQ_TRANSITION_NOTIFIER);
5632         }
5633         pr_debug("kvm: max_tsc_khz = %ld\n", max_tsc_khz);
5634         for_each_online_cpu(cpu)
5635                 smp_call_function_single(cpu, tsc_khz_changed, NULL, 1);
5636
5637         __register_hotcpu_notifier(&kvmclock_cpu_notifier_block);
5638         cpu_notifier_register_done();
5639
5640 }
5641
5642 static DEFINE_PER_CPU(struct kvm_vcpu *, current_vcpu);
5643
5644 int kvm_is_in_guest(void)
5645 {
5646         return __this_cpu_read(current_vcpu) != NULL;
5647 }
5648
5649 static int kvm_is_user_mode(void)
5650 {
5651         int user_mode = 3;
5652
5653         if (__this_cpu_read(current_vcpu))
5654                 user_mode = kvm_x86_ops->get_cpl(__this_cpu_read(current_vcpu));
5655
5656         return user_mode != 0;
5657 }
5658
5659 static unsigned long kvm_get_guest_ip(void)
5660 {
5661         unsigned long ip = 0;
5662
5663         if (__this_cpu_read(current_vcpu))
5664                 ip = kvm_rip_read(__this_cpu_read(current_vcpu));
5665
5666         return ip;
5667 }
5668
5669 static struct perf_guest_info_callbacks kvm_guest_cbs = {
5670         .is_in_guest            = kvm_is_in_guest,
5671         .is_user_mode           = kvm_is_user_mode,
5672         .get_guest_ip           = kvm_get_guest_ip,
5673 };
5674
5675 void kvm_before_handle_nmi(struct kvm_vcpu *vcpu)
5676 {
5677         __this_cpu_write(current_vcpu, vcpu);
5678 }
5679 EXPORT_SYMBOL_GPL(kvm_before_handle_nmi);
5680
5681 void kvm_after_handle_nmi(struct kvm_vcpu *vcpu)
5682 {
5683         __this_cpu_write(current_vcpu, NULL);
5684 }
5685 EXPORT_SYMBOL_GPL(kvm_after_handle_nmi);
5686
5687 static void kvm_set_mmio_spte_mask(void)
5688 {
5689         u64 mask;
5690         int maxphyaddr = boot_cpu_data.x86_phys_bits;
5691
5692         /*
5693          * Set the reserved bits and the present bit of an paging-structure
5694          * entry to generate page fault with PFER.RSV = 1.
5695          */
5696          /* Mask the reserved physical address bits. */
5697         mask = rsvd_bits(maxphyaddr, 51);
5698
5699         /* Bit 62 is always reserved for 32bit host. */
5700         mask |= 0x3ull << 62;
5701
5702         /* Set the present bit. */
5703         mask |= 1ull;
5704
5705 #ifdef CONFIG_X86_64
5706         /*
5707          * If reserved bit is not supported, clear the present bit to disable
5708          * mmio page fault.
5709          */
5710         if (maxphyaddr == 52)
5711                 mask &= ~1ull;
5712 #endif
5713
5714         kvm_mmu_set_mmio_spte_mask(mask);
5715 }
5716
5717 #ifdef CONFIG_X86_64
5718 static void pvclock_gtod_update_fn(struct work_struct *work)
5719 {
5720         struct kvm *kvm;
5721
5722         struct kvm_vcpu *vcpu;
5723         int i;
5724
5725         spin_lock(&kvm_lock);
5726         list_for_each_entry(kvm, &vm_list, vm_list)
5727                 kvm_for_each_vcpu(i, vcpu, kvm)
5728                         kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
5729         atomic_set(&kvm_guest_has_master_clock, 0);
5730         spin_unlock(&kvm_lock);
5731 }
5732
5733 static DECLARE_WORK(pvclock_gtod_work, pvclock_gtod_update_fn);
5734
5735 /*
5736  * Notification about pvclock gtod data update.
5737  */
5738 static int pvclock_gtod_notify(struct notifier_block *nb, unsigned long unused,
5739                                void *priv)
5740 {
5741         struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
5742         struct timekeeper *tk = priv;
5743
5744         update_pvclock_gtod(tk);
5745
5746         /* disable master clock if host does not trust, or does not
5747          * use, TSC clocksource
5748          */
5749         if (gtod->clock.vclock_mode != VCLOCK_TSC &&
5750             atomic_read(&kvm_guest_has_master_clock) != 0)
5751                 queue_work(system_long_wq, &pvclock_gtod_work);
5752
5753         return 0;
5754 }
5755
5756 static struct notifier_block pvclock_gtod_notifier = {
5757         .notifier_call = pvclock_gtod_notify,
5758 };
5759 #endif
5760
5761 int kvm_arch_init(void *opaque)
5762 {
5763         int r;
5764         struct kvm_x86_ops *ops = opaque;
5765
5766         if (kvm_x86_ops) {
5767                 printk(KERN_ERR "kvm: already loaded the other module\n");
5768                 r = -EEXIST;
5769                 goto out;
5770         }
5771
5772         if (!ops->cpu_has_kvm_support()) {
5773                 printk(KERN_ERR "kvm: no hardware support\n");
5774                 r = -EOPNOTSUPP;
5775                 goto out;
5776         }
5777         if (ops->disabled_by_bios()) {
5778                 printk(KERN_ERR "kvm: disabled by bios\n");
5779                 r = -EOPNOTSUPP;
5780                 goto out;
5781         }
5782
5783         r = -ENOMEM;
5784         shared_msrs = alloc_percpu(struct kvm_shared_msrs);
5785         if (!shared_msrs) {
5786                 printk(KERN_ERR "kvm: failed to allocate percpu kvm_shared_msrs\n");
5787                 goto out;
5788         }
5789
5790         r = kvm_mmu_module_init();
5791         if (r)
5792                 goto out_free_percpu;
5793
5794         kvm_set_mmio_spte_mask();
5795
5796         kvm_x86_ops = ops;
5797         kvm_init_msr_list();
5798
5799         kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK,
5800                         PT_DIRTY_MASK, PT64_NX_MASK, 0);
5801
5802         kvm_timer_init();
5803
5804         perf_register_guest_info_callbacks(&kvm_guest_cbs);
5805
5806         if (cpu_has_xsave)
5807                 host_xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
5808
5809         kvm_lapic_init();
5810 #ifdef CONFIG_X86_64
5811         pvclock_gtod_register_notifier(&pvclock_gtod_notifier);
5812 #endif
5813
5814         return 0;
5815
5816 out_free_percpu:
5817         free_percpu(shared_msrs);
5818 out:
5819         return r;
5820 }
5821
5822 void kvm_arch_exit(void)
5823 {
5824         perf_unregister_guest_info_callbacks(&kvm_guest_cbs);
5825
5826         if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
5827                 cpufreq_unregister_notifier(&kvmclock_cpufreq_notifier_block,
5828                                             CPUFREQ_TRANSITION_NOTIFIER);
5829         unregister_hotcpu_notifier(&kvmclock_cpu_notifier_block);
5830 #ifdef CONFIG_X86_64
5831         pvclock_gtod_unregister_notifier(&pvclock_gtod_notifier);
5832 #endif
5833         kvm_x86_ops = NULL;
5834         kvm_mmu_module_exit();
5835         free_percpu(shared_msrs);
5836 }
5837
5838 int kvm_emulate_halt(struct kvm_vcpu *vcpu)
5839 {
5840         ++vcpu->stat.halt_exits;
5841         if (irqchip_in_kernel(vcpu->kvm)) {
5842                 vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
5843                 return 1;
5844         } else {
5845                 vcpu->run->exit_reason = KVM_EXIT_HLT;
5846                 return 0;
5847         }
5848 }
5849 EXPORT_SYMBOL_GPL(kvm_emulate_halt);
5850
5851 int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
5852 {
5853         u64 param, ingpa, outgpa, ret;
5854         uint16_t code, rep_idx, rep_cnt, res = HV_STATUS_SUCCESS, rep_done = 0;
5855         bool fast, longmode;
5856
5857         /*
5858          * hypercall generates UD from non zero cpl and real mode
5859          * per HYPER-V spec
5860          */
5861         if (kvm_x86_ops->get_cpl(vcpu) != 0 || !is_protmode(vcpu)) {
5862                 kvm_queue_exception(vcpu, UD_VECTOR);
5863                 return 0;
5864         }
5865
5866         longmode = is_64_bit_mode(vcpu);
5867
5868         if (!longmode) {
5869                 param = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDX) << 32) |
5870                         (kvm_register_read(vcpu, VCPU_REGS_RAX) & 0xffffffff);
5871                 ingpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RBX) << 32) |
5872                         (kvm_register_read(vcpu, VCPU_REGS_RCX) & 0xffffffff);
5873                 outgpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDI) << 32) |
5874                         (kvm_register_read(vcpu, VCPU_REGS_RSI) & 0xffffffff);
5875         }
5876 #ifdef CONFIG_X86_64
5877         else {
5878                 param = kvm_register_read(vcpu, VCPU_REGS_RCX);
5879                 ingpa = kvm_register_read(vcpu, VCPU_REGS_RDX);
5880                 outgpa = kvm_register_read(vcpu, VCPU_REGS_R8);
5881         }
5882 #endif
5883
5884         code = param & 0xffff;
5885         fast = (param >> 16) & 0x1;
5886         rep_cnt = (param >> 32) & 0xfff;
5887         rep_idx = (param >> 48) & 0xfff;
5888
5889         trace_kvm_hv_hypercall(code, fast, rep_cnt, rep_idx, ingpa, outgpa);
5890
5891         switch (code) {
5892         case HV_X64_HV_NOTIFY_LONG_SPIN_WAIT:
5893                 kvm_vcpu_on_spin(vcpu);
5894                 break;
5895         default:
5896                 res = HV_STATUS_INVALID_HYPERCALL_CODE;
5897                 break;
5898         }
5899
5900         ret = res | (((u64)rep_done & 0xfff) << 32);
5901         if (longmode) {
5902                 kvm_register_write(vcpu, VCPU_REGS_RAX, ret);
5903         } else {
5904                 kvm_register_write(vcpu, VCPU_REGS_RDX, ret >> 32);
5905                 kvm_register_write(vcpu, VCPU_REGS_RAX, ret & 0xffffffff);
5906         }
5907
5908         return 1;
5909 }
5910
5911 /*
5912  * kvm_pv_kick_cpu_op:  Kick a vcpu.
5913  *
5914  * @apicid - apicid of vcpu to be kicked.
5915  */
5916 static void kvm_pv_kick_cpu_op(struct kvm *kvm, unsigned long flags, int apicid)
5917 {
5918         struct kvm_lapic_irq lapic_irq;
5919
5920         lapic_irq.shorthand = 0;
5921         lapic_irq.dest_mode = 0;
5922         lapic_irq.dest_id = apicid;
5923
5924         lapic_irq.delivery_mode = APIC_DM_REMRD;
5925         kvm_irq_delivery_to_apic(kvm, 0, &lapic_irq, NULL);
5926 }
5927
5928 int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
5929 {
5930         unsigned long nr, a0, a1, a2, a3, ret;
5931         int op_64_bit, r = 1;
5932
5933         if (kvm_hv_hypercall_enabled(vcpu->kvm))
5934                 return kvm_hv_hypercall(vcpu);
5935
5936         nr = kvm_register_read(vcpu, VCPU_REGS_RAX);
5937         a0 = kvm_register_read(vcpu, VCPU_REGS_RBX);
5938         a1 = kvm_register_read(vcpu, VCPU_REGS_RCX);
5939         a2 = kvm_register_read(vcpu, VCPU_REGS_RDX);
5940         a3 = kvm_register_read(vcpu, VCPU_REGS_RSI);
5941
5942         trace_kvm_hypercall(nr, a0, a1, a2, a3);
5943
5944         op_64_bit = is_64_bit_mode(vcpu);
5945         if (!op_64_bit) {
5946                 nr &= 0xFFFFFFFF;
5947                 a0 &= 0xFFFFFFFF;
5948                 a1 &= 0xFFFFFFFF;
5949                 a2 &= 0xFFFFFFFF;
5950                 a3 &= 0xFFFFFFFF;
5951         }
5952
5953         if (kvm_x86_ops->get_cpl(vcpu) != 0) {
5954                 ret = -KVM_EPERM;
5955                 goto out;
5956         }
5957
5958         switch (nr) {
5959         case KVM_HC_VAPIC_POLL_IRQ:
5960                 ret = 0;
5961                 break;
5962         case KVM_HC_KICK_CPU:
5963                 kvm_pv_kick_cpu_op(vcpu->kvm, a0, a1);
5964                 ret = 0;
5965                 break;
5966         default:
5967                 ret = -KVM_ENOSYS;
5968                 break;
5969         }
5970 out:
5971         if (!op_64_bit)
5972                 ret = (u32)ret;
5973         kvm_register_write(vcpu, VCPU_REGS_RAX, ret);
5974         ++vcpu->stat.hypercalls;
5975         return r;
5976 }
5977 EXPORT_SYMBOL_GPL(kvm_emulate_hypercall);
5978
5979 static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt)
5980 {
5981         struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
5982         char instruction[3];
5983         unsigned long rip = kvm_rip_read(vcpu);
5984
5985         kvm_x86_ops->patch_hypercall(vcpu, instruction);
5986
5987         return emulator_write_emulated(ctxt, rip, instruction, 3, NULL);
5988 }
5989
5990 /*
5991  * Check if userspace requested an interrupt window, and that the
5992  * interrupt window is open.
5993  *
5994  * No need to exit to userspace if we already have an interrupt queued.
5995  */
5996 static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu)
5997 {
5998         return (!irqchip_in_kernel(vcpu->kvm) && !kvm_cpu_has_interrupt(vcpu) &&
5999                 vcpu->run->request_interrupt_window &&
6000                 kvm_arch_interrupt_allowed(vcpu));
6001 }
6002
6003 static void post_kvm_run_save(struct kvm_vcpu *vcpu)
6004 {
6005         struct kvm_run *kvm_run = vcpu->run;
6006
6007         kvm_run->if_flag = (kvm_get_rflags(vcpu) & X86_EFLAGS_IF) != 0;
6008         kvm_run->cr8 = kvm_get_cr8(vcpu);
6009         kvm_run->apic_base = kvm_get_apic_base(vcpu);
6010         if (irqchip_in_kernel(vcpu->kvm))
6011                 kvm_run->ready_for_interrupt_injection = 1;
6012         else
6013                 kvm_run->ready_for_interrupt_injection =
6014                         kvm_arch_interrupt_allowed(vcpu) &&
6015                         !kvm_cpu_has_interrupt(vcpu) &&
6016                         !kvm_event_needs_reinjection(vcpu);
6017 }
6018
6019 static void update_cr8_intercept(struct kvm_vcpu *vcpu)
6020 {
6021         int max_irr, tpr;
6022
6023         if (!kvm_x86_ops->update_cr8_intercept)
6024                 return;
6025
6026         if (!vcpu->arch.apic)
6027                 return;
6028
6029         if (!vcpu->arch.apic->vapic_addr)
6030                 max_irr = kvm_lapic_find_highest_irr(vcpu);
6031         else
6032                 max_irr = -1;
6033
6034         if (max_irr != -1)
6035                 max_irr >>= 4;
6036
6037         tpr = kvm_lapic_get_cr8(vcpu);
6038
6039         kvm_x86_ops->update_cr8_intercept(vcpu, tpr, max_irr);
6040 }
6041
6042 static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win)
6043 {
6044         int r;
6045
6046         /* try to reinject previous events if any */
6047         if (vcpu->arch.exception.pending) {
6048                 trace_kvm_inj_exception(vcpu->arch.exception.nr,
6049                                         vcpu->arch.exception.has_error_code,
6050                                         vcpu->arch.exception.error_code);
6051
6052                 if (exception_type(vcpu->arch.exception.nr) == EXCPT_FAULT)
6053                         __kvm_set_rflags(vcpu, kvm_get_rflags(vcpu) |
6054                                              X86_EFLAGS_RF);
6055
6056                 if (vcpu->arch.exception.nr == DB_VECTOR &&
6057                     (vcpu->arch.dr7 & DR7_GD)) {
6058                         vcpu->arch.dr7 &= ~DR7_GD;
6059                         kvm_update_dr7(vcpu);
6060                 }
6061
6062                 kvm_x86_ops->queue_exception(vcpu, vcpu->arch.exception.nr,
6063                                           vcpu->arch.exception.has_error_code,
6064                                           vcpu->arch.exception.error_code,
6065                                           vcpu->arch.exception.reinject);
6066                 return 0;
6067         }
6068
6069         if (vcpu->arch.nmi_injected) {
6070                 kvm_x86_ops->set_nmi(vcpu);
6071                 return 0;
6072         }
6073
6074         if (vcpu->arch.interrupt.pending) {
6075                 kvm_x86_ops->set_irq(vcpu);
6076                 return 0;
6077         }
6078
6079         if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events) {
6080                 r = kvm_x86_ops->check_nested_events(vcpu, req_int_win);
6081                 if (r != 0)
6082                         return r;
6083         }
6084
6085         /* try to inject new event if pending */
6086         if (vcpu->arch.nmi_pending) {
6087                 if (kvm_x86_ops->nmi_allowed(vcpu)) {
6088                         --vcpu->arch.nmi_pending;
6089                         vcpu->arch.nmi_injected = true;
6090                         kvm_x86_ops->set_nmi(vcpu);
6091                 }
6092         } else if (kvm_cpu_has_injectable_intr(vcpu)) {
6093                 /*
6094                  * Because interrupts can be injected asynchronously, we are
6095                  * calling check_nested_events again here to avoid a race condition.
6096                  * See https://lkml.org/lkml/2014/7/2/60 for discussion about this
6097                  * proposal and current concerns.  Perhaps we should be setting
6098                  * KVM_REQ_EVENT only on certain events and not unconditionally?
6099                  */
6100                 if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events) {
6101                         r = kvm_x86_ops->check_nested_events(vcpu, req_int_win);
6102                         if (r != 0)
6103                                 return r;
6104                 }
6105                 if (kvm_x86_ops->interrupt_allowed(vcpu)) {
6106                         kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu),
6107                                             false);
6108                         kvm_x86_ops->set_irq(vcpu);
6109                 }
6110         }
6111         return 0;
6112 }
6113
6114 static void process_nmi(struct kvm_vcpu *vcpu)
6115 {
6116         unsigned limit = 2;
6117
6118         /*
6119          * x86 is limited to one NMI running, and one NMI pending after it.
6120          * If an NMI is already in progress, limit further NMIs to just one.
6121          * Otherwise, allow two (and we'll inject the first one immediately).
6122          */
6123         if (kvm_x86_ops->get_nmi_mask(vcpu) || vcpu->arch.nmi_injected)
6124                 limit = 1;
6125
6126         vcpu->arch.nmi_pending += atomic_xchg(&vcpu->arch.nmi_queued, 0);
6127         vcpu->arch.nmi_pending = min(vcpu->arch.nmi_pending, limit);
6128         kvm_make_request(KVM_REQ_EVENT, vcpu);
6129 }
6130
6131 static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu)
6132 {
6133         u64 eoi_exit_bitmap[4];
6134         u32 tmr[8];
6135
6136         if (!kvm_apic_hw_enabled(vcpu->arch.apic))
6137                 return;
6138
6139         memset(eoi_exit_bitmap, 0, 32);
6140         memset(tmr, 0, 32);
6141
6142         kvm_ioapic_scan_entry(vcpu, eoi_exit_bitmap, tmr);
6143         kvm_x86_ops->load_eoi_exitmap(vcpu, eoi_exit_bitmap);
6144         kvm_apic_update_tmr(vcpu, tmr);
6145 }
6146
6147 static void kvm_vcpu_flush_tlb(struct kvm_vcpu *vcpu)
6148 {
6149         ++vcpu->stat.tlb_flush;
6150         kvm_x86_ops->tlb_flush(vcpu);
6151 }
6152
6153 void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu)
6154 {
6155         struct page *page = NULL;
6156
6157         if (!irqchip_in_kernel(vcpu->kvm))
6158                 return;
6159
6160         if (!kvm_x86_ops->set_apic_access_page_addr)
6161                 return;
6162
6163         page = gfn_to_page(vcpu->kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT);
6164         kvm_x86_ops->set_apic_access_page_addr(vcpu, page_to_phys(page));
6165
6166         /*
6167          * Do not pin apic access page in memory, the MMU notifier
6168          * will call us again if it is migrated or swapped out.
6169          */
6170         put_page(page);
6171 }
6172 EXPORT_SYMBOL_GPL(kvm_vcpu_reload_apic_access_page);
6173
6174 void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
6175                                            unsigned long address)
6176 {
6177         /*
6178          * The physical address of apic access page is stored in the VMCS.
6179          * Update it when it becomes invalid.
6180          */
6181         if (address == gfn_to_hva(kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT))
6182                 kvm_make_all_cpus_request(kvm, KVM_REQ_APIC_PAGE_RELOAD);
6183 }
6184
6185 /*
6186  * Returns 1 to let __vcpu_run() continue the guest execution loop without
6187  * exiting to the userspace.  Otherwise, the value will be returned to the
6188  * userspace.
6189  */
6190 static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
6191 {
6192         int r;
6193         bool req_int_win = !irqchip_in_kernel(vcpu->kvm) &&
6194                 vcpu->run->request_interrupt_window;
6195         bool req_immediate_exit = false;
6196
6197         if (vcpu->requests) {
6198                 if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu))
6199                         kvm_mmu_unload(vcpu);
6200                 if (kvm_check_request(KVM_REQ_MIGRATE_TIMER, vcpu))
6201                         __kvm_migrate_timers(vcpu);
6202                 if (kvm_check_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu))
6203                         kvm_gen_update_masterclock(vcpu->kvm);
6204                 if (kvm_check_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu))
6205                         kvm_gen_kvmclock_update(vcpu);
6206                 if (kvm_check_request(KVM_REQ_CLOCK_UPDATE, vcpu)) {
6207                         r = kvm_guest_time_update(vcpu);
6208                         if (unlikely(r))
6209                                 goto out;
6210                 }
6211                 if (kvm_check_request(KVM_REQ_MMU_SYNC, vcpu))
6212                         kvm_mmu_sync_roots(vcpu);
6213                 if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
6214                         kvm_vcpu_flush_tlb(vcpu);
6215                 if (kvm_check_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu)) {
6216                         vcpu->run->exit_reason = KVM_EXIT_TPR_ACCESS;
6217                         r = 0;
6218                         goto out;
6219                 }
6220                 if (kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu)) {
6221                         vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN;
6222                         r = 0;
6223                         goto out;
6224                 }
6225                 if (kvm_check_request(KVM_REQ_DEACTIVATE_FPU, vcpu)) {
6226                         vcpu->fpu_active = 0;
6227                         kvm_x86_ops->fpu_deactivate(vcpu);
6228                 }
6229                 if (kvm_check_request(KVM_REQ_APF_HALT, vcpu)) {
6230                         /* Page is swapped out. Do synthetic halt */
6231                         vcpu->arch.apf.halted = true;
6232                         r = 1;
6233                         goto out;
6234                 }
6235                 if (kvm_check_request(KVM_REQ_STEAL_UPDATE, vcpu))
6236                         record_steal_time(vcpu);
6237                 if (kvm_check_request(KVM_REQ_NMI, vcpu))
6238                         process_nmi(vcpu);
6239                 if (kvm_check_request(KVM_REQ_PMU, vcpu))
6240                         kvm_handle_pmu_event(vcpu);
6241                 if (kvm_check_request(KVM_REQ_PMI, vcpu))
6242                         kvm_deliver_pmi(vcpu);
6243                 if (kvm_check_request(KVM_REQ_SCAN_IOAPIC, vcpu))
6244                         vcpu_scan_ioapic(vcpu);
6245                 if (kvm_check_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu))
6246                         kvm_vcpu_reload_apic_access_page(vcpu);
6247         }
6248
6249         if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win) {
6250                 kvm_apic_accept_events(vcpu);
6251                 if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) {
6252                         r = 1;
6253                         goto out;
6254                 }
6255
6256                 if (inject_pending_event(vcpu, req_int_win) != 0)
6257                         req_immediate_exit = true;
6258                 /* enable NMI/IRQ window open exits if needed */
6259                 else if (vcpu->arch.nmi_pending)
6260                         kvm_x86_ops->enable_nmi_window(vcpu);
6261                 else if (kvm_cpu_has_injectable_intr(vcpu) || req_int_win)
6262                         kvm_x86_ops->enable_irq_window(vcpu);
6263
6264                 if (kvm_lapic_enabled(vcpu)) {
6265                         /*
6266                          * Update architecture specific hints for APIC
6267                          * virtual interrupt delivery.
6268                          */
6269                         if (kvm_x86_ops->hwapic_irr_update)
6270                                 kvm_x86_ops->hwapic_irr_update(vcpu,
6271                                         kvm_lapic_find_highest_irr(vcpu));
6272                         update_cr8_intercept(vcpu);
6273                         kvm_lapic_sync_to_vapic(vcpu);
6274                 }
6275         }
6276
6277         r = kvm_mmu_reload(vcpu);
6278         if (unlikely(r)) {
6279                 goto cancel_injection;
6280         }
6281
6282         preempt_disable();
6283
6284         kvm_x86_ops->prepare_guest_switch(vcpu);
6285         if (vcpu->fpu_active)
6286                 kvm_load_guest_fpu(vcpu);
6287         kvm_load_guest_xcr0(vcpu);
6288
6289         vcpu->mode = IN_GUEST_MODE;
6290
6291         srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
6292
6293         /* We should set ->mode before check ->requests,
6294          * see the comment in make_all_cpus_request.
6295          */
6296         smp_mb__after_srcu_read_unlock();
6297
6298         local_irq_disable();
6299
6300         if (vcpu->mode == EXITING_GUEST_MODE || vcpu->requests
6301             || need_resched() || signal_pending(current)) {
6302                 vcpu->mode = OUTSIDE_GUEST_MODE;
6303                 smp_wmb();
6304                 local_irq_enable();
6305                 preempt_enable();
6306                 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
6307                 r = 1;
6308                 goto cancel_injection;
6309         }
6310
6311         if (req_immediate_exit)
6312                 smp_send_reschedule(vcpu->cpu);
6313
6314         kvm_guest_enter();
6315
6316         if (unlikely(vcpu->arch.switch_db_regs)) {
6317                 set_debugreg(0, 7);
6318                 set_debugreg(vcpu->arch.eff_db[0], 0);
6319                 set_debugreg(vcpu->arch.eff_db[1], 1);
6320                 set_debugreg(vcpu->arch.eff_db[2], 2);
6321                 set_debugreg(vcpu->arch.eff_db[3], 3);
6322                 set_debugreg(vcpu->arch.dr6, 6);
6323         }
6324
6325         trace_kvm_entry(vcpu->vcpu_id);
6326         wait_lapic_expire(vcpu);
6327         kvm_x86_ops->run(vcpu);
6328
6329         /*
6330          * Do this here before restoring debug registers on the host.  And
6331          * since we do this before handling the vmexit, a DR access vmexit
6332          * can (a) read the correct value of the debug registers, (b) set
6333          * KVM_DEBUGREG_WONT_EXIT again.
6334          */
6335         if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)) {
6336                 int i;
6337
6338                 WARN_ON(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP);
6339                 kvm_x86_ops->sync_dirty_debug_regs(vcpu);
6340                 for (i = 0; i < KVM_NR_DB_REGS; i++)
6341                         vcpu->arch.eff_db[i] = vcpu->arch.db[i];
6342         }
6343
6344         /*
6345          * If the guest has used debug registers, at least dr7
6346          * will be disabled while returning to the host.
6347          * If we don't have active breakpoints in the host, we don't
6348          * care about the messed up debug address registers. But if
6349          * we have some of them active, restore the old state.
6350          */
6351         if (hw_breakpoint_active())
6352                 hw_breakpoint_restore();
6353
6354         vcpu->arch.last_guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu,
6355                                                            native_read_tsc());
6356
6357         vcpu->mode = OUTSIDE_GUEST_MODE;
6358         smp_wmb();
6359
6360         /* Interrupt is enabled by handle_external_intr() */
6361         kvm_x86_ops->handle_external_intr(vcpu);
6362
6363         ++vcpu->stat.exits;
6364
6365         /*
6366          * We must have an instruction between local_irq_enable() and
6367          * kvm_guest_exit(), so the timer interrupt isn't delayed by
6368          * the interrupt shadow.  The stat.exits increment will do nicely.
6369          * But we need to prevent reordering, hence this barrier():
6370          */
6371         barrier();
6372
6373         kvm_guest_exit();
6374
6375         preempt_enable();
6376
6377         vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
6378
6379         /*
6380          * Profile KVM exit RIPs:
6381          */
6382         if (unlikely(prof_on == KVM_PROFILING)) {
6383                 unsigned long rip = kvm_rip_read(vcpu);
6384                 profile_hit(KVM_PROFILING, (void *)rip);
6385         }
6386
6387         if (unlikely(vcpu->arch.tsc_always_catchup))
6388                 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
6389
6390         if (vcpu->arch.apic_attention)
6391                 kvm_lapic_sync_from_vapic(vcpu);
6392
6393         r = kvm_x86_ops->handle_exit(vcpu);
6394         return r;
6395
6396 cancel_injection:
6397         kvm_x86_ops->cancel_injection(vcpu);
6398         if (unlikely(vcpu->arch.apic_attention))
6399                 kvm_lapic_sync_from_vapic(vcpu);
6400 out:
6401         return r;
6402 }
6403
6404
6405 static int __vcpu_run(struct kvm_vcpu *vcpu)
6406 {
6407         int r;
6408         struct kvm *kvm = vcpu->kvm;
6409
6410         vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
6411
6412         r = 1;
6413         while (r > 0) {
6414                 if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
6415                     !vcpu->arch.apf.halted)
6416                         r = vcpu_enter_guest(vcpu);
6417                 else {
6418                         srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
6419                         kvm_vcpu_block(vcpu);
6420                         vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
6421                         if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) {
6422                                 kvm_apic_accept_events(vcpu);
6423                                 switch(vcpu->arch.mp_state) {
6424                                 case KVM_MP_STATE_HALTED:
6425                                         vcpu->arch.pv.pv_unhalted = false;
6426                                         vcpu->arch.mp_state =
6427                                                 KVM_MP_STATE_RUNNABLE;
6428                                 case KVM_MP_STATE_RUNNABLE:
6429                                         vcpu->arch.apf.halted = false;
6430                                         break;
6431                                 case KVM_MP_STATE_INIT_RECEIVED:
6432                                         break;
6433                                 default:
6434                                         r = -EINTR;
6435                                         break;
6436                                 }
6437                         }
6438                 }
6439
6440                 if (r <= 0)
6441                         break;
6442
6443                 clear_bit(KVM_REQ_PENDING_TIMER, &vcpu->requests);
6444                 if (kvm_cpu_has_pending_timer(vcpu))
6445                         kvm_inject_pending_timer_irqs(vcpu);
6446
6447                 if (dm_request_for_irq_injection(vcpu)) {
6448                         r = -EINTR;
6449                         vcpu->run->exit_reason = KVM_EXIT_INTR;
6450                         ++vcpu->stat.request_irq_exits;
6451                 }
6452
6453                 kvm_check_async_pf_completion(vcpu);
6454
6455                 if (signal_pending(current)) {
6456                         r = -EINTR;
6457                         vcpu->run->exit_reason = KVM_EXIT_INTR;
6458                         ++vcpu->stat.signal_exits;
6459                 }
6460                 if (need_resched()) {
6461                         srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
6462                         cond_resched();
6463                         vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
6464                 }
6465         }
6466
6467         srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
6468
6469         return r;
6470 }
6471
6472 static inline int complete_emulated_io(struct kvm_vcpu *vcpu)
6473 {
6474         int r;
6475         vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
6476         r = emulate_instruction(vcpu, EMULTYPE_NO_DECODE);
6477         srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
6478         if (r != EMULATE_DONE)
6479                 return 0;
6480         return 1;
6481 }
6482
6483 static int complete_emulated_pio(struct kvm_vcpu *vcpu)
6484 {
6485         BUG_ON(!vcpu->arch.pio.count);
6486
6487         return complete_emulated_io(vcpu);
6488 }
6489
6490 /*
6491  * Implements the following, as a state machine:
6492  *
6493  * read:
6494  *   for each fragment
6495  *     for each mmio piece in the fragment
6496  *       write gpa, len
6497  *       exit
6498  *       copy data
6499  *   execute insn
6500  *
6501  * write:
6502  *   for each fragment
6503  *     for each mmio piece in the fragment
6504  *       write gpa, len
6505  *       copy data
6506  *       exit
6507  */
6508 static int complete_emulated_mmio(struct kvm_vcpu *vcpu)
6509 {
6510         struct kvm_run *run = vcpu->run;
6511         struct kvm_mmio_fragment *frag;
6512         unsigned len;
6513
6514         BUG_ON(!vcpu->mmio_needed);
6515
6516         /* Complete previous fragment */
6517         frag = &vcpu->mmio_fragments[vcpu->mmio_cur_fragment];
6518         len = min(8u, frag->len);
6519         if (!vcpu->mmio_is_write)
6520                 memcpy(frag->data, run->mmio.data, len);
6521
6522         if (frag->len <= 8) {
6523                 /* Switch to the next fragment. */
6524                 frag++;
6525                 vcpu->mmio_cur_fragment++;
6526         } else {
6527                 /* Go forward to the next mmio piece. */
6528                 frag->data += len;
6529                 frag->gpa += len;
6530                 frag->len -= len;
6531         }
6532
6533         if (vcpu->mmio_cur_fragment >= vcpu->mmio_nr_fragments) {
6534                 vcpu->mmio_needed = 0;
6535
6536                 /* FIXME: return into emulator if single-stepping.  */
6537                 if (vcpu->mmio_is_write)
6538                         return 1;
6539                 vcpu->mmio_read_completed = 1;
6540                 return complete_emulated_io(vcpu);
6541         }
6542
6543         run->exit_reason = KVM_EXIT_MMIO;
6544         run->mmio.phys_addr = frag->gpa;
6545         if (vcpu->mmio_is_write)
6546                 memcpy(run->mmio.data, frag->data, min(8u, frag->len));
6547         run->mmio.len = min(8u, frag->len);
6548         run->mmio.is_write = vcpu->mmio_is_write;
6549         vcpu->arch.complete_userspace_io = complete_emulated_mmio;
6550         return 0;
6551 }
6552
6553
6554 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
6555 {
6556         int r;
6557         sigset_t sigsaved;
6558
6559         if (!tsk_used_math(current) && init_fpu(current))
6560                 return -ENOMEM;
6561
6562         if (vcpu->sigset_active)
6563                 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
6564
6565         if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
6566                 kvm_vcpu_block(vcpu);
6567                 kvm_apic_accept_events(vcpu);
6568                 clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
6569                 r = -EAGAIN;
6570                 goto out;
6571         }
6572
6573         /* re-sync apic's tpr */
6574         if (!irqchip_in_kernel(vcpu->kvm)) {
6575                 if (kvm_set_cr8(vcpu, kvm_run->cr8) != 0) {
6576                         r = -EINVAL;
6577                         goto out;
6578                 }
6579         }
6580
6581         if (unlikely(vcpu->arch.complete_userspace_io)) {
6582                 int (*cui)(struct kvm_vcpu *) = vcpu->arch.complete_userspace_io;
6583                 vcpu->arch.complete_userspace_io = NULL;
6584                 r = cui(vcpu);
6585                 if (r <= 0)
6586                         goto out;
6587         } else
6588                 WARN_ON(vcpu->arch.pio.count || vcpu->mmio_needed);
6589
6590         r = __vcpu_run(vcpu);
6591
6592 out:
6593         post_kvm_run_save(vcpu);
6594         if (vcpu->sigset_active)
6595                 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
6596
6597         return r;
6598 }
6599
6600 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
6601 {
6602         if (vcpu->arch.emulate_regs_need_sync_to_vcpu) {
6603                 /*
6604                  * We are here if userspace calls get_regs() in the middle of
6605                  * instruction emulation. Registers state needs to be copied
6606                  * back from emulation context to vcpu. Userspace shouldn't do
6607                  * that usually, but some bad designed PV devices (vmware
6608                  * backdoor interface) need this to work
6609                  */
6610                 emulator_writeback_register_cache(&vcpu->arch.emulate_ctxt);
6611                 vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
6612         }
6613         regs->rax = kvm_register_read(vcpu, VCPU_REGS_RAX);
6614         regs->rbx = kvm_register_read(vcpu, VCPU_REGS_RBX);
6615         regs->rcx = kvm_register_read(vcpu, VCPU_REGS_RCX);
6616         regs->rdx = kvm_register_read(vcpu, VCPU_REGS_RDX);
6617         regs->rsi = kvm_register_read(vcpu, VCPU_REGS_RSI);
6618         regs->rdi = kvm_register_read(vcpu, VCPU_REGS_RDI);
6619         regs->rsp = kvm_register_read(vcpu, VCPU_REGS_RSP);
6620         regs->rbp = kvm_register_read(vcpu, VCPU_REGS_RBP);
6621 #ifdef CONFIG_X86_64
6622         regs->r8 = kvm_register_read(vcpu, VCPU_REGS_R8);
6623         regs->r9 = kvm_register_read(vcpu, VCPU_REGS_R9);
6624         regs->r10 = kvm_register_read(vcpu, VCPU_REGS_R10);
6625         regs->r11 = kvm_register_read(vcpu, VCPU_REGS_R11);
6626         regs->r12 = kvm_register_read(vcpu, VCPU_REGS_R12);
6627         regs->r13 = kvm_register_read(vcpu, VCPU_REGS_R13);
6628         regs->r14 = kvm_register_read(vcpu, VCPU_REGS_R14);
6629         regs->r15 = kvm_register_read(vcpu, VCPU_REGS_R15);
6630 #endif
6631
6632         regs->rip = kvm_rip_read(vcpu);
6633         regs->rflags = kvm_get_rflags(vcpu);
6634
6635         return 0;
6636 }
6637
6638 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
6639 {
6640         vcpu->arch.emulate_regs_need_sync_from_vcpu = true;
6641         vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
6642
6643         kvm_register_write(vcpu, VCPU_REGS_RAX, regs->rax);
6644         kvm_register_write(vcpu, VCPU_REGS_RBX, regs->rbx);
6645         kvm_register_write(vcpu, VCPU_REGS_RCX, regs->rcx);
6646         kvm_register_write(vcpu, VCPU_REGS_RDX, regs->rdx);
6647         kvm_register_write(vcpu, VCPU_REGS_RSI, regs->rsi);
6648         kvm_register_write(vcpu, VCPU_REGS_RDI, regs->rdi);
6649         kvm_register_write(vcpu, VCPU_REGS_RSP, regs->rsp);
6650         kvm_register_write(vcpu, VCPU_REGS_RBP, regs->rbp);
6651 #ifdef CONFIG_X86_64
6652         kvm_register_write(vcpu, VCPU_REGS_R8, regs->r8);
6653         kvm_register_write(vcpu, VCPU_REGS_R9, regs->r9);
6654         kvm_register_write(vcpu, VCPU_REGS_R10, regs->r10);
6655         kvm_register_write(vcpu, VCPU_REGS_R11, regs->r11);
6656         kvm_register_write(vcpu, VCPU_REGS_R12, regs->r12);
6657         kvm_register_write(vcpu, VCPU_REGS_R13, regs->r13);
6658         kvm_register_write(vcpu, VCPU_REGS_R14, regs->r14);
6659         kvm_register_write(vcpu, VCPU_REGS_R15, regs->r15);
6660 #endif
6661
6662         kvm_rip_write(vcpu, regs->rip);
6663         kvm_set_rflags(vcpu, regs->rflags);
6664
6665         vcpu->arch.exception.pending = false;
6666
6667         kvm_make_request(KVM_REQ_EVENT, vcpu);
6668
6669         return 0;
6670 }
6671
6672 void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
6673 {
6674         struct kvm_segment cs;
6675
6676         kvm_get_segment(vcpu, &cs, VCPU_SREG_CS);
6677         *db = cs.db;
6678         *l = cs.l;
6679 }
6680 EXPORT_SYMBOL_GPL(kvm_get_cs_db_l_bits);
6681
6682 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
6683                                   struct kvm_sregs *sregs)
6684 {
6685         struct desc_ptr dt;
6686
6687         kvm_get_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
6688         kvm_get_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
6689         kvm_get_segment(vcpu, &sregs->es, VCPU_SREG_ES);
6690         kvm_get_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
6691         kvm_get_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
6692         kvm_get_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
6693
6694         kvm_get_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
6695         kvm_get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
6696
6697         kvm_x86_ops->get_idt(vcpu, &dt);
6698         sregs->idt.limit = dt.size;
6699         sregs->idt.base = dt.address;
6700         kvm_x86_ops->get_gdt(vcpu, &dt);
6701         sregs->gdt.limit = dt.size;
6702         sregs->gdt.base = dt.address;
6703
6704         sregs->cr0 = kvm_read_cr0(vcpu);
6705         sregs->cr2 = vcpu->arch.cr2;
6706         sregs->cr3 = kvm_read_cr3(vcpu);
6707         sregs->cr4 = kvm_read_cr4(vcpu);
6708         sregs->cr8 = kvm_get_cr8(vcpu);
6709         sregs->efer = vcpu->arch.efer;
6710         sregs->apic_base = kvm_get_apic_base(vcpu);
6711
6712         memset(sregs->interrupt_bitmap, 0, sizeof sregs->interrupt_bitmap);
6713
6714         if (vcpu->arch.interrupt.pending && !vcpu->arch.interrupt.soft)
6715                 set_bit(vcpu->arch.interrupt.nr,
6716                         (unsigned long *)sregs->interrupt_bitmap);
6717
6718         return 0;
6719 }
6720
6721 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
6722                                     struct kvm_mp_state *mp_state)
6723 {
6724         kvm_apic_accept_events(vcpu);
6725         if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED &&
6726                                         vcpu->arch.pv.pv_unhalted)
6727                 mp_state->mp_state = KVM_MP_STATE_RUNNABLE;
6728         else
6729                 mp_state->mp_state = vcpu->arch.mp_state;
6730
6731         return 0;
6732 }
6733
6734 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
6735                                     struct kvm_mp_state *mp_state)
6736 {
6737         if (!kvm_vcpu_has_lapic(vcpu) &&
6738             mp_state->mp_state != KVM_MP_STATE_RUNNABLE)
6739                 return -EINVAL;
6740
6741         if (mp_state->mp_state == KVM_MP_STATE_SIPI_RECEIVED) {
6742                 vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED;
6743                 set_bit(KVM_APIC_SIPI, &vcpu->arch.apic->pending_events);
6744         } else
6745                 vcpu->arch.mp_state = mp_state->mp_state;
6746         kvm_make_request(KVM_REQ_EVENT, vcpu);
6747         return 0;
6748 }
6749
6750 int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index,
6751                     int reason, bool has_error_code, u32 error_code)
6752 {
6753         struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
6754         int ret;
6755
6756         init_emulate_ctxt(vcpu);
6757
6758         ret = emulator_task_switch(ctxt, tss_selector, idt_index, reason,
6759                                    has_error_code, error_code);
6760
6761         if (ret)
6762                 return EMULATE_FAIL;
6763
6764         kvm_rip_write(vcpu, ctxt->eip);
6765         kvm_set_rflags(vcpu, ctxt->eflags);
6766         kvm_make_request(KVM_REQ_EVENT, vcpu);
6767         return EMULATE_DONE;
6768 }
6769 EXPORT_SYMBOL_GPL(kvm_task_switch);
6770
6771 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
6772                                   struct kvm_sregs *sregs)
6773 {
6774         struct msr_data apic_base_msr;
6775         int mmu_reset_needed = 0;
6776         int pending_vec, max_bits, idx;
6777         struct desc_ptr dt;
6778
6779         if (!guest_cpuid_has_xsave(vcpu) && (sregs->cr4 & X86_CR4_OSXSAVE))
6780                 return -EINVAL;
6781
6782         dt.size = sregs->idt.limit;
6783         dt.address = sregs->idt.base;
6784         kvm_x86_ops->set_idt(vcpu, &dt);
6785         dt.size = sregs->gdt.limit;
6786         dt.address = sregs->gdt.base;
6787         kvm_x86_ops->set_gdt(vcpu, &dt);
6788
6789         vcpu->arch.cr2 = sregs->cr2;
6790         mmu_reset_needed |= kvm_read_cr3(vcpu) != sregs->cr3;
6791         vcpu->arch.cr3 = sregs->cr3;
6792         __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
6793
6794         kvm_set_cr8(vcpu, sregs->cr8);
6795
6796         mmu_reset_needed |= vcpu->arch.efer != sregs->efer;
6797         kvm_x86_ops->set_efer(vcpu, sregs->efer);
6798         apic_base_msr.data = sregs->apic_base;
6799         apic_base_msr.host_initiated = true;
6800         kvm_set_apic_base(vcpu, &apic_base_msr);
6801
6802         mmu_reset_needed |= kvm_read_cr0(vcpu) != sregs->cr0;
6803         kvm_x86_ops->set_cr0(vcpu, sregs->cr0);
6804         vcpu->arch.cr0 = sregs->cr0;
6805
6806         mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4;
6807         kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
6808         if (sregs->cr4 & X86_CR4_OSXSAVE)
6809                 kvm_update_cpuid(vcpu);
6810
6811         idx = srcu_read_lock(&vcpu->kvm->srcu);
6812         if (!is_long_mode(vcpu) && is_pae(vcpu)) {
6813                 load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu));
6814                 mmu_reset_needed = 1;
6815         }
6816         srcu_read_unlock(&vcpu->kvm->srcu, idx);
6817
6818         if (mmu_reset_needed)
6819                 kvm_mmu_reset_context(vcpu);
6820
6821         max_bits = KVM_NR_INTERRUPTS;
6822         pending_vec = find_first_bit(
6823                 (const unsigned long *)sregs->interrupt_bitmap, max_bits);
6824         if (pending_vec < max_bits) {
6825                 kvm_queue_interrupt(vcpu, pending_vec, false);
6826                 pr_debug("Set back pending irq %d\n", pending_vec);
6827         }
6828
6829         kvm_set_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
6830         kvm_set_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
6831         kvm_set_segment(vcpu, &sregs->es, VCPU_SREG_ES);
6832         kvm_set_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
6833         kvm_set_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
6834         kvm_set_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
6835
6836         kvm_set_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
6837         kvm_set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
6838
6839         update_cr8_intercept(vcpu);
6840
6841         /* Older userspace won't unhalt the vcpu on reset. */
6842         if (kvm_vcpu_is_bsp(vcpu) && kvm_rip_read(vcpu) == 0xfff0 &&
6843             sregs->cs.selector == 0xf000 && sregs->cs.base == 0xffff0000 &&
6844             !is_protmode(vcpu))
6845                 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
6846
6847         kvm_make_request(KVM_REQ_EVENT, vcpu);
6848
6849         return 0;
6850 }
6851
6852 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
6853                                         struct kvm_guest_debug *dbg)
6854 {
6855         unsigned long rflags;
6856         int i, r;
6857
6858         if (dbg->control & (KVM_GUESTDBG_INJECT_DB | KVM_GUESTDBG_INJECT_BP)) {
6859                 r = -EBUSY;
6860                 if (vcpu->arch.exception.pending)
6861                         goto out;
6862                 if (dbg->control & KVM_GUESTDBG_INJECT_DB)
6863                         kvm_queue_exception(vcpu, DB_VECTOR);
6864                 else
6865                         kvm_queue_exception(vcpu, BP_VECTOR);
6866         }
6867
6868         /*
6869          * Read rflags as long as potentially injected trace flags are still
6870          * filtered out.
6871          */
6872         rflags = kvm_get_rflags(vcpu);
6873
6874         vcpu->guest_debug = dbg->control;
6875         if (!(vcpu->guest_debug & KVM_GUESTDBG_ENABLE))
6876                 vcpu->guest_debug = 0;
6877
6878         if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) {
6879                 for (i = 0; i < KVM_NR_DB_REGS; ++i)
6880                         vcpu->arch.eff_db[i] = dbg->arch.debugreg[i];
6881                 vcpu->arch.guest_debug_dr7 = dbg->arch.debugreg[7];
6882         } else {
6883                 for (i = 0; i < KVM_NR_DB_REGS; i++)
6884                         vcpu->arch.eff_db[i] = vcpu->arch.db[i];
6885         }
6886         kvm_update_dr7(vcpu);
6887
6888         if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
6889                 vcpu->arch.singlestep_rip = kvm_rip_read(vcpu) +
6890                         get_segment_base(vcpu, VCPU_SREG_CS);
6891
6892         /*
6893          * Trigger an rflags update that will inject or remove the trace
6894          * flags.
6895          */
6896         kvm_set_rflags(vcpu, rflags);
6897
6898         kvm_x86_ops->update_db_bp_intercept(vcpu);
6899
6900         r = 0;
6901
6902 out:
6903
6904         return r;
6905 }
6906
6907 /*
6908  * Translate a guest virtual address to a guest physical address.
6909  */
6910 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
6911                                     struct kvm_translation *tr)
6912 {
6913         unsigned long vaddr = tr->linear_address;
6914         gpa_t gpa;
6915         int idx;
6916
6917         idx = srcu_read_lock(&vcpu->kvm->srcu);
6918         gpa = kvm_mmu_gva_to_gpa_system(vcpu, vaddr, NULL);
6919         srcu_read_unlock(&vcpu->kvm->srcu, idx);
6920         tr->physical_address = gpa;
6921         tr->valid = gpa != UNMAPPED_GVA;
6922         tr->writeable = 1;
6923         tr->usermode = 0;
6924
6925         return 0;
6926 }
6927
6928 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
6929 {
6930         struct i387_fxsave_struct *fxsave =
6931                         &vcpu->arch.guest_fpu.state->fxsave;
6932
6933         memcpy(fpu->fpr, fxsave->st_space, 128);
6934         fpu->fcw = fxsave->cwd;
6935         fpu->fsw = fxsave->swd;
6936         fpu->ftwx = fxsave->twd;
6937         fpu->last_opcode = fxsave->fop;
6938         fpu->last_ip = fxsave->rip;
6939         fpu->last_dp = fxsave->rdp;
6940         memcpy(fpu->xmm, fxsave->xmm_space, sizeof fxsave->xmm_space);
6941
6942         return 0;
6943 }
6944
6945 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
6946 {
6947         struct i387_fxsave_struct *fxsave =
6948                         &vcpu->arch.guest_fpu.state->fxsave;
6949
6950         memcpy(fxsave->st_space, fpu->fpr, 128);
6951         fxsave->cwd = fpu->fcw;
6952         fxsave->swd = fpu->fsw;
6953         fxsave->twd = fpu->ftwx;
6954         fxsave->fop = fpu->last_opcode;
6955         fxsave->rip = fpu->last_ip;
6956         fxsave->rdp = fpu->last_dp;
6957         memcpy(fxsave->xmm_space, fpu->xmm, sizeof fxsave->xmm_space);
6958
6959         return 0;
6960 }
6961
6962 int fx_init(struct kvm_vcpu *vcpu)
6963 {
6964         int err;
6965
6966         err = fpu_alloc(&vcpu->arch.guest_fpu);
6967         if (err)
6968                 return err;
6969
6970         fpu_finit(&vcpu->arch.guest_fpu);
6971         if (cpu_has_xsaves)
6972                 vcpu->arch.guest_fpu.state->xsave.xsave_hdr.xcomp_bv =
6973                         host_xcr0 | XSTATE_COMPACTION_ENABLED;
6974
6975         /*
6976          * Ensure guest xcr0 is valid for loading
6977          */
6978         vcpu->arch.xcr0 = XSTATE_FP;
6979
6980         vcpu->arch.cr0 |= X86_CR0_ET;
6981
6982         return 0;
6983 }
6984 EXPORT_SYMBOL_GPL(fx_init);
6985
6986 static void fx_free(struct kvm_vcpu *vcpu)
6987 {
6988         fpu_free(&vcpu->arch.guest_fpu);
6989 }
6990
6991 void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
6992 {
6993         if (vcpu->guest_fpu_loaded)
6994                 return;
6995
6996         /*
6997          * Restore all possible states in the guest,
6998          * and assume host would use all available bits.
6999          * Guest xcr0 would be loaded later.
7000          */
7001         kvm_put_guest_xcr0(vcpu);
7002         vcpu->guest_fpu_loaded = 1;
7003         __kernel_fpu_begin();
7004         fpu_restore_checking(&vcpu->arch.guest_fpu);
7005         trace_kvm_fpu(1);
7006 }
7007
7008 void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
7009 {
7010         kvm_put_guest_xcr0(vcpu);
7011
7012         if (!vcpu->guest_fpu_loaded)
7013                 return;
7014
7015         vcpu->guest_fpu_loaded = 0;
7016         fpu_save_init(&vcpu->arch.guest_fpu);
7017         __kernel_fpu_end();
7018         ++vcpu->stat.fpu_reload;
7019         kvm_make_request(KVM_REQ_DEACTIVATE_FPU, vcpu);
7020         trace_kvm_fpu(0);
7021 }
7022
7023 void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
7024 {
7025         kvmclock_reset(vcpu);
7026
7027         free_cpumask_var(vcpu->arch.wbinvd_dirty_mask);
7028         fx_free(vcpu);
7029         kvm_x86_ops->vcpu_free(vcpu);
7030 }
7031
7032 struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
7033                                                 unsigned int id)
7034 {
7035         if (check_tsc_unstable() && atomic_read(&kvm->online_vcpus) != 0)
7036                 printk_once(KERN_WARNING
7037                 "kvm: SMP vm created on host with unstable TSC; "
7038                 "guest TSC will not be reliable\n");
7039         return kvm_x86_ops->vcpu_create(kvm, id);
7040 }
7041
7042 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
7043 {
7044         int r;
7045
7046         vcpu->arch.mtrr_state.have_fixed = 1;
7047         r = vcpu_load(vcpu);
7048         if (r)
7049                 return r;
7050         kvm_vcpu_reset(vcpu);
7051         kvm_mmu_setup(vcpu);
7052         vcpu_put(vcpu);
7053
7054         return r;
7055 }
7056
7057 int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
7058 {
7059         int r;
7060         struct msr_data msr;
7061         struct kvm *kvm = vcpu->kvm;
7062
7063         r = vcpu_load(vcpu);
7064         if (r)
7065                 return r;
7066         msr.data = 0x0;
7067         msr.index = MSR_IA32_TSC;
7068         msr.host_initiated = true;
7069         kvm_write_tsc(vcpu, &msr);
7070         vcpu_put(vcpu);
7071
7072         schedule_delayed_work(&kvm->arch.kvmclock_sync_work,
7073                                         KVMCLOCK_SYNC_PERIOD);
7074
7075         return r;
7076 }
7077
7078 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
7079 {
7080         int r;
7081         vcpu->arch.apf.msr_val = 0;
7082
7083         r = vcpu_load(vcpu);
7084         BUG_ON(r);
7085         kvm_mmu_unload(vcpu);
7086         vcpu_put(vcpu);
7087
7088         fx_free(vcpu);
7089         kvm_x86_ops->vcpu_free(vcpu);
7090 }
7091
7092 void kvm_vcpu_reset(struct kvm_vcpu *vcpu)
7093 {
7094         atomic_set(&vcpu->arch.nmi_queued, 0);
7095         vcpu->arch.nmi_pending = 0;
7096         vcpu->arch.nmi_injected = false;
7097         kvm_clear_interrupt_queue(vcpu);
7098         kvm_clear_exception_queue(vcpu);
7099
7100         memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db));
7101         vcpu->arch.dr6 = DR6_INIT;
7102         kvm_update_dr6(vcpu);
7103         vcpu->arch.dr7 = DR7_FIXED_1;
7104         kvm_update_dr7(vcpu);
7105
7106         kvm_make_request(KVM_REQ_EVENT, vcpu);
7107         vcpu->arch.apf.msr_val = 0;
7108         vcpu->arch.st.msr_val = 0;
7109
7110         kvmclock_reset(vcpu);
7111
7112         kvm_clear_async_pf_completion_queue(vcpu);
7113         kvm_async_pf_hash_reset(vcpu);
7114         vcpu->arch.apf.halted = false;
7115
7116         kvm_pmu_reset(vcpu);
7117
7118         memset(vcpu->arch.regs, 0, sizeof(vcpu->arch.regs));
7119         vcpu->arch.regs_avail = ~0;
7120         vcpu->arch.regs_dirty = ~0;
7121
7122         kvm_x86_ops->vcpu_reset(vcpu);
7123 }
7124
7125 void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
7126 {
7127         struct kvm_segment cs;
7128
7129         kvm_get_segment(vcpu, &cs, VCPU_SREG_CS);
7130         cs.selector = vector << 8;
7131         cs.base = vector << 12;
7132         kvm_set_segment(vcpu, &cs, VCPU_SREG_CS);
7133         kvm_rip_write(vcpu, 0);
7134 }
7135
7136 int kvm_arch_hardware_enable(void)
7137 {
7138         struct kvm *kvm;
7139         struct kvm_vcpu *vcpu;
7140         int i;
7141         int ret;
7142         u64 local_tsc;
7143         u64 max_tsc = 0;
7144         bool stable, backwards_tsc = false;
7145
7146         kvm_shared_msr_cpu_online();
7147         ret = kvm_x86_ops->hardware_enable();
7148         if (ret != 0)
7149                 return ret;
7150
7151         local_tsc = native_read_tsc();
7152         stable = !check_tsc_unstable();
7153         list_for_each_entry(kvm, &vm_list, vm_list) {
7154                 kvm_for_each_vcpu(i, vcpu, kvm) {
7155                         if (!stable && vcpu->cpu == smp_processor_id())
7156                                 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
7157                         if (stable && vcpu->arch.last_host_tsc > local_tsc) {
7158                                 backwards_tsc = true;
7159                                 if (vcpu->arch.last_host_tsc > max_tsc)
7160                                         max_tsc = vcpu->arch.last_host_tsc;
7161                         }
7162                 }
7163         }
7164
7165         /*
7166          * Sometimes, even reliable TSCs go backwards.  This happens on
7167          * platforms that reset TSC during suspend or hibernate actions, but
7168          * maintain synchronization.  We must compensate.  Fortunately, we can
7169          * detect that condition here, which happens early in CPU bringup,
7170          * before any KVM threads can be running.  Unfortunately, we can't
7171          * bring the TSCs fully up to date with real time, as we aren't yet far
7172          * enough into CPU bringup that we know how much real time has actually
7173          * elapsed; our helper function, get_kernel_ns() will be using boot
7174          * variables that haven't been updated yet.
7175          *
7176          * So we simply find the maximum observed TSC above, then record the
7177          * adjustment to TSC in each VCPU.  When the VCPU later gets loaded,
7178          * the adjustment will be applied.  Note that we accumulate
7179          * adjustments, in case multiple suspend cycles happen before some VCPU
7180          * gets a chance to run again.  In the event that no KVM threads get a
7181          * chance to run, we will miss the entire elapsed period, as we'll have
7182          * reset last_host_tsc, so VCPUs will not have the TSC adjusted and may
7183          * loose cycle time.  This isn't too big a deal, since the loss will be
7184          * uniform across all VCPUs (not to mention the scenario is extremely
7185          * unlikely). It is possible that a second hibernate recovery happens
7186          * much faster than a first, causing the observed TSC here to be
7187          * smaller; this would require additional padding adjustment, which is
7188          * why we set last_host_tsc to the local tsc observed here.
7189          *
7190          * N.B. - this code below runs only on platforms with reliable TSC,
7191          * as that is the only way backwards_tsc is set above.  Also note
7192          * that this runs for ALL vcpus, which is not a bug; all VCPUs should
7193          * have the same delta_cyc adjustment applied if backwards_tsc
7194          * is detected.  Note further, this adjustment is only done once,
7195          * as we reset last_host_tsc on all VCPUs to stop this from being
7196          * called multiple times (one for each physical CPU bringup).
7197          *
7198          * Platforms with unreliable TSCs don't have to deal with this, they
7199          * will be compensated by the logic in vcpu_load, which sets the TSC to
7200          * catchup mode.  This will catchup all VCPUs to real time, but cannot
7201          * guarantee that they stay in perfect synchronization.
7202          */
7203         if (backwards_tsc) {
7204                 u64 delta_cyc = max_tsc - local_tsc;
7205                 backwards_tsc_observed = true;
7206                 list_for_each_entry(kvm, &vm_list, vm_list) {
7207                         kvm_for_each_vcpu(i, vcpu, kvm) {
7208                                 vcpu->arch.tsc_offset_adjustment += delta_cyc;
7209                                 vcpu->arch.last_host_tsc = local_tsc;
7210                                 kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
7211                         }
7212
7213                         /*
7214                          * We have to disable TSC offset matching.. if you were
7215                          * booting a VM while issuing an S4 host suspend....
7216                          * you may have some problem.  Solving this issue is
7217                          * left as an exercise to the reader.
7218                          */
7219                         kvm->arch.last_tsc_nsec = 0;
7220                         kvm->arch.last_tsc_write = 0;
7221                 }
7222
7223         }
7224         return 0;
7225 }
7226
7227 void kvm_arch_hardware_disable(void)
7228 {
7229         kvm_x86_ops->hardware_disable();
7230         drop_user_return_notifiers();
7231 }
7232
7233 int kvm_arch_hardware_setup(void)
7234 {
7235         return kvm_x86_ops->hardware_setup();
7236 }
7237
7238 void kvm_arch_hardware_unsetup(void)
7239 {
7240         kvm_x86_ops->hardware_unsetup();
7241 }
7242
7243 void kvm_arch_check_processor_compat(void *rtn)
7244 {
7245         kvm_x86_ops->check_processor_compatibility(rtn);
7246 }
7247
7248 bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu)
7249 {
7250         return irqchip_in_kernel(vcpu->kvm) == (vcpu->arch.apic != NULL);
7251 }
7252
7253 struct static_key kvm_no_apic_vcpu __read_mostly;
7254
7255 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
7256 {
7257         struct page *page;
7258         struct kvm *kvm;
7259         int r;
7260
7261         BUG_ON(vcpu->kvm == NULL);
7262         kvm = vcpu->kvm;
7263
7264         vcpu->arch.pv.pv_unhalted = false;
7265         vcpu->arch.emulate_ctxt.ops = &emulate_ops;
7266         if (!irqchip_in_kernel(kvm) || kvm_vcpu_is_bsp(vcpu))
7267                 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
7268         else
7269                 vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED;
7270
7271         page = alloc_page(GFP_KERNEL | __GFP_ZERO);
7272         if (!page) {
7273                 r = -ENOMEM;
7274                 goto fail;
7275         }
7276         vcpu->arch.pio_data = page_address(page);
7277
7278         kvm_set_tsc_khz(vcpu, max_tsc_khz);
7279
7280         r = kvm_mmu_create(vcpu);
7281         if (r < 0)
7282                 goto fail_free_pio_data;
7283
7284         if (irqchip_in_kernel(kvm)) {
7285                 r = kvm_create_lapic(vcpu);
7286                 if (r < 0)
7287                         goto fail_mmu_destroy;
7288         } else
7289                 static_key_slow_inc(&kvm_no_apic_vcpu);
7290
7291         vcpu->arch.mce_banks = kzalloc(KVM_MAX_MCE_BANKS * sizeof(u64) * 4,
7292                                        GFP_KERNEL);
7293         if (!vcpu->arch.mce_banks) {
7294                 r = -ENOMEM;
7295                 goto fail_free_lapic;
7296         }
7297         vcpu->arch.mcg_cap = KVM_MAX_MCE_BANKS;
7298
7299         if (!zalloc_cpumask_var(&vcpu->arch.wbinvd_dirty_mask, GFP_KERNEL)) {
7300                 r = -ENOMEM;
7301                 goto fail_free_mce_banks;
7302         }
7303
7304         r = fx_init(vcpu);
7305         if (r)
7306                 goto fail_free_wbinvd_dirty_mask;
7307
7308         vcpu->arch.ia32_tsc_adjust_msr = 0x0;
7309         vcpu->arch.pv_time_enabled = false;
7310
7311         vcpu->arch.guest_supported_xcr0 = 0;
7312         vcpu->arch.guest_xstate_size = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET;
7313
7314         kvm_async_pf_hash_reset(vcpu);
7315         kvm_pmu_init(vcpu);
7316
7317         return 0;
7318 fail_free_wbinvd_dirty_mask:
7319         free_cpumask_var(vcpu->arch.wbinvd_dirty_mask);
7320 fail_free_mce_banks:
7321         kfree(vcpu->arch.mce_banks);
7322 fail_free_lapic:
7323         kvm_free_lapic(vcpu);
7324 fail_mmu_destroy:
7325         kvm_mmu_destroy(vcpu);
7326 fail_free_pio_data:
7327         free_page((unsigned long)vcpu->arch.pio_data);
7328 fail:
7329         return r;
7330 }
7331
7332 void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
7333 {
7334         int idx;
7335
7336         kvm_pmu_destroy(vcpu);
7337         kfree(vcpu->arch.mce_banks);
7338         kvm_free_lapic(vcpu);
7339         idx = srcu_read_lock(&vcpu->kvm->srcu);
7340         kvm_mmu_destroy(vcpu);
7341         srcu_read_unlock(&vcpu->kvm->srcu, idx);
7342         free_page((unsigned long)vcpu->arch.pio_data);
7343         if (!irqchip_in_kernel(vcpu->kvm))
7344                 static_key_slow_dec(&kvm_no_apic_vcpu);
7345 }
7346
7347 void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu)
7348 {
7349         kvm_x86_ops->sched_in(vcpu, cpu);
7350 }
7351
7352 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
7353 {
7354         if (type)
7355                 return -EINVAL;
7356
7357         INIT_HLIST_HEAD(&kvm->arch.mask_notifier_list);
7358         INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
7359         INIT_LIST_HEAD(&kvm->arch.zapped_obsolete_pages);
7360         INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);
7361         atomic_set(&kvm->arch.noncoherent_dma_count, 0);
7362
7363         /* Reserve bit 0 of irq_sources_bitmap for userspace irq source */
7364         set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap);
7365         /* Reserve bit 1 of irq_sources_bitmap for irqfd-resampler */
7366         set_bit(KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
7367                 &kvm->arch.irq_sources_bitmap);
7368
7369         raw_spin_lock_init(&kvm->arch.tsc_write_lock);
7370         mutex_init(&kvm->arch.apic_map_lock);
7371         spin_lock_init(&kvm->arch.pvclock_gtod_sync_lock);
7372
7373         pvclock_update_vm_gtod_copy(kvm);
7374
7375         INIT_DELAYED_WORK(&kvm->arch.kvmclock_update_work, kvmclock_update_fn);
7376         INIT_DELAYED_WORK(&kvm->arch.kvmclock_sync_work, kvmclock_sync_fn);
7377
7378         return 0;
7379 }
7380
7381 static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
7382 {
7383         int r;
7384         r = vcpu_load(vcpu);
7385         BUG_ON(r);
7386         kvm_mmu_unload(vcpu);
7387         vcpu_put(vcpu);
7388 }
7389
7390 static void kvm_free_vcpus(struct kvm *kvm)
7391 {
7392         unsigned int i;
7393         struct kvm_vcpu *vcpu;
7394
7395         /*
7396          * Unpin any mmu pages first.
7397          */
7398         kvm_for_each_vcpu(i, vcpu, kvm) {
7399                 kvm_clear_async_pf_completion_queue(vcpu);
7400                 kvm_unload_vcpu_mmu(vcpu);
7401         }
7402         kvm_for_each_vcpu(i, vcpu, kvm)
7403                 kvm_arch_vcpu_free(vcpu);
7404
7405         mutex_lock(&kvm->lock);
7406         for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
7407                 kvm->vcpus[i] = NULL;
7408
7409         atomic_set(&kvm->online_vcpus, 0);
7410         mutex_unlock(&kvm->lock);
7411 }
7412
7413 void kvm_arch_sync_events(struct kvm *kvm)
7414 {
7415         cancel_delayed_work_sync(&kvm->arch.kvmclock_sync_work);
7416         cancel_delayed_work_sync(&kvm->arch.kvmclock_update_work);
7417         kvm_free_all_assigned_devices(kvm);
7418         kvm_free_pit(kvm);
7419 }
7420
7421 void kvm_arch_destroy_vm(struct kvm *kvm)
7422 {
7423         if (current->mm == kvm->mm) {
7424                 /*
7425                  * Free memory regions allocated on behalf of userspace,
7426                  * unless the the memory map has changed due to process exit
7427                  * or fd copying.
7428                  */
7429                 struct kvm_userspace_memory_region mem;
7430                 memset(&mem, 0, sizeof(mem));
7431                 mem.slot = APIC_ACCESS_PAGE_PRIVATE_MEMSLOT;
7432                 kvm_set_memory_region(kvm, &mem);
7433
7434                 mem.slot = IDENTITY_PAGETABLE_PRIVATE_MEMSLOT;
7435                 kvm_set_memory_region(kvm, &mem);
7436
7437                 mem.slot = TSS_PRIVATE_MEMSLOT;
7438                 kvm_set_memory_region(kvm, &mem);
7439         }
7440         kvm_iommu_unmap_guest(kvm);
7441         kfree(kvm->arch.vpic);
7442         kfree(kvm->arch.vioapic);
7443         kvm_free_vcpus(kvm);
7444         kfree(rcu_dereference_check(kvm->arch.apic_map, 1));
7445 }
7446
7447 void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
7448                            struct kvm_memory_slot *dont)
7449 {
7450         int i;
7451
7452         for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) {
7453                 if (!dont || free->arch.rmap[i] != dont->arch.rmap[i]) {
7454                         kvm_kvfree(free->arch.rmap[i]);
7455                         free->arch.rmap[i] = NULL;
7456                 }
7457                 if (i == 0)
7458                         continue;
7459
7460                 if (!dont || free->arch.lpage_info[i - 1] !=
7461                              dont->arch.lpage_info[i - 1]) {
7462                         kvm_kvfree(free->arch.lpage_info[i - 1]);
7463                         free->arch.lpage_info[i - 1] = NULL;
7464                 }
7465         }
7466 }
7467
7468 int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
7469                             unsigned long npages)
7470 {
7471         int i;
7472
7473         for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) {
7474                 unsigned long ugfn;
7475                 int lpages;
7476                 int level = i + 1;
7477
7478                 lpages = gfn_to_index(slot->base_gfn + npages - 1,
7479                                       slot->base_gfn, level) + 1;
7480
7481                 slot->arch.rmap[i] =
7482                         kvm_kvzalloc(lpages * sizeof(*slot->arch.rmap[i]));
7483                 if (!slot->arch.rmap[i])
7484                         goto out_free;
7485                 if (i == 0)
7486                         continue;
7487
7488                 slot->arch.lpage_info[i - 1] = kvm_kvzalloc(lpages *
7489                                         sizeof(*slot->arch.lpage_info[i - 1]));
7490                 if (!slot->arch.lpage_info[i - 1])
7491                         goto out_free;
7492
7493                 if (slot->base_gfn & (KVM_PAGES_PER_HPAGE(level) - 1))
7494                         slot->arch.lpage_info[i - 1][0].write_count = 1;
7495                 if ((slot->base_gfn + npages) & (KVM_PAGES_PER_HPAGE(level) - 1))
7496                         slot->arch.lpage_info[i - 1][lpages - 1].write_count = 1;
7497                 ugfn = slot->userspace_addr >> PAGE_SHIFT;
7498                 /*
7499                  * If the gfn and userspace address are not aligned wrt each
7500                  * other, or if explicitly asked to, disable large page
7501                  * support for this slot
7502                  */
7503                 if ((slot->base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE(level) - 1) ||
7504                     !kvm_largepages_enabled()) {
7505                         unsigned long j;
7506
7507                         for (j = 0; j < lpages; ++j)
7508                                 slot->arch.lpage_info[i - 1][j].write_count = 1;
7509                 }
7510         }
7511
7512         return 0;
7513
7514 out_free:
7515         for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) {
7516                 kvm_kvfree(slot->arch.rmap[i]);
7517                 slot->arch.rmap[i] = NULL;
7518                 if (i == 0)
7519                         continue;
7520
7521                 kvm_kvfree(slot->arch.lpage_info[i - 1]);
7522                 slot->arch.lpage_info[i - 1] = NULL;
7523         }
7524         return -ENOMEM;
7525 }
7526
7527 void kvm_arch_memslots_updated(struct kvm *kvm)
7528 {
7529         /*
7530          * memslots->generation has been incremented.
7531          * mmio generation may have reached its maximum value.
7532          */
7533         kvm_mmu_invalidate_mmio_sptes(kvm);
7534 }
7535
7536 int kvm_arch_prepare_memory_region(struct kvm *kvm,
7537                                 struct kvm_memory_slot *memslot,
7538                                 struct kvm_userspace_memory_region *mem,
7539                                 enum kvm_mr_change change)
7540 {
7541         /*
7542          * Only private memory slots need to be mapped here since
7543          * KVM_SET_MEMORY_REGION ioctl is no longer supported.
7544          */
7545         if ((memslot->id >= KVM_USER_MEM_SLOTS) && (change == KVM_MR_CREATE)) {
7546                 unsigned long userspace_addr;
7547
7548                 /*
7549                  * MAP_SHARED to prevent internal slot pages from being moved
7550                  * by fork()/COW.
7551                  */
7552                 userspace_addr = vm_mmap(NULL, 0, memslot->npages * PAGE_SIZE,
7553                                          PROT_READ | PROT_WRITE,
7554                                          MAP_SHARED | MAP_ANONYMOUS, 0);
7555
7556                 if (IS_ERR((void *)userspace_addr))
7557                         return PTR_ERR((void *)userspace_addr);
7558
7559                 memslot->userspace_addr = userspace_addr;
7560         }
7561
7562         return 0;
7563 }
7564
7565 void kvm_arch_commit_memory_region(struct kvm *kvm,
7566                                 struct kvm_userspace_memory_region *mem,
7567                                 const struct kvm_memory_slot *old,
7568                                 enum kvm_mr_change change)
7569 {
7570
7571         int nr_mmu_pages = 0;
7572
7573         if ((mem->slot >= KVM_USER_MEM_SLOTS) && (change == KVM_MR_DELETE)) {
7574                 int ret;
7575
7576                 ret = vm_munmap(old->userspace_addr,
7577                                 old->npages * PAGE_SIZE);
7578                 if (ret < 0)
7579                         printk(KERN_WARNING
7580                                "kvm_vm_ioctl_set_memory_region: "
7581                                "failed to munmap memory\n");
7582         }
7583
7584         if (!kvm->arch.n_requested_mmu_pages)
7585                 nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);
7586
7587         if (nr_mmu_pages)
7588                 kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
7589         /*
7590          * Write protect all pages for dirty logging.
7591          *
7592          * All the sptes including the large sptes which point to this
7593          * slot are set to readonly. We can not create any new large
7594          * spte on this slot until the end of the logging.
7595          *
7596          * See the comments in fast_page_fault().
7597          */
7598         if ((change != KVM_MR_DELETE) && (mem->flags & KVM_MEM_LOG_DIRTY_PAGES))
7599                 kvm_mmu_slot_remove_write_access(kvm, mem->slot);
7600 }
7601
7602 void kvm_arch_flush_shadow_all(struct kvm *kvm)
7603 {
7604         kvm_mmu_invalidate_zap_all_pages(kvm);
7605 }
7606
7607 void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
7608                                    struct kvm_memory_slot *slot)
7609 {
7610         kvm_mmu_invalidate_zap_all_pages(kvm);
7611 }
7612
7613 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
7614 {
7615         if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events)
7616                 kvm_x86_ops->check_nested_events(vcpu, false);
7617
7618         return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
7619                 !vcpu->arch.apf.halted)
7620                 || !list_empty_careful(&vcpu->async_pf.done)
7621                 || kvm_apic_has_events(vcpu)
7622                 || vcpu->arch.pv.pv_unhalted
7623                 || atomic_read(&vcpu->arch.nmi_queued) ||
7624                 (kvm_arch_interrupt_allowed(vcpu) &&
7625                  kvm_cpu_has_interrupt(vcpu));
7626 }
7627
7628 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
7629 {
7630         return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
7631 }
7632
7633 int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu)
7634 {
7635         return kvm_x86_ops->interrupt_allowed(vcpu);
7636 }
7637
7638 unsigned long kvm_get_linear_rip(struct kvm_vcpu *vcpu)
7639 {
7640         if (is_64_bit_mode(vcpu))
7641                 return kvm_rip_read(vcpu);
7642         return (u32)(get_segment_base(vcpu, VCPU_SREG_CS) +
7643                      kvm_rip_read(vcpu));
7644 }
7645 EXPORT_SYMBOL_GPL(kvm_get_linear_rip);
7646
7647 bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip)
7648 {
7649         return kvm_get_linear_rip(vcpu) == linear_rip;
7650 }
7651 EXPORT_SYMBOL_GPL(kvm_is_linear_rip);
7652
7653 unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu)
7654 {
7655         unsigned long rflags;
7656
7657         rflags = kvm_x86_ops->get_rflags(vcpu);
7658         if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
7659                 rflags &= ~X86_EFLAGS_TF;
7660         return rflags;
7661 }
7662 EXPORT_SYMBOL_GPL(kvm_get_rflags);
7663
7664 static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
7665 {
7666         if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP &&
7667             kvm_is_linear_rip(vcpu, vcpu->arch.singlestep_rip))
7668                 rflags |= X86_EFLAGS_TF;
7669         kvm_x86_ops->set_rflags(vcpu, rflags);
7670 }
7671
7672 void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
7673 {
7674         __kvm_set_rflags(vcpu, rflags);
7675         kvm_make_request(KVM_REQ_EVENT, vcpu);
7676 }
7677 EXPORT_SYMBOL_GPL(kvm_set_rflags);
7678
7679 void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work)
7680 {
7681         int r;
7682
7683         if ((vcpu->arch.mmu.direct_map != work->arch.direct_map) ||
7684               work->wakeup_all)
7685                 return;
7686
7687         r = kvm_mmu_reload(vcpu);
7688         if (unlikely(r))
7689                 return;
7690
7691         if (!vcpu->arch.mmu.direct_map &&
7692               work->arch.cr3 != vcpu->arch.mmu.get_cr3(vcpu))
7693                 return;
7694
7695         vcpu->arch.mmu.page_fault(vcpu, work->gva, 0, true);
7696 }
7697
7698 static inline u32 kvm_async_pf_hash_fn(gfn_t gfn)
7699 {
7700         return hash_32(gfn & 0xffffffff, order_base_2(ASYNC_PF_PER_VCPU));
7701 }
7702
7703 static inline u32 kvm_async_pf_next_probe(u32 key)
7704 {
7705         return (key + 1) & (roundup_pow_of_two(ASYNC_PF_PER_VCPU) - 1);
7706 }
7707
7708 static void kvm_add_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
7709 {
7710         u32 key = kvm_async_pf_hash_fn(gfn);
7711
7712         while (vcpu->arch.apf.gfns[key] != ~0)
7713                 key = kvm_async_pf_next_probe(key);
7714
7715         vcpu->arch.apf.gfns[key] = gfn;
7716 }
7717
7718 static u32 kvm_async_pf_gfn_slot(struct kvm_vcpu *vcpu, gfn_t gfn)
7719 {
7720         int i;
7721         u32 key = kvm_async_pf_hash_fn(gfn);
7722
7723         for (i = 0; i < roundup_pow_of_two(ASYNC_PF_PER_VCPU) &&
7724                      (vcpu->arch.apf.gfns[key] != gfn &&
7725                       vcpu->arch.apf.gfns[key] != ~0); i++)
7726                 key = kvm_async_pf_next_probe(key);
7727
7728         return key;
7729 }
7730
7731 bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
7732 {
7733         return vcpu->arch.apf.gfns[kvm_async_pf_gfn_slot(vcpu, gfn)] == gfn;
7734 }
7735
7736 static void kvm_del_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
7737 {
7738         u32 i, j, k;
7739
7740         i = j = kvm_async_pf_gfn_slot(vcpu, gfn);
7741         while (true) {
7742                 vcpu->arch.apf.gfns[i] = ~0;
7743                 do {
7744                         j = kvm_async_pf_next_probe(j);
7745                         if (vcpu->arch.apf.gfns[j] == ~0)
7746                                 return;
7747                         k = kvm_async_pf_hash_fn(vcpu->arch.apf.gfns[j]);
7748                         /*
7749                          * k lies cyclically in ]i,j]
7750                          * |    i.k.j |
7751                          * |....j i.k.| or  |.k..j i...|
7752                          */
7753                 } while ((i <= j) ? (i < k && k <= j) : (i < k || k <= j));
7754                 vcpu->arch.apf.gfns[i] = vcpu->arch.apf.gfns[j];
7755                 i = j;
7756         }
7757 }
7758
7759 static int apf_put_user(struct kvm_vcpu *vcpu, u32 val)
7760 {
7761
7762         return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apf.data, &val,
7763                                       sizeof(val));
7764 }
7765
7766 void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
7767                                      struct kvm_async_pf *work)
7768 {
7769         struct x86_exception fault;
7770
7771         trace_kvm_async_pf_not_present(work->arch.token, work->gva);
7772         kvm_add_async_pf_gfn(vcpu, work->arch.gfn);
7773
7774         if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) ||
7775             (vcpu->arch.apf.send_user_only &&
7776              kvm_x86_ops->get_cpl(vcpu) == 0))
7777                 kvm_make_request(KVM_REQ_APF_HALT, vcpu);
7778         else if (!apf_put_user(vcpu, KVM_PV_REASON_PAGE_NOT_PRESENT)) {
7779                 fault.vector = PF_VECTOR;
7780                 fault.error_code_valid = true;
7781                 fault.error_code = 0;
7782                 fault.nested_page_fault = false;
7783                 fault.address = work->arch.token;
7784                 kvm_inject_page_fault(vcpu, &fault);
7785         }
7786 }
7787
7788 void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
7789                                  struct kvm_async_pf *work)
7790 {
7791         struct x86_exception fault;
7792
7793         trace_kvm_async_pf_ready(work->arch.token, work->gva);
7794         if (work->wakeup_all)
7795                 work->arch.token = ~0; /* broadcast wakeup */
7796         else
7797                 kvm_del_async_pf_gfn(vcpu, work->arch.gfn);
7798
7799         if ((vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) &&
7800             !apf_put_user(vcpu, KVM_PV_REASON_PAGE_READY)) {
7801                 fault.vector = PF_VECTOR;
7802                 fault.error_code_valid = true;
7803                 fault.error_code = 0;
7804                 fault.nested_page_fault = false;
7805                 fault.address = work->arch.token;
7806                 kvm_inject_page_fault(vcpu, &fault);
7807         }
7808         vcpu->arch.apf.halted = false;
7809         vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
7810 }
7811
7812 bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
7813 {
7814         if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED))
7815                 return true;
7816         else
7817                 return !kvm_event_needs_reinjection(vcpu) &&
7818                         kvm_x86_ops->interrupt_allowed(vcpu);
7819 }
7820
7821 void kvm_arch_register_noncoherent_dma(struct kvm *kvm)
7822 {
7823         atomic_inc(&kvm->arch.noncoherent_dma_count);
7824 }
7825 EXPORT_SYMBOL_GPL(kvm_arch_register_noncoherent_dma);
7826
7827 void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm)
7828 {
7829         atomic_dec(&kvm->arch.noncoherent_dma_count);
7830 }
7831 EXPORT_SYMBOL_GPL(kvm_arch_unregister_noncoherent_dma);
7832
7833 bool kvm_arch_has_noncoherent_dma(struct kvm *kvm)
7834 {
7835         return atomic_read(&kvm->arch.noncoherent_dma_count);
7836 }
7837 EXPORT_SYMBOL_GPL(kvm_arch_has_noncoherent_dma);
7838
7839 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit);
7840 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_inj_virq);
7841 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_page_fault);
7842 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_msr);
7843 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_cr);
7844 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmrun);
7845 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit);
7846 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit_inject);
7847 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intr_vmexit);
7848 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_invlpga);
7849 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_skinit);
7850 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intercepts);
7851 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_write_tsc_offset);
7852 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_ple_window);