1 /*
2  * Kernel-based Virtual Machine driver for Linux
3  *
4  * derived from drivers/kvm/kvm_main.c
5  *
6  * Copyright (C) 2006 Qumranet, Inc.
7  * Copyright (C) 2008 Qumranet, Inc.
8  * Copyright IBM Corporation, 2008
9  * Copyright 2010 Red Hat, Inc. and/or its affiliates.
10  *
11  * Authors:
12  *   Avi Kivity   <avi@qumranet.com>
13  *   Yaniv Kamay  <yaniv@qumranet.com>
14  *   Amit Shah    <amit.shah@qumranet.com>
15  *   Ben-Ami Yassour <benami@il.ibm.com>
16  *
17  * This work is licensed under the terms of the GNU GPL, version 2.  See
18  * the COPYING file in the top-level directory.
19  *
20  */
21
22 #include <linux/kvm_host.h>
23 #include "irq.h"
24 #include "mmu.h"
25 #include "i8254.h"
26 #include "tss.h"
27 #include "kvm_cache_regs.h"
28 #include "x86.h"
29 #include "cpuid.h"
30
31 #include <linux/clocksource.h>
32 #include <linux/interrupt.h>
33 #include <linux/kvm.h>
34 #include <linux/fs.h>
35 #include <linux/vmalloc.h>
36 #include <linux/module.h>
37 #include <linux/mman.h>
38 #include <linux/highmem.h>
39 #include <linux/iommu.h>
40 #include <linux/intel-iommu.h>
41 #include <linux/cpufreq.h>
42 #include <linux/user-return-notifier.h>
43 #include <linux/srcu.h>
44 #include <linux/slab.h>
45 #include <linux/perf_event.h>
46 #include <linux/uaccess.h>
47 #include <linux/hash.h>
48 #include <linux/pci.h>
49 #include <trace/events/kvm.h>
50
51 #define CREATE_TRACE_POINTS
52 #include "trace.h"
53
54 #include <asm/debugreg.h>
55 #include <asm/msr.h>
56 #include <asm/desc.h>
57 #include <asm/mtrr.h>
58 #include <asm/mce.h>
59 #include <asm/i387.h>
60 #include <asm/fpu-internal.h> /* Ugh! */
61 #include <asm/xcr.h>
62 #include <asm/pvclock.h>
63 #include <asm/div64.h>
64
65 #define MAX_IO_MSRS 256
66 #define KVM_MAX_MCE_BANKS 32
67 #define KVM_MCE_CAP_SUPPORTED (MCG_CTL_P | MCG_SER_P)
68
69 #define emul_to_vcpu(ctxt) \
70         container_of(ctxt, struct kvm_vcpu, arch.emulate_ctxt)
71
72 /* EFER defaults:
73  * - enable syscall by default because it is emulated by KVM
74  * - enable LME and LMA by default on 64-bit KVM
75  */
76 #ifdef CONFIG_X86_64
77 static
78 u64 __read_mostly efer_reserved_bits = ~((u64)(EFER_SCE | EFER_LME | EFER_LMA));
79 #else
80 static u64 __read_mostly efer_reserved_bits = ~((u64)EFER_SCE);
81 #endif
82
83 #define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
84 #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
85
86 static void update_cr8_intercept(struct kvm_vcpu *vcpu);
87 static void process_nmi(struct kvm_vcpu *vcpu);
88
89 struct kvm_x86_ops *kvm_x86_ops;
90 EXPORT_SYMBOL_GPL(kvm_x86_ops);
91
92 static bool ignore_msrs = false;
93 module_param(ignore_msrs, bool, S_IRUGO | S_IWUSR);
94
95 bool kvm_has_tsc_control;
96 EXPORT_SYMBOL_GPL(kvm_has_tsc_control);
97 u32  kvm_max_guest_tsc_khz;
98 EXPORT_SYMBOL_GPL(kvm_max_guest_tsc_khz);
99
100 /* tsc tolerance in parts per million - default to 1/2 of the NTP threshold */
101 static u32 tsc_tolerance_ppm = 250;
102 module_param(tsc_tolerance_ppm, uint, S_IRUGO | S_IWUSR);
103
104 #define KVM_NR_SHARED_MSRS 16
105
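/*
 * KVM lets the guest run with its own values in a handful of MSRs that the
 * host also uses (e.g. the SYSCALL MSRs).  Rather than restoring the host
 * values on every vmexit, restoration is deferred until the CPU actually
 * returns to userspace, via a user_return_notifier (see kvm_on_user_return()
 * below).  shared_msrs_global lists which MSRs take part in this scheme; the
 * per-cpu shared_msrs tracks their host and currently loaded values.
 */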
106 struct kvm_shared_msrs_global {
107         int nr;
108         u32 msrs[KVM_NR_SHARED_MSRS];
109 };
110
111 struct kvm_shared_msrs {
112         struct user_return_notifier urn;
113         bool registered;
114         struct kvm_shared_msr_values {
115                 u64 host;
116                 u64 curr;
117         } values[KVM_NR_SHARED_MSRS];
118 };
119
120 static struct kvm_shared_msrs_global __read_mostly shared_msrs_global;
121 static DEFINE_PER_CPU(struct kvm_shared_msrs, shared_msrs);
122
123 struct kvm_stats_debugfs_item debugfs_entries[] = {
124         { "pf_fixed", VCPU_STAT(pf_fixed) },
125         { "pf_guest", VCPU_STAT(pf_guest) },
126         { "tlb_flush", VCPU_STAT(tlb_flush) },
127         { "invlpg", VCPU_STAT(invlpg) },
128         { "exits", VCPU_STAT(exits) },
129         { "io_exits", VCPU_STAT(io_exits) },
130         { "mmio_exits", VCPU_STAT(mmio_exits) },
131         { "signal_exits", VCPU_STAT(signal_exits) },
132         { "irq_window", VCPU_STAT(irq_window_exits) },
133         { "nmi_window", VCPU_STAT(nmi_window_exits) },
134         { "halt_exits", VCPU_STAT(halt_exits) },
135         { "halt_wakeup", VCPU_STAT(halt_wakeup) },
136         { "hypercalls", VCPU_STAT(hypercalls) },
137         { "request_irq", VCPU_STAT(request_irq_exits) },
138         { "irq_exits", VCPU_STAT(irq_exits) },
139         { "host_state_reload", VCPU_STAT(host_state_reload) },
140         { "efer_reload", VCPU_STAT(efer_reload) },
141         { "fpu_reload", VCPU_STAT(fpu_reload) },
142         { "insn_emulation", VCPU_STAT(insn_emulation) },
143         { "insn_emulation_fail", VCPU_STAT(insn_emulation_fail) },
144         { "irq_injections", VCPU_STAT(irq_injections) },
145         { "nmi_injections", VCPU_STAT(nmi_injections) },
146         { "mmu_shadow_zapped", VM_STAT(mmu_shadow_zapped) },
147         { "mmu_pte_write", VM_STAT(mmu_pte_write) },
148         { "mmu_pte_updated", VM_STAT(mmu_pte_updated) },
149         { "mmu_pde_zapped", VM_STAT(mmu_pde_zapped) },
150         { "mmu_flooded", VM_STAT(mmu_flooded) },
151         { "mmu_recycled", VM_STAT(mmu_recycled) },
152         { "mmu_cache_miss", VM_STAT(mmu_cache_miss) },
153         { "mmu_unsync", VM_STAT(mmu_unsync) },
154         { "remote_tlb_flush", VM_STAT(remote_tlb_flush) },
155         { "largepages", VM_STAT(lpages) },
156         { NULL }
157 };
158
159 u64 __read_mostly host_xcr0;
160
161 static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt);
162
163 static int kvm_vcpu_reset(struct kvm_vcpu *vcpu);
164
165 static inline void kvm_async_pf_hash_reset(struct kvm_vcpu *vcpu)
166 {
167         int i;
168         for (i = 0; i < roundup_pow_of_two(ASYNC_PF_PER_VCPU); i++)
169                 vcpu->arch.apf.gfns[i] = ~0;
170 }
171
172 static void kvm_on_user_return(struct user_return_notifier *urn)
173 {
174         unsigned slot;
175         struct kvm_shared_msrs *locals
176                 = container_of(urn, struct kvm_shared_msrs, urn);
177         struct kvm_shared_msr_values *values;
178
179         for (slot = 0; slot < shared_msrs_global.nr; ++slot) {
180                 values = &locals->values[slot];
181                 if (values->host != values->curr) {
182                         wrmsrl(shared_msrs_global.msrs[slot], values->host);
183                         values->curr = values->host;
184                 }
185         }
186         locals->registered = false;
187         user_return_notifier_unregister(urn);
188 }
189
190 static void shared_msr_update(unsigned slot, u32 msr)
191 {
192         struct kvm_shared_msrs *smsr;
193         u64 value;
194
195         smsr = &__get_cpu_var(shared_msrs);
196         /* Only reading shared_msrs_global.nr here, and nobody should be
197          * modifying it at this time, so no locking is needed. */
198         if (slot >= shared_msrs_global.nr) {
199                 printk(KERN_ERR "kvm: invalid MSR slot!");
200                 return;
201         }
202         rdmsrl_safe(msr, &value);
203         smsr->values[slot].host = value;
204         smsr->values[slot].curr = value;
205 }
206
207 void kvm_define_shared_msr(unsigned slot, u32 msr)
208 {
209         if (slot >= shared_msrs_global.nr)
210                 shared_msrs_global.nr = slot + 1;
211         shared_msrs_global.msrs[slot] = msr;
212         /* make sure the updates to shared_msrs_global are visible before they are used */
213         smp_wmb();
214 }
215 EXPORT_SYMBOL_GPL(kvm_define_shared_msr);
216
217 static void kvm_shared_msr_cpu_online(void)
218 {
219         unsigned i;
220
221         for (i = 0; i < shared_msrs_global.nr; ++i)
222                 shared_msr_update(i, shared_msrs_global.msrs[i]);
223 }
224
225 void kvm_set_shared_msr(unsigned slot, u64 value, u64 mask)
226 {
227         struct kvm_shared_msrs *smsr = &__get_cpu_var(shared_msrs);
228
229         if (((value ^ smsr->values[slot].curr) & mask) == 0)
230                 return;
231         smsr->values[slot].curr = value;
232         wrmsrl(shared_msrs_global.msrs[slot], value);
233         if (!smsr->registered) {
234                 smsr->urn.on_user_return = kvm_on_user_return;
235                 user_return_notifier_register(&smsr->urn);
236                 smsr->registered = true;
237         }
238 }
239 EXPORT_SYMBOL_GPL(kvm_set_shared_msr);
240
241 static void drop_user_return_notifiers(void *ignore)
242 {
243         struct kvm_shared_msrs *smsr = &__get_cpu_var(shared_msrs);
244
245         if (smsr->registered)
246                 kvm_on_user_return(&smsr->urn);
247 }
248
249 u64 kvm_get_apic_base(struct kvm_vcpu *vcpu)
250 {
251         return vcpu->arch.apic_base;
252 }
253 EXPORT_SYMBOL_GPL(kvm_get_apic_base);
254
255 void kvm_set_apic_base(struct kvm_vcpu *vcpu, u64 data)
256 {
257         /* TODO: reserve bits check */
258         kvm_lapic_set_base(vcpu, data);
259 }
260 EXPORT_SYMBOL_GPL(kvm_set_apic_base);
261
262 #define EXCPT_BENIGN            0
263 #define EXCPT_CONTRIBUTORY      1
264 #define EXCPT_PF                2
265
266 static int exception_class(int vector)
267 {
268         switch (vector) {
269         case PF_VECTOR:
270                 return EXCPT_PF;
271         case DE_VECTOR:
272         case TS_VECTOR:
273         case NP_VECTOR:
274         case SS_VECTOR:
275         case GP_VECTOR:
276                 return EXCPT_CONTRIBUTORY;
277         default:
278                 break;
279         }
280         return EXCPT_BENIGN;
281 }
282
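/*
 * How two outstanding exceptions combine (SDM Table 5-5): two contributory
 * exceptions, or a #PF followed by any non-benign exception, escalate to
 * #DF; a pending #DF escalates to a triple fault; for benign combinations
 * the new exception simply replaces the pending one, in the hope that
 * re-executing the instruction regenerates the old one.
 */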
283 static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
284                 unsigned nr, bool has_error, u32 error_code,
285                 bool reinject)
286 {
287         u32 prev_nr;
288         int class1, class2;
289
290         kvm_make_request(KVM_REQ_EVENT, vcpu);
291
292         if (!vcpu->arch.exception.pending) {
293         queue:
294                 vcpu->arch.exception.pending = true;
295                 vcpu->arch.exception.has_error_code = has_error;
296                 vcpu->arch.exception.nr = nr;
297                 vcpu->arch.exception.error_code = error_code;
298                 vcpu->arch.exception.reinject = reinject;
299                 return;
300         }
301
302         /* a previous exception is still pending; check how the two combine */
303         prev_nr = vcpu->arch.exception.nr;
304         if (prev_nr == DF_VECTOR) {
305                 /* triple fault -> shutdown */
306                 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
307                 return;
308         }
309         class1 = exception_class(prev_nr);
310         class2 = exception_class(nr);
311         if ((class1 == EXCPT_CONTRIBUTORY && class2 == EXCPT_CONTRIBUTORY)
312                 || (class1 == EXCPT_PF && class2 != EXCPT_BENIGN)) {
313                 /* generate double fault per SDM Table 5-5 */
314                 vcpu->arch.exception.pending = true;
315                 vcpu->arch.exception.has_error_code = true;
316                 vcpu->arch.exception.nr = DF_VECTOR;
317                 vcpu->arch.exception.error_code = 0;
318         } else
319                 /* replace the previous exception with the new one in the
320                    hope that instruction re-execution will regenerate the
321                    lost exception */
322                 goto queue;
323 }
324
325 void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr)
326 {
327         kvm_multiple_exception(vcpu, nr, false, 0, false);
328 }
329 EXPORT_SYMBOL_GPL(kvm_queue_exception);
330
331 void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr)
332 {
333         kvm_multiple_exception(vcpu, nr, false, 0, true);
334 }
335 EXPORT_SYMBOL_GPL(kvm_requeue_exception);
336
337 void kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err)
338 {
339         if (err)
340                 kvm_inject_gp(vcpu, 0);
341         else
342                 kvm_x86_ops->skip_emulated_instruction(vcpu);
343 }
344 EXPORT_SYMBOL_GPL(kvm_complete_insn_gp);
345
346 void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
347 {
348         ++vcpu->stat.pf_guest;
349         vcpu->arch.cr2 = fault->address;
350         kvm_queue_exception_e(vcpu, PF_VECTOR, fault->error_code);
351 }
352 EXPORT_SYMBOL_GPL(kvm_inject_page_fault);
353
354 void kvm_propagate_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
355 {
356         if (mmu_is_nested(vcpu) && !fault->nested_page_fault)
357                 vcpu->arch.nested_mmu.inject_page_fault(vcpu, fault);
358         else
359                 vcpu->arch.mmu.inject_page_fault(vcpu, fault);
360 }
361
362 void kvm_inject_nmi(struct kvm_vcpu *vcpu)
363 {
364         atomic_inc(&vcpu->arch.nmi_queued);
365         kvm_make_request(KVM_REQ_NMI, vcpu);
366 }
367 EXPORT_SYMBOL_GPL(kvm_inject_nmi);
368
369 void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
370 {
371         kvm_multiple_exception(vcpu, nr, true, error_code, false);
372 }
373 EXPORT_SYMBOL_GPL(kvm_queue_exception_e);
374
375 void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
376 {
377         kvm_multiple_exception(vcpu, nr, true, error_code, true);
378 }
379 EXPORT_SYMBOL_GPL(kvm_requeue_exception_e);
380
381 /*
382  * Check whether cpl <= required_cpl; if so, return true.  Otherwise queue
383  * a #GP and return false.
384  */
385 bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl)
386 {
387         if (kvm_x86_ops->get_cpl(vcpu) <= required_cpl)
388                 return true;
389         kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
390         return false;
391 }
392 EXPORT_SYMBOL_GPL(kvm_require_cpl);
393
394 /*
395  * This function will be used to read from the physical memory of the currently
396  * running guest. The difference from kvm_read_guest_page is that this function
397  * can read from guest physical or from the guest's guest physical memory.
398  */
399 int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
400                             gfn_t ngfn, void *data, int offset, int len,
401                             u32 access)
402 {
403         gfn_t real_gfn;
404         gpa_t ngpa;
405
406         ngpa     = gfn_to_gpa(ngfn);
407         real_gfn = mmu->translate_gpa(vcpu, ngpa, access);
408         if (real_gfn == UNMAPPED_GVA)
409                 return -EFAULT;
410
411         real_gfn = gpa_to_gfn(real_gfn);
412
413         return kvm_read_guest_page(vcpu->kvm, real_gfn, data, offset, len);
414 }
415 EXPORT_SYMBOL_GPL(kvm_read_guest_page_mmu);
416
417 int kvm_read_nested_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn,
418                                void *data, int offset, int len, u32 access)
419 {
420         return kvm_read_guest_page_mmu(vcpu, vcpu->arch.walk_mmu, gfn,
421                                        data, offset, len, access);
422 }
423
424 /*
425  * Load the PAE PDPTRs.  Return true if they are all valid.
426  */
427 int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3)
428 {
429         gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT;
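        /*
         * In PAE mode CR3 bits 5:11 locate the 32-byte-aligned PDPT within
         * its page; convert that to an index in u64 units (four 8-byte
         * PDPTEs per table).
         */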
430         unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2;
431         int i;
432         int ret;
433         u64 pdpte[ARRAY_SIZE(mmu->pdptrs)];
434
435         ret = kvm_read_guest_page_mmu(vcpu, mmu, pdpt_gfn, pdpte,
436                                       offset * sizeof(u64), sizeof(pdpte),
437                                       PFERR_USER_MASK|PFERR_WRITE_MASK);
438         if (ret < 0) {
439                 ret = 0;
440                 goto out;
441         }
442         for (i = 0; i < ARRAY_SIZE(pdpte); ++i) {
443                 if (is_present_gpte(pdpte[i]) &&
444                     (pdpte[i] & vcpu->arch.mmu.rsvd_bits_mask[0][2])) {
445                         ret = 0;
446                         goto out;
447                 }
448         }
449         ret = 1;
450
451         memcpy(mmu->pdptrs, pdpte, sizeof(mmu->pdptrs));
452         __set_bit(VCPU_EXREG_PDPTR,
453                   (unsigned long *)&vcpu->arch.regs_avail);
454         __set_bit(VCPU_EXREG_PDPTR,
455                   (unsigned long *)&vcpu->arch.regs_dirty);
456 out:
457
458         return ret;
459 }
460 EXPORT_SYMBOL_GPL(load_pdptrs);
461
462 static bool pdptrs_changed(struct kvm_vcpu *vcpu)
463 {
464         u64 pdpte[ARRAY_SIZE(vcpu->arch.walk_mmu->pdptrs)];
465         bool changed = true;
466         int offset;
467         gfn_t gfn;
468         int r;
469
470         if (is_long_mode(vcpu) || !is_pae(vcpu))
471                 return false;
472
473         if (!test_bit(VCPU_EXREG_PDPTR,
474                       (unsigned long *)&vcpu->arch.regs_avail))
475                 return true;
476
477         gfn = (kvm_read_cr3(vcpu) & ~31u) >> PAGE_SHIFT;
478         offset = (kvm_read_cr3(vcpu) & ~31u) & (PAGE_SIZE - 1);
479         r = kvm_read_nested_guest_page(vcpu, gfn, pdpte, offset, sizeof(pdpte),
480                                        PFERR_USER_MASK | PFERR_WRITE_MASK);
481         if (r < 0)
482                 goto out;
483         changed = memcmp(pdpte, vcpu->arch.walk_mmu->pdptrs, sizeof(pdpte)) != 0;
484 out:
485
486         return changed;
487 }
488
489 int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
490 {
491         unsigned long old_cr0 = kvm_read_cr0(vcpu);
492         unsigned long update_bits = X86_CR0_PG | X86_CR0_WP |
493                                     X86_CR0_CD | X86_CR0_NW;
494
495         cr0 |= X86_CR0_ET;
496
497 #ifdef CONFIG_X86_64
498         if (cr0 & 0xffffffff00000000UL)
499                 return 1;
500 #endif
501
502         cr0 &= ~CR0_RESERVED_BITS;
503
504         if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD))
505                 return 1;
506
507         if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE))
508                 return 1;
509
510         if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
511 #ifdef CONFIG_X86_64
512                 if ((vcpu->arch.efer & EFER_LME)) {
513                         int cs_db, cs_l;
514
515                         if (!is_pae(vcpu))
516                                 return 1;
517                         kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
518                         if (cs_l)
519                                 return 1;
520                 } else
521 #endif
522                 if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.walk_mmu,
523                                                  kvm_read_cr3(vcpu)))
524                         return 1;
525         }
526
527         if (!(cr0 & X86_CR0_PG) && kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE))
528                 return 1;
529
530         kvm_x86_ops->set_cr0(vcpu, cr0);
531
532         if ((cr0 ^ old_cr0) & X86_CR0_PG) {
533                 kvm_clear_async_pf_completion_queue(vcpu);
534                 kvm_async_pf_hash_reset(vcpu);
535         }
536
537         if ((cr0 ^ old_cr0) & update_bits)
538                 kvm_mmu_reset_context(vcpu);
539         return 0;
540 }
541 EXPORT_SYMBOL_GPL(kvm_set_cr0);
542
543 void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
544 {
545         (void)kvm_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~0x0eul) | (msw & 0x0f));
546 }
547 EXPORT_SYMBOL_GPL(kvm_lmsw);
548
549 int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
550 {
551         u64 xcr0;
552
553         /* Only support XCR_XFEATURE_ENABLED_MASK(xcr0) now  */
554         if (index != XCR_XFEATURE_ENABLED_MASK)
555                 return 1;
556         xcr0 = xcr;
557         if (kvm_x86_ops->get_cpl(vcpu) != 0)
558                 return 1;
559         if (!(xcr0 & XSTATE_FP))
560                 return 1;
561         if ((xcr0 & XSTATE_YMM) && !(xcr0 & XSTATE_SSE))
562                 return 1;
563         if (xcr0 & ~host_xcr0)
564                 return 1;
565         vcpu->arch.xcr0 = xcr0;
566         vcpu->guest_xcr0_loaded = 0;
567         return 0;
568 }
569
570 int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
571 {
572         if (__kvm_set_xcr(vcpu, index, xcr)) {
573                 kvm_inject_gp(vcpu, 0);
574                 return 1;
575         }
576         return 0;
577 }
578 EXPORT_SYMBOL_GPL(kvm_set_xcr);
579
580 int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
581 {
582         unsigned long old_cr4 = kvm_read_cr4(vcpu);
583         unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE |
584                                    X86_CR4_PAE | X86_CR4_SMEP;
585         if (cr4 & CR4_RESERVED_BITS)
586                 return 1;
587
588         if (!guest_cpuid_has_xsave(vcpu) && (cr4 & X86_CR4_OSXSAVE))
589                 return 1;
590
591         if (!guest_cpuid_has_smep(vcpu) && (cr4 & X86_CR4_SMEP))
592                 return 1;
593
594         if (!guest_cpuid_has_fsgsbase(vcpu) && (cr4 & X86_CR4_RDWRGSFS))
595                 return 1;
596
597         if (is_long_mode(vcpu)) {
598                 if (!(cr4 & X86_CR4_PAE))
599                         return 1;
600         } else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
601                    && ((cr4 ^ old_cr4) & pdptr_bits)
602                    && !load_pdptrs(vcpu, vcpu->arch.walk_mmu,
603                                    kvm_read_cr3(vcpu)))
604                 return 1;
605
606         if ((cr4 & X86_CR4_PCIDE) && !(old_cr4 & X86_CR4_PCIDE)) {
607                 if (!guest_cpuid_has_pcid(vcpu))
608                         return 1;
609
610                 /* PCID cannot be enabled when cr3[11:0] != 000H or EFER.LMA = 0 */
611                 if ((kvm_read_cr3(vcpu) & X86_CR3_PCID_MASK) || !is_long_mode(vcpu))
612                         return 1;
613         }
614
615         if (kvm_x86_ops->set_cr4(vcpu, cr4))
616                 return 1;
617
618         if (((cr4 ^ old_cr4) & pdptr_bits) ||
619             (!(cr4 & X86_CR4_PCIDE) && (old_cr4 & X86_CR4_PCIDE)))
620                 kvm_mmu_reset_context(vcpu);
621
622         if ((cr4 ^ old_cr4) & X86_CR4_OSXSAVE)
623                 kvm_update_cpuid(vcpu);
624
625         return 0;
626 }
627 EXPORT_SYMBOL_GPL(kvm_set_cr4);
628
629 int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
630 {
631         if (cr3 == kvm_read_cr3(vcpu) && !pdptrs_changed(vcpu)) {
632                 kvm_mmu_sync_roots(vcpu);
633                 kvm_mmu_flush_tlb(vcpu);
634                 return 0;
635         }
636
637         if (is_long_mode(vcpu)) {
638                 if (kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE)) {
639                         if (cr3 & CR3_PCID_ENABLED_RESERVED_BITS)
640                                 return 1;
641                 } else
642                         if (cr3 & CR3_L_MODE_RESERVED_BITS)
643                                 return 1;
644         } else {
645                 if (is_pae(vcpu)) {
646                         if (cr3 & CR3_PAE_RESERVED_BITS)
647                                 return 1;
648                         if (is_paging(vcpu) &&
649                             !load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))
650                                 return 1;
651                 }
652                 /*
653                  * We don't check reserved bits in nonpae mode, because
654                  * this isn't enforced, and VMware depends on this.
655                  */
656         }
657
658         /*
659          * Does the new cr3 value map to physical memory? (Note, we
660          * catch an invalid cr3 even in real-mode, because it would
661          * cause trouble later on when we turn on paging anyway.)
662          *
663          * A real CPU would silently accept an invalid cr3 and would
664          * attempt to use it - with largely undefined (and often hard
665          * to debug) behavior on the guest side.
666          */
667         if (unlikely(!gfn_to_memslot(vcpu->kvm, cr3 >> PAGE_SHIFT)))
668                 return 1;
669         vcpu->arch.cr3 = cr3;
670         __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
671         vcpu->arch.mmu.new_cr3(vcpu);
672         return 0;
673 }
674 EXPORT_SYMBOL_GPL(kvm_set_cr3);
675
676 int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
677 {
678         if (cr8 & CR8_RESERVED_BITS)
679                 return 1;
680         if (irqchip_in_kernel(vcpu->kvm))
681                 kvm_lapic_set_tpr(vcpu, cr8);
682         else
683                 vcpu->arch.cr8 = cr8;
684         return 0;
685 }
686 EXPORT_SYMBOL_GPL(kvm_set_cr8);
687
688 unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu)
689 {
690         if (irqchip_in_kernel(vcpu->kvm))
691                 return kvm_lapic_get_cr8(vcpu);
692         else
693                 return vcpu->arch.cr8;
694 }
695 EXPORT_SYMBOL_GPL(kvm_get_cr8);
696
697 static void kvm_update_dr7(struct kvm_vcpu *vcpu)
698 {
699         unsigned long dr7;
700
701         if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
702                 dr7 = vcpu->arch.guest_debug_dr7;
703         else
704                 dr7 = vcpu->arch.dr7;
705         kvm_x86_ops->set_dr7(vcpu, dr7);
706         vcpu->arch.switch_db_regs = (dr7 & DR7_BP_EN_MASK);
707 }
708
709 static int __kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
710 {
711         switch (dr) {
712         case 0 ... 3:
713                 vcpu->arch.db[dr] = val;
714                 if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
715                         vcpu->arch.eff_db[dr] = val;
716                 break;
717         case 4:
718                 if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
719                         return 1; /* #UD */
720                 /* fall through */
721         case 6:
722                 if (val & 0xffffffff00000000ULL)
723                         return -1; /* #GP */
724                 vcpu->arch.dr6 = (val & DR6_VOLATILE) | DR6_FIXED_1;
725                 break;
726         case 5:
727                 if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
728                         return 1; /* #UD */
729                 /* fall through */
730         default: /* 7 */
731                 if (val & 0xffffffff00000000ULL)
732                         return -1; /* #GP */
733                 vcpu->arch.dr7 = (val & DR7_VOLATILE) | DR7_FIXED_1;
734                 kvm_update_dr7(vcpu);
735                 break;
736         }
737
738         return 0;
739 }
740
741 int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
742 {
743         int res;
744
745         res = __kvm_set_dr(vcpu, dr, val);
746         if (res > 0)
747                 kvm_queue_exception(vcpu, UD_VECTOR);
748         else if (res < 0)
749                 kvm_inject_gp(vcpu, 0);
750
751         return res;
752 }
753 EXPORT_SYMBOL_GPL(kvm_set_dr);
754
755 static int _kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
756 {
757         switch (dr) {
758         case 0 ... 3:
759                 *val = vcpu->arch.db[dr];
760                 break;
761         case 4:
762                 if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
763                         return 1;
764                 /* fall through */
765         case 6:
766                 *val = vcpu->arch.dr6;
767                 break;
768         case 5:
769                 if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
770                         return 1;
771                 /* fall through */
772         default: /* 7 */
773                 *val = vcpu->arch.dr7;
774                 break;
775         }
776
777         return 0;
778 }
779
780 int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
781 {
782         if (_kvm_get_dr(vcpu, dr, val)) {
783                 kvm_queue_exception(vcpu, UD_VECTOR);
784                 return 1;
785         }
786         return 0;
787 }
788 EXPORT_SYMBOL_GPL(kvm_get_dr);
789
790 bool kvm_rdpmc(struct kvm_vcpu *vcpu)
791 {
792         u32 ecx = kvm_register_read(vcpu, VCPU_REGS_RCX);
793         u64 data;
794         int err;
795
796         err = kvm_pmu_read_pmc(vcpu, ecx, &data);
797         if (err)
798                 return err;
799         kvm_register_write(vcpu, VCPU_REGS_RAX, (u32)data);
800         kvm_register_write(vcpu, VCPU_REGS_RDX, data >> 32);
801         return err;
802 }
803 EXPORT_SYMBOL_GPL(kvm_rdpmc);
804
805 /*
806  * List of msr numbers which we expose to userspace through KVM_GET_MSRS
807  * and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST.
808  *
809  * This list is modified at module load time to reflect the
810  * capabilities of the host cpu. This capabilities test skips MSRs that are
811  * kvm-specific. Those are put in the beginning of the list.
812  */
813
814 #define KVM_SAVE_MSRS_BEGIN     10
815 static u32 msrs_to_save[] = {
816         MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
817         MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW,
818         HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL,
819         HV_X64_MSR_APIC_ASSIST_PAGE, MSR_KVM_ASYNC_PF_EN, MSR_KVM_STEAL_TIME,
820         MSR_KVM_PV_EOI_EN,
821         MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
822         MSR_STAR,
823 #ifdef CONFIG_X86_64
824         MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
825 #endif
826         MSR_IA32_TSC, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA
827 };
828
829 static unsigned num_msrs_to_save;
830
831 static const u32 emulated_msrs[] = {
832         MSR_IA32_TSCDEADLINE,
833         MSR_IA32_MISC_ENABLE,
834         MSR_IA32_MCG_STATUS,
835         MSR_IA32_MCG_CTL,
836 };
837
838 static int set_efer(struct kvm_vcpu *vcpu, u64 efer)
839 {
840         u64 old_efer = vcpu->arch.efer;
841
842         if (efer & efer_reserved_bits)
843                 return 1;
844
845         if (is_paging(vcpu)
846             && (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME))
847                 return 1;
848
849         if (efer & EFER_FFXSR) {
850                 struct kvm_cpuid_entry2 *feat;
851
852                 feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
853                 if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT)))
854                         return 1;
855         }
856
857         if (efer & EFER_SVME) {
858                 struct kvm_cpuid_entry2 *feat;
859
860                 feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
861                 if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM)))
862                         return 1;
863         }
864
865         efer &= ~EFER_LMA;
866         efer |= vcpu->arch.efer & EFER_LMA;
867
868         kvm_x86_ops->set_efer(vcpu, efer);
869
870         vcpu->arch.mmu.base_role.nxe = (efer & EFER_NX) && !tdp_enabled;
871
872         /* Update reserved bits */
873         if ((efer ^ old_efer) & EFER_NX)
874                 kvm_mmu_reset_context(vcpu);
875
876         return 0;
877 }
878
879 void kvm_enable_efer_bits(u64 mask)
880 {
881        efer_reserved_bits &= ~mask;
882 }
883 EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);
884
885
886 /*
887  * Writes msr value into the appropriate "register".
888  * Returns 0 on success, non-0 otherwise.
889  * Assumes vcpu_load() was already called.
890  */
891 int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
892 {
893         return kvm_x86_ops->set_msr(vcpu, msr_index, data);
894 }
895
896 /*
897  * Adapt set_msr() to msr_io()'s calling convention
898  */
899 static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
900 {
901         return kvm_set_msr(vcpu, index, *data);
902 }
903
904 static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
905 {
906         int version;
907         int r;
908         struct pvclock_wall_clock wc;
909         struct timespec boot;
910
911         if (!wall_clock)
912                 return;
913
914         r = kvm_read_guest(kvm, wall_clock, &version, sizeof(version));
915         if (r)
916                 return;
917
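        /*
         * The version field works like a seqcount: it is made odd while the
         * wall clock is being rewritten and even again once the update is
         * complete, so the guest can retry if it observes an odd or changed
         * version.
         */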
918         if (version & 1)
919                 ++version;  /* first time write, random junk */
920
921         ++version;
922
923         kvm_write_guest(kvm, wall_clock, &version, sizeof(version));
924
925         /*
926          * The guest calculates current wall clock time by adding
927          * system time (updated by kvm_guest_time_update below) to the
928          * wall clock specified here.  guest system time equals host
929          * system time for us, thus we must fill in host boot time here.
930          */
931         getboottime(&boot);
932
933         if (kvm->arch.kvmclock_offset) {
934                 struct timespec ts = ns_to_timespec(kvm->arch.kvmclock_offset);
935                 boot = timespec_sub(boot, ts);
936         }
937         wc.sec = boot.tv_sec;
938         wc.nsec = boot.tv_nsec;
939         wc.version = version;
940
941         kvm_write_guest(kvm, wall_clock, &wc, sizeof(wc));
942
943         version++;
944         kvm_write_guest(kvm, wall_clock, &version, sizeof(version));
945 }
946
947 static uint32_t div_frac(uint32_t dividend, uint32_t divisor)
948 {
949         uint32_t quotient, remainder;
950
951         /* Don't try to replace this with do_div(); this one calculates
952          * "(dividend << 32) / divisor" */
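        /* e.g. div_frac(1, 3) returns 0x55555555, the 32-bit fixed-point
         * fraction closest to 1/3 */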
953         __asm__ ( "divl %4"
954                   : "=a" (quotient), "=d" (remainder)
955                   : "0" (0), "1" (dividend), "r" (divisor) );
956         return quotient;
957 }
958
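/*
 * Compute a (shift, 32-bit multiplier) pair so that a tick count in
 * base_khz units can be converted to scaled_khz units in the way
 * pvclock_scale_delta() does it: shift the delta by 'shift' (left when
 * positive, right when negative) and take the upper 32 bits of its
 * product with the multiplier.
 */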
959 static void kvm_get_time_scale(uint32_t scaled_khz, uint32_t base_khz,
960                                s8 *pshift, u32 *pmultiplier)
961 {
962         uint64_t scaled64;
963         int32_t  shift = 0;
964         uint64_t tps64;
965         uint32_t tps32;
966
967         tps64 = base_khz * 1000LL;
968         scaled64 = scaled_khz * 1000LL;
969         while (tps64 > scaled64*2 || tps64 & 0xffffffff00000000ULL) {
970                 tps64 >>= 1;
971                 shift--;
972         }
973
974         tps32 = (uint32_t)tps64;
975         while (tps32 <= scaled64 || scaled64 & 0xffffffff00000000ULL) {
976                 if (scaled64 & 0xffffffff00000000ULL || tps32 & 0x80000000)
977                         scaled64 >>= 1;
978                 else
979                         tps32 <<= 1;
980                 shift++;
981         }
982
983         *pshift = shift;
984         *pmultiplier = div_frac(scaled64, tps32);
985
986         pr_debug("%s: base_khz %u => %u, shift %d, mul %u\n",
987                  __func__, base_khz, scaled_khz, shift, *pmultiplier);
988 }
989
990 static inline u64 get_kernel_ns(void)
991 {
992         struct timespec ts;
993
994         WARN_ON(preemptible());
995         ktime_get_ts(&ts);
996         monotonic_to_bootbased(&ts);
997         return timespec_to_ns(&ts);
998 }
999
1000 static DEFINE_PER_CPU(unsigned long, cpu_tsc_khz);
1001 unsigned long max_tsc_khz;
1002
1003 static inline u64 nsec_to_cycles(struct kvm_vcpu *vcpu, u64 nsec)
1004 {
1005         return pvclock_scale_delta(nsec, vcpu->arch.virtual_tsc_mult,
1006                                    vcpu->arch.virtual_tsc_shift);
1007 }
1008
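/*
 * Scale a frequency given in kHz by 'ppm' parts per million, e.g.
 * adjust_tsc_khz(2600000, 250) == 2600650 and
 * adjust_tsc_khz(2600000, -250) == 2599350.
 */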
1009 static u32 adjust_tsc_khz(u32 khz, s32 ppm)
1010 {
1011         u64 v = (u64)khz * (1000000 + ppm);
1012         do_div(v, 1000000);
1013         return v;
1014 }
1015
1016 static void kvm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 this_tsc_khz)
1017 {
1018         u32 thresh_lo, thresh_hi;
1019         int use_scaling = 0;
1020
1021         /* Compute a scale to convert nanoseconds in TSC cycles */
1022         kvm_get_time_scale(this_tsc_khz, NSEC_PER_SEC / 1000,
1023                            &vcpu->arch.virtual_tsc_shift,
1024                            &vcpu->arch.virtual_tsc_mult);
1025         vcpu->arch.virtual_tsc_khz = this_tsc_khz;
1026
1027         /*
1028          * Compute the range of TSC rates that is acceptable within
1029          * the tolerance and decide whether the rate being applied is
1030          * within those bounds of the hardware rate.  If so, no
1031          * scaling or compensation needs to be done.
1032          */
1033         thresh_lo = adjust_tsc_khz(tsc_khz, -tsc_tolerance_ppm);
1034         thresh_hi = adjust_tsc_khz(tsc_khz, tsc_tolerance_ppm);
1035         if (this_tsc_khz < thresh_lo || this_tsc_khz > thresh_hi) {
1036                 pr_debug("kvm: requested TSC rate %u falls outside tolerance [%u,%u]\n", this_tsc_khz, thresh_lo, thresh_hi);
1037                 use_scaling = 1;
1038         }
1039         kvm_x86_ops->set_tsc_khz(vcpu, this_tsc_khz, use_scaling);
1040 }
1041
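/*
 * Compute what the guest TSC should read at host time kernel_ns: the
 * nanoseconds elapsed since the last guest TSC write, scaled to guest
 * cycles, plus the value that was written.
 */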
1042 static u64 compute_guest_tsc(struct kvm_vcpu *vcpu, s64 kernel_ns)
1043 {
1044         u64 tsc = pvclock_scale_delta(kernel_ns-vcpu->arch.this_tsc_nsec,
1045                                       vcpu->arch.virtual_tsc_mult,
1046                                       vcpu->arch.virtual_tsc_shift);
1047         tsc += vcpu->arch.this_tsc_write;
1048         return tsc;
1049 }
1050
1051 void kvm_write_tsc(struct kvm_vcpu *vcpu, u64 data)
1052 {
1053         struct kvm *kvm = vcpu->kvm;
1054         u64 offset, ns, elapsed;
1055         unsigned long flags;
1056         s64 usdiff;
1057
1058         raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
1059         offset = kvm_x86_ops->compute_tsc_offset(vcpu, data);
1060         ns = get_kernel_ns();
1061         elapsed = ns - kvm->arch.last_tsc_nsec;
1062
1063         /* n.b - signed multiplication and division required */
1064         usdiff = data - kvm->arch.last_tsc_write;
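        /*
         * Convert the cycle delta to microseconds at the guest's virtual
         * TSC rate; it is compared against USEC_PER_SEC below.
         */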
1065 #ifdef CONFIG_X86_64
1066         usdiff = (usdiff * 1000) / vcpu->arch.virtual_tsc_khz;
1067 #else
1068         /* do_div() only does unsigned */
1069         asm("idivl %2; xor %%edx, %%edx"
1070             : "=A"(usdiff)
1071             : "A"(usdiff * 1000), "rm"(vcpu->arch.virtual_tsc_khz));
1072 #endif
1073         do_div(elapsed, 1000);
1074         usdiff -= elapsed;
1075         if (usdiff < 0)
1076                 usdiff = -usdiff;
1077
1078         /*
1079          * Special case: TSC write with a small delta (1 second) of virtual
1080          * cycle time against real time is interpreted as an attempt to
1081          * synchronize the CPU.
1082          *
1083          * For a reliable TSC, we can match TSC offsets, and for an unstable
1084          * TSC, we add elapsed time in this computation.  We could let the
1085          * compensation code attempt to catch up if we fall behind, but
1086          * it's better to try to match offsets from the beginning.
1087          */
1088         if (usdiff < USEC_PER_SEC &&
1089             vcpu->arch.virtual_tsc_khz == kvm->arch.last_tsc_khz) {
1090                 if (!check_tsc_unstable()) {
1091                         offset = kvm->arch.cur_tsc_offset;
1092                         pr_debug("kvm: matched tsc offset for %llu\n", data);
1093                 } else {
1094                         u64 delta = nsec_to_cycles(vcpu, elapsed);
1095                         data += delta;
1096                         offset = kvm_x86_ops->compute_tsc_offset(vcpu, data);
1097                         pr_debug("kvm: adjusted tsc offset by %llu\n", delta);
1098                 }
1099         } else {
1100                 /*
1101                  * We split periods of matched TSC writes into generations.
1102                  * For each generation, we track the original measured
1103                  * nanosecond time, offset, and write, so if TSCs are in
1104                  * sync, we can match exact offset, and if not, we can match
1105                  * exact software computation in compute_guest_tsc()
1106                  *
1107                  * These values are tracked in kvm->arch.cur_xxx variables.
1108                  */
1109                 kvm->arch.cur_tsc_generation++;
1110                 kvm->arch.cur_tsc_nsec = ns;
1111                 kvm->arch.cur_tsc_write = data;
1112                 kvm->arch.cur_tsc_offset = offset;
1113                 pr_debug("kvm: new tsc generation %u, clock %llu\n",
1114                          kvm->arch.cur_tsc_generation, data);
1115         }
1116
1117         /*
1118          * We also track the most recently recorded kHz, write and time to
1119          * allow the matching interval to be extended at each write.
1120          */
1121         kvm->arch.last_tsc_nsec = ns;
1122         kvm->arch.last_tsc_write = data;
1123         kvm->arch.last_tsc_khz = vcpu->arch.virtual_tsc_khz;
1124
1125         /* Reset of TSC must disable overshoot protection below */
1126         vcpu->arch.hv_clock.tsc_timestamp = 0;
1127         vcpu->arch.last_guest_tsc = data;
1128
1129         /* Keep track of which generation this VCPU has synchronized to */
1130         vcpu->arch.this_tsc_generation = kvm->arch.cur_tsc_generation;
1131         vcpu->arch.this_tsc_nsec = kvm->arch.cur_tsc_nsec;
1132         vcpu->arch.this_tsc_write = kvm->arch.cur_tsc_write;
1133
1134         kvm_x86_ops->write_tsc_offset(vcpu, offset);
1135         raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);
1136 }
1137
1138 EXPORT_SYMBOL_GPL(kvm_write_tsc);
1139
1140 static int kvm_guest_time_update(struct kvm_vcpu *v)
1141 {
1142         unsigned long flags;
1143         struct kvm_vcpu_arch *vcpu = &v->arch;
1144         void *shared_kaddr;
1145         unsigned long this_tsc_khz;
1146         s64 kernel_ns, max_kernel_ns;
1147         u64 tsc_timestamp;
1148         struct pvclock_vcpu_time_info *guest_hv_clock;
1149         u8 pvclock_flags;
1150
1151         /* Keep irq disabled to prevent changes to the clock */
1152         local_irq_save(flags);
1153         tsc_timestamp = kvm_x86_ops->read_l1_tsc(v, native_read_tsc());
1154         kernel_ns = get_kernel_ns();
1155         this_tsc_khz = __get_cpu_var(cpu_tsc_khz);
1156         if (unlikely(this_tsc_khz == 0)) {
1157                 local_irq_restore(flags);
1158                 kvm_make_request(KVM_REQ_CLOCK_UPDATE, v);
1159                 return 1;
1160         }
1161
1162         /*
1163          * We may have to catch up the TSC to match elapsed wall clock
1164          * time for two reasons, even if kvmclock is used.
1165          *   1) CPU could have been running below the maximum TSC rate
1166          *   2) Broken TSC compensation resets the base at each VCPU
1167          *      entry to avoid unknown leaps of TSC even when running
1168          *      again on the same CPU.  This may cause apparent elapsed
1169          *      time to disappear, and the guest to stand still or run
1170          *      very slowly.
1171          */
1172         if (vcpu->tsc_catchup) {
1173                 u64 tsc = compute_guest_tsc(v, kernel_ns);
1174                 if (tsc > tsc_timestamp) {
1175                         adjust_tsc_offset_guest(v, tsc - tsc_timestamp);
1176                         tsc_timestamp = tsc;
1177                 }
1178         }
1179
1180         local_irq_restore(flags);
1181
1182         if (!vcpu->time_page)
1183                 return 0;
1184
1185         /*
1186          * Time as measured by the TSC may go backwards when resetting the base
1187          * tsc_timestamp.  The reason for this is that the TSC resolution is
1188          * higher than the resolution of the other clock scales.  Thus, many
1189          * possible measurements of the TSC correspond to one measurement of any
1190          * other clock, and so a spread of values is possible.  This is not a
1191          * problem for the computation of the nanosecond clock; with TSC rates
1192          * around 1GHZ, there can only be a few cycles which correspond to one
1193          * nanosecond value, and any path through this code will inevitably
1194          * take longer than that.  However, with the kernel_ns value itself,
1195          * the precision may be much lower, down to HZ granularity.  If the
1196          * first sampling of TSC against kernel_ns ends in the low part of the
1197          * range, and the second in the high end of the range, we can get:
1198          *
1199          * (TSC - offset_low) * S + kns_old > (TSC - offset_high) * S + kns_new
1200          *
1201          * As the sampling errors potentially range in the thousands of cycles,
1202          * it is possible such a time value has already been observed by the
1203          * guest.  To protect against this, we must compute the system time as
1204          * observed by the guest and ensure the new system time is greater.
1205          */
1206         max_kernel_ns = 0;
1207         if (vcpu->hv_clock.tsc_timestamp) {
1208                 max_kernel_ns = vcpu->last_guest_tsc -
1209                                 vcpu->hv_clock.tsc_timestamp;
1210                 max_kernel_ns = pvclock_scale_delta(max_kernel_ns,
1211                                     vcpu->hv_clock.tsc_to_system_mul,
1212                                     vcpu->hv_clock.tsc_shift);
1213                 max_kernel_ns += vcpu->last_kernel_ns;
1214         }
1215
1216         if (unlikely(vcpu->hw_tsc_khz != this_tsc_khz)) {
1217                 kvm_get_time_scale(NSEC_PER_SEC / 1000, this_tsc_khz,
1218                                    &vcpu->hv_clock.tsc_shift,
1219                                    &vcpu->hv_clock.tsc_to_system_mul);
1220                 vcpu->hw_tsc_khz = this_tsc_khz;
1221         }
1222
1223         if (max_kernel_ns > kernel_ns)
1224                 kernel_ns = max_kernel_ns;
1225
1226         /* With all the info we got, fill in the values */
1227         vcpu->hv_clock.tsc_timestamp = tsc_timestamp;
1228         vcpu->hv_clock.system_time = kernel_ns + v->kvm->arch.kvmclock_offset;
1229         vcpu->last_kernel_ns = kernel_ns;
1230         vcpu->last_guest_tsc = tsc_timestamp;
1231
1232
1233         /*
1234          * The interface expects us to write an even number signaling that the
1235          * update is finished. Since the guest won't see the intermediate
1236          * state, we just increase by 2 at the end.
1237          */
1238         vcpu->hv_clock.version += 2;
1239
1240         shared_kaddr = kmap_atomic(vcpu->time_page);
1241
1242         guest_hv_clock = shared_kaddr + vcpu->time_offset;
1243
1244         /* retain PVCLOCK_GUEST_STOPPED if set in guest copy */
1245         pvclock_flags = (guest_hv_clock->flags & PVCLOCK_GUEST_STOPPED);
1246
1247         if (vcpu->pvclock_set_guest_stopped_request) {
1248                 pvclock_flags |= PVCLOCK_GUEST_STOPPED;
1249                 vcpu->pvclock_set_guest_stopped_request = false;
1250         }
1251
1252         vcpu->hv_clock.flags = pvclock_flags;
1253
1254         memcpy(shared_kaddr + vcpu->time_offset, &vcpu->hv_clock,
1255                sizeof(vcpu->hv_clock));
1256
1257         kunmap_atomic(shared_kaddr);
1258
1259         mark_page_dirty(v->kvm, vcpu->time >> PAGE_SHIFT);
1260         return 0;
1261 }
1262
1263 static bool msr_mtrr_valid(unsigned msr)
1264 {
1265         switch (msr) {
1266         case 0x200 ... 0x200 + 2 * KVM_NR_VAR_MTRR - 1:
1267         case MSR_MTRRfix64K_00000:
1268         case MSR_MTRRfix16K_80000:
1269         case MSR_MTRRfix16K_A0000:
1270         case MSR_MTRRfix4K_C0000:
1271         case MSR_MTRRfix4K_C8000:
1272         case MSR_MTRRfix4K_D0000:
1273         case MSR_MTRRfix4K_D8000:
1274         case MSR_MTRRfix4K_E0000:
1275         case MSR_MTRRfix4K_E8000:
1276         case MSR_MTRRfix4K_F0000:
1277         case MSR_MTRRfix4K_F8000:
1278         case MSR_MTRRdefType:
1279         case MSR_IA32_CR_PAT:
1280                 return true;
1281         case 0x2f8:
1282                 return true;
1283         }
1284         return false;
1285 }
1286
1287 static bool valid_pat_type(unsigned t)
1288 {
1289         return t < 8 && (1 << t) & 0xf3; /* 0, 1, 4, 5, 6, 7 */
1290 }
1291
1292 static bool valid_mtrr_type(unsigned t)
1293 {
1294         return t < 8 && (1 << t) & 0x73; /* 0, 1, 4, 5, 6 */
1295 }
1296
1297 static bool mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data)
1298 {
1299         int i;
1300
1301         if (!msr_mtrr_valid(msr))
1302                 return false;
1303
1304         if (msr == MSR_IA32_CR_PAT) {
1305                 for (i = 0; i < 8; i++)
1306                         if (!valid_pat_type((data >> (i * 8)) & 0xff))
1307                                 return false;
1308                 return true;
1309         } else if (msr == MSR_MTRRdefType) {
1310                 if (data & ~0xcff)
1311                         return false;
1312                 return valid_mtrr_type(data & 0xff);
1313         } else if (msr >= MSR_MTRRfix64K_00000 && msr <= MSR_MTRRfix4K_F8000) {
1314                 for (i = 0; i < 8 ; i++)
1315                         if (!valid_mtrr_type((data >> (i * 8)) & 0xff))
1316                                 return false;
1317                 return true;
1318         }
1319
1320         /* variable MTRRs */
1321         return valid_mtrr_type(data & 0xff);
1322 }
1323
1324 static int set_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
1325 {
1326         u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges;
1327
1328         if (!mtrr_valid(vcpu, msr, data))
1329                 return 1;
1330
1331         if (msr == MSR_MTRRdefType) {
1332                 vcpu->arch.mtrr_state.def_type = data;
1333                 vcpu->arch.mtrr_state.enabled = (data & 0xc00) >> 10;
1334         } else if (msr == MSR_MTRRfix64K_00000)
1335                 p[0] = data;
1336         else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000)
1337                 p[1 + msr - MSR_MTRRfix16K_80000] = data;
1338         else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000)
1339                 p[3 + msr - MSR_MTRRfix4K_C0000] = data;
1340         else if (msr == MSR_IA32_CR_PAT)
1341                 vcpu->arch.pat = data;
1342         else {  /* Variable MTRRs */
1343                 int idx, is_mtrr_mask;
1344                 u64 *pt;
1345
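                /*
                 * Variable-range MTRRs come in base/mask pairs starting at
                 * MSR 0x200: even MSR numbers are MTRRphysBasen, odd ones
                 * MTRRphysMaskn.
                 */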
1346                 idx = (msr - 0x200) / 2;
1347                 is_mtrr_mask = msr - 0x200 - 2 * idx;
1348                 if (!is_mtrr_mask)
1349                         pt =
1350                           (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo;
1351                 else
1352                         pt =
1353                           (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo;
1354                 *pt = data;
1355         }
1356
1357         kvm_mmu_reset_context(vcpu);
1358         return 0;
1359 }
1360
1361 static int set_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 data)
1362 {
1363         u64 mcg_cap = vcpu->arch.mcg_cap;
1364         unsigned bank_num = mcg_cap & 0xff;
1365
1366         switch (msr) {
1367         case MSR_IA32_MCG_STATUS:
1368                 vcpu->arch.mcg_status = data;
1369                 break;
1370         case MSR_IA32_MCG_CTL:
1371                 if (!(mcg_cap & MCG_CTL_P))
1372                         return 1;
1373                 if (data != 0 && data != ~(u64)0)
1374                         return -1;
1375                 vcpu->arch.mcg_ctl = data;
1376                 break;
1377         default:
1378                 if (msr >= MSR_IA32_MC0_CTL &&
1379                     msr < MSR_IA32_MC0_CTL + 4 * bank_num) {
1380                         u32 offset = msr - MSR_IA32_MC0_CTL;
1381                         /* Only 0 or all 1s can be written to IA32_MCi_CTL.
1382                          * Some Linux kernels, though, clear bit 10 in bank 4 to
1383                          * work around a BIOS/GART TBL issue on AMD K8s; ignore
1384                          * this to avoid an uncaught #GP in the guest.
1385                          */
1386                         if ((offset & 0x3) == 0 &&
1387                             data != 0 && (data | (1 << 10)) != ~(u64)0)
1388                                 return -1;
1389                         vcpu->arch.mce_banks[offset] = data;
1390                         break;
1391                 }
1392                 return 1;
1393         }
1394         return 0;
1395 }
1396
1397 static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
1398 {
1399         struct kvm *kvm = vcpu->kvm;
1400         int lm = is_long_mode(vcpu);
1401         u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
1402                 : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
1403         u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
1404                 : kvm->arch.xen_hvm_config.blob_size_32;
1405         u32 page_num = data & ~PAGE_MASK;
1406         u64 page_addr = data & PAGE_MASK;
1407         u8 *page;
1408         int r;
1409
1410         r = -E2BIG;
1411         if (page_num >= blob_size)
1412                 goto out;
1413         r = -ENOMEM;
1414         page = memdup_user(blob_addr + (page_num * PAGE_SIZE), PAGE_SIZE);
1415         if (IS_ERR(page)) {
1416                 r = PTR_ERR(page);
1417                 goto out;
1418         }
1419         if (kvm_write_guest(kvm, page_addr, page, PAGE_SIZE))
1420                 goto out_free;
1421         r = 0;
1422 out_free:
1423         kfree(page);
1424 out:
1425         return r;
1426 }
1427
1428 static bool kvm_hv_hypercall_enabled(struct kvm *kvm)
1429 {
1430         return kvm->arch.hv_hypercall & HV_X64_MSR_HYPERCALL_ENABLE;
1431 }
1432
1433 static bool kvm_hv_msr_partition_wide(u32 msr)
1434 {
1435         bool r = false;
1436         switch (msr) {
1437         case HV_X64_MSR_GUEST_OS_ID:
1438         case HV_X64_MSR_HYPERCALL:
1439                 r = true;
1440                 break;
1441         }
1442
1443         return r;
1444 }
1445
1446 static int set_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data)
1447 {
1448         struct kvm *kvm = vcpu->kvm;
1449
1450         switch (msr) {
1451         case HV_X64_MSR_GUEST_OS_ID:
1452                 kvm->arch.hv_guest_os_id = data;
1453                 /* setting guest os id to zero disables hypercall page */
1454                 if (!kvm->arch.hv_guest_os_id)
1455                         kvm->arch.hv_hypercall &= ~HV_X64_MSR_HYPERCALL_ENABLE;
1456                 break;
1457         case HV_X64_MSR_HYPERCALL: {
1458                 u64 gfn;
1459                 unsigned long addr;
1460                 u8 instructions[4];
1461
1462                 /* if the guest OS id is not set, the hypercall should remain disabled */
1463                 if (!kvm->arch.hv_guest_os_id)
1464                         break;
1465                 if (!(data & HV_X64_MSR_HYPERCALL_ENABLE)) {
1466                         kvm->arch.hv_hypercall = data;
1467                         break;
1468                 }
1469                 gfn = data >> HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT;
1470                 addr = gfn_to_hva(kvm, gfn);
1471                 if (kvm_is_error_hva(addr))
1472                         return 1;
1473                 kvm_x86_ops->patch_hypercall(vcpu, instructions);
1474                 ((unsigned char *)instructions)[3] = 0xc3; /* ret */
1475                 if (__copy_to_user((void __user *)addr, instructions, 4))
1476                         return 1;
1477                 kvm->arch.hv_hypercall = data;
1478                 break;
1479         }
1480         default:
1481                 vcpu_unimpl(vcpu, "HYPER-V unimplemented wrmsr: 0x%x "
1482                             "data 0x%llx\n", msr, data);
1483                 return 1;
1484         }
1485         return 0;
1486 }
1487
1488 static int set_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 data)
1489 {
1490         switch (msr) {
1491         case HV_X64_MSR_APIC_ASSIST_PAGE: {
1492                 unsigned long addr;
1493
1494                 if (!(data & HV_X64_MSR_APIC_ASSIST_PAGE_ENABLE)) {
1495                         vcpu->arch.hv_vapic = data;
1496                         break;
1497                 }
1498                 addr = gfn_to_hva(vcpu->kvm, data >>
1499                                   HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT);
1500                 if (kvm_is_error_hva(addr))
1501                         return 1;
1502                 if (__clear_user((void __user *)addr, PAGE_SIZE))
1503                         return 1;
1504                 vcpu->arch.hv_vapic = data;
1505                 break;
1506         }
1507         case HV_X64_MSR_EOI:
1508                 return kvm_hv_vapic_msr_write(vcpu, APIC_EOI, data);
1509         case HV_X64_MSR_ICR:
1510                 return kvm_hv_vapic_msr_write(vcpu, APIC_ICR, data);
1511         case HV_X64_MSR_TPR:
1512                 return kvm_hv_vapic_msr_write(vcpu, APIC_TASKPRI, data);
1513         default:
1514                 vcpu_unimpl(vcpu, "HYPER-V unimplemented wrmsr: 0x%x "
1515                             "data 0x%llx\n", msr, data);
1516                 return 1;
1517         }
1518
1519         return 0;
1520 }
1521
1522 static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
1523 {
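        /*
         * In MSR_KVM_ASYNC_PF_EN, bit 0 enables async page faults, bit 1
         * asks for delivery even while in kernel mode, and the 64-byte
         * aligned remainder is the GPA of the per-vcpu APF data area.
         */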
1524         gpa_t gpa = data & ~0x3f;
1525
1526         /* Bits 2:5 are reserved and should be zero */
1527         if (data & 0x3c)
1528                 return 1;
1529
1530         vcpu->arch.apf.msr_val = data;
1531
1532         if (!(data & KVM_ASYNC_PF_ENABLED)) {
1533                 kvm_clear_async_pf_completion_queue(vcpu);
1534                 kvm_async_pf_hash_reset(vcpu);
1535                 return 0;
1536         }
1537
1538         if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa))
1539                 return 1;
1540
1541         vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS);
1542         kvm_async_pf_wakeup_all(vcpu);
1543         return 0;
1544 }
1545
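     /* Drop the cached kvmclock guest page, if one was mapped. */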
1546 static void kvmclock_reset(struct kvm_vcpu *vcpu)
1547 {
1548         if (vcpu->arch.time_page) {
1549                 kvm_release_page_dirty(vcpu->arch.time_page);
1550                 vcpu->arch.time_page = NULL;
1551         }
1552 }
1553
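     /*
      * Fold the run_delay accumulated since the last sample into
      * accum_steal; record_steal_time() below pushes it out to the
      * guest's steal-time area.
      */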
1554 static void accumulate_steal_time(struct kvm_vcpu *vcpu)
1555 {
1556         u64 delta;
1557
1558         if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
1559                 return;
1560
1561         delta = current->sched_info.run_delay - vcpu->arch.st.last_steal;
1562         vcpu->arch.st.last_steal = current->sched_info.run_delay;
1563         vcpu->arch.st.accum_steal = delta;
1564 }
1565
1566 static void record_steal_time(struct kvm_vcpu *vcpu)
1567 {
1568         if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
1569                 return;
1570
1571         if (unlikely(kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
1572                 &vcpu->arch.st.steal, sizeof(struct kvm_steal_time))))
1573                 return;
1574
1575         vcpu->arch.st.steal.steal += vcpu->arch.st.accum_steal;
1576         vcpu->arch.st.steal.version += 2;
1577         vcpu->arch.st.accum_steal = 0;
1578
1579         kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
1580                 &vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
1581 }
1582
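     /*
      * Generic WRMSR handler used by the vendor modules for MSRs that need
      * no VMX/SVM-specific treatment.  Returns 0 if the write was handled
      * and a non-zero value if it should fail.
      */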
1583 int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
1584 {
1585         bool pr = false;
1586
1587         switch (msr) {
1588         case MSR_EFER:
1589                 return set_efer(vcpu, data);
1590         case MSR_K7_HWCR:
1591                 data &= ~(u64)0x40;     /* ignore flush filter disable */
1592                 data &= ~(u64)0x100;    /* ignore ignne emulation enable */
1593                 data &= ~(u64)0x8;      /* ignore TLB cache disable */
1594                 if (data != 0) {
1595                         vcpu_unimpl(vcpu, "unimplemented HWCR wrmsr: 0x%llx\n",
1596                                     data);
1597                         return 1;
1598                 }
1599                 break;
1600         case MSR_FAM10H_MMIO_CONF_BASE:
1601                 if (data != 0) {
1602                         vcpu_unimpl(vcpu, "unimplemented MMIO_CONF_BASE wrmsr: "
1603                                     "0x%llx\n", data);
1604                         return 1;
1605                 }
1606                 break;
1607         case MSR_AMD64_NB_CFG:
1608                 break;
1609         case MSR_IA32_DEBUGCTLMSR:
1610                 if (!data) {
1611                         /* We support the non-activated case already */
1612                         break;
1613                 } else if (data & ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_BTF)) {
1614                         /* Values other than LBR and BTF are vendor-specific,
1615                            thus reserved and should throw a #GP */
1616                         return 1;
1617                 }
1618                 vcpu_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTLMSR 0x%llx, nop\n",
1619                             __func__, data);
1620                 break;
1621         case MSR_IA32_UCODE_REV:
1622         case MSR_IA32_UCODE_WRITE:
1623         case MSR_VM_HSAVE_PA:
1624         case MSR_AMD64_PATCH_LOADER:
1625                 break;
1626         case 0x200 ... 0x2ff:
1627                 return set_msr_mtrr(vcpu, msr, data);
1628         case MSR_IA32_APICBASE:
1629                 kvm_set_apic_base(vcpu, data);
1630                 break;
1631         case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
1632                 return kvm_x2apic_msr_write(vcpu, msr, data);
1633         case MSR_IA32_TSCDEADLINE:
1634                 kvm_set_lapic_tscdeadline_msr(vcpu, data);
1635                 break;
1636         case MSR_IA32_MISC_ENABLE:
1637                 vcpu->arch.ia32_misc_enable_msr = data;
1638                 break;
1639         case MSR_KVM_WALL_CLOCK_NEW:
1640         case MSR_KVM_WALL_CLOCK:
1641                 vcpu->kvm->arch.wall_clock = data;
1642                 kvm_write_wall_clock(vcpu->kvm, data);
1643                 break;
1644         case MSR_KVM_SYSTEM_TIME_NEW:
1645         case MSR_KVM_SYSTEM_TIME: {
1646                 kvmclock_reset(vcpu);
1647
1648                 vcpu->arch.time = data;
1649                 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
1650
1651                 /* check whether the enable bit is set... */
1652                 if (!(data & 1))
1653                         break;
1654
1655                 /* ...but clear it before doing the actual write */
1656                 vcpu->arch.time_offset = data & ~(PAGE_MASK | 1);
1657
1658                 vcpu->arch.time_page =
1659                                 gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT);
1660
1661                 if (is_error_page(vcpu->arch.time_page))
1662                         vcpu->arch.time_page = NULL;
1663
1664                 break;
1665         }
1666         case MSR_KVM_ASYNC_PF_EN:
1667                 if (kvm_pv_enable_async_pf(vcpu, data))
1668                         return 1;
1669                 break;
1670         case MSR_KVM_STEAL_TIME:
1671
1672                 if (unlikely(!sched_info_on()))
1673                         return 1;
1674
1675                 if (data & KVM_STEAL_RESERVED_MASK)
1676                         return 1;
1677
1678                 if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.st.stime,
1679                                                         data & KVM_STEAL_VALID_BITS))
1680                         return 1;
1681
1682                 vcpu->arch.st.msr_val = data;
1683
1684                 if (!(data & KVM_MSR_ENABLED))
1685                         break;
1686
1687                 vcpu->arch.st.last_steal = current->sched_info.run_delay;
1688
1689                 preempt_disable();
1690                 accumulate_steal_time(vcpu);
1691                 preempt_enable();
1692
1693                 kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
1694
1695                 break;
1696         case MSR_KVM_PV_EOI_EN:
1697                 if (kvm_lapic_enable_pv_eoi(vcpu, data))
1698                         return 1;
1699                 break;
1700
1701         case MSR_IA32_MCG_CTL:
1702         case MSR_IA32_MCG_STATUS:
1703         case MSR_IA32_MC0_CTL ... MSR_IA32_MC0_CTL + 4 * KVM_MAX_MCE_BANKS - 1:
1704                 return set_msr_mce(vcpu, msr, data);
1705
1706         /* Performance counters are not protected by a CPUID bit,
1707          * so we should check all of them in the generic path for the sake of
1708          * cross-vendor migration.
1709          * Writing a zero into the event select MSRs disables them,
1710          * which we emulate perfectly ;-). Any other value should at least
1711          * be reported, since some guests depend on these MSRs.
1712          */
1713         case MSR_K7_EVNTSEL0:
1714         case MSR_K7_EVNTSEL1:
1715         case MSR_K7_EVNTSEL2:
1716         case MSR_K7_EVNTSEL3:
1717                 if (data != 0)
1718                         vcpu_unimpl(vcpu, "unimplemented perfctr wrmsr: "
1719                                     "0x%x data 0x%llx\n", msr, data);
1720                 break;
1721         /* at least RHEL 4 unconditionally writes to the perfctr registers,
1722          * so we ignore writes to make it happy.
1723          */
1724         case MSR_K7_PERFCTR0:
1725         case MSR_K7_PERFCTR1:
1726         case MSR_K7_PERFCTR2:
1727         case MSR_K7_PERFCTR3:
1728                 vcpu_unimpl(vcpu, "unimplemented perfctr wrmsr: "
1729                             "0x%x data 0x%llx\n", msr, data);
1730                 break;
1731         case MSR_P6_PERFCTR0:
1732         case MSR_P6_PERFCTR1:
1733                 pr = true;
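                     /* fall through: the EVNTSEL cases below also handle these PERFCTR MSRs */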
1734         case MSR_P6_EVNTSEL0:
1735         case MSR_P6_EVNTSEL1:
1736                 if (kvm_pmu_msr(vcpu, msr))
1737                         return kvm_pmu_set_msr(vcpu, msr, data);
1738
1739                 if (pr || data != 0)
1740                         vcpu_unimpl(vcpu, "disabled perfctr wrmsr: "
1741                                     "0x%x data 0x%llx\n", msr, data);
1742                 break;
1743         case MSR_K7_CLK_CTL:
1744                 /*
1745                  * Ignore all writes to this no-longer-documented MSR.
1746                  * The writes only matter for old K7 processors, all of which
1747                  * pre-date SVM, but they are a workaround that AMD recommended
1748                  * for those chips. Because the affected processor models can be
1749                  * specified on the command line, a guest may attempt the
1750                  * workaround here too, so the write simply has to be ignored.
1751                  */
1752                 break;
1753         case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
1754                 if (kvm_hv_msr_partition_wide(msr)) {
1755                         int r;
1756                         mutex_lock(&vcpu->kvm->lock);
1757                         r = set_msr_hyperv_pw(vcpu, msr, data);
1758                         mutex_unlock(&vcpu->kvm->lock);
1759                         return r;
1760                 } else
1761                         return set_msr_hyperv(vcpu, msr, data);
1762                 break;
1763         case MSR_IA32_BBL_CR_CTL3:
1764                 /* Drop writes to this legacy MSR -- see rdmsr
1765                  * counterpart for further detail.
1766                  */
1767                 vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data %llx\n", msr, data);
1768                 break;
1769         case MSR_AMD64_OSVW_ID_LENGTH:
1770                 if (!guest_cpuid_has_osvw(vcpu))
1771                         return 1;
1772                 vcpu->arch.osvw.length = data;
1773                 break;
1774         case MSR_AMD64_OSVW_STATUS:
1775                 if (!guest_cpuid_has_osvw(vcpu))
1776                         return 1;
1777                 vcpu->arch.osvw.status = data;
1778                 break;
1779         default:
1780                 if (msr && (msr == vcpu->kvm->arch.xen_hvm_config.msr))
1781                         return xen_hvm_config(vcpu, data);
1782                 if (kvm_pmu_msr(vcpu, msr))
1783                         return kvm_pmu_set_msr(vcpu, msr, data);
1784                 if (!ignore_msrs) {
1785                         vcpu_unimpl(vcpu, "unhandled wrmsr: 0x%x data %llx\n",
1786                                     msr, data);
1787                         return 1;
1788                 } else {
1789                         vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data %llx\n",
1790                                     msr, data);
1791                         break;
1792                 }
1793         }
1794         return 0;
1795 }
1796 EXPORT_SYMBOL_GPL(kvm_set_msr_common);
1797
1798
1799 /*
1800  * Reads an msr value (of 'msr_index') into 'pdata'.
1801  * Returns 0 on success, non-0 otherwise.
1802  * Assumes vcpu_load() was already called.
1803  */
1804 int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
1805 {
1806         return kvm_x86_ops->get_msr(vcpu, msr_index, pdata);
1807 }
1808
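     /*
      * RDMSR handler for the fixed, variable and default-type MTRRs plus
      * IA32_CR_PAT; the layout mirrors set_msr_mtrr().
      */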
1809 static int get_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
1810 {
1811         u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges;
1812
1813         if (!msr_mtrr_valid(msr))
1814                 return 1;
1815
1816         if (msr == MSR_MTRRdefType)
1817                 *pdata = vcpu->arch.mtrr_state.def_type +
1818                          (vcpu->arch.mtrr_state.enabled << 10);
1819         else if (msr == MSR_MTRRfix64K_00000)
1820                 *pdata = p[0];
1821         else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000)
1822                 *pdata = p[1 + msr - MSR_MTRRfix16K_80000];
1823         else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000)
1824                 *pdata = p[3 + msr - MSR_MTRRfix4K_C0000];
1825         else if (msr == MSR_IA32_CR_PAT)
1826                 *pdata = vcpu->arch.pat;
1827         else {  /* Variable MTRRs */
1828                 int idx, is_mtrr_mask;
1829                 u64 *pt;
1830
1831                 idx = (msr - 0x200) / 2;
1832                 is_mtrr_mask = msr - 0x200 - 2 * idx;
1833                 if (!is_mtrr_mask)
1834                         pt =
1835                           (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo;
1836                 else
1837                         pt =
1838                           (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo;
1839                 *pdata = *pt;
1840         }
1841
1842         return 0;
1843 }
1844
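     /* RDMSR handler for the machine-check MSRs (MCG_* and the per-bank registers). */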
1845 static int get_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
1846 {
1847         u64 data;
1848         u64 mcg_cap = vcpu->arch.mcg_cap;
1849         unsigned bank_num = mcg_cap & 0xff;
1850
1851         switch (msr) {
1852         case MSR_IA32_P5_MC_ADDR:
1853         case MSR_IA32_P5_MC_TYPE:
1854                 data = 0;
1855                 break;
1856         case MSR_IA32_MCG_CAP:
1857                 data = vcpu->arch.mcg_cap;
1858                 break;
1859         case MSR_IA32_MCG_CTL:
1860                 if (!(mcg_cap & MCG_CTL_P))
1861                         return 1;
1862                 data = vcpu->arch.mcg_ctl;
1863                 break;
1864         case MSR_IA32_MCG_STATUS:
1865                 data = vcpu->arch.mcg_status;
1866                 break;
1867         default:
1868                 if (msr >= MSR_IA32_MC0_CTL &&
1869                     msr < MSR_IA32_MC0_CTL + 4 * bank_num) {
1870                         u32 offset = msr - MSR_IA32_MC0_CTL;
1871                         data = vcpu->arch.mce_banks[offset];
1872                         break;
1873                 }
1874                 return 1;
1875         }
1876         *pdata = data;
1877         return 0;
1878 }
1879
1880 static int get_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
1881 {
1882         u64 data = 0;
1883         struct kvm *kvm = vcpu->kvm;
1884
1885         switch (msr) {
1886         case HV_X64_MSR_GUEST_OS_ID:
1887                 data = kvm->arch.hv_guest_os_id;
1888                 break;
1889         case HV_X64_MSR_HYPERCALL:
1890                 data = kvm->arch.hv_hypercall;
1891                 break;
1892         default:
1893                 vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
1894                 return 1;
1895         }
1896
1897         *pdata = data;
1898         return 0;
1899 }
1900
1901 static int get_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
1902 {
1903         u64 data = 0;
1904
1905         switch (msr) {
1906         case HV_X64_MSR_VP_INDEX: {
1907                 int r;
1908                 struct kvm_vcpu *v;
1909                 kvm_for_each_vcpu(r, v, vcpu->kvm)
1910                         if (v == vcpu)
1911                                 data = r;
1912                 break;
1913         }
1914         case HV_X64_MSR_EOI:
1915                 return kvm_hv_vapic_msr_read(vcpu, APIC_EOI, pdata);
1916         case HV_X64_MSR_ICR:
1917                 return kvm_hv_vapic_msr_read(vcpu, APIC_ICR, pdata);
1918         case HV_X64_MSR_TPR:
1919                 return kvm_hv_vapic_msr_read(vcpu, APIC_TASKPRI, pdata);
1920         case HV_X64_MSR_APIC_ASSIST_PAGE:
1921                 data = vcpu->arch.hv_vapic;
1922                 break;
1923         default:
1924                 vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
1925                 return 1;
1926         }
1927         *pdata = data;
1928         return 0;
1929 }
1930
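     /*
      * Generic RDMSR counterpart of kvm_set_msr_common(): fills *pdata and
      * returns 0 on success, non-zero if the read should fail.
      */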
1931 int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
1932 {
1933         u64 data;
1934
1935         switch (msr) {
1936         case MSR_IA32_PLATFORM_ID:
1937         case MSR_IA32_EBL_CR_POWERON:
1938         case MSR_IA32_DEBUGCTLMSR:
1939         case MSR_IA32_LASTBRANCHFROMIP:
1940         case MSR_IA32_LASTBRANCHTOIP:
1941         case MSR_IA32_LASTINTFROMIP:
1942         case MSR_IA32_LASTINTTOIP:
1943         case MSR_K8_SYSCFG:
1944         case MSR_K7_HWCR:
1945         case MSR_VM_HSAVE_PA:
1946         case MSR_K7_EVNTSEL0:
1947         case MSR_K7_PERFCTR0:
1948         case MSR_K8_INT_PENDING_MSG:
1949         case MSR_AMD64_NB_CFG:
1950         case MSR_FAM10H_MMIO_CONF_BASE:
1951                 data = 0;
1952                 break;
1953         case MSR_P6_PERFCTR0:
1954         case MSR_P6_PERFCTR1:
1955         case MSR_P6_EVNTSEL0:
1956         case MSR_P6_EVNTSEL1:
1957                 if (kvm_pmu_msr(vcpu, msr))
1958                         return kvm_pmu_get_msr(vcpu, msr, pdata);
1959                 data = 0;
1960                 break;
1961         case MSR_IA32_UCODE_REV:
1962                 data = 0x100000000ULL;
1963                 break;
1964         case MSR_MTRRcap:
1965                 data = 0x500 | KVM_NR_VAR_MTRR;
1966                 break;
1967         case 0x200 ... 0x2ff:
1968                 return get_msr_mtrr(vcpu, msr, pdata);
1969         case 0xcd: /* fsb frequency */
1970                 data = 3;
1971                 break;
1972                 /*
1973                  * MSR_EBC_FREQUENCY_ID
1974                  * A conservative value that is valid even for the basic CPU
1975                  * models. For models 0 and 1, 000 in bits 23:21 indicates a
1976                  * bus speed of 100MHz; for model 2, 000 in bits 18:16
1977                  * indicates 100MHz; and models 3 and 4 use 266MHz. Set the
1978                  * Core Clock Frequency to System Bus Frequency ratio (bits
1979                  * 31:24) to 1 even though it is only valid for CPU models
1980                  * > 2; otherwise guests may end up dividing or multiplying
1981                  * by zero.
1982                  */
1983         case MSR_EBC_FREQUENCY_ID:
1984                 data = 1 << 24;
1985                 break;
1986         case MSR_IA32_APICBASE:
1987                 data = kvm_get_apic_base(vcpu);
1988                 break;
1989         case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
1990                 return kvm_x2apic_msr_read(vcpu, msr, pdata);
1991                 break;
1992         case MSR_IA32_TSCDEADLINE:
1993                 data = kvm_get_lapic_tscdeadline_msr(vcpu);
1994                 break;
1995         case MSR_IA32_MISC_ENABLE:
1996                 data = vcpu->arch.ia32_misc_enable_msr;
1997                 break;
1998         case MSR_IA32_PERF_STATUS:
1999                 /* TSC increment by tick */
2000                 data = 1000ULL;
2001                 /* CPU multiplier */
2002                 data |= (((uint64_t)4ULL) << 40);
2003                 break;
2004         case MSR_EFER:
2005                 data = vcpu->arch.efer;
2006                 break;
2007         case MSR_KVM_WALL_CLOCK:
2008         case MSR_KVM_WALL_CLOCK_NEW:
2009                 data = vcpu->kvm->arch.wall_clock;
2010                 break;
2011         case MSR_KVM_SYSTEM_TIME:
2012         case MSR_KVM_SYSTEM_TIME_NEW:
2013                 data = vcpu->arch.time;
2014                 break;
2015         case MSR_KVM_ASYNC_PF_EN:
2016                 data = vcpu->arch.apf.msr_val;
2017                 break;
2018         case MSR_KVM_STEAL_TIME:
2019                 data = vcpu->arch.st.msr_val;
2020                 break;
2021         case MSR_KVM_PV_EOI_EN:
2022                 data = vcpu->arch.pv_eoi.msr_val;
2023                 break;
2024         case MSR_IA32_P5_MC_ADDR:
2025         case MSR_IA32_P5_MC_TYPE:
2026         case MSR_IA32_MCG_CAP:
2027         case MSR_IA32_MCG_CTL:
2028         case MSR_IA32_MCG_STATUS:
2029         case MSR_IA32_MC0_CTL ... MSR_IA32_MC0_CTL + 4 * KVM_MAX_MCE_BANKS - 1:
2030                 return get_msr_mce(vcpu, msr, pdata);
2031         case MSR_K7_CLK_CTL:
2032                 /*
2033                  * Provide the expected ramp-up count for K7. All other
2034                  * fields are set to zero, indicating minimum divisors
2035                  * for every field.
2036                  *
2037                  * This prevents guest kernels on an AMD host with CPU
2038                  * type 6, model 8 and higher from crashing because the
2039                  * rdmsr fails.
2040                  */
2041                 data = 0x20000000;
2042                 break;
2043         case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
2044                 if (kvm_hv_msr_partition_wide(msr)) {
2045                         int r;
2046                         mutex_lock(&vcpu->kvm->lock);
2047                         r = get_msr_hyperv_pw(vcpu, msr, pdata);
2048                         mutex_unlock(&vcpu->kvm->lock);
2049                         return r;
2050                 } else
2051                         return get_msr_hyperv(vcpu, msr, pdata);
2052                 break;
2053         case MSR_IA32_BBL_CR_CTL3:
2054                 /* This legacy MSR exists but isn't fully documented in current
2055                  * silicon.  It is, however, accessed by Windows XP in very
2056                  * narrow scenarios where it sets bit #19, itself documented
2057                  * as a "reserved" bit.  Make a best-effort attempt to return
2058                  * coherent read data here in case the guest interprets the
2059                  * rest of the register:
2060                  *
2061                  * L2 cache control register 3: 64GB range, 256KB size,
2062                  * enabled, latency 0x1, configured
2063                  */
2064                 data = 0xbe702111;
2065                 break;
2066         case MSR_AMD64_OSVW_ID_LENGTH:
2067                 if (!guest_cpuid_has_osvw(vcpu))
2068                         return 1;
2069                 data = vcpu->arch.osvw.length;
2070                 break;
2071         case MSR_AMD64_OSVW_STATUS:
2072                 if (!guest_cpuid_has_osvw(vcpu))
2073                         return 1;
2074                 data = vcpu->arch.osvw.status;
2075                 break;
2076         default:
2077                 if (kvm_pmu_msr(vcpu, msr))
2078                         return kvm_pmu_get_msr(vcpu, msr, pdata);
2079                 if (!ignore_msrs) {
2080                         vcpu_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr);
2081                         return 1;
2082                 } else {
2083                         vcpu_unimpl(vcpu, "ignored rdmsr: 0x%x\n", msr);
2084                         data = 0;
2085                 }
2086                 break;
2087         }
2088         *pdata = data;
2089         return 0;
2090 }
2091 EXPORT_SYMBOL_GPL(kvm_get_msr_common);
2092
2093 /*
2094  * Read or write a bunch of MSRs. All parameters are kernel addresses.
2095  *
2096  * @return number of MSRs processed successfully.
2097  */
2098 static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs,
2099                     struct kvm_msr_entry *entries,
2100                     int (*do_msr)(struct kvm_vcpu *vcpu,
2101                                   unsigned index, u64 *data))
2102 {
2103         int i, idx;
2104
2105         idx = srcu_read_lock(&vcpu->kvm->srcu);
2106         for (i = 0; i < msrs->nmsrs; ++i)
2107                 if (do_msr(vcpu, entries[i].index, &entries[i].data))
2108                         break;
2109         srcu_read_unlock(&vcpu->kvm->srcu, idx);
2110
2111         return i;
2112 }
2113
2114 /*
2115  * Read or write a bunch of MSRs. Parameters are user addresses.
2116  *
2117  * @return number of MSRs processed successfully, or a negative errno.
2118  */
2119 static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs,
2120                   int (*do_msr)(struct kvm_vcpu *vcpu,
2121                                 unsigned index, u64 *data),
2122                   int writeback)
2123 {
2124         struct kvm_msrs msrs;
2125         struct kvm_msr_entry *entries;
2126         int r, n;
2127         unsigned size;
2128
2129         r = -EFAULT;
2130         if (copy_from_user(&msrs, user_msrs, sizeof msrs))
2131                 goto out;
2132
2133         r = -E2BIG;
2134         if (msrs.nmsrs >= MAX_IO_MSRS)
2135                 goto out;
2136
2137         size = sizeof(struct kvm_msr_entry) * msrs.nmsrs;
2138         entries = memdup_user(user_msrs->entries, size);
2139         if (IS_ERR(entries)) {
2140                 r = PTR_ERR(entries);
2141                 goto out;
2142         }
2143
2144         r = n = __msr_io(vcpu, &msrs, entries, do_msr);
2145         if (r < 0)
2146                 goto out_free;
2147
2148         r = -EFAULT;
2149         if (writeback && copy_to_user(user_msrs->entries, entries, size))
2150                 goto out_free;
2151
2152         r = n;
2153
2154 out_free:
2155         kfree(entries);
2156 out:
2157         return r;
2158 }
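
     /*
      * Illustrative sketch, not part of the original file: userspace reaches
      * msr_io() through the KVM_GET_MSRS/KVM_SET_MSRS vcpu ioctls, passing a
      * struct kvm_msrs header followed by nmsrs kvm_msr_entry slots.  Reading
      * one MSR (here MSR_EFER, 0xc0000080) looks roughly like:
      *
      *	struct {
      *		struct kvm_msrs hdr;
      *		struct kvm_msr_entry entries[1];
      *	} buf = { .hdr.nmsrs = 1, .entries[0].index = 0xc0000080 };
      *
      *	ioctl(vcpu_fd, KVM_GET_MSRS, &buf);
      *	buf.entries[0].data now holds the guest's EFER value.
      */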
2159
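     /*
      * Backs the KVM_CHECK_EXTENSION ioctl: report which optional
      * capabilities this kernel supports, either as a boolean or as a
      * capability-specific value such as a maximum count.
      */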
2160 int kvm_dev_ioctl_check_extension(long ext)
2161 {
2162         int r;
2163
2164         switch (ext) {
2165         case KVM_CAP_IRQCHIP:
2166         case KVM_CAP_HLT:
2167         case KVM_CAP_MMU_SHADOW_CACHE_CONTROL:
2168         case KVM_CAP_SET_TSS_ADDR:
2169         case KVM_CAP_EXT_CPUID:
2170         case KVM_CAP_CLOCKSOURCE:
2171         case KVM_CAP_PIT:
2172         case KVM_CAP_NOP_IO_DELAY:
2173         case KVM_CAP_MP_STATE:
2174         case KVM_CAP_SYNC_MMU:
2175         case KVM_CAP_USER_NMI:
2176         case KVM_CAP_REINJECT_CONTROL:
2177         case KVM_CAP_IRQ_INJECT_STATUS:
2178         case KVM_CAP_ASSIGN_DEV_IRQ:
2179         case KVM_CAP_IRQFD:
2180         case KVM_CAP_IOEVENTFD:
2181         case KVM_CAP_PIT2:
2182         case KVM_CAP_PIT_STATE2:
2183         case KVM_CAP_SET_IDENTITY_MAP_ADDR:
2184         case KVM_CAP_XEN_HVM:
2185         case KVM_CAP_ADJUST_CLOCK:
2186         case KVM_CAP_VCPU_EVENTS:
2187         case KVM_CAP_HYPERV:
2188         case KVM_CAP_HYPERV_VAPIC:
2189         case KVM_CAP_HYPERV_SPIN:
2190         case KVM_CAP_PCI_SEGMENT:
2191         case KVM_CAP_DEBUGREGS:
2192         case KVM_CAP_X86_ROBUST_SINGLESTEP:
2193         case KVM_CAP_XSAVE:
2194         case KVM_CAP_ASYNC_PF:
2195         case KVM_CAP_GET_TSC_KHZ:
2196         case KVM_CAP_PCI_2_3:
2197         case KVM_CAP_KVMCLOCK_CTRL:
2198         case KVM_CAP_READONLY_MEM:
2199         case KVM_CAP_IRQFD_RESAMPLE:
2200                 r = 1;
2201                 break;
2202         case KVM_CAP_COALESCED_MMIO:
2203                 r = KVM_COALESCED_MMIO_PAGE_OFFSET;
2204                 break;
2205         case KVM_CAP_VAPIC:
2206                 r = !kvm_x86_ops->cpu_has_accelerated_tpr();
2207                 break;
2208         case KVM_CAP_NR_VCPUS:
2209                 r = KVM_SOFT_MAX_VCPUS;
2210                 break;
2211         case KVM_CAP_MAX_VCPUS:
2212                 r = KVM_MAX_VCPUS;
2213                 break;
2214         case KVM_CAP_NR_MEMSLOTS:
2215                 r = KVM_MEMORY_SLOTS;
2216                 break;
2217         case KVM_CAP_PV_MMU:    /* obsolete */
2218                 r = 0;
2219                 break;
2220         case KVM_CAP_IOMMU:
2221                 r = iommu_present(&pci_bus_type);
2222                 break;
2223         case KVM_CAP_MCE:
2224                 r = KVM_MAX_MCE_BANKS;
2225                 break;
2226         case KVM_CAP_XCRS:
2227                 r = cpu_has_xsave;
2228                 break;
2229         case KVM_CAP_TSC_CONTROL:
2230                 r = kvm_has_tsc_control;
2231                 break;
2232         case KVM_CAP_TSC_DEADLINE_TIMER:
2233                 r = boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER);
2234                 break;
2235         default:
2236                 r = 0;
2237                 break;
2238         }
2239         return r;
2240
2241 }
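
     /*
      * Illustrative sketch, not part of the original file: userspace queries
      * these capabilities on the /dev/kvm file descriptor, for example
      *
      *	int nr_slots = ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_NR_MEMSLOTS);
      */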
2242
2243 long kvm_arch_dev_ioctl(struct file *filp,
2244                         unsigned int ioctl, unsigned long arg)
2245 {
2246         void __user *argp = (void __user *)arg;
2247         long r;
2248
2249         switch (ioctl) {
2250         case KVM_GET_MSR_INDEX_LIST: {
2251                 struct kvm_msr_list __user *user_msr_list = argp;
2252                 struct kvm_msr_list msr_list;
2253                 unsigned n;
2254
2255                 r = -EFAULT;
2256                 if (copy_from_user(&msr_list, user_msr_list, sizeof msr_list))
2257                         goto out;
2258                 n = msr_list.nmsrs;
2259                 msr_list.nmsrs = num_msrs_to_save + ARRAY_SIZE(emulated_msrs);
2260                 if (copy_to_user(user_msr_list, &msr_list, sizeof msr_list))
2261                         goto out;
2262                 r = -E2BIG;
2263                 if (n < msr_list.nmsrs)
2264                         goto out;
2265                 r = -EFAULT;
2266                 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
2267                                  num_msrs_to_save * sizeof(u32)))
2268                         goto out;
2269                 if (copy_to_user(user_msr_list->indices + num_msrs_to_save,
2270                                  &emulated_msrs,
2271                                  ARRAY_SIZE(emulated_msrs) * sizeof(u32)))
2272                         goto out;
2273                 r = 0;
2274                 break;
2275         }
2276         case KVM_GET_SUPPORTED_CPUID: {
2277                 struct kvm_cpuid2 __user *cpuid_arg = argp;
2278                 struct kvm_cpuid2 cpuid;
2279
2280                 r = -EFAULT;
2281                 if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
2282                         goto out;
2283                 r = kvm_dev_ioctl_get_supported_cpuid(&cpuid,
2284                                                       cpuid_arg->entries);
2285                 if (r)
2286                         goto out;
2287
2288                 r = -EFAULT;
2289                 if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
2290                         goto out;
2291                 r = 0;
2292                 break;
2293         }
2294         case KVM_X86_GET_MCE_CAP_SUPPORTED: {
2295                 u64 mce_cap;
2296
2297                 mce_cap = KVM_MCE_CAP_SUPPORTED;
2298                 r = -EFAULT;
2299                 if (copy_to_user(argp, &mce_cap, sizeof mce_cap))
2300                         goto out;
2301                 r = 0;
2302                 break;
2303         }
2304         default:
2305                 r = -EINVAL;
2306         }
2307 out:
2308         return r;
2309 }
2310
2311 static void wbinvd_ipi(void *garbage)
2312 {
2313         wbinvd();
2314 }
2315
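     /*
      * WBINVD needs emulating only when the VM has an IOMMU domain (i.e.
      * assigned devices) that does not guarantee cache coherency.
      */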
2316 static bool need_emulate_wbinvd(struct kvm_vcpu *vcpu)
2317 {
2318         return vcpu->kvm->arch.iommu_domain &&
2319                 !(vcpu->kvm->arch.iommu_flags & KVM_IOMMU_CACHE_COHERENCY);
2320 }
2321
2322 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
2323 {
2324         /* Handle the case where WBINVD may be executed by the guest */
2325         if (need_emulate_wbinvd(vcpu)) {
2326                 if (kvm_x86_ops->has_wbinvd_exit())
2327                         cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask);
2328                 else if (vcpu->cpu != -1 && vcpu->cpu != cpu)
2329                         smp_call_function_single(vcpu->cpu,
2330                                         wbinvd_ipi, NULL, 1);
2331         }
2332
2333         kvm_x86_ops->vcpu_load(vcpu, cpu);
2334
2335         /* Apply any externally detected TSC adjustments (due to suspend) */
2336         if (unlikely(vcpu->arch.tsc_offset_adjustment)) {
2337                 adjust_tsc_offset_host(vcpu, vcpu->arch.tsc_offset_adjustment);
2338                 vcpu->arch.tsc_offset_adjustment = 0;
2339                 set_bit(KVM_REQ_CLOCK_UPDATE, &vcpu->requests);
2340         }
2341
2342         if (unlikely(vcpu->cpu != cpu) || check_tsc_unstable()) {
2343                 s64 tsc_delta = !vcpu->arch.last_host_tsc ? 0 :
2344                                 native_read_tsc() - vcpu->arch.last_host_tsc;
2345                 if (tsc_delta < 0)
2346                         mark_tsc_unstable("KVM discovered backwards TSC");
2347                 if (check_tsc_unstable()) {
2348                         u64 offset = kvm_x86_ops->compute_tsc_offset(vcpu,
2349                                                 vcpu->arch.last_guest_tsc);
2350                         kvm_x86_ops->write_tsc_offset(vcpu, offset);
2351                         vcpu->arch.tsc_catchup = 1;
2352                 }
2353                 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
2354                 if (vcpu->cpu != cpu)
2355                         kvm_migrate_timers(vcpu);
2356                 vcpu->cpu = cpu;
2357         }
2358
2359         accumulate_steal_time(vcpu);
2360         kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
2361 }
2362
2363 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
2364 {
2365         kvm_x86_ops->vcpu_put(vcpu);
2366         kvm_put_guest_fpu(vcpu);
2367         vcpu->arch.last_host_tsc = native_read_tsc();
2368 }
2369
2370 static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
2371                                     struct kvm_lapic_state *s)
2372 {
2373         memcpy(s->regs, vcpu->arch.apic->regs, sizeof *s);
2374
2375         return 0;
2376 }
2377
2378 static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
2379                                     struct kvm_lapic_state *s)
2380 {
2381         kvm_apic_post_state_restore(vcpu, s);
2382         update_cr8_intercept(vcpu);
2383
2384         return 0;
2385 }
2386
2387 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
2388                                     struct kvm_interrupt *irq)
2389 {
2390         if (irq->irq < 0 || irq->irq >= KVM_NR_INTERRUPTS)
2391                 return -EINVAL;
2392         if (irqchip_in_kernel(vcpu->kvm))
2393                 return -ENXIO;
2394
2395         kvm_queue_interrupt(vcpu, irq->irq, false);
2396         kvm_make_request(KVM_REQ_EVENT, vcpu);
2397
2398         return 0;
2399 }
2400
2401 static int kvm_vcpu_ioctl_nmi(struct kvm_vcpu *vcpu)
2402 {
2403         kvm_inject_nmi(vcpu);
2404
2405         return 0;
2406 }
2407
2408 static int vcpu_ioctl_tpr_access_reporting(struct kvm_vcpu *vcpu,
2409                                            struct kvm_tpr_access_ctl *tac)
2410 {
2411         if (tac->flags)
2412                 return -EINVAL;
2413         vcpu->arch.tpr_access_reporting = !!tac->enabled;
2414         return 0;
2415 }
2416
2417 static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu,
2418                                         u64 mcg_cap)
2419 {
2420         int r;
2421         unsigned bank_num = mcg_cap & 0xff, bank;
2422
2423         r = -EINVAL;
2424         if (!bank_num || bank_num >= KVM_MAX_MCE_BANKS)
2425                 goto out;
2426         if (mcg_cap & ~(KVM_MCE_CAP_SUPPORTED | 0xff | 0xff0000))
2427                 goto out;
2428         r = 0;
2429         vcpu->arch.mcg_cap = mcg_cap;
2430         /* Init IA32_MCG_CTL to all 1s */
2431         if (mcg_cap & MCG_CTL_P)
2432                 vcpu->arch.mcg_ctl = ~(u64)0;
2433         /* Init IA32_MCi_CTL to all 1s */
2434         for (bank = 0; bank < bank_num; bank++)
2435                 vcpu->arch.mce_banks[bank*4] = ~(u64)0;
2436 out:
2437         return r;
2438 }
2439
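     /*
      * KVM_X86_SET_MCE handler: inject a machine-check event into the guest.
      * When reporting is enabled, uncorrected errors raise #MC (or a triple
      * fault if CR4.MCE is clear or a machine check is already in progress);
      * corrected errors are only logged in the bank registers.
      */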
2440 static int kvm_vcpu_ioctl_x86_set_mce(struct kvm_vcpu *vcpu,
2441                                       struct kvm_x86_mce *mce)
2442 {
2443         u64 mcg_cap = vcpu->arch.mcg_cap;
2444         unsigned bank_num = mcg_cap & 0xff;
2445         u64 *banks = vcpu->arch.mce_banks;
2446
2447         if (mce->bank >= bank_num || !(mce->status & MCI_STATUS_VAL))
2448                 return -EINVAL;
2449         /*
2450          * If IA32_MCG_CTL is not all 1s, uncorrected error
2451          * reporting is disabled
2452          */
2453         if ((mce->status & MCI_STATUS_UC) && (mcg_cap & MCG_CTL_P) &&
2454             vcpu->arch.mcg_ctl != ~(u64)0)
2455                 return 0;
2456         banks += 4 * mce->bank;
2457         /*
2458          * If IA32_MCi_CTL is not all 1s, uncorrected error
2459          * reporting is disabled for the bank
2460          */
2461         if ((mce->status & MCI_STATUS_UC) && banks[0] != ~(u64)0)
2462                 return 0;
2463         if (mce->status & MCI_STATUS_UC) {
2464                 if ((vcpu->arch.mcg_status & MCG_STATUS_MCIP) ||
2465                     !kvm_read_cr4_bits(vcpu, X86_CR4_MCE)) {
2466                         kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
2467                         return 0;
2468                 }
2469                 if (banks[1] & MCI_STATUS_VAL)
2470                         mce->status |= MCI_STATUS_OVER;
2471                 banks[2] = mce->addr;
2472                 banks[3] = mce->misc;
2473                 vcpu->arch.mcg_status = mce->mcg_status;
2474                 banks[1] = mce->status;
2475                 kvm_queue_exception(vcpu, MC_VECTOR);
2476         } else if (!(banks[1] & MCI_STATUS_VAL)
2477                    || !(banks[1] & MCI_STATUS_UC)) {
2478                 if (banks[1] & MCI_STATUS_VAL)
2479                         mce->status |= MCI_STATUS_OVER;
2480                 banks[2] = mce->addr;
2481                 banks[3] = mce->misc;
2482                 banks[1] = mce->status;
2483         } else
2484                 banks[1] |= MCI_STATUS_OVER;
2485         return 0;
2486 }
2487
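     /*
      * Gather the pending exception/interrupt/NMI state for
      * KVM_GET_VCPU_EVENTS so userspace can save it and restore it later
      * (e.g. across migration).
      */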
2488 static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
2489                                                struct kvm_vcpu_events *events)
2490 {
2491         process_nmi(vcpu);
2492         events->exception.injected =
2493                 vcpu->arch.exception.pending &&
2494                 !kvm_exception_is_soft(vcpu->arch.exception.nr);
2495         events->exception.nr = vcpu->arch.exception.nr;
2496         events->exception.has_error_code = vcpu->arch.exception.has_error_code;
2497         events->exception.pad = 0;
2498         events->exception.error_code = vcpu->arch.exception.error_code;
2499
2500         events->interrupt.injected =
2501                 vcpu->arch.interrupt.pending && !vcpu->arch.interrupt.soft;
2502         events->interrupt.nr = vcpu->arch.interrupt.nr;
2503         events->interrupt.soft = 0;
2504         events->interrupt.shadow =
2505                 kvm_x86_ops->get_interrupt_shadow(vcpu,
2506                         KVM_X86_SHADOW_INT_MOV_SS | KVM_X86_SHADOW_INT_STI);
2507
2508         events->nmi.injected = vcpu->arch.nmi_injected;
2509         events->nmi.pending = vcpu->arch.nmi_pending != 0;
2510         events->nmi.masked = kvm_x86_ops->get_nmi_mask(vcpu);
2511         events->nmi.pad = 0;
2512
2513         events->sipi_vector = vcpu->arch.sipi_vector;
2514
2515         events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING
2516                          | KVM_VCPUEVENT_VALID_SIPI_VECTOR
2517                          | KVM_VCPUEVENT_VALID_SHADOW);
2518         memset(&events->reserved, 0, sizeof(events->reserved));
2519 }
2520
2521 static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
2522                                               struct kvm_vcpu_events *events)
2523 {
2524         if (events->flags & ~(KVM_VCPUEVENT_VALID_NMI_PENDING
2525                               | KVM_VCPUEVENT_VALID_SIPI_VECTOR
2526                               | KVM_VCPUEVENT_VALID_SHADOW))
2527                 return -EINVAL;
2528
2529         process_nmi(vcpu);
2530         vcpu->arch.exception.pending = events->exception.injected;
2531         vcpu->arch.exception.nr = events->exception.nr;
2532         vcpu->arch.exception.has_error_code = events->exception.has_error_code;
2533         vcpu->arch.exception.error_code = events->exception.error_code;
2534
2535         vcpu->arch.interrupt.pending = events->interrupt.injected;
2536         vcpu->arch.interrupt.nr = events->interrupt.nr;
2537         vcpu->arch.interrupt.soft = events->interrupt.soft;
2538         if (events->flags & KVM_VCPUEVENT_VALID_SHADOW)
2539                 kvm_x86_ops->set_interrupt_shadow(vcpu,
2540                                                   events->interrupt.shadow);
2541
2542         vcpu->arch.nmi_injected = events->nmi.injected;
2543         if (events->flags & KVM_VCPUEVENT_VALID_NMI_PENDING)
2544                 vcpu->arch.nmi_pending = events->nmi.pending;
2545         kvm_x86_ops->set_nmi_mask(vcpu, events->nmi.masked);
2546
2547         if (events->flags & KVM_VCPUEVENT_VALID_SIPI_VECTOR)
2548                 vcpu->arch.sipi_vector = events->sipi_vector;
2549
2550         kvm_make_request(KVM_REQ_EVENT, vcpu);
2551
2552         return 0;
2553 }
2554
2555 static void kvm_vcpu_ioctl_x86_get_debugregs(struct kvm_vcpu *vcpu,
2556                                              struct kvm_debugregs *dbgregs)
2557 {
2558         memcpy(dbgregs->db, vcpu->arch.db, sizeof(vcpu->arch.db));
2559         dbgregs->dr6 = vcpu->arch.dr6;
2560         dbgregs->dr7 = vcpu->arch.dr7;
2561         dbgregs->flags = 0;
2562         memset(&dbgregs->reserved, 0, sizeof(dbgregs->reserved));
2563 }
2564
2565 static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
2566                                             struct kvm_debugregs *dbgregs)
2567 {
2568         if (dbgregs->flags)
2569                 return -EINVAL;
2570
2571         memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db));
2572         vcpu->arch.dr6 = dbgregs->dr6;
2573         vcpu->arch.dr7 = dbgregs->dr7;
2574
2575         return 0;
2576 }
2577
2578 static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu,
2579                                          struct kvm_xsave *guest_xsave)
2580 {
2581         if (cpu_has_xsave)
2582                 memcpy(guest_xsave->region,
2583                         &vcpu->arch.guest_fpu.state->xsave,
2584                         xstate_size);
2585         else {
2586                 memcpy(guest_xsave->region,
2587                         &vcpu->arch.guest_fpu.state->fxsave,
2588                         sizeof(struct i387_fxsave_struct));
2589                 *(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)] =
2590                         XSTATE_FPSSE;
2591         }
2592 }
2593
2594 static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu,
2595                                         struct kvm_xsave *guest_xsave)
2596 {
2597         u64 xstate_bv =
2598                 *(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)];
2599
2600         if (cpu_has_xsave)
2601                 memcpy(&vcpu->arch.guest_fpu.state->xsave,
2602                         guest_xsave->region, xstate_size);
2603         else {
2604                 if (xstate_bv & ~XSTATE_FPSSE)
2605                         return -EINVAL;
2606                 memcpy(&vcpu->arch.guest_fpu.state->fxsave,
2607                         guest_xsave->region, sizeof(struct i387_fxsave_struct));
2608         }
2609         return 0;
2610 }
2611
2612 static void kvm_vcpu_ioctl_x86_get_xcrs(struct kvm_vcpu *vcpu,
2613                                         struct kvm_xcrs *guest_xcrs)
2614 {
2615         if (!cpu_has_xsave) {
2616                 guest_xcrs->nr_xcrs = 0;
2617                 return;
2618         }
2619
2620         guest_xcrs->nr_xcrs = 1;
2621         guest_xcrs->flags = 0;
2622         guest_xcrs->xcrs[0].xcr = XCR_XFEATURE_ENABLED_MASK;
2623         guest_xcrs->xcrs[0].value = vcpu->arch.xcr0;
2624 }
2625
2626 static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu,
2627                                        struct kvm_xcrs *guest_xcrs)
2628 {
2629         int i, r = 0;
2630
2631         if (!cpu_has_xsave)
2632                 return -EINVAL;
2633
2634         if (guest_xcrs->nr_xcrs > KVM_MAX_XCRS || guest_xcrs->flags)
2635                 return -EINVAL;
2636
2637         for (i = 0; i < guest_xcrs->nr_xcrs; i++)
2638                 /* Only support XCR0 currently */
2639                 if (guest_xcrs->xcrs[0].xcr == XCR_XFEATURE_ENABLED_MASK) {
2640                         r = __kvm_set_xcr(vcpu, XCR_XFEATURE_ENABLED_MASK,
2641                                 guest_xcrs->xcrs[0].value);
2642                         break;
2643                 }
2644         if (r)
2645                 r = -EINVAL;
2646         return r;
2647 }
2648
2649 /*
2650  * kvm_set_guest_paused() indicates to the guest kernel that it has been
2651  * stopped by the hypervisor.  This function will be called from the host only.
2652  * EINVAL is returned when the host attempts to set the flag for a guest that
2653  * does not support pv clocks.
2654  */
2655 static int kvm_set_guest_paused(struct kvm_vcpu *vcpu)
2656 {
2657         if (!vcpu->arch.time_page)
2658                 return -EINVAL;
2659         vcpu->arch.pvclock_set_guest_stopped_request = true;
2660         kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
2661         return 0;
2662 }
2663
2664 long kvm_arch_vcpu_ioctl(struct file *filp,
2665                          unsigned int ioctl, unsigned long arg)
2666 {
2667         struct kvm_vcpu *vcpu = filp->private_data;
2668         void __user *argp = (void __user *)arg;
2669         int r;
2670         union {
2671                 struct kvm_lapic_state *lapic;
2672                 struct kvm_xsave *xsave;
2673                 struct kvm_xcrs *xcrs;
2674                 void *buffer;
2675         } u;
2676
2677         u.buffer = NULL;
2678         switch (ioctl) {
2679         case KVM_GET_LAPIC: {
2680                 r = -EINVAL;
2681                 if (!vcpu->arch.apic)
2682                         goto out;
2683                 u.lapic = kzalloc(sizeof(struct kvm_lapic_state), GFP_KERNEL);
2684
2685                 r = -ENOMEM;
2686                 if (!u.lapic)
2687                         goto out;
2688                 r = kvm_vcpu_ioctl_get_lapic(vcpu, u.lapic);
2689                 if (r)
2690                         goto out;
2691                 r = -EFAULT;
2692                 if (copy_to_user(argp, u.lapic, sizeof(struct kvm_lapic_state)))
2693                         goto out;
2694                 r = 0;
2695                 break;
2696         }
2697         case KVM_SET_LAPIC: {
2698                 if (!vcpu->arch.apic)
2699                         goto out;
2700                 u.lapic = memdup_user(argp, sizeof(*u.lapic));
2701                 if (IS_ERR(u.lapic))
2702                         return PTR_ERR(u.lapic);
2703
2704                 r = kvm_vcpu_ioctl_set_lapic(vcpu, u.lapic);
2705                 break;
2706         }
2707         case KVM_INTERRUPT: {
2708                 struct kvm_interrupt irq;
2709
2710                 r = -EFAULT;
2711                 if (copy_from_user(&irq, argp, sizeof irq))
2712                         goto out;
2713                 r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
2714                 break;
2715         }
2716         case KVM_NMI: {
2717                 r = kvm_vcpu_ioctl_nmi(vcpu);
2718                 break;
2719         }
2720         case KVM_SET_CPUID: {
2721                 struct kvm_cpuid __user *cpuid_arg = argp;
2722                 struct kvm_cpuid cpuid;
2723
2724                 r = -EFAULT;
2725                 if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
2726                         goto out;
2727                 r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries);
2728                 break;
2729         }
2730         case KVM_SET_CPUID2: {
2731                 struct kvm_cpuid2 __user *cpuid_arg = argp;
2732                 struct kvm_cpuid2 cpuid;
2733
2734                 r = -EFAULT;
2735                 if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
2736                         goto out;
2737                 r = kvm_vcpu_ioctl_set_cpuid2(vcpu, &cpuid,
2738                                               cpuid_arg->entries);
2739                 break;
2740         }
2741         case KVM_GET_CPUID2: {
2742                 struct kvm_cpuid2 __user *cpuid_arg = argp;
2743                 struct kvm_cpuid2 cpuid;
2744
2745                 r = -EFAULT;
2746                 if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
2747                         goto out;
2748                 r = kvm_vcpu_ioctl_get_cpuid2(vcpu, &cpuid,
2749                                               cpuid_arg->entries);
2750                 if (r)
2751                         goto out;
2752                 r = -EFAULT;
2753                 if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
2754                         goto out;
2755                 r = 0;
2756                 break;
2757         }
2758         case KVM_GET_MSRS:
2759                 r = msr_io(vcpu, argp, kvm_get_msr, 1);
2760                 break;
2761         case KVM_SET_MSRS:
2762                 r = msr_io(vcpu, argp, do_set_msr, 0);
2763                 break;
2764         case KVM_TPR_ACCESS_REPORTING: {
2765                 struct kvm_tpr_access_ctl tac;
2766
2767                 r = -EFAULT;
2768                 if (copy_from_user(&tac, argp, sizeof tac))
2769                         goto out;
2770                 r = vcpu_ioctl_tpr_access_reporting(vcpu, &tac);
2771                 if (r)
2772                         goto out;
2773                 r = -EFAULT;
2774                 if (copy_to_user(argp, &tac, sizeof tac))
2775                         goto out;
2776                 r = 0;
2777                 break;
2778         }
2779         case KVM_SET_VAPIC_ADDR: {
2780                 struct kvm_vapic_addr va;
2781
2782                 r = -EINVAL;
2783                 if (!irqchip_in_kernel(vcpu->kvm))
2784                         goto out;
2785                 r = -EFAULT;
2786                 if (copy_from_user(&va, argp, sizeof va))
2787                         goto out;
2788                 r = 0;
2789                 kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr);
2790                 break;
2791         }
2792         case KVM_X86_SETUP_MCE: {
2793                 u64 mcg_cap;
2794
2795                 r = -EFAULT;
2796                 if (copy_from_user(&mcg_cap, argp, sizeof mcg_cap))
2797                         goto out;
2798                 r = kvm_vcpu_ioctl_x86_setup_mce(vcpu, mcg_cap);
2799                 break;
2800         }
2801         case KVM_X86_SET_MCE: {
2802                 struct kvm_x86_mce mce;
2803
2804                 r = -EFAULT;
2805                 if (copy_from_user(&mce, argp, sizeof mce))
2806                         goto out;
2807                 r = kvm_vcpu_ioctl_x86_set_mce(vcpu, &mce);
2808                 break;
2809         }
2810         case KVM_GET_VCPU_EVENTS: {
2811                 struct kvm_vcpu_events events;
2812
2813                 kvm_vcpu_ioctl_x86_get_vcpu_events(vcpu, &events);
2814
2815                 r = -EFAULT;
2816                 if (copy_to_user(argp, &events, sizeof(struct kvm_vcpu_events)))
2817                         break;
2818                 r = 0;
2819                 break;
2820         }
2821         case KVM_SET_VCPU_EVENTS: {
2822                 struct kvm_vcpu_events events;
2823
2824                 r = -EFAULT;
2825                 if (copy_from_user(&events, argp, sizeof(struct kvm_vcpu_events)))
2826                         break;
2827
2828                 r = kvm_vcpu_ioctl_x86_set_vcpu_events(vcpu, &events);
2829                 break;
2830         }
2831         case KVM_GET_DEBUGREGS: {
2832                 struct kvm_debugregs dbgregs;
2833
2834                 kvm_vcpu_ioctl_x86_get_debugregs(vcpu, &dbgregs);
2835
2836                 r = -EFAULT;
2837                 if (copy_to_user(argp, &dbgregs,
2838                                  sizeof(struct kvm_debugregs)))
2839                         break;
2840                 r = 0;
2841                 break;
2842         }
2843         case KVM_SET_DEBUGREGS: {
2844                 struct kvm_debugregs dbgregs;
2845
2846                 r = -EFAULT;
2847                 if (copy_from_user(&dbgregs, argp,
2848                                    sizeof(struct kvm_debugregs)))
2849                         break;
2850
2851                 r = kvm_vcpu_ioctl_x86_set_debugregs(vcpu, &dbgregs);
2852                 break;
2853         }
2854         case KVM_GET_XSAVE: {
2855                 u.xsave = kzalloc(sizeof(struct kvm_xsave), GFP_KERNEL);
2856                 r = -ENOMEM;
2857                 if (!u.xsave)
2858                         break;
2859
2860                 kvm_vcpu_ioctl_x86_get_xsave(vcpu, u.xsave);
2861
2862                 r = -EFAULT;
2863                 if (copy_to_user(argp, u.xsave, sizeof(struct kvm_xsave)))
2864                         break;
2865                 r = 0;
2866                 break;
2867         }
2868         case KVM_SET_XSAVE: {
2869                 u.xsave = memdup_user(argp, sizeof(*u.xsave));
2870                 if (IS_ERR(u.xsave))
2871                         return PTR_ERR(u.xsave);
2872
2873                 r = kvm_vcpu_ioctl_x86_set_xsave(vcpu, u.xsave);
2874                 break;
2875         }
2876         case KVM_GET_XCRS: {
2877                 u.xcrs = kzalloc(sizeof(struct kvm_xcrs), GFP_KERNEL);
2878                 r = -ENOMEM;
2879                 if (!u.xcrs)
2880                         break;
2881
2882                 kvm_vcpu_ioctl_x86_get_xcrs(vcpu, u.xcrs);
2883
2884                 r = -EFAULT;
2885                 if (copy_to_user(argp, u.xcrs,
2886                                  sizeof(struct kvm_xcrs)))
2887                         break;
2888                 r = 0;
2889                 break;
2890         }
2891         case KVM_SET_XCRS: {
2892                 u.xcrs = memdup_user(argp, sizeof(*u.xcrs));
2893                 if (IS_ERR(u.xcrs))
2894                         return PTR_ERR(u.xcrs);
2895
2896                 r = kvm_vcpu_ioctl_x86_set_xcrs(vcpu, u.xcrs);
2897                 break;
2898         }
2899         case KVM_SET_TSC_KHZ: {
2900                 u32 user_tsc_khz;
2901
2902                 r = -EINVAL;
2903                 user_tsc_khz = (u32)arg;
2904
2905                 if (user_tsc_khz >= kvm_max_guest_tsc_khz)
2906                         goto out;
2907
2908                 if (user_tsc_khz == 0)
2909                         user_tsc_khz = tsc_khz;
2910
2911                 kvm_set_tsc_khz(vcpu, user_tsc_khz);
2912
2913                 r = 0;
2914                 goto out;
2915         }
2916         case KVM_GET_TSC_KHZ: {
2917                 r = vcpu->arch.virtual_tsc_khz;
2918                 goto out;
2919         }
2920         case KVM_KVMCLOCK_CTRL: {
2921                 r = kvm_set_guest_paused(vcpu);
2922                 goto out;
2923         }
2924         default:
2925                 r = -EINVAL;
2926         }
2927 out:
2928         kfree(u.buffer);
2929         return r;
2930 }
2931
2932 int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
2933 {
2934         return VM_FAULT_SIGBUS;
2935 }
2936
2937 static int kvm_vm_ioctl_set_tss_addr(struct kvm *kvm, unsigned long addr)
2938 {
2939         int ret;
2940
2941         if (addr > (unsigned int)(-3 * PAGE_SIZE))
2942                 return -EINVAL;
2943         ret = kvm_x86_ops->set_tss_addr(kvm, addr);
2944         return ret;
2945 }
2946
2947 static int kvm_vm_ioctl_set_identity_map_addr(struct kvm *kvm,
2948                                               u64 ident_addr)
2949 {
2950         kvm->arch.ept_identity_map_addr = ident_addr;
2951         return 0;
2952 }
2953
2954 static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
2955                                           u32 kvm_nr_mmu_pages)
2956 {
2957         if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES)
2958                 return -EINVAL;
2959
2960         mutex_lock(&kvm->slots_lock);
2961         spin_lock(&kvm->mmu_lock);
2962
2963         kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages);
2964         kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages;
2965
2966         spin_unlock(&kvm->mmu_lock);
2967         mutex_unlock(&kvm->slots_lock);
2968         return 0;
2969 }
2970
2971 static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
2972 {
2973         return kvm->arch.n_max_mmu_pages;
2974 }
2975
2976 static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
2977 {
2978         int r;
2979
2980         r = 0;
2981         switch (chip->chip_id) {
2982         case KVM_IRQCHIP_PIC_MASTER:
2983                 memcpy(&chip->chip.pic,
2984                         &pic_irqchip(kvm)->pics[0],
2985                         sizeof(struct kvm_pic_state));
2986                 break;
2987         case KVM_IRQCHIP_PIC_SLAVE:
2988                 memcpy(&chip->chip.pic,
2989                         &pic_irqchip(kvm)->pics[1],
2990                         sizeof(struct kvm_pic_state));
2991                 break;
2992         case KVM_IRQCHIP_IOAPIC:
2993                 r = kvm_get_ioapic(kvm, &chip->chip.ioapic);
2994                 break;
2995         default:
2996                 r = -EINVAL;
2997                 break;
2998         }
2999         return r;
3000 }
3001
3002 static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
3003 {
3004         int r;
3005
3006         r = 0;
3007         switch (chip->chip_id) {
3008         case KVM_IRQCHIP_PIC_MASTER:
3009                 spin_lock(&pic_irqchip(kvm)->lock);
3010                 memcpy(&pic_irqchip(kvm)->pics[0],
3011                         &chip->chip.pic,
3012                         sizeof(struct kvm_pic_state));
3013                 spin_unlock(&pic_irqchip(kvm)->lock);
3014                 break;
3015         case KVM_IRQCHIP_PIC_SLAVE:
3016                 spin_lock(&pic_irqchip(kvm)->lock);
3017                 memcpy(&pic_irqchip(kvm)->pics[1],
3018                         &chip->chip.pic,
3019                         sizeof(struct kvm_pic_state));
3020                 spin_unlock(&pic_irqchip(kvm)->lock);
3021                 break;
3022         case KVM_IRQCHIP_IOAPIC:
3023                 r = kvm_set_ioapic(kvm, &chip->chip.ioapic);
3024                 break;
3025         default:
3026                 r = -EINVAL;
3027                 break;
3028         }
3029         kvm_pic_update_irq(pic_irqchip(kvm));
3030         return r;
3031 }
3032
3033 static int kvm_vm_ioctl_get_pit(struct kvm *kvm, struct kvm_pit_state *ps)
3034 {
3035         int r = 0;
3036
3037         mutex_lock(&kvm->arch.vpit->pit_state.lock);
3038         memcpy(ps, &kvm->arch.vpit->pit_state, sizeof(struct kvm_pit_state));
3039         mutex_unlock(&kvm->arch.vpit->pit_state.lock);
3040         return r;
3041 }
3042
3043 static int kvm_vm_ioctl_set_pit(struct kvm *kvm, struct kvm_pit_state *ps)
3044 {
3045         int r = 0;
3046
3047         mutex_lock(&kvm->arch.vpit->pit_state.lock);
3048         memcpy(&kvm->arch.vpit->pit_state, ps, sizeof(struct kvm_pit_state));
3049         kvm_pit_load_count(kvm, 0, ps->channels[0].count, 0);
3050         mutex_unlock(&kvm->arch.vpit->pit_state.lock);
3051         return r;
3052 }
3053
3054 static int kvm_vm_ioctl_get_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
3055 {
3056         int r = 0;
3057
3058         mutex_lock(&kvm->arch.vpit->pit_state.lock);
3059         memcpy(ps->channels, &kvm->arch.vpit->pit_state.channels,
3060                 sizeof(ps->channels));
3061         ps->flags = kvm->arch.vpit->pit_state.flags;
3062         mutex_unlock(&kvm->arch.vpit->pit_state.lock);
3063         memset(&ps->reserved, 0, sizeof(ps->reserved));
3064         return r;
3065 }
3066
3067 static int kvm_vm_ioctl_set_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
3068 {
3069         int r = 0, start = 0;
3070         u32 prev_legacy, cur_legacy;
3071         mutex_lock(&kvm->arch.vpit->pit_state.lock);
3072         prev_legacy = kvm->arch.vpit->pit_state.flags & KVM_PIT_FLAGS_HPET_LEGACY;
3073         cur_legacy = ps->flags & KVM_PIT_FLAGS_HPET_LEGACY;
3074         if (!prev_legacy && cur_legacy)
3075                 start = 1;
3076         memcpy(&kvm->arch.vpit->pit_state.channels, &ps->channels,
3077                sizeof(kvm->arch.vpit->pit_state.channels));
3078         kvm->arch.vpit->pit_state.flags = ps->flags;
3079         kvm_pit_load_count(kvm, 0, kvm->arch.vpit->pit_state.channels[0].count, start);
3080         mutex_unlock(&kvm->arch.vpit->pit_state.lock);
3081         return r;
3082 }
3083
3084 static int kvm_vm_ioctl_reinject(struct kvm *kvm,
3085                                  struct kvm_reinject_control *control)
3086 {
3087         if (!kvm->arch.vpit)
3088                 return -ENXIO;
3089         mutex_lock(&kvm->arch.vpit->pit_state.lock);
3090         kvm->arch.vpit->pit_state.reinject = control->pit_reinject;
3091         mutex_unlock(&kvm->arch.vpit->pit_state.lock);
3092         return 0;
3093 }
3094
3095 /**
3096  * kvm_vm_ioctl_get_dirty_log - get and clear the log of dirty pages in a slot
3097  * @kvm: kvm instance
3098  * @log: slot id and address to which we copy the log
3099  *
3100  * Keep in mind that VCPU threads can write to the bitmap
3101  * concurrently.  So, to avoid losing data, we keep the following order for
3102  * each bit:
3103  *
3104  *   1. Take a snapshot of the bit and clear it if needed.
3105  *   2. Write-protect the corresponding page.
3106  *   3. Flush TLBs if needed.
3107  *   4. Copy the snapshot to userspace.
3108  *
3109  * Between 2 and 3, the guest may write to the page using the remaining TLB
3110  * entry.  This is not a problem because the page will be reported dirty at
3111  * step 4 using the snapshot taken before, and step 3 ensures that successive
3112  * writes will be logged for the next call.
3113  */
3114 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
3115 {
3116         int r;
3117         struct kvm_memory_slot *memslot;
3118         unsigned long n, i;
3119         unsigned long *dirty_bitmap;
3120         unsigned long *dirty_bitmap_buffer;
3121         bool is_dirty = false;
3122
3123         mutex_lock(&kvm->slots_lock);
3124
3125         r = -EINVAL;
3126         if (log->slot >= KVM_MEMORY_SLOTS)
3127                 goto out;
3128
3129         memslot = id_to_memslot(kvm->memslots, log->slot);
3130
3131         dirty_bitmap = memslot->dirty_bitmap;
3132         r = -ENOENT;
3133         if (!dirty_bitmap)
3134                 goto out;
3135
3136         n = kvm_dirty_bitmap_bytes(memslot);
3137
3138         dirty_bitmap_buffer = dirty_bitmap + n / sizeof(long);
3139         memset(dirty_bitmap_buffer, 0, n);
3140
3141         spin_lock(&kvm->mmu_lock);
3142
3143         for (i = 0; i < n / sizeof(long); i++) {
3144                 unsigned long mask;
3145                 gfn_t offset;
3146
3147                 if (!dirty_bitmap[i])
3148                         continue;
3149
3150                 is_dirty = true;
3151
3152                 mask = xchg(&dirty_bitmap[i], 0);
3153                 dirty_bitmap_buffer[i] = mask;
3154
3155                 offset = i * BITS_PER_LONG;
3156                 kvm_mmu_write_protect_pt_masked(kvm, memslot, offset, mask);
3157         }
3158         if (is_dirty)
3159                 kvm_flush_remote_tlbs(kvm);
3160
3161         spin_unlock(&kvm->mmu_lock);
3162
3163         r = -EFAULT;
3164         if (copy_to_user(log->dirty_bitmap, dirty_bitmap_buffer, n))
3165                 goto out;
3166
3167         r = 0;
3168 out:
3169         mutex_unlock(&kvm->slots_lock);
3170         return r;
3171 }
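
/*
 * A minimal userspace sketch (not part of this file) of consuming the ioctl
 * above: the caller supplies a buffer with one bit per page in the slot and
 * KVM fills it with the snapshot taken under mmu_lock.  'vm_fd' and 'npages'
 * are assumptions of the sketch; error handling is trimmed.
 */
#if 0 /* illustrative only */
#include <linux/kvm.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>

static unsigned long *fetch_dirty_log(int vm_fd, int slot, size_t npages)
{
        size_t bytes = ((npages + 63) / 64) * 8;   /* bitmap rounded up to u64 */
        unsigned long *bitmap = calloc(1, bytes);
        struct kvm_dirty_log log;

        memset(&log, 0, sizeof(log));
        log.slot = slot;
        log.dirty_bitmap = bitmap;

        if (ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log) < 0) {
                free(bitmap);
                return NULL;
        }
        return bitmap;                             /* caller frees */
}
#endif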
3172
3173 int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event)
3174 {
3175         if (!irqchip_in_kernel(kvm))
3176                 return -ENXIO;
3177
3178         irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
3179                                         irq_event->irq, irq_event->level);
3180         return 0;
3181 }
3182
3183 long kvm_arch_vm_ioctl(struct file *filp,
3184                        unsigned int ioctl, unsigned long arg)
3185 {
3186         struct kvm *kvm = filp->private_data;
3187         void __user *argp = (void __user *)arg;
3188         int r = -ENOTTY;
3189         /*
3190          * This union makes it completely explicit to gcc-3.x
3191          * that these two variables' stack usage should be
3192          * combined, not added together.
3193          */
3194         union {
3195                 struct kvm_pit_state ps;
3196                 struct kvm_pit_state2 ps2;
3197                 struct kvm_pit_config pit_config;
3198         } u;
3199
3200         switch (ioctl) {
3201         case KVM_SET_TSS_ADDR:
3202                 r = kvm_vm_ioctl_set_tss_addr(kvm, arg);
3203                 break;
3204         case KVM_SET_IDENTITY_MAP_ADDR: {
3205                 u64 ident_addr;
3206
3207                 r = -EFAULT;
3208                 if (copy_from_user(&ident_addr, argp, sizeof ident_addr))
3209                         goto out;
3210                 r = kvm_vm_ioctl_set_identity_map_addr(kvm, ident_addr);
3211                 break;
3212         }
3213         case KVM_SET_NR_MMU_PAGES:
3214                 r = kvm_vm_ioctl_set_nr_mmu_pages(kvm, arg);
3215                 break;
3216         case KVM_GET_NR_MMU_PAGES:
3217                 r = kvm_vm_ioctl_get_nr_mmu_pages(kvm);
3218                 break;
3219         case KVM_CREATE_IRQCHIP: {
3220                 struct kvm_pic *vpic;
3221
3222                 mutex_lock(&kvm->lock);
3223                 r = -EEXIST;
3224                 if (kvm->arch.vpic)
3225                         goto create_irqchip_unlock;
3226                 r = -EINVAL;
3227                 if (atomic_read(&kvm->online_vcpus))
3228                         goto create_irqchip_unlock;
3229                 r = -ENOMEM;
3230                 vpic = kvm_create_pic(kvm);
3231                 if (vpic) {
3232                         r = kvm_ioapic_init(kvm);
3233                         if (r) {
3234                                 mutex_lock(&kvm->slots_lock);
3235                                 kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS,
3236                                                           &vpic->dev_master);
3237                                 kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS,
3238                                                           &vpic->dev_slave);
3239                                 kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS,
3240                                                           &vpic->dev_eclr);
3241                                 mutex_unlock(&kvm->slots_lock);
3242                                 kfree(vpic);
3243                                 goto create_irqchip_unlock;
3244                         }
3245                 } else
3246                         goto create_irqchip_unlock;
3247                 smp_wmb();
3248                 kvm->arch.vpic = vpic;
3249                 smp_wmb();
3250                 r = kvm_setup_default_irq_routing(kvm);
3251                 if (r) {
3252                         mutex_lock(&kvm->slots_lock);
3253                         mutex_lock(&kvm->irq_lock);
3254                         kvm_ioapic_destroy(kvm);
3255                         kvm_destroy_pic(kvm);
3256                         mutex_unlock(&kvm->irq_lock);
3257                         mutex_unlock(&kvm->slots_lock);
3258                 }
3259         create_irqchip_unlock:
3260                 mutex_unlock(&kvm->lock);
3261                 break;
3262         }
3263         case KVM_CREATE_PIT:
3264                 u.pit_config.flags = KVM_PIT_SPEAKER_DUMMY;
3265                 goto create_pit;
3266         case KVM_CREATE_PIT2:
3267                 r = -EFAULT;
3268                 if (copy_from_user(&u.pit_config, argp,
3269                                    sizeof(struct kvm_pit_config)))
3270                         goto out;
3271         create_pit:
3272                 mutex_lock(&kvm->slots_lock);
3273                 r = -EEXIST;
3274                 if (kvm->arch.vpit)
3275                         goto create_pit_unlock;
3276                 r = -ENOMEM;
3277                 kvm->arch.vpit = kvm_create_pit(kvm, u.pit_config.flags);
3278                 if (kvm->arch.vpit)
3279                         r = 0;
3280         create_pit_unlock:
3281                 mutex_unlock(&kvm->slots_lock);
3282                 break;
3283         case KVM_GET_IRQCHIP: {
3284                 /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
3285                 struct kvm_irqchip *chip;
3286
3287                 chip = memdup_user(argp, sizeof(*chip));
3288                 if (IS_ERR(chip)) {
3289                         r = PTR_ERR(chip);
3290                         goto out;
3291                 }
3292
3293                 r = -ENXIO;
3294                 if (!irqchip_in_kernel(kvm))
3295                         goto get_irqchip_out;
3296                 r = kvm_vm_ioctl_get_irqchip(kvm, chip);
3297                 if (r)
3298                         goto get_irqchip_out;
3299                 r = -EFAULT;
3300                 if (copy_to_user(argp, chip, sizeof *chip))
3301                         goto get_irqchip_out;
3302                 r = 0;
3303         get_irqchip_out:
3304                 kfree(chip);
3305                 break;
3306         }
3307         case KVM_SET_IRQCHIP: {
3308                 /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
3309                 struct kvm_irqchip *chip;
3310
3311                 chip = memdup_user(argp, sizeof(*chip));
3312                 if (IS_ERR(chip)) {
3313                         r = PTR_ERR(chip);
3314                         goto out;
3315                 }
3316
3317                 r = -ENXIO;
3318                 if (!irqchip_in_kernel(kvm))
3319                         goto set_irqchip_out;
3320                 r = kvm_vm_ioctl_set_irqchip(kvm, chip);
3321                 if (r)
3322                         goto set_irqchip_out;
3323                 r = 0;
3324         set_irqchip_out:
3325                 kfree(chip);
3326                 break;
3327         }
3328         case KVM_GET_PIT: {
3329                 r = -EFAULT;
3330                 if (copy_from_user(&u.ps, argp, sizeof(struct kvm_pit_state)))
3331                         goto out;
3332                 r = -ENXIO;
3333                 if (!kvm->arch.vpit)
3334                         goto out;
3335                 r = kvm_vm_ioctl_get_pit(kvm, &u.ps);
3336                 if (r)
3337                         goto out;
3338                 r = -EFAULT;
3339                 if (copy_to_user(argp, &u.ps, sizeof(struct kvm_pit_state)))
3340                         goto out;
3341                 r = 0;
3342                 break;
3343         }
3344         case KVM_SET_PIT: {
3345                 r = -EFAULT;
3346                 if (copy_from_user(&u.ps, argp, sizeof u.ps))
3347                         goto out;
3348                 r = -ENXIO;
3349                 if (!kvm->arch.vpit)
3350                         goto out;
3351                 r = kvm_vm_ioctl_set_pit(kvm, &u.ps);
3352                 break;
3353         }
3354         case KVM_GET_PIT2: {
3355                 r = -ENXIO;
3356                 if (!kvm->arch.vpit)
3357                         goto out;
3358                 r = kvm_vm_ioctl_get_pit2(kvm, &u.ps2);
3359                 if (r)
3360                         goto out;
3361                 r = -EFAULT;
3362                 if (copy_to_user(argp, &u.ps2, sizeof(u.ps2)))
3363                         goto out;
3364                 r = 0;
3365                 break;
3366         }
3367         case KVM_SET_PIT2: {
3368                 r = -EFAULT;
3369                 if (copy_from_user(&u.ps2, argp, sizeof(u.ps2)))
3370                         goto out;
3371                 r = -ENXIO;
3372                 if (!kvm->arch.vpit)
3373                         goto out;
3374                 r = kvm_vm_ioctl_set_pit2(kvm, &u.ps2);
3375                 break;
3376         }
3377         case KVM_REINJECT_CONTROL: {
3378                 struct kvm_reinject_control control;
3379                 r =  -EFAULT;
3380                 if (copy_from_user(&control, argp, sizeof(control)))
3381                         goto out;
3382                 r = kvm_vm_ioctl_reinject(kvm, &control);
3383                 break;
3384         }
3385         case KVM_XEN_HVM_CONFIG: {
3386                 r = -EFAULT;
3387                 if (copy_from_user(&kvm->arch.xen_hvm_config, argp,
3388                                    sizeof(struct kvm_xen_hvm_config)))
3389                         goto out;
3390                 r = -EINVAL;
3391                 if (kvm->arch.xen_hvm_config.flags)
3392                         goto out;
3393                 r = 0;
3394                 break;
3395         }
3396         case KVM_SET_CLOCK: {
3397                 struct kvm_clock_data user_ns;
3398                 u64 now_ns;
3399                 s64 delta;
3400
3401                 r = -EFAULT;
3402                 if (copy_from_user(&user_ns, argp, sizeof(user_ns)))
3403                         goto out;
3404
3405                 r = -EINVAL;
3406                 if (user_ns.flags)
3407                         goto out;
3408
3409                 r = 0;
3410                 local_irq_disable();
3411                 now_ns = get_kernel_ns();
3412                 delta = user_ns.clock - now_ns;
3413                 local_irq_enable();
3414                 kvm->arch.kvmclock_offset = delta;
3415                 break;
3416         }
3417         case KVM_GET_CLOCK: {
3418                 struct kvm_clock_data user_ns;
3419                 u64 now_ns;
3420
3421                 local_irq_disable();
3422                 now_ns = get_kernel_ns();
3423                 user_ns.clock = kvm->arch.kvmclock_offset + now_ns;
3424                 local_irq_enable();
3425                 user_ns.flags = 0;
3426                 memset(&user_ns.pad, 0, sizeof(user_ns.pad));
3427
3428                 r = -EFAULT;
3429                 if (copy_to_user(argp, &user_ns, sizeof(user_ns)))
3430                         goto out;
3431                 r = 0;
3432                 break;
3433         }
3434
3435         default:
3436                 ;
3437         }
3438 out:
3439         return r;
3440 }
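
/*
 * A minimal userspace sketch (not part of this file) of the KVM_GET_CLOCK /
 * KVM_SET_CLOCK pair handled above, as a VMM might use it to carry kvmclock
 * across save/restore.  Note that KVM_SET_CLOCK rejects non-zero flags.
 */
#if 0 /* illustrative only */
#include <linux/kvm.h>
#include <sys/ioctl.h>

static int save_kvmclock(int vm_fd, struct kvm_clock_data *data)
{
        return ioctl(vm_fd, KVM_GET_CLOCK, data);
}

static int restore_kvmclock(int vm_fd, const struct kvm_clock_data *saved)
{
        struct kvm_clock_data data = *saved;

        data.flags = 0;                 /* non-zero flags are -EINVAL above */
        return ioctl(vm_fd, KVM_SET_CLOCK, &data);
}
#endif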
3441
3442 static void kvm_init_msr_list(void)
3443 {
3444         u32 dummy[2];
3445         unsigned i, j;
3446
3447         /* Skip the first MSRs in the list; they are KVM-specific. */
3448         for (i = j = KVM_SAVE_MSRS_BEGIN; i < ARRAY_SIZE(msrs_to_save); i++) {
3449                 if (rdmsr_safe(msrs_to_save[i], &dummy[0], &dummy[1]) < 0)
3450                         continue;
3451                 if (j < i)
3452                         msrs_to_save[j] = msrs_to_save[i];
3453                 j++;
3454         }
3455         num_msrs_to_save = j;
3456 }
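
/*
 * The loop above is the usual two-index, in-place filter: entries that fail
 * the rdmsr_safe() probe are dropped and the survivors are packed to the
 * front, preserving order.  A generic sketch of the same pattern (a
 * hypothetical helper, not used by KVM); kvm_init_msr_list() additionally
 * starts at KVM_SAVE_MSRS_BEGIN so the KVM-specific MSRs are kept
 * unconditionally.
 */
#if 0 /* illustrative only */
#include <stddef.h>

static size_t filter_in_place(unsigned int *v, size_t n,
                              int (*keep)(unsigned int))
{
        size_t i, j;

        for (i = j = 0; i < n; i++) {
                if (!keep(v[i]))
                        continue;
                if (j < i)
                        v[j] = v[i];
                j++;
        }
        return j;       /* new element count */
}
#endif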
3457
3458 static int vcpu_mmio_write(struct kvm_vcpu *vcpu, gpa_t addr, int len,
3459                            const void *v)
3460 {
3461         int handled = 0;
3462         int n;
3463
3464         do {
3465                 n = min(len, 8);
3466                 if (!(vcpu->arch.apic &&
3467                       !kvm_iodevice_write(&vcpu->arch.apic->dev, addr, n, v))
3468                     && kvm_io_bus_write(vcpu->kvm, KVM_MMIO_BUS, addr, n, v))
3469                         break;
3470                 handled += n;
3471                 addr += n;
3472                 len -= n;
3473                 v += n;
3474         } while (len);
3475
3476         return handled;
3477 }
3478
3479 static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v)
3480 {
3481         int handled = 0;
3482         int n;
3483
3484         do {
3485                 n = min(len, 8);
3486                 if (!(vcpu->arch.apic &&
3487                       !kvm_iodevice_read(&vcpu->arch.apic->dev, addr, n, v))
3488                     && kvm_io_bus_read(vcpu->kvm, KVM_MMIO_BUS, addr, n, v))
3489                         break;
3490                 trace_kvm_mmio(KVM_TRACE_MMIO_READ, n, addr, *(u64 *)v);
3491                 handled += n;
3492                 addr += n;
3493                 len -= n;
3494                 v += n;
3495         } while (len);
3496
3497         return handled;
3498 }
3499
3500 static void kvm_set_segment(struct kvm_vcpu *vcpu,
3501                         struct kvm_segment *var, int seg)
3502 {
3503         kvm_x86_ops->set_segment(vcpu, var, seg);
3504 }
3505
3506 void kvm_get_segment(struct kvm_vcpu *vcpu,
3507                      struct kvm_segment *var, int seg)
3508 {
3509         kvm_x86_ops->get_segment(vcpu, var, seg);
3510 }
3511
3512 gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access)
3513 {
3514         gpa_t t_gpa;
3515         struct x86_exception exception;
3516
3517         BUG_ON(!mmu_is_nested(vcpu));
3518
3519         /* NPT walks are always user-walks */
3520         access |= PFERR_USER_MASK;
3521         t_gpa  = vcpu->arch.mmu.gva_to_gpa(vcpu, gpa, access, &exception);
3522
3523         return t_gpa;
3524 }
3525
3526 gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
3527                               struct x86_exception *exception)
3528 {
3529         u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
3530         return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
3531 }
3532
3533  gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva,
3534                                 struct x86_exception *exception)
3535 {
3536         u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
3537         access |= PFERR_FETCH_MASK;
3538         return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
3539 }
3540
3541 gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva,
3542                                struct x86_exception *exception)
3543 {
3544         u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
3545         access |= PFERR_WRITE_MASK;
3546         return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
3547 }
3548
3549 /* used to access a guest's mapped memory without checking CPL */
3550 gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
3551                                 struct x86_exception *exception)
3552 {
3553         return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, 0, exception);
3554 }
3555
3556 static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
3557                                       struct kvm_vcpu *vcpu, u32 access,
3558                                       struct x86_exception *exception)
3559 {
3560         void *data = val;
3561         int r = X86EMUL_CONTINUE;
3562
3563         while (bytes) {
3564                 gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, access,
3565                                                             exception);
3566                 unsigned offset = addr & (PAGE_SIZE-1);
3567                 unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset);
3568                 int ret;
3569
3570                 if (gpa == UNMAPPED_GVA)
3571                         return X86EMUL_PROPAGATE_FAULT;
3572                 ret = kvm_read_guest(vcpu->kvm, gpa, data, toread);
3573                 if (ret < 0) {
3574                         r = X86EMUL_IO_NEEDED;
3575                         goto out;
3576                 }
3577
3578                 bytes -= toread;
3579                 data += toread;
3580                 addr += toread;
3581         }
3582 out:
3583         return r;
3584 }
3585
3586 /* used for instruction fetching */
3587 static int kvm_fetch_guest_virt(struct x86_emulate_ctxt *ctxt,
3588                                 gva_t addr, void *val, unsigned int bytes,
3589                                 struct x86_exception *exception)
3590 {
3591         struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
3592         u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
3593
3594         return kvm_read_guest_virt_helper(addr, val, bytes, vcpu,
3595                                           access | PFERR_FETCH_MASK,
3596                                           exception);
3597 }
3598
3599 int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt,
3600                                gva_t addr, void *val, unsigned int bytes,
3601                                struct x86_exception *exception)
3602 {
3603         struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
3604         u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
3605
3606         return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access,
3607                                           exception);
3608 }
3609 EXPORT_SYMBOL_GPL(kvm_read_guest_virt);
3610
3611 static int kvm_read_guest_virt_system(struct x86_emulate_ctxt *ctxt,
3612                                       gva_t addr, void *val, unsigned int bytes,
3613                                       struct x86_exception *exception)
3614 {
3615         struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
3616         return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, 0, exception);
3617 }
3618
3619 int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt,
3620                                        gva_t addr, void *val,
3621                                        unsigned int bytes,
3622                                        struct x86_exception *exception)
3623 {
3624         struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
3625         void *data = val;
3626         int r = X86EMUL_CONTINUE;
3627
3628         while (bytes) {
3629                 gpa_t gpa =  vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr,
3630                                                              PFERR_WRITE_MASK,
3631                                                              exception);
3632                 unsigned offset = addr & (PAGE_SIZE-1);
3633                 unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset);
3634                 int ret;
3635
3636                 if (gpa == UNMAPPED_GVA)
3637                         return X86EMUL_PROPAGATE_FAULT;
3638                 ret = kvm_write_guest(vcpu->kvm, gpa, data, towrite);
3639                 if (ret < 0) {
3640                         r = X86EMUL_IO_NEEDED;
3641                         goto out;
3642                 }
3643
3644                 bytes -= towrite;
3645                 data += towrite;
3646                 addr += towrite;
3647         }
3648 out:
3649         return r;
3650 }
3651 EXPORT_SYMBOL_GPL(kvm_write_guest_virt_system);
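
/*
 * Both helpers above walk the guest-virtual range one page at a time,
 * translating each page separately and clamping the chunk to the end of the
 * page.  A standalone sketch (not part of this file) of that chunking
 * arithmetic, assuming 4 KiB guest pages:
 */
#if 0 /* illustrative only */
#include <stdio.h>

#define GUEST_PAGE_SIZE 4096u

static void show_chunks(unsigned long addr, unsigned int bytes)
{
        while (bytes) {
                unsigned int offset = addr & (GUEST_PAGE_SIZE - 1);
                unsigned int chunk = GUEST_PAGE_SIZE - offset;

                if (chunk > bytes)
                        chunk = bytes;
                printf("gva=%#lx len=%#x\n", addr, chunk);
                addr += chunk;
                bytes -= chunk;
        }
}

/* show_chunks(0x7ffffff0, 0x30) prints a 0x10-byte piece up to the page
 * boundary followed by a 0x20-byte piece from the start of the next page. */
#endif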
3652
3653 static int vcpu_mmio_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
3654                                 gpa_t *gpa, struct x86_exception *exception,
3655                                 bool write)
3656 {
3657         u32 access = ((kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0)
3658                 | (write ? PFERR_WRITE_MASK : 0);
3659
3660         if (vcpu_match_mmio_gva(vcpu, gva)
3661             && !permission_fault(vcpu->arch.walk_mmu, vcpu->arch.access, access)) {
3662                 *gpa = vcpu->arch.mmio_gfn << PAGE_SHIFT |
3663                                         (gva & (PAGE_SIZE - 1));
3664                 trace_vcpu_match_mmio(gva, *gpa, write, false);
3665                 return 1;
3666         }
3667
3668         *gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
3669
3670         if (*gpa == UNMAPPED_GVA)
3671                 return -1;
3672
3673         /* For APIC access vmexit */
3674         if ((*gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
3675                 return 1;
3676
3677         if (vcpu_match_mmio_gpa(vcpu, *gpa)) {
3678                 trace_vcpu_match_mmio(gva, *gpa, write, true);
3679                 return 1;
3680         }
3681
3682         return 0;
3683 }
3684
3685 int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
3686                         const void *val, int bytes)
3687 {
3688         int ret;
3689
3690         ret = kvm_write_guest(vcpu->kvm, gpa, val, bytes);
3691         if (ret < 0)
3692                 return 0;
3693         kvm_mmu_pte_write(vcpu, gpa, val, bytes);
3694         return 1;
3695 }
3696
3697 struct read_write_emulator_ops {
3698         int (*read_write_prepare)(struct kvm_vcpu *vcpu, void *val,
3699                                   int bytes);
3700         int (*read_write_emulate)(struct kvm_vcpu *vcpu, gpa_t gpa,
3701                                   void *val, int bytes);
3702         int (*read_write_mmio)(struct kvm_vcpu *vcpu, gpa_t gpa,
3703                                int bytes, void *val);
3704         int (*read_write_exit_mmio)(struct kvm_vcpu *vcpu, gpa_t gpa,
3705                                     void *val, int bytes);
3706         bool write;
3707 };
3708
3709 static int read_prepare(struct kvm_vcpu *vcpu, void *val, int bytes)
3710 {
3711         if (vcpu->mmio_read_completed) {
3712                 trace_kvm_mmio(KVM_TRACE_MMIO_READ, bytes,
3713                                vcpu->mmio_fragments[0].gpa, *(u64 *)val);
3714                 vcpu->mmio_read_completed = 0;
3715                 return 1;
3716         }
3717
3718         return 0;
3719 }
3720
3721 static int read_emulate(struct kvm_vcpu *vcpu, gpa_t gpa,
3722                         void *val, int bytes)
3723 {
3724         return !kvm_read_guest(vcpu->kvm, gpa, val, bytes);
3725 }
3726
3727 static int write_emulate(struct kvm_vcpu *vcpu, gpa_t gpa,
3728                          void *val, int bytes)
3729 {
3730         return emulator_write_phys(vcpu, gpa, val, bytes);
3731 }
3732
3733 static int write_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes, void *val)
3734 {
3735         trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, bytes, gpa, *(u64 *)val);
3736         return vcpu_mmio_write(vcpu, gpa, bytes, val);
3737 }
3738
3739 static int read_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa,
3740                           void *val, int bytes)
3741 {
3742         trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, bytes, gpa, 0);
3743         return X86EMUL_IO_NEEDED;
3744 }
3745
3746 static int write_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa,
3747                            void *val, int bytes)
3748 {
3749         struct kvm_mmio_fragment *frag = &vcpu->mmio_fragments[0];
3750
3751         memcpy(vcpu->run->mmio.data, frag->data, frag->len);
3752         return X86EMUL_CONTINUE;
3753 }
3754
3755 static const struct read_write_emulator_ops read_emultor = {
3756         .read_write_prepare = read_prepare,
3757         .read_write_emulate = read_emulate,
3758         .read_write_mmio = vcpu_mmio_read,
3759         .read_write_exit_mmio = read_exit_mmio,
3760 };
3761
3762 static const struct read_write_emulator_ops write_emultor = {
3763         .read_write_emulate = write_emulate,
3764         .read_write_mmio = write_mmio,
3765         .read_write_exit_mmio = write_exit_mmio,
3766         .write = true,
3767 };
3768
3769 static int emulator_read_write_onepage(unsigned long addr, void *val,
3770                                        unsigned int bytes,
3771                                        struct x86_exception *exception,
3772                                        struct kvm_vcpu *vcpu,
3773                                        const struct read_write_emulator_ops *ops)
3774 {
3775         gpa_t gpa;
3776         int handled, ret;
3777         bool write = ops->write;
3778         struct kvm_mmio_fragment *frag;
3779
3780         ret = vcpu_mmio_gva_to_gpa(vcpu, addr, &gpa, exception, write);
3781
3782         if (ret < 0)
3783                 return X86EMUL_PROPAGATE_FAULT;
3784
3785         /* For APIC access vmexit */
3786         if (ret)
3787                 goto mmio;
3788
3789         if (ops->read_write_emulate(vcpu, gpa, val, bytes))
3790                 return X86EMUL_CONTINUE;
3791
3792 mmio:
3793         /*
3794          * Is this MMIO handled locally?
3795          */
3796         handled = ops->read_write_mmio(vcpu, gpa, bytes, val);
3797         if (handled == bytes)
3798                 return X86EMUL_CONTINUE;
3799
3800         gpa += handled;
3801         bytes -= handled;
3802         val += handled;
3803
3804         while (bytes) {
3805                 unsigned now = min(bytes, 8U);
3806
3807                 frag = &vcpu->mmio_fragments[vcpu->mmio_nr_fragments++];
3808                 frag->gpa = gpa;
3809                 frag->data = val;
3810                 frag->len = now;
3811
3812                 gpa += now;
3813                 val += now;
3814                 bytes -= now;
3815         }
3816         return X86EMUL_CONTINUE;
3817 }
3818
3819 int emulator_read_write(struct x86_emulate_ctxt *ctxt, unsigned long addr,
3820                         void *val, unsigned int bytes,
3821                         struct x86_exception *exception,
3822                         const struct read_write_emulator_ops *ops)
3823 {
3824         struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
3825         gpa_t gpa;
3826         int rc;
3827
3828         if (ops->read_write_prepare &&
3829                   ops->read_write_prepare(vcpu, val, bytes))
3830                 return X86EMUL_CONTINUE;
3831
3832         vcpu->mmio_nr_fragments = 0;
3833
3834         /* Crossing a page boundary? */
3835         if (((addr + bytes - 1) ^ addr) & PAGE_MASK) {
3836                 int now;
3837
3838                 now = -addr & ~PAGE_MASK;
3839                 rc = emulator_read_write_onepage(addr, val, now, exception,
3840                                                  vcpu, ops);
3841
3842                 if (rc != X86EMUL_CONTINUE)
3843                         return rc;
3844                 addr += now;
3845                 val += now;
3846                 bytes -= now;
3847         }
3848
3849         rc = emulator_read_write_onepage(addr, val, bytes, exception,
3850                                          vcpu, ops);
3851         if (rc != X86EMUL_CONTINUE)
3852                 return rc;
3853
3854         if (!vcpu->mmio_nr_fragments)
3855                 return rc;
3856
3857         gpa = vcpu->mmio_fragments[0].gpa;
3858
3859         vcpu->mmio_needed = 1;
3860         vcpu->mmio_cur_fragment = 0;
3861
3862         vcpu->run->mmio.len = vcpu->mmio_fragments[0].len;
3863         vcpu->run->mmio.is_write = vcpu->mmio_is_write = ops->write;
3864         vcpu->run->exit_reason = KVM_EXIT_MMIO;
3865         vcpu->run->mmio.phys_addr = gpa;
3866
3867         return ops->read_write_exit_mmio(vcpu, gpa, val, bytes);
3868 }
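
/*
 * A small sketch (not part of this file) of the page-boundary arithmetic
 * used above: "now = -addr & ~PAGE_MASK" is the distance to the next page
 * boundary, and the xor test detects accesses that straddle it.  Assumes
 * 4 KiB pages.
 */
#if 0 /* illustrative only */
#include <assert.h>

#define EX_PAGE_MASK (~0xfffUL)

static unsigned long to_page_boundary(unsigned long addr)
{
        return -addr & ~EX_PAGE_MASK;
}

static int crosses_page(unsigned long addr, unsigned long bytes)
{
        return (((addr + bytes - 1) ^ addr) & EX_PAGE_MASK) != 0;
}

int main(void)
{
        assert(crosses_page(0x1ff8, 16));       /* 0x1ff8..0x2007 straddles  */
        assert(to_page_boundary(0x1ff8) == 8);  /* first piece is 8 bytes    */
        assert(!crosses_page(0x2000, 16));      /* remainder fits in a page  */
        return 0;
}
#endif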
3869
3870 static int emulator_read_emulated(struct x86_emulate_ctxt *ctxt,
3871                                   unsigned long addr,
3872                                   void *val,
3873                                   unsigned int bytes,
3874                                   struct x86_exception *exception)
3875 {
3876         return emulator_read_write(ctxt, addr, val, bytes,
3877                                    exception, &read_emultor);
3878 }
3879
3880 int emulator_write_emulated(struct x86_emulate_ctxt *ctxt,
3881                             unsigned long addr,
3882                             const void *val,
3883                             unsigned int bytes,
3884                             struct x86_exception *exception)
3885 {
3886         return emulator_read_write(ctxt, addr, (void *)val, bytes,
3887                                    exception, &write_emultor);
3888 }
3889
3890 #define CMPXCHG_TYPE(t, ptr, old, new) \
3891         (cmpxchg((t *)(ptr), *(t *)(old), *(t *)(new)) == *(t *)(old))
3892
3893 #ifdef CONFIG_X86_64
3894 #  define CMPXCHG64(ptr, old, new) CMPXCHG_TYPE(u64, ptr, old, new)
3895 #else
3896 #  define CMPXCHG64(ptr, old, new) \
3897         (cmpxchg64((u64 *)(ptr), *(u64 *)(old), *(u64 *)(new)) == *(u64 *)(old))
3898 #endif
3899
3900 static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt,
3901                                      unsigned long addr,
3902                                      const void *old,
3903                                      const void *new,
3904                                      unsigned int bytes,
3905                                      struct x86_exception *exception)
3906 {
3907         struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
3908         gpa_t gpa;
3909         struct page *page;
3910         char *kaddr;
3911         bool exchanged;
3912
3913         /* a guest's cmpxchg8b has to be emulated atomically */
3914         if (bytes > 8 || (bytes & (bytes - 1)))
3915                 goto emul_write;
3916
3917         gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, NULL);
3918
3919         if (gpa == UNMAPPED_GVA ||
3920             (gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
3921                 goto emul_write;
3922
3923         if (((gpa + bytes - 1) & PAGE_MASK) != (gpa & PAGE_MASK))
3924                 goto emul_write;
3925
3926         page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
3927         if (is_error_page(page))
3928                 goto emul_write;
3929
3930         kaddr = kmap_atomic(page);
3931         kaddr += offset_in_page(gpa);
3932         switch (bytes) {
3933         case 1:
3934                 exchanged = CMPXCHG_TYPE(u8, kaddr, old, new);
3935                 break;
3936         case 2:
3937                 exchanged = CMPXCHG_TYPE(u16, kaddr, old, new);
3938                 break;
3939         case 4:
3940                 exchanged = CMPXCHG_TYPE(u32, kaddr, old, new);
3941                 break;
3942         case 8:
3943                 exchanged = CMPXCHG64(kaddr, old, new);
3944                 break;
3945         default:
3946                 BUG();
3947         }
3948         kunmap_atomic(kaddr);
3949         kvm_release_page_dirty(page);
3950
3951         if (!exchanged)
3952                 return X86EMUL_CMPXCHG_FAILED;
3953
3954         kvm_mmu_pte_write(vcpu, gpa, new, bytes);
3955
3956         return X86EMUL_CONTINUE;
3957
3958 emul_write:
3959         printk_once(KERN_WARNING "kvm: emulating exchange as write\n");
3960
3961         return emulator_write_emulated(ctxt, addr, new, bytes, exception);
3962 }
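
/*
 * The success criterion used by CMPXCHG_TYPE/CMPXCHG64 above: the new value
 * is stored only if the location still holds *old.  A userspace sketch of
 * the same semantics with a GCC builtin (not the kernel cmpxchg primitive):
 */
#if 0 /* illustrative only */
#include <stdbool.h>
#include <stdint.h>

static bool cmpxchg_u32(uint32_t *ptr, uint32_t old, uint32_t new_val)
{
        /* Returns true (and writes new_val) only if *ptr was still 'old'. */
        return __atomic_compare_exchange_n(ptr, &old, new_val, false,
                                           __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}
#endif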
3963
3964 static int kernel_pio(struct kvm_vcpu *vcpu, void *pd)
3965 {
3966         /* TODO: string I/O for in-kernel devices */
3967         int r;
3968
3969         if (vcpu->arch.pio.in)
3970                 r = kvm_io_bus_read(vcpu->kvm, KVM_PIO_BUS, vcpu->arch.pio.port,
3971                                     vcpu->arch.pio.size, pd);
3972         else
3973                 r = kvm_io_bus_write(vcpu->kvm, KVM_PIO_BUS,
3974                                      vcpu->arch.pio.port, vcpu->arch.pio.size,
3975                                      pd);
3976         return r;
3977 }
3978
3979 static int emulator_pio_in_out(struct kvm_vcpu *vcpu, int size,
3980                                unsigned short port, void *val,
3981                                unsigned int count, bool in)
3982 {
3983         trace_kvm_pio(!in, port, size, count);
3984
3985         vcpu->arch.pio.port = port;
3986         vcpu->arch.pio.in = in;
3987         vcpu->arch.pio.count  = count;
3988         vcpu->arch.pio.size = size;
3989
3990         if (!kernel_pio(vcpu, vcpu->arch.pio_data)) {
3991                 vcpu->arch.pio.count = 0;
3992                 return 1;
3993         }
3994
3995         vcpu->run->exit_reason = KVM_EXIT_IO;
3996         vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
3997         vcpu->run->io.size = size;
3998         vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
3999         vcpu->run->io.count = count;
4000         vcpu->run->io.port = port;
4001
4002         return 0;
4003 }
4004
4005 static int emulator_pio_in_emulated(struct x86_emulate_ctxt *ctxt,
4006                                     int size, unsigned short port, void *val,
4007                                     unsigned int count)
4008 {
4009         struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
4010         int ret;
4011
4012         if (vcpu->arch.pio.count)
4013                 goto data_avail;
4014
4015         ret = emulator_pio_in_out(vcpu, size, port, val, count, true);
4016         if (ret) {
4017 data_avail:
4018                 memcpy(val, vcpu->arch.pio_data, size * count);
4019                 vcpu->arch.pio.count = 0;
4020                 return 1;
4021         }
4022
4023         return 0;
4024 }
4025
4026 static int emulator_pio_out_emulated(struct x86_emulate_ctxt *ctxt,
4027                                      int size, unsigned short port,
4028                                      const void *val, unsigned int count)
4029 {
4030         struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
4031
4032         memcpy(vcpu->arch.pio_data, val, size * count);
4033         return emulator_pio_in_out(vcpu, size, port, (void *)val, count, false);
4034 }
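
/*
 * When kernel_pio() cannot satisfy the access, the vcpu exits to userspace
 * with KVM_EXIT_IO and the data sits io.data_offset bytes into the mmap()ed
 * kvm_run area.  A hedged userspace sketch (not part of this file);
 * handle_in()/handle_out() stand in for hypothetical device callbacks.
 */
#if 0 /* illustrative only */
#include <linux/kvm.h>
#include <stdint.h>
#include <string.h>

static void handle_pio_exit(struct kvm_run *run,
                            uint64_t (*handle_in)(uint16_t port, int size),
                            void (*handle_out)(uint16_t port, int size,
                                               uint64_t val))
{
        uint8_t *data = (uint8_t *)run + run->io.data_offset;
        uint32_t i;

        for (i = 0; i < run->io.count; i++, data += run->io.size) {
                if (run->io.direction == KVM_EXIT_IO_IN) {
                        uint64_t val = handle_in(run->io.port, run->io.size);

                        memcpy(data, &val, run->io.size);
                } else {
                        uint64_t val = 0;

                        memcpy(&val, data, run->io.size);
                        handle_out(run->io.port, run->io.size, val);
                }
        }
}
#endif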
4035
4036 static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg)
4037 {
4038         return kvm_x86_ops->get_segment_base(vcpu, seg);
4039 }
4040
4041 static void emulator_invlpg(struct x86_emulate_ctxt *ctxt, ulong address)
4042 {
4043         kvm_mmu_invlpg(emul_to_vcpu(ctxt), address);
4044 }
4045
4046 int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu)
4047 {
4048         if (!need_emulate_wbinvd(vcpu))
4049                 return X86EMUL_CONTINUE;
4050
4051         if (kvm_x86_ops->has_wbinvd_exit()) {
4052                 int cpu = get_cpu();
4053
4054                 cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask);
4055                 smp_call_function_many(vcpu->arch.wbinvd_dirty_mask,
4056                                 wbinvd_ipi, NULL, 1);
4057                 put_cpu();
4058                 cpumask_clear(vcpu->arch.wbinvd_dirty_mask);
4059         } else
4060                 wbinvd();
4061         return X86EMUL_CONTINUE;
4062 }
4063 EXPORT_SYMBOL_GPL(kvm_emulate_wbinvd);
4064
4065 static void emulator_wbinvd(struct x86_emulate_ctxt *ctxt)
4066 {
4067         kvm_emulate_wbinvd(emul_to_vcpu(ctxt));
4068 }
4069
4070 int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long *dest)
4071 {
4072         return _kvm_get_dr(emul_to_vcpu(ctxt), dr, dest);
4073 }
4074
4075 int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value)
4076 {
4077
4078         return __kvm_set_dr(emul_to_vcpu(ctxt), dr, value);
4079 }
4080
4081 static u64 mk_cr_64(u64 curr_cr, u32 new_val)
4082 {
4083         return (curr_cr & ~((1ULL << 32) - 1)) | new_val;
4084 }
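
/*
 * mk_cr_64() keeps the upper half of a 64-bit control register and replaces
 * only the low 32 bits, e.g. mk_cr_64(0x0000000100000050, 0x31) yields
 * 0x0000000100000031.  A trivial standalone check (not part of this file):
 */
#if 0 /* illustrative only */
#include <assert.h>
#include <stdint.h>

static uint64_t mk_cr_64_example(uint64_t curr_cr, uint32_t new_val)
{
        return (curr_cr & ~((1ULL << 32) - 1)) | new_val;
}

int main(void)
{
        assert(mk_cr_64_example(0x0000000100000050ULL, 0x31) ==
               0x0000000100000031ULL);
        return 0;
}
#endif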
4085
4086 static unsigned long emulator_get_cr(struct x86_emulate_ctxt *ctxt, int cr)
4087 {
4088         struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
4089         unsigned long value;
4090
4091         switch (cr) {
4092         case 0:
4093                 value = kvm_read_cr0(vcpu);
4094                 break;
4095         case 2:
4096                 value = vcpu->arch.cr2;
4097                 break;
4098         case 3:
4099                 value = kvm_read_cr3(vcpu);
4100                 break;
4101         case 4:
4102                 value = kvm_read_cr4(vcpu);
4103                 break;
4104         case 8:
4105                 value = kvm_get_cr8(vcpu);
4106                 break;
4107         default:
4108                 kvm_err("%s: unexpected cr %u\n", __func__, cr);
4109                 return 0;
4110         }
4111
4112         return value;
4113 }
4114
4115 static int emulator_set_cr(struct x86_emulate_ctxt *ctxt, int cr, ulong val)
4116 {
4117         struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
4118         int res = 0;
4119
4120         switch (cr) {
4121         case 0:
4122                 res = kvm_set_cr0(vcpu, mk_cr_64(kvm_read_cr0(vcpu), val));
4123                 break;
4124         case 2:
4125                 vcpu->arch.cr2 = val;
4126                 break;
4127         case 3:
4128                 res = kvm_set_cr3(vcpu, val);
4129                 break;
4130         case 4:
4131                 res = kvm_set_cr4(vcpu, mk_cr_64(kvm_read_cr4(vcpu), val));
4132                 break;
4133         case 8:
4134                 res = kvm_set_cr8(vcpu, val);
4135                 break;
4136         default:
4137                 kvm_err("%s: unexpected cr %u\n", __func__, cr);
4138                 res = -1;
4139         }
4140
4141         return res;
4142 }
4143
4144 static void emulator_set_rflags(struct x86_emulate_ctxt *ctxt, ulong val)
4145 {
4146         kvm_set_rflags(emul_to_vcpu(ctxt), val);
4147 }
4148
4149 static int emulator_get_cpl(struct x86_emulate_ctxt *ctxt)
4150 {
4151         return kvm_x86_ops->get_cpl(emul_to_vcpu(ctxt));
4152 }
4153
4154 static void emulator_get_gdt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
4155 {
4156         kvm_x86_ops->get_gdt(emul_to_vcpu(ctxt), dt);
4157 }
4158
4159 static void emulator_get_idt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
4160 {
4161         kvm_x86_ops->get_idt(emul_to_vcpu(ctxt), dt);
4162 }
4163
4164 static void emulator_set_gdt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
4165 {
4166         kvm_x86_ops->set_gdt(emul_to_vcpu(ctxt), dt);
4167 }
4168
4169 static void emulator_set_idt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
4170 {
4171         kvm_x86_ops->set_idt(emul_to_vcpu(ctxt), dt);
4172 }
4173
4174 static unsigned long emulator_get_cached_segment_base(
4175         struct x86_emulate_ctxt *ctxt, int seg)
4176 {
4177         return get_segment_base(emul_to_vcpu(ctxt), seg);
4178 }
4179
4180 static bool emulator_get_segment(struct x86_emulate_ctxt *ctxt, u16 *selector,
4181                                  struct desc_struct *desc, u32 *base3,
4182                                  int seg)
4183 {
4184         struct kvm_segment var;
4185
4186         kvm_get_segment(emul_to_vcpu(ctxt), &var, seg);
4187         *selector = var.selector;
4188
4189         if (var.unusable)
4190                 return false;
4191
4192         if (var.g)
4193                 var.limit >>= 12;
4194         set_desc_limit(desc, var.limit);
4195         set_desc_base(desc, (unsigned long)var.base);
4196 #ifdef CONFIG_X86_64
4197         if (base3)
4198                 *base3 = var.base >> 32;
4199 #endif
4200         desc->type = var.type;
4201         desc->s = var.s;
4202         desc->dpl = var.dpl;
4203         desc->p = var.present;
4204         desc->avl = var.avl;
4205         desc->l = var.l;
4206         desc->d = var.db;
4207         desc->g = var.g;
4208
4209         return true;
4210 }
4211
4212 static void emulator_set_segment(struct x86_emulate_ctxt *ctxt, u16 selector,
4213                                  struct desc_struct *desc, u32 base3,
4214                                  int seg)
4215 {
4216         struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
4217         struct kvm_segment var;
4218
4219         var.selector = selector;
4220         var.base = get_desc_base(desc);
4221 #ifdef CONFIG_X86_64
4222         var.base |= ((u64)base3) << 32;
4223 #endif
4224         var.limit = get_desc_limit(desc);
4225         if (desc->g)
4226                 var.limit = (var.limit << 12) | 0xfff;
4227         var.type = desc->type;
4228         var.present = desc->p;
4229         var.dpl = desc->dpl;
4230         var.db = desc->d;
4231         var.s = desc->s;
4232         var.l = desc->l;
4233         var.g = desc->g;
4234         var.avl = desc->avl;
4235         var.present = desc->p;
4236         var.unusable = !var.present;
4237         var.padding = 0;
4238
4239         kvm_set_segment(vcpu, &var, seg);
4240         return;
4241 }
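
/*
 * A small sketch (not part of this file) of the granularity-bit scaling in
 * the two segment helpers above: with G set the descriptor limit counts
 * 4 KiB units, so the cached byte limit is (limit << 12) | 0xfff and the
 * reverse path shifts right by 12.
 */
#if 0 /* illustrative only */
#include <assert.h>
#include <stdint.h>

static uint32_t desc_to_byte_limit(uint32_t desc_limit)
{
        return (desc_limit << 12) | 0xfff;
}

static uint32_t byte_to_desc_limit(uint32_t byte_limit)
{
        return byte_limit >> 12;
}

int main(void)
{
        /* Flat 4 GiB segment: descriptor limit 0xfffff <-> byte limit 0xffffffff. */
        assert(desc_to_byte_limit(0xfffff) == 0xffffffffu);
        assert(byte_to_desc_limit(0xffffffffu) == 0xfffff);
        return 0;
}
#endif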
4242
4243 static int emulator_get_msr(struct x86_emulate_ctxt *ctxt,
4244                             u32 msr_index, u64 *pdata)
4245 {
4246         return kvm_get_msr(emul_to_vcpu(ctxt), msr_index, pdata);
4247 }
4248
4249 static int emulator_set_msr(struct x86_emulate_ctxt *ctxt,
4250                             u32 msr_index, u64 data)
4251 {
4252         return kvm_set_msr(emul_to_vcpu(ctxt), msr_index, data);
4253 }
4254
4255 static int emulator_read_pmc(struct x86_emulate_ctxt *ctxt,
4256                              u32 pmc, u64 *pdata)
4257 {
4258         return kvm_pmu_read_pmc(emul_to_vcpu(ctxt), pmc, pdata);
4259 }
4260
4261 static void emulator_halt(struct x86_emulate_ctxt *ctxt)
4262 {
4263         emul_to_vcpu(ctxt)->arch.halt_request = 1;
4264 }
4265
4266 static void emulator_get_fpu(struct x86_emulate_ctxt *ctxt)
4267 {
4268         preempt_disable();
4269         kvm_load_guest_fpu(emul_to_vcpu(ctxt));
4270         /*
4271          * CR0.TS may reference the host fpu state, not the guest fpu state,
4272          * so it may be clear at this point.
4273          */
4274         clts();
4275 }
4276
4277 static void emulator_put_fpu(struct x86_emulate_ctxt *ctxt)
4278 {
4279         preempt_enable();
4280 }
4281
4282 static int emulator_intercept(struct x86_emulate_ctxt *ctxt,
4283                               struct x86_instruction_info *info,
4284                               enum x86_intercept_stage stage)
4285 {
4286         return kvm_x86_ops->check_intercept(emul_to_vcpu(ctxt), info, stage);
4287 }
4288
4289 static void emulator_get_cpuid(struct x86_emulate_ctxt *ctxt,
4290                                u32 *eax, u32 *ebx, u32 *ecx, u32 *edx)
4291 {
4292         kvm_cpuid(emul_to_vcpu(ctxt), eax, ebx, ecx, edx);
4293 }
4294
4295 static ulong emulator_read_gpr(struct x86_emulate_ctxt *ctxt, unsigned reg)
4296 {
4297         return kvm_register_read(emul_to_vcpu(ctxt), reg);
4298 }
4299
4300 static void emulator_write_gpr(struct x86_emulate_ctxt *ctxt, unsigned reg, ulong val)
4301 {
4302         kvm_register_write(emul_to_vcpu(ctxt), reg, val);
4303 }
4304
4305 static const struct x86_emulate_ops emulate_ops = {
4306         .read_gpr            = emulator_read_gpr,
4307         .write_gpr           = emulator_write_gpr,
4308         .read_std            = kvm_read_guest_virt_system,
4309         .write_std           = kvm_write_guest_virt_system,
4310         .fetch               = kvm_fetch_guest_virt,
4311         .read_emulated       = emulator_read_emulated,
4312         .write_emulated      = emulator_write_emulated,
4313         .cmpxchg_emulated    = emulator_cmpxchg_emulated,
4314         .invlpg              = emulator_invlpg,
4315         .pio_in_emulated     = emulator_pio_in_emulated,
4316         .pio_out_emulated    = emulator_pio_out_emulated,
4317         .get_segment         = emulator_get_segment,
4318         .set_segment         = emulator_set_segment,
4319         .get_cached_segment_base = emulator_get_cached_segment_base,
4320         .get_gdt             = emulator_get_gdt,
4321         .get_idt             = emulator_get_idt,
4322         .set_gdt             = emulator_set_gdt,
4323         .set_idt             = emulator_set_idt,
4324         .get_cr              = emulator_get_cr,
4325         .set_cr              = emulator_set_cr,
4326         .set_rflags          = emulator_set_rflags,
4327         .cpl                 = emulator_get_cpl,
4328         .get_dr              = emulator_get_dr,
4329         .set_dr              = emulator_set_dr,
4330         .set_msr             = emulator_set_msr,
4331         .get_msr             = emulator_get_msr,
4332         .read_pmc            = emulator_read_pmc,
4333         .halt                = emulator_halt,
4334         .wbinvd              = emulator_wbinvd,
4335         .fix_hypercall       = emulator_fix_hypercall,
4336         .get_fpu             = emulator_get_fpu,
4337         .put_fpu             = emulator_put_fpu,
4338         .intercept           = emulator_intercept,
4339         .get_cpuid           = emulator_get_cpuid,
4340 };
4341
4342 static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask)
4343 {
4344         u32 int_shadow = kvm_x86_ops->get_interrupt_shadow(vcpu, mask);
4345         /*
4346          * An sti; sti sequence only disables interrupts for the first
4347          * instruction. So, if the last instruction, be it emulated or
4348          * not, left the system with the INT_STI flag enabled, it
4349          * means that the last instruction was an sti. We should not
4350          * leave the flag on in this case. The same goes for mov ss.
4351          */
4352         if (!(int_shadow & mask))
4353                 kvm_x86_ops->set_interrupt_shadow(vcpu, mask);
4354 }
4355
4356 static void inject_emulated_exception(struct kvm_vcpu *vcpu)
4357 {
4358         struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
4359         if (ctxt->exception.vector == PF_VECTOR)
4360                 kvm_propagate_fault(vcpu, &ctxt->exception);
4361         else if (ctxt->exception.error_code_valid)
4362                 kvm_queue_exception_e(vcpu, ctxt->exception.vector,
4363                                       ctxt->exception.error_code);
4364         else
4365                 kvm_queue_exception(vcpu, ctxt->exception.vector);
4366 }
4367
4368 static void init_decode_cache(struct x86_emulate_ctxt *ctxt)
4369 {
4370         memset(&ctxt->twobyte, 0,
4371                (void *)&ctxt->_regs - (void *)&ctxt->twobyte);
4372
4373         ctxt->fetch.start = 0;
4374         ctxt->fetch.end = 0;
4375         ctxt->io_read.pos = 0;
4376         ctxt->io_read.end = 0;
4377         ctxt->mem_read.pos = 0;
4378         ctxt->mem_read.end = 0;
4379 }
4380
4381 static void init_emulate_ctxt(struct kvm_vcpu *vcpu)
4382 {
4383         struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
4384         int cs_db, cs_l;
4385
4386         kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
4387
4388         ctxt->eflags = kvm_get_rflags(vcpu);
4389         ctxt->eip = kvm_rip_read(vcpu);
4390         ctxt->mode = (!is_protmode(vcpu))               ? X86EMUL_MODE_REAL :
4391                      (ctxt->eflags & X86_EFLAGS_VM)     ? X86EMUL_MODE_VM86 :
4392                      cs_l                               ? X86EMUL_MODE_PROT64 :
4393                      cs_db                              ? X86EMUL_MODE_PROT32 :
4394                                                           X86EMUL_MODE_PROT16;
4395         ctxt->guest_mode = is_guest_mode(vcpu);
4396
4397         init_decode_cache(ctxt);
4398         vcpu->arch.emulate_regs_need_sync_from_vcpu = false;
4399 }
4400
4401 int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip)
4402 {
4403         struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
4404         int ret;
4405
4406         init_emulate_ctxt(vcpu);
4407
4408         ctxt->op_bytes = 2;
4409         ctxt->ad_bytes = 2;
4410         ctxt->_eip = ctxt->eip + inc_eip;
4411         ret = emulate_int_real(ctxt, irq);
4412
4413         if (ret != X86EMUL_CONTINUE)
4414                 return EMULATE_FAIL;
4415
4416         ctxt->eip = ctxt->_eip;
4417         kvm_rip_write(vcpu, ctxt->eip);
4418         kvm_set_rflags(vcpu, ctxt->eflags);
4419
4420         if (irq == NMI_VECTOR)
4421                 vcpu->arch.nmi_pending = 0;
4422         else
4423                 vcpu->arch.interrupt.pending = false;
4424
4425         return EMULATE_DONE;
4426 }
4427 EXPORT_SYMBOL_GPL(kvm_inject_realmode_interrupt);
4428
4429 static int handle_emulation_failure(struct kvm_vcpu *vcpu)
4430 {
4431         int r = EMULATE_DONE;
4432
4433         ++vcpu->stat.insn_emulation_fail;
4434         trace_kvm_emulate_insn_failed(vcpu);
4435         if (!is_guest_mode(vcpu)) {
4436                 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
4437                 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
4438                 vcpu->run->internal.ndata = 0;
4439                 r = EMULATE_FAIL;
4440         }
4441         kvm_queue_exception(vcpu, UD_VECTOR);
4442
4443         return r;
4444 }
4445
4446 static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t gva)
4447 {
4448         gpa_t gpa;
4449         pfn_t pfn;
4450
4451         if (tdp_enabled)
4452                 return false;
4453
4454         /*
4455          * If emulation was due to access to a shadowed page table
4456          * and it failed, try to unshadow the page and re-enter the
4457          * guest to let the CPU execute the instruction.
4458          */
4459         if (kvm_mmu_unprotect_page_virt(vcpu, gva))
4460                 return true;
4461
4462         gpa = kvm_mmu_gva_to_gpa_system(vcpu, gva, NULL);
4463
4464         if (gpa == UNMAPPED_GVA)
4465                 return true; /* let the CPU generate the fault */
4466
4467         /*
4468          * Do not retry the unhandleable instruction if it faults on the
4469          * read-only host memory, otherwise it will go into an infinite loop:
4470          * retry instruction -> write #PF -> emulation fail -> retry
4471          * instruction -> ...
4472          */
4473         pfn = gfn_to_pfn(vcpu->kvm, gpa_to_gfn(gpa));
4474         if (!is_error_noslot_pfn(pfn)) {
4475                 kvm_release_pfn_clean(pfn);
4476                 return true;
4477         }
4478
4479         return false;
4480 }
4481
4482 static bool retry_instruction(struct x86_emulate_ctxt *ctxt,
4483                               unsigned long cr2,  int emulation_type)
4484 {
4485         struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
4486         unsigned long last_retry_eip, last_retry_addr, gpa = cr2;
4487
4488         last_retry_eip = vcpu->arch.last_retry_eip;
4489         last_retry_addr = vcpu->arch.last_retry_addr;
4490
4491         /*
4492          * If the emulation was caused by a #PF and the faulting instruction
4493          * does not write a page table, the VM exit was caused by shadow-page
4494          * protection; we can zap the shadow page and retry the instruction
4495          * directly.
4496          *
4497          * Note: if the guest uses a non-page-table-modifying instruction
4498          * on the PDE that maps the instruction itself, then we will unmap
4499          * the instruction and go into an infinite loop. So, we cache the
4500          * last retried eip and the last fault address; if we meet the same
4501          * eip and address again, we can break out of the potential infinite
4502          * loop.
4503          */
4504         vcpu->arch.last_retry_eip = vcpu->arch.last_retry_addr = 0;
4505
4506         if (!(emulation_type & EMULTYPE_RETRY))
4507                 return false;
4508
4509         if (x86_page_table_writing_insn(ctxt))
4510                 return false;
4511
4512         if (ctxt->eip == last_retry_eip && last_retry_addr == cr2)
4513                 return false;
4514
4515         vcpu->arch.last_retry_eip = ctxt->eip;
4516         vcpu->arch.last_retry_addr = cr2;
4517
4518         if (!vcpu->arch.mmu.direct_map)
4519                 gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2, NULL);
4520
4521         kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
4522
4523         return true;
4524 }
4525
4526 static int complete_emulated_mmio(struct kvm_vcpu *vcpu);
4527 static int complete_emulated_pio(struct kvm_vcpu *vcpu);
4528
4529 int x86_emulate_instruction(struct kvm_vcpu *vcpu,
4530                             unsigned long cr2,
4531                             int emulation_type,
4532                             void *insn,
4533                             int insn_len)
4534 {
4535         int r;
4536         struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
4537         bool writeback = true;
4538
4539         kvm_clear_exception_queue(vcpu);
4540
4541         if (!(emulation_type & EMULTYPE_NO_DECODE)) {
4542                 init_emulate_ctxt(vcpu);
4543                 ctxt->interruptibility = 0;
4544                 ctxt->have_exception = false;
4545                 ctxt->perm_ok = false;
4546
4547                 ctxt->only_vendor_specific_insn
4548                         = emulation_type & EMULTYPE_TRAP_UD;
4549
4550                 r = x86_decode_insn(ctxt, insn, insn_len);
4551
4552                 trace_kvm_emulate_insn_start(vcpu);
4553                 ++vcpu->stat.insn_emulation;
4554                 if (r != EMULATION_OK)  {
4555                         if (emulation_type & EMULTYPE_TRAP_UD)
4556                                 return EMULATE_FAIL;
4557                         if (reexecute_instruction(vcpu, cr2))
4558                                 return EMULATE_DONE;
4559                         if (emulation_type & EMULTYPE_SKIP)
4560                                 return EMULATE_FAIL;
4561                         return handle_emulation_failure(vcpu);
4562                 }
4563         }
4564
4565         if (emulation_type & EMULTYPE_SKIP) {
4566                 kvm_rip_write(vcpu, ctxt->_eip);
4567                 return EMULATE_DONE;
4568         }
4569
4570         if (retry_instruction(ctxt, cr2, emulation_type))
4571                 return EMULATE_DONE;
4572
4573         /* This is needed for the vmware backdoor interface to work, since
4574            it changes register values during the I/O operation. */
4575         if (vcpu->arch.emulate_regs_need_sync_from_vcpu) {
4576                 vcpu->arch.emulate_regs_need_sync_from_vcpu = false;
4577                 emulator_invalidate_register_cache(ctxt);
4578         }
4579
4580 restart:
4581         r = x86_emulate_insn(ctxt);
4582
4583         if (r == EMULATION_INTERCEPTED)
4584                 return EMULATE_DONE;
4585
4586         if (r == EMULATION_FAILED) {
4587                 if (reexecute_instruction(vcpu, cr2))
4588                         return EMULATE_DONE;
4589
4590                 return handle_emulation_failure(vcpu);
4591         }
4592
4593         if (ctxt->have_exception) {
4594                 inject_emulated_exception(vcpu);
4595                 r = EMULATE_DONE;
4596         } else if (vcpu->arch.pio.count) {
4597                 if (!vcpu->arch.pio.in)
4598                         vcpu->arch.pio.count = 0;
4599                 else {
4600                         writeback = false;
4601                         vcpu->arch.complete_userspace_io = complete_emulated_pio;
4602                 }
4603                 r = EMULATE_DO_MMIO;
4604         } else if (vcpu->mmio_needed) {
4605                 if (!vcpu->mmio_is_write)
4606                         writeback = false;
4607                 r = EMULATE_DO_MMIO;
4608                 vcpu->arch.complete_userspace_io = complete_emulated_mmio;
4609         } else if (r == EMULATION_RESTART)
4610                 goto restart;
4611         else
4612                 r = EMULATE_DONE;
4613
4614         if (writeback) {
4615                 toggle_interruptibility(vcpu, ctxt->interruptibility);
4616                 kvm_set_rflags(vcpu, ctxt->eflags);
4617                 kvm_make_request(KVM_REQ_EVENT, vcpu);
4618                 vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
4619                 kvm_rip_write(vcpu, ctxt->eip);
4620         } else
4621                 vcpu->arch.emulate_regs_need_sync_to_vcpu = true;
4622
4623         return r;
4624 }
4625 EXPORT_SYMBOL_GPL(x86_emulate_instruction);
4626
4627 int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, unsigned short port)
4628 {
4629         unsigned long val = kvm_register_read(vcpu, VCPU_REGS_RAX);
4630         int ret = emulator_pio_out_emulated(&vcpu->arch.emulate_ctxt,
4631                                             size, port, &val, 1);
4632         /* Do not return to the emulator after returning from userspace. */
4633         vcpu->arch.pio.count = 0;
4634         return ret;
4635 }
4636 EXPORT_SYMBOL_GPL(kvm_fast_pio_out);
4637
4638 static void tsc_bad(void *info)
4639 {
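             /* Mark this CPU's TSC frequency as unknown (called while the CPU is being taken down). */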
4640         __this_cpu_write(cpu_tsc_khz, 0);
4641 }
4642
4643 static void tsc_khz_changed(void *data)
4644 {
4645         struct cpufreq_freqs *freq = data;
4646         unsigned long khz = 0;
4647
4648         if (data)
4649                 khz = freq->new;
4650         else if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
4651                 khz = cpufreq_quick_get(raw_smp_processor_id());
4652         if (!khz)
4653                 khz = tsc_khz;
4654         __this_cpu_write(cpu_tsc_khz, khz);
4655 }
4656
4657 static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
4658                                      void *data)
4659 {
4660         struct cpufreq_freqs *freq = data;
4661         struct kvm *kvm;
4662         struct kvm_vcpu *vcpu;
4663         int i, send_ipi = 0;
4664
4665         /*
4666          * We allow guests to temporarily run on slowing clocks,
4667          * provided we notify them after, or to run on accelerating
4668          * clocks, provided we notify them before.  Thus time never
4669          * goes backwards.
4670          *
4671          * However, we have a problem.  We can't atomically update
4672          * the frequency of a given CPU from this function; it is
4673          * merely a notifier, which can be called from any CPU.
4674          * Changing the TSC frequency at arbitrary points in time
4675          * requires a recomputation of local variables related to
4676          * the TSC for each VCPU.  We must flag these local variables
4677          * to be updated and be sure the update takes place with the
4678          * new frequency before any guests proceed.
4679          *
4680          * Unfortunately, the combination of hotplug CPU and frequency
4681          * change creates an intractable locking scenario; the order
4682          * of when these callouts happen is undefined with respect to
4683          * CPU hotplug, and they can race with each other.  As such,
4684          * merely setting per_cpu(cpu_tsc_khz) = X during a hotadd is
4685          * undefined; you can actually have a CPU frequency change take
4686          * place in between the computation of X and the setting of the
4687          * variable.  To protect against this problem, all updates of
4688          * the per_cpu tsc_khz variable are done in an interrupt
4689          * protected IPI, and all callers wishing to update the value
4690          * must wait for a synchronous IPI to complete (which is trivial
4691          * if the caller is on the CPU already).  This establishes the
4692          * necessary total order on variable updates.
4693          *
4694          * Note that because a guest time update may take place
4695          * anytime after the setting of the VCPU's request bit, the
4696          * correct TSC value must be set before the request.  However,
4697          * to ensure the update actually makes it to any guest which
4698          * starts running in hardware virtualization between the set
4699          * and the acquisition of the spinlock, we must also ping the
4700          * CPU after setting the request bit.
4701          *
4702          */
4703
4704         if (val == CPUFREQ_PRECHANGE && freq->old > freq->new)
4705                 return 0;
4706         if (val == CPUFREQ_POSTCHANGE && freq->old < freq->new)
4707                 return 0;
4708
4709         smp_call_function_single(freq->cpu, tsc_khz_changed, freq, 1);
4710
4711         raw_spin_lock(&kvm_lock);
4712         list_for_each_entry(kvm, &vm_list, vm_list) {
4713                 kvm_for_each_vcpu(i, vcpu, kvm) {
4714                         if (vcpu->cpu != freq->cpu)
4715                                 continue;
4716                         kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
4717                         if (vcpu->cpu != smp_processor_id())
4718                                 send_ipi = 1;
4719                 }
4720         }
4721         raw_spin_unlock(&kvm_lock);
4722
4723         if (freq->old < freq->new && send_ipi) {
4724                 /*
4725                  * We are raising the frequency.  We must make sure the
4726                  * guest doesn't see old kvmclock values while running at
4727                  * the new frequency; otherwise we risk the guest seeing
4728                  * time go backwards.
4729                  *
4730                  * In case we update the frequency for another cpu
4731                  * (which might be in guest context), send an interrupt
4732                  * to kick that cpu out of guest context.  The next time
4733                  * guest context is entered, kvmclock will be updated,
4734                  * so the guest will not see stale values.
4735                  */
4736                 smp_call_function_single(freq->cpu, tsc_khz_changed, freq, 1);
4737         }
4738         return 0;
4739 }
4740
4741 static struct notifier_block kvmclock_cpufreq_notifier_block = {
4742         .notifier_call  = kvmclock_cpufreq_notifier
4743 };
4744
4745 static int kvmclock_cpu_notifier(struct notifier_block *nfb,
4746                                         unsigned long action, void *hcpu)
4747 {
4748         unsigned int cpu = (unsigned long)hcpu;
4749
4750         switch (action) {
4751         case CPU_ONLINE:
4752         case CPU_DOWN_FAILED:
4753                 smp_call_function_single(cpu, tsc_khz_changed, NULL, 1);
4754                 break;
4755         case CPU_DOWN_PREPARE:
4756                 smp_call_function_single(cpu, tsc_bad, NULL, 1);
4757                 break;
4758         }
4759         return NOTIFY_OK;
4760 }
4761
4762 static struct notifier_block kvmclock_cpu_notifier_block = {
4763         .notifier_call  = kvmclock_cpu_notifier,
4764         .priority = -INT_MAX
4765 };
4766
4767 static void kvm_timer_init(void)
4768 {
4769         int cpu;
4770
4771         max_tsc_khz = tsc_khz;
4772         register_hotcpu_notifier(&kvmclock_cpu_notifier_block);
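             /*
              * Without a constant TSC the TSC rate follows the CPU frequency,
              * so base max_tsc_khz on the maximum cpufreq rate and register
              * for frequency-change notifications.
              */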
4773         if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
4774 #ifdef CONFIG_CPU_FREQ
4775                 struct cpufreq_policy policy;
4776                 memset(&policy, 0, sizeof(policy));
4777                 cpu = get_cpu();
4778                 cpufreq_get_policy(&policy, cpu);
4779                 if (policy.cpuinfo.max_freq)
4780                         max_tsc_khz = policy.cpuinfo.max_freq;
4781                 put_cpu();
4782 #endif
4783                 cpufreq_register_notifier(&kvmclock_cpufreq_notifier_block,
4784                                           CPUFREQ_TRANSITION_NOTIFIER);
4785         }
4786         pr_debug("kvm: max_tsc_khz = %ld\n", max_tsc_khz);
4787         for_each_online_cpu(cpu)
4788                 smp_call_function_single(cpu, tsc_khz_changed, NULL, 1);
4789 }
4790
4791 static DEFINE_PER_CPU(struct kvm_vcpu *, current_vcpu);
4792
4793 int kvm_is_in_guest(void)
4794 {
4795         return __this_cpu_read(current_vcpu) != NULL;
4796 }
4797
4798 static int kvm_is_user_mode(void)
4799 {
4800         int user_mode = 3;
4801
4802         if (__this_cpu_read(current_vcpu))
4803                 user_mode = kvm_x86_ops->get_cpl(__this_cpu_read(current_vcpu));
4804
4805         return user_mode != 0;
4806 }
4807
4808 static unsigned long kvm_get_guest_ip(void)
4809 {
4810         unsigned long ip = 0;
4811
4812         if (__this_cpu_read(current_vcpu))
4813                 ip = kvm_rip_read(__this_cpu_read(current_vcpu));
4814
4815         return ip;
4816 }
4817
4818 static struct perf_guest_info_callbacks kvm_guest_cbs = {
4819         .is_in_guest            = kvm_is_in_guest,
4820         .is_user_mode           = kvm_is_user_mode,
4821         .get_guest_ip           = kvm_get_guest_ip,
4822 };
4823
4824 void kvm_before_handle_nmi(struct kvm_vcpu *vcpu)
4825 {
4826         __this_cpu_write(current_vcpu, vcpu);
4827 }
4828 EXPORT_SYMBOL_GPL(kvm_before_handle_nmi);
4829
4830 void kvm_after_handle_nmi(struct kvm_vcpu *vcpu)
4831 {
4832         __this_cpu_write(current_vcpu, NULL);
4833 }
4834 EXPORT_SYMBOL_GPL(kvm_after_handle_nmi);
4835
4836 static void kvm_set_mmio_spte_mask(void)
4837 {
4838         u64 mask;
4839         int maxphyaddr = boot_cpu_data.x86_phys_bits;
4840
4841         /*
4842          * Set the reserved bits and the present bit of a paging-structure
4843          * entry to generate a page fault with PFER.RSV = 1.
4844          */
4845         mask = ((1ull << (62 - maxphyaddr + 1)) - 1) << maxphyaddr;
4846         mask |= 1ull;
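             /*
              * The mask now covers the physical-address bits [maxphyaddr, 62]
              * plus the present bit (bit 0).
              */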
4847
4848 #ifdef CONFIG_X86_64
4849         /*
4850          * If the reserved bit is not supported, clear the present bit to
4851          * disable MMIO page faults.
4852          */
4853         if (maxphyaddr == 52)
4854                 mask &= ~1ull;
4855 #endif
4856
4857         kvm_mmu_set_mmio_spte_mask(mask);
4858 }
4859
4860 int kvm_arch_init(void *opaque)
4861 {
4862         int r;
4863         struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
4864
4865         if (kvm_x86_ops) {
4866                 printk(KERN_ERR "kvm: already loaded the other module\n");
4867                 r = -EEXIST;
4868                 goto out;
4869         }
4870
4871         if (!ops->cpu_has_kvm_support()) {
4872                 printk(KERN_ERR "kvm: no hardware support\n");
4873                 r = -EOPNOTSUPP;
4874                 goto out;
4875         }
4876         if (ops->disabled_by_bios()) {
4877                 printk(KERN_ERR "kvm: disabled by bios\n");
4878                 r = -EOPNOTSUPP;
4879                 goto out;
4880         }
4881
4882         r = kvm_mmu_module_init();
4883         if (r)
4884                 goto out;
4885
4886         kvm_set_mmio_spte_mask();
4887         kvm_init_msr_list();
4888
4889         kvm_x86_ops = ops;
4890         kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK,
4891                         PT_DIRTY_MASK, PT64_NX_MASK, 0);
4892
4893         kvm_timer_init();
4894
4895         perf_register_guest_info_callbacks(&kvm_guest_cbs);
4896
4897         if (cpu_has_xsave)
4898                 host_xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
4899
4900         kvm_lapic_init();
4901         return 0;
4902
4903 out:
4904         return r;
4905 }
4906
4907 void kvm_arch_exit(void)
4908 {
4909         perf_unregister_guest_info_callbacks(&kvm_guest_cbs);
4910
4911         if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
4912                 cpufreq_unregister_notifier(&kvmclock_cpufreq_notifier_block,
4913                                             CPUFREQ_TRANSITION_NOTIFIER);
4914         unregister_hotcpu_notifier(&kvmclock_cpu_notifier_block);
4915         kvm_x86_ops = NULL;
4916         kvm_mmu_module_exit();
4917 }
4918
4919 int kvm_emulate_halt(struct kvm_vcpu *vcpu)
4920 {
4921         ++vcpu->stat.halt_exits;
4922         if (irqchip_in_kernel(vcpu->kvm)) {
4923                 vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
4924                 return 1;
4925         } else {
4926                 vcpu->run->exit_reason = KVM_EXIT_HLT;
4927                 return 0;
4928         }
4929 }
4930 EXPORT_SYMBOL_GPL(kvm_emulate_halt);
4931
4932 int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
4933 {
4934         u64 param, ingpa, outgpa, ret;
4935         uint16_t code, rep_idx, rep_cnt, res = HV_STATUS_SUCCESS, rep_done = 0;
4936         bool fast, longmode;
4937         int cs_db, cs_l;
4938
4939         /*
4940          * A hypercall generates #UD when issued from non-zero CPL or from
4941          * real mode, per the Hyper-V spec.
4942          */
4943         if (kvm_x86_ops->get_cpl(vcpu) != 0 || !is_protmode(vcpu)) {
4944                 kvm_queue_exception(vcpu, UD_VECTOR);
4945                 return 0;
4946         }
4947
4948         kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
4949         longmode = is_long_mode(vcpu) && cs_l == 1;
4950
4951         if (!longmode) {
4952                 param = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDX) << 32) |
4953                         (kvm_register_read(vcpu, VCPU_REGS_RAX) & 0xffffffff);
4954                 ingpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RBX) << 32) |
4955                         (kvm_register_read(vcpu, VCPU_REGS_RCX) & 0xffffffff);
4956                 outgpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDI) << 32) |
4957                         (kvm_register_read(vcpu, VCPU_REGS_RSI) & 0xffffffff);
4958         }
4959 #ifdef CONFIG_X86_64
4960         else {
4961                 param = kvm_register_read(vcpu, VCPU_REGS_RCX);
4962                 ingpa = kvm_register_read(vcpu, VCPU_REGS_RDX);
4963                 outgpa = kvm_register_read(vcpu, VCPU_REGS_R8);
4964         }
4965 #endif
4966
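             /*
              * Hypercall input value layout, as decoded below: call code in
              * bits 15:0, fast-call flag in bit 16, rep count in bits 43:32,
              * rep start index in bits 59:48.
              */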
4967         code = param & 0xffff;
4968         fast = (param >> 16) & 0x1;
4969         rep_cnt = (param >> 32) & 0xfff;
4970         rep_idx = (param >> 48) & 0xfff;
4971
4972         trace_kvm_hv_hypercall(code, fast, rep_cnt, rep_idx, ingpa, outgpa);
4973
4974         switch (code) {
4975         case HV_X64_HV_NOTIFY_LONG_SPIN_WAIT:
4976                 kvm_vcpu_on_spin(vcpu);
4977                 break;
4978         default:
4979                 res = HV_STATUS_INVALID_HYPERCALL_CODE;
4980                 break;
4981         }
4982
4983         ret = res | (((u64)rep_done & 0xfff) << 32);
4984         if (longmode) {
4985                 kvm_register_write(vcpu, VCPU_REGS_RAX, ret);
4986         } else {
4987                 kvm_register_write(vcpu, VCPU_REGS_RDX, ret >> 32);
4988                 kvm_register_write(vcpu, VCPU_REGS_RAX, ret & 0xffffffff);
4989         }
4990
4991         return 1;
4992 }
4993
4994 int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
4995 {
4996         unsigned long nr, a0, a1, a2, a3, ret;
4997         int r = 1;
4998
4999         if (kvm_hv_hypercall_enabled(vcpu->kvm))
5000                 return kvm_hv_hypercall(vcpu);
5001
5002         nr = kvm_register_read(vcpu, VCPU_REGS_RAX);
5003         a0 = kvm_register_read(vcpu, VCPU_REGS_RBX);
5004         a1 = kvm_register_read(vcpu, VCPU_REGS_RCX);
5005         a2 = kvm_register_read(vcpu, VCPU_REGS_RDX);
5006         a3 = kvm_register_read(vcpu, VCPU_REGS_RSI);
5007
5008         trace_kvm_hypercall(nr, a0, a1, a2, a3);
5009
5010         if (!is_long_mode(vcpu)) {
5011                 nr &= 0xFFFFFFFF;
5012                 a0 &= 0xFFFFFFFF;
5013                 a1 &= 0xFFFFFFFF;
5014                 a2 &= 0xFFFFFFFF;
5015                 a3 &= 0xFFFFFFFF;
5016         }
5017
5018         if (kvm_x86_ops->get_cpl(vcpu) != 0) {
5019                 ret = -KVM_EPERM;
5020                 goto out;
5021         }
5022
5023         switch (nr) {
5024         case KVM_HC_VAPIC_POLL_IRQ:
5025                 ret = 0;
5026                 break;
5027         default:
5028                 ret = -KVM_ENOSYS;
5029                 break;
5030         }
5031 out:
5032         kvm_register_write(vcpu, VCPU_REGS_RAX, ret);
5033         ++vcpu->stat.hypercalls;
5034         return r;
5035 }
5036 EXPORT_SYMBOL_GPL(kvm_emulate_hypercall);
5037
5038 static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt)
5039 {
5040         struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
5041         char instruction[3];
5042         unsigned long rip = kvm_rip_read(vcpu);
5043
5044         /*
5045          * Blow out the MMU so that no other VCPU has an active mapping;
5046          * this ensures that the updated hypercall appears atomically
5047          * across all VCPUs.
5048          */
5049         kvm_mmu_zap_all(vcpu->kvm);
5050
5051         kvm_x86_ops->patch_hypercall(vcpu, instruction);
5052
5053         return emulator_write_emulated(ctxt, rip, instruction, 3, NULL);
5054 }
5055
5056 /*
5057  * Check if userspace requested an interrupt window, and that the
5058  * interrupt window is open.
5059  *
5060  * No need to exit to userspace if we already have an interrupt queued.
5061  */
5062 static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu)
5063 {
5064         return (!irqchip_in_kernel(vcpu->kvm) && !kvm_cpu_has_interrupt(vcpu) &&
5065                 vcpu->run->request_interrupt_window &&
5066                 kvm_arch_interrupt_allowed(vcpu));
5067 }
5068
5069 static void post_kvm_run_save(struct kvm_vcpu *vcpu)
5070 {
5071         struct kvm_run *kvm_run = vcpu->run;
5072
5073         kvm_run->if_flag = (kvm_get_rflags(vcpu) & X86_EFLAGS_IF) != 0;
5074         kvm_run->cr8 = kvm_get_cr8(vcpu);
5075         kvm_run->apic_base = kvm_get_apic_base(vcpu);
5076         if (irqchip_in_kernel(vcpu->kvm))
5077                 kvm_run->ready_for_interrupt_injection = 1;
5078         else
5079                 kvm_run->ready_for_interrupt_injection =
5080                         kvm_arch_interrupt_allowed(vcpu) &&
5081                         !kvm_cpu_has_interrupt(vcpu) &&
5082                         !kvm_event_needs_reinjection(vcpu);
5083 }
5084
5085 static int vapic_enter(struct kvm_vcpu *vcpu)
5086 {
5087         struct kvm_lapic *apic = vcpu->arch.apic;
5088         struct page *page;
5089
5090         if (!apic || !apic->vapic_addr)
5091                 return 0;
5092
5093         page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
5094         if (is_error_page(page))
5095                 return -EFAULT;
5096
5097         vcpu->arch.apic->vapic_page = page;
5098         return 0;
5099 }
5100
5101 static void vapic_exit(struct kvm_vcpu *vcpu)
5102 {
5103         struct kvm_lapic *apic = vcpu->arch.apic;
5104         int idx;
5105
5106         if (!apic || !apic->vapic_addr)
5107                 return;
5108
5109         idx = srcu_read_lock(&vcpu->kvm->srcu);
5110         kvm_release_page_dirty(apic->vapic_page);
5111         mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
5112         srcu_read_unlock(&vcpu->kvm->srcu, idx);
5113 }
5114
5115 static void update_cr8_intercept(struct kvm_vcpu *vcpu)
5116 {
5117         int max_irr, tpr;
5118
5119         if (!kvm_x86_ops->update_cr8_intercept)
5120                 return;
5121
5122         if (!vcpu->arch.apic)
5123                 return;
5124
5125         if (!vcpu->arch.apic->vapic_addr)
5126                 max_irr = kvm_lapic_find_highest_irr(vcpu);
5127         else
5128                 max_irr = -1;
5129
5130         if (max_irr != -1)
5131                 max_irr >>= 4;
5132
5133         tpr = kvm_lapic_get_cr8(vcpu);
5134
5135         kvm_x86_ops->update_cr8_intercept(vcpu, tpr, max_irr);
5136 }
5137
5138 static void inject_pending_event(struct kvm_vcpu *vcpu)
5139 {
5140         /* try to reinject previous events if any */
5141         if (vcpu->arch.exception.pending) {
5142                 trace_kvm_inj_exception(vcpu->arch.exception.nr,
5143                                         vcpu->arch.exception.has_error_code,
5144                                         vcpu->arch.exception.error_code);
5145                 kvm_x86_ops->queue_exception(vcpu, vcpu->arch.exception.nr,
5146                                           vcpu->arch.exception.has_error_code,
5147                                           vcpu->arch.exception.error_code,
5148                                           vcpu->arch.exception.reinject);
5149                 return;
5150         }
5151
5152         if (vcpu->arch.nmi_injected) {
5153                 kvm_x86_ops->set_nmi(vcpu);
5154                 return;
5155         }
5156
5157         if (vcpu->arch.interrupt.pending) {
5158                 kvm_x86_ops->set_irq(vcpu);
5159                 return;
5160         }
5161
5162         /* try to inject new event if pending */
5163         if (vcpu->arch.nmi_pending) {
5164                 if (kvm_x86_ops->nmi_allowed(vcpu)) {
5165                         --vcpu->arch.nmi_pending;
5166                         vcpu->arch.nmi_injected = true;
5167                         kvm_x86_ops->set_nmi(vcpu);
5168                 }
5169         } else if (kvm_cpu_has_interrupt(vcpu)) {
5170                 if (kvm_x86_ops->interrupt_allowed(vcpu)) {
5171                         kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu),
5172                                             false);
5173                         kvm_x86_ops->set_irq(vcpu);
5174                 }
5175         }
5176 }
5177
5178 static void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu)
5179 {
5180         if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE) &&
5181                         !vcpu->guest_xcr0_loaded) {
5182                 /* kvm_set_xcr() also depends on this */
5183                 xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0);
5184                 vcpu->guest_xcr0_loaded = 1;
5185         }
5186 }
5187
5188 static void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu)
5189 {
5190         if (vcpu->guest_xcr0_loaded) {
5191                 if (vcpu->arch.xcr0 != host_xcr0)
5192                         xsetbv(XCR_XFEATURE_ENABLED_MASK, host_xcr0);
5193                 vcpu->guest_xcr0_loaded = 0;
5194         }
5195 }
5196
5197 static void process_nmi(struct kvm_vcpu *vcpu)
5198 {
5199         unsigned limit = 2;
5200
5201         /*
5202          * x86 is limited to one NMI running, and one NMI pending after it.
5203          * If an NMI is already in progress, limit further NMIs to just one.
5204          * Otherwise, allow two (and we'll inject the first one immediately).
5205          */
5206         if (kvm_x86_ops->get_nmi_mask(vcpu) || vcpu->arch.nmi_injected)
5207                 limit = 1;
5208
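             /*
              * Fold NMIs queued from other contexts into nmi_pending, capped
              * at the limit computed above.
              */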
5209         vcpu->arch.nmi_pending += atomic_xchg(&vcpu->arch.nmi_queued, 0);
5210         vcpu->arch.nmi_pending = min(vcpu->arch.nmi_pending, limit);
5211         kvm_make_request(KVM_REQ_EVENT, vcpu);
5212 }
5213
5214 static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
5215 {
5216         int r;
5217         bool req_int_win = !irqchip_in_kernel(vcpu->kvm) &&
5218                 vcpu->run->request_interrupt_window;
5219         bool req_immediate_exit = false;
5220
5221         if (vcpu->requests) {
5222                 if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu))
5223                         kvm_mmu_unload(vcpu);
5224                 if (kvm_check_request(KVM_REQ_MIGRATE_TIMER, vcpu))
5225                         __kvm_migrate_timers(vcpu);
5226                 if (kvm_check_request(KVM_REQ_CLOCK_UPDATE, vcpu)) {
5227                         r = kvm_guest_time_update(vcpu);
5228                         if (unlikely(r))
5229                                 goto out;
5230                 }
5231                 if (kvm_check_request(KVM_REQ_MMU_SYNC, vcpu))
5232                         kvm_mmu_sync_roots(vcpu);
5233                 if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
5234                         kvm_x86_ops->tlb_flush(vcpu);
5235                 if (kvm_check_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu)) {
5236                         vcpu->run->exit_reason = KVM_EXIT_TPR_ACCESS;
5237                         r = 0;
5238                         goto out;
5239                 }
5240                 if (kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu)) {
5241                         vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN;
5242                         r = 0;
5243                         goto out;
5244                 }
5245                 if (kvm_check_request(KVM_REQ_DEACTIVATE_FPU, vcpu)) {
5246                         vcpu->fpu_active = 0;
5247                         kvm_x86_ops->fpu_deactivate(vcpu);
5248                 }
5249                 if (kvm_check_request(KVM_REQ_APF_HALT, vcpu)) {
5250                         /* Page is swapped out. Do synthetic halt */
5251                         vcpu->arch.apf.halted = true;
5252                         r = 1;
5253                         goto out;
5254                 }
5255                 if (kvm_check_request(KVM_REQ_STEAL_UPDATE, vcpu))
5256                         record_steal_time(vcpu);
5257                 if (kvm_check_request(KVM_REQ_NMI, vcpu))
5258                         process_nmi(vcpu);
5259                 req_immediate_exit =
5260                         kvm_check_request(KVM_REQ_IMMEDIATE_EXIT, vcpu);
5261                 if (kvm_check_request(KVM_REQ_PMU, vcpu))
5262                         kvm_handle_pmu_event(vcpu);
5263                 if (kvm_check_request(KVM_REQ_PMI, vcpu))
5264                         kvm_deliver_pmi(vcpu);
5265         }
5266
5267         if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win) {
5268                 inject_pending_event(vcpu);
5269
5270                 /* enable NMI/IRQ window open exits if needed */
5271                 if (vcpu->arch.nmi_pending)
5272                         kvm_x86_ops->enable_nmi_window(vcpu);
5273                 else if (kvm_cpu_has_interrupt(vcpu) || req_int_win)
5274                         kvm_x86_ops->enable_irq_window(vcpu);
5275
5276                 if (kvm_lapic_enabled(vcpu)) {
5277                         update_cr8_intercept(vcpu);
5278                         kvm_lapic_sync_to_vapic(vcpu);
5279                 }
5280         }
5281
5282         r = kvm_mmu_reload(vcpu);
5283         if (unlikely(r)) {
5284                 goto cancel_injection;
5285         }
5286
5287         preempt_disable();
5288
5289         kvm_x86_ops->prepare_guest_switch(vcpu);
5290         if (vcpu->fpu_active)
5291                 kvm_load_guest_fpu(vcpu);
5292         kvm_load_guest_xcr0(vcpu);
5293
5294         vcpu->mode = IN_GUEST_MODE;
5295
5296         /* We should set ->mode before checking ->requests;
5297          * see the comment in make_all_cpus_request.
5298          */
5299         smp_mb();
5300
5301         local_irq_disable();
5302
5303         if (vcpu->mode == EXITING_GUEST_MODE || vcpu->requests
5304             || need_resched() || signal_pending(current)) {
5305                 vcpu->mode = OUTSIDE_GUEST_MODE;
5306                 smp_wmb();
5307                 local_irq_enable();
5308                 preempt_enable();
5309                 r = 1;
5310                 goto cancel_injection;
5311         }
5312
5313         srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
5314
5315         if (req_immediate_exit)
5316                 smp_send_reschedule(vcpu->cpu);
5317
5318         kvm_guest_enter();
5319
5320         if (unlikely(vcpu->arch.switch_db_regs)) {
5321                 set_debugreg(0, 7);
5322                 set_debugreg(vcpu->arch.eff_db[0], 0);
5323                 set_debugreg(vcpu->arch.eff_db[1], 1);
5324                 set_debugreg(vcpu->arch.eff_db[2], 2);
5325                 set_debugreg(vcpu->arch.eff_db[3], 3);
5326         }
5327
5328         trace_kvm_entry(vcpu->vcpu_id);
5329         kvm_x86_ops->run(vcpu);
5330
5331         /*
5332          * If the guest has used debug registers, at least dr7
5333          * will be disabled while returning to the host.
5334          * If we don't have active breakpoints in the host, we don't
5335          * care about the messed up debug address registers. But if
5336          * we have some of them active, restore the old state.
5337          */
5338         if (hw_breakpoint_active())
5339                 hw_breakpoint_restore();
5340
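             /*
              * Record the guest-visible (L1) TSC value corresponding to the
              * host TSC we just read.
              */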
5341         vcpu->arch.last_guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu,
5342                                                            native_read_tsc());
5343
5344         vcpu->mode = OUTSIDE_GUEST_MODE;
5345         smp_wmb();
5346         local_irq_enable();
5347
5348         ++vcpu->stat.exits;
5349
5350         /*
5351          * We must have an instruction between local_irq_enable() and
5352          * kvm_guest_exit(), so the timer interrupt isn't delayed by
5353          * the interrupt shadow.  The stat.exits increment will do nicely.
5354          * But we need to prevent reordering, hence this barrier():
5355          */
5356         barrier();
5357
5358         kvm_guest_exit();
5359
5360         preempt_enable();
5361
5362         vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
5363
5364         /*
5365          * Profile KVM exit RIPs:
5366          */
5367         if (unlikely(prof_on == KVM_PROFILING)) {
5368                 unsigned long rip = kvm_rip_read(vcpu);
5369                 profile_hit(KVM_PROFILING, (void *)rip);
5370         }
5371
5372         if (unlikely(vcpu->arch.tsc_always_catchup))
5373                 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
5374
5375         if (vcpu->arch.apic_attention)
5376                 kvm_lapic_sync_from_vapic(vcpu);
5377
5378         r = kvm_x86_ops->handle_exit(vcpu);
5379         return r;
5380
5381 cancel_injection:
5382         kvm_x86_ops->cancel_injection(vcpu);
5383         if (unlikely(vcpu->arch.apic_attention))
5384                 kvm_lapic_sync_from_vapic(vcpu);
5385 out:
5386         return r;
5387 }
5388
5389
5390 static int __vcpu_run(struct kvm_vcpu *vcpu)
5391 {
5392         int r;
5393         struct kvm *kvm = vcpu->kvm;
5394
5395         if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED)) {
5396                 pr_debug("vcpu %d received sipi with vector # %x\n",
5397                          vcpu->vcpu_id, vcpu->arch.sipi_vector);
5398                 kvm_lapic_reset(vcpu);
5399                 r = kvm_vcpu_reset(vcpu);
5400                 if (r)
5401                         return r;
5402                 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
5403         }
5404
5405         vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
5406         r = vapic_enter(vcpu);
5407         if (r) {
5408                 srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
5409                 return r;
5410         }
5411
5412         r = 1;
5413         while (r > 0) {
5414                 if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
5415                     !vcpu->arch.apf.halted)
5416                         r = vcpu_enter_guest(vcpu);
5417                 else {
5418                         srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
5419                         kvm_vcpu_block(vcpu);
5420                         vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
5421                         if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) {
5422                                 switch (vcpu->arch.mp_state) {
5424                                 case KVM_MP_STATE_HALTED:
5425                                         vcpu->arch.mp_state =
5426                                                 KVM_MP_STATE_RUNNABLE;
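                                             /* fall through */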
5427                                 case KVM_MP_STATE_RUNNABLE:
5428                                         vcpu->arch.apf.halted = false;
5429                                         break;
5430                                 case KVM_MP_STATE_SIPI_RECEIVED:
5431                                 default:
5432                                         r = -EINTR;
5433                                         break;
5434                                 }
5435                         }
5436                 }
5437
5438                 if (r <= 0)
5439                         break;
5440
5441                 clear_bit(KVM_REQ_PENDING_TIMER, &vcpu->requests);
5442                 if (kvm_cpu_has_pending_timer(vcpu))
5443                         kvm_inject_pending_timer_irqs(vcpu);
5444
5445                 if (dm_request_for_irq_injection(vcpu)) {
5446                         r = -EINTR;
5447                         vcpu->run->exit_reason = KVM_EXIT_INTR;
5448                         ++vcpu->stat.request_irq_exits;
5449                 }
5450
5451                 kvm_check_async_pf_completion(vcpu);
5452
5453                 if (signal_pending(current)) {
5454                         r = -EINTR;
5455                         vcpu->run->exit_reason = KVM_EXIT_INTR;
5456                         ++vcpu->stat.signal_exits;
5457                 }
5458                 if (need_resched()) {
5459                         srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
5460                         kvm_resched(vcpu);
5461                         vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
5462                 }
5463         }
5464
5465         srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
5466
5467         vapic_exit(vcpu);
5468
5469         return r;
5470 }
5471
5472 static inline int complete_emulated_io(struct kvm_vcpu *vcpu)
5473 {
5474         int r;
5475         vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
5476         r = emulate_instruction(vcpu, EMULTYPE_NO_DECODE);
5477         srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
5478         if (r != EMULATE_DONE)
5479                 return 0;
5480         return 1;
5481 }
5482
5483 static int complete_emulated_pio(struct kvm_vcpu *vcpu)
5484 {
5485         BUG_ON(!vcpu->arch.pio.count);
5486
5487         return complete_emulated_io(vcpu);
5488 }
5489
5490 /*
5491  * Implements the following, as a state machine:
5492  *
5493  * read:
5494  *   for each fragment
5495  *     write gpa, len
5496  *     exit
5497  *     copy data
5498  *   execute insn
5499  *
5500  * write:
5501  *   for each fragment
5502  *     write gpa, len
5503  *     copy data
5504  *     exit
5505  */
5506 static int complete_emulated_mmio(struct kvm_vcpu *vcpu)
5507 {
5508         struct kvm_run *run = vcpu->run;
5509         struct kvm_mmio_fragment *frag;
5510
5511         BUG_ON(!vcpu->mmio_needed);
5512
5513         /* Complete previous fragment */
5514         frag = &vcpu->mmio_fragments[vcpu->mmio_cur_fragment++];
5515         if (!vcpu->mmio_is_write)
5516                 memcpy(frag->data, run->mmio.data, frag->len);
5517         if (vcpu->mmio_cur_fragment == vcpu->mmio_nr_fragments) {
5518                 vcpu->mmio_needed = 0;
5519                 if (vcpu->mmio_is_write)
5520                         return 1;
5521                 vcpu->mmio_read_completed = 1;
5522                 return complete_emulated_io(vcpu);
5523         }
5524         /* Initiate next fragment */
5525         ++frag;
5526         run->exit_reason = KVM_EXIT_MMIO;
5527         run->mmio.phys_addr = frag->gpa;
5528         if (vcpu->mmio_is_write)
5529                 memcpy(run->mmio.data, frag->data, frag->len);
5530         run->mmio.len = frag->len;
5531         run->mmio.is_write = vcpu->mmio_is_write;
5532         vcpu->arch.complete_userspace_io = complete_emulated_mmio;
5533         return 0;
5534 }
5535
5536
5537 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
5538 {
5539         int r;
5540         sigset_t sigsaved;
5541
5542         if (!tsk_used_math(current) && init_fpu(current))
5543                 return -ENOMEM;
5544
5545         if (vcpu->sigset_active)
5546                 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
5547
5548         if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
5549                 kvm_vcpu_block(vcpu);
5550                 clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
5551                 r = -EAGAIN;
5552                 goto out;
5553         }
5554
5555         /* re-sync apic's tpr */
5556         if (!irqchip_in_kernel(vcpu->kvm)) {
5557                 if (kvm_set_cr8(vcpu, kvm_run->cr8) != 0) {
5558                         r = -EINVAL;
5559                         goto out;
5560                 }
5561         }
5562
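             /*
              * Finish any emulated PIO/MMIO that was interrupted by an exit
              * to userspace before entering the guest again.
              */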
5563         if (unlikely(vcpu->arch.complete_userspace_io)) {
5564                 int (*cui)(struct kvm_vcpu *) = vcpu->arch.complete_userspace_io;
5565                 vcpu->arch.complete_userspace_io = NULL;
5566                 r = cui(vcpu);
5567                 if (r <= 0)
5568                         goto out;
5569         } else
5570                 WARN_ON(vcpu->arch.pio.count || vcpu->mmio_needed);
5571
5572         r = __vcpu_run(vcpu);
5573
5574 out:
5575         post_kvm_run_save(vcpu);
5576         if (vcpu->sigset_active)
5577                 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
5578
5579         return r;
5580 }
5581
5582 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
5583 {
5584         if (vcpu->arch.emulate_regs_need_sync_to_vcpu) {
5585                 /*
5586                  * We are here if userspace calls get_regs() in the middle of
5587                  * instruction emulation. Register state needs to be copied
5588                  * back from the emulation context to the vcpu. Userspace
5589                  * shouldn't usually do that, but some badly designed PV
5590                  * devices (the vmware backdoor interface) need this to work.
5591                  */
5592                 emulator_writeback_register_cache(&vcpu->arch.emulate_ctxt);
5593                 vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
5594         }
5595         regs->rax = kvm_register_read(vcpu, VCPU_REGS_RAX);
5596         regs->rbx = kvm_register_read(vcpu, VCPU_REGS_RBX);
5597         regs->rcx = kvm_register_read(vcpu, VCPU_REGS_RCX);
5598         regs->rdx = kvm_register_read(vcpu, VCPU_REGS_RDX);
5599         regs->rsi = kvm_register_read(vcpu, VCPU_REGS_RSI);
5600         regs->rdi = kvm_register_read(vcpu, VCPU_REGS_RDI);
5601         regs->rsp = kvm_register_read(vcpu, VCPU_REGS_RSP);
5602         regs->rbp = kvm_register_read(vcpu, VCPU_REGS_RBP);
5603 #ifdef CONFIG_X86_64
5604         regs->r8 = kvm_register_read(vcpu, VCPU_REGS_R8);
5605         regs->r9 = kvm_register_read(vcpu, VCPU_REGS_R9);
5606         regs->r10 = kvm_register_read(vcpu, VCPU_REGS_R10);
5607         regs->r11 = kvm_register_read(vcpu, VCPU_REGS_R11);
5608         regs->r12 = kvm_register_read(vcpu, VCPU_REGS_R12);
5609         regs->r13 = kvm_register_read(vcpu, VCPU_REGS_R13);
5610         regs->r14 = kvm_register_read(vcpu, VCPU_REGS_R14);
5611         regs->r15 = kvm_register_read(vcpu, VCPU_REGS_R15);
5612 #endif
5613
5614         regs->rip = kvm_rip_read(vcpu);
5615         regs->rflags = kvm_get_rflags(vcpu);
5616
5617         return 0;
5618 }
5619
5620 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
5621 {
5622         vcpu->arch.emulate_regs_need_sync_from_vcpu = true;
5623         vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
5624
5625         kvm_register_write(vcpu, VCPU_REGS_RAX, regs->rax);
5626         kvm_register_write(vcpu, VCPU_REGS_RBX, regs->rbx);
5627         kvm_register_write(vcpu, VCPU_REGS_RCX, regs->rcx);
5628         kvm_register_write(vcpu, VCPU_REGS_RDX, regs->rdx);
5629         kvm_register_write(vcpu, VCPU_REGS_RSI, regs->rsi);
5630         kvm_register_write(vcpu, VCPU_REGS_RDI, regs->rdi);
5631         kvm_register_write(vcpu, VCPU_REGS_RSP, regs->rsp);
5632         kvm_register_write(vcpu, VCPU_REGS_RBP, regs->rbp);
5633 #ifdef CONFIG_X86_64
5634         kvm_register_write(vcpu, VCPU_REGS_R8, regs->r8);
5635         kvm_register_write(vcpu, VCPU_REGS_R9, regs->r9);
5636         kvm_register_write(vcpu, VCPU_REGS_R10, regs->r10);
5637         kvm_register_write(vcpu, VCPU_REGS_R11, regs->r11);
5638         kvm_register_write(vcpu, VCPU_REGS_R12, regs->r12);
5639         kvm_register_write(vcpu, VCPU_REGS_R13, regs->r13);
5640         kvm_register_write(vcpu, VCPU_REGS_R14, regs->r14);
5641         kvm_register_write(vcpu, VCPU_REGS_R15, regs->r15);
5642 #endif
5643
5644         kvm_rip_write(vcpu, regs->rip);
5645         kvm_set_rflags(vcpu, regs->rflags);
5646
5647         vcpu->arch.exception.pending = false;
5648
5649         kvm_make_request(KVM_REQ_EVENT, vcpu);
5650
5651         return 0;
5652 }
5653
5654 void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
5655 {
5656         struct kvm_segment cs;
5657
5658         kvm_get_segment(vcpu, &cs, VCPU_SREG_CS);
5659         *db = cs.db;
5660         *l = cs.l;
5661 }
5662 EXPORT_SYMBOL_GPL(kvm_get_cs_db_l_bits);
5663
5664 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
5665                                   struct kvm_sregs *sregs)
5666 {
5667         struct desc_ptr dt;
5668
5669         kvm_get_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
5670         kvm_get_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
5671         kvm_get_segment(vcpu, &sregs->es, VCPU_SREG_ES);
5672         kvm_get_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
5673         kvm_get_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
5674         kvm_get_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
5675
5676         kvm_get_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
5677         kvm_get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
5678
5679         kvm_x86_ops->get_idt(vcpu, &dt);
5680         sregs->idt.limit = dt.size;
5681         sregs->idt.base = dt.address;
5682         kvm_x86_ops->get_gdt(vcpu, &dt);
5683         sregs->gdt.limit = dt.size;
5684         sregs->gdt.base = dt.address;
5685
5686         sregs->cr0 = kvm_read_cr0(vcpu);
5687         sregs->cr2 = vcpu->arch.cr2;
5688         sregs->cr3 = kvm_read_cr3(vcpu);
5689         sregs->cr4 = kvm_read_cr4(vcpu);
5690         sregs->cr8 = kvm_get_cr8(vcpu);
5691         sregs->efer = vcpu->arch.efer;
5692         sregs->apic_base = kvm_get_apic_base(vcpu);
5693
5694         memset(sregs->interrupt_bitmap, 0, sizeof sregs->interrupt_bitmap);
5695
5696         if (vcpu->arch.interrupt.pending && !vcpu->arch.interrupt.soft)
5697                 set_bit(vcpu->arch.interrupt.nr,
5698                         (unsigned long *)sregs->interrupt_bitmap);
5699
5700         return 0;
5701 }
5702
5703 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
5704                                     struct kvm_mp_state *mp_state)
5705 {
5706         mp_state->mp_state = vcpu->arch.mp_state;
5707         return 0;
5708 }
5709
5710 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
5711                                     struct kvm_mp_state *mp_state)
5712 {
5713         vcpu->arch.mp_state = mp_state->mp_state;
5714         kvm_make_request(KVM_REQ_EVENT, vcpu);
5715         return 0;
5716 }
5717
5718 int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index,
5719                     int reason, bool has_error_code, u32 error_code)
5720 {
5721         struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
5722         int ret;
5723
5724         init_emulate_ctxt(vcpu);
5725
5726         ret = emulator_task_switch(ctxt, tss_selector, idt_index, reason,
5727                                    has_error_code, error_code);
5728
5729         if (ret)
5730                 return EMULATE_FAIL;
5731
5732         kvm_rip_write(vcpu, ctxt->eip);
5733         kvm_set_rflags(vcpu, ctxt->eflags);
5734         kvm_make_request(KVM_REQ_EVENT, vcpu);
5735         return EMULATE_DONE;
5736 }
5737 EXPORT_SYMBOL_GPL(kvm_task_switch);
5738
5739 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
5740                                   struct kvm_sregs *sregs)
5741 {
5742         int mmu_reset_needed = 0;
5743         int pending_vec, max_bits, idx;
5744         struct desc_ptr dt;
5745
5746         dt.size = sregs->idt.limit;
5747         dt.address = sregs->idt.base;
5748         kvm_x86_ops->set_idt(vcpu, &dt);
5749         dt.size = sregs->gdt.limit;
5750         dt.address = sregs->gdt.base;
5751         kvm_x86_ops->set_gdt(vcpu, &dt);
5752
5753         vcpu->arch.cr2 = sregs->cr2;
5754         mmu_reset_needed |= kvm_read_cr3(vcpu) != sregs->cr3;
5755         vcpu->arch.cr3 = sregs->cr3;
5756         __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
5757
5758         kvm_set_cr8(vcpu, sregs->cr8);
5759
5760         mmu_reset_needed |= vcpu->arch.efer != sregs->efer;
5761         kvm_x86_ops->set_efer(vcpu, sregs->efer);
5762         kvm_set_apic_base(vcpu, sregs->apic_base);
5763
5764         mmu_reset_needed |= kvm_read_cr0(vcpu) != sregs->cr0;
5765         kvm_x86_ops->set_cr0(vcpu, sregs->cr0);
5766         vcpu->arch.cr0 = sregs->cr0;
5767
5768         mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4;
5769         kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
5770         if (sregs->cr4 & X86_CR4_OSXSAVE)
5771                 kvm_update_cpuid(vcpu);
5772
5773         idx = srcu_read_lock(&vcpu->kvm->srcu);
5774         if (!is_long_mode(vcpu) && is_pae(vcpu)) {
5775                 load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu));
5776                 mmu_reset_needed = 1;
5777         }
5778         srcu_read_unlock(&vcpu->kvm->srcu, idx);
5779
5780         if (mmu_reset_needed)
5781                 kvm_mmu_reset_context(vcpu);
5782
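             /* Re-queue a pending external interrupt saved by userspace, if any. */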
5783         max_bits = KVM_NR_INTERRUPTS;
5784         pending_vec = find_first_bit(
5785                 (const unsigned long *)sregs->interrupt_bitmap, max_bits);
5786         if (pending_vec < max_bits) {
5787                 kvm_queue_interrupt(vcpu, pending_vec, false);
5788                 pr_debug("Set back pending irq %d\n", pending_vec);
5789         }
5790
5791         kvm_set_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
5792         kvm_set_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
5793         kvm_set_segment(vcpu, &sregs->es, VCPU_SREG_ES);
5794         kvm_set_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
5795         kvm_set_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
5796         kvm_set_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
5797
5798         kvm_set_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
5799         kvm_set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
5800
5801         update_cr8_intercept(vcpu);
5802
5803         /* Older userspace won't unhalt the vcpu on reset. */
5804         if (kvm_vcpu_is_bsp(vcpu) && kvm_rip_read(vcpu) == 0xfff0 &&
5805             sregs->cs.selector == 0xf000 && sregs->cs.base == 0xffff0000 &&
5806             !is_protmode(vcpu))
5807                 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
5808
5809         kvm_make_request(KVM_REQ_EVENT, vcpu);
5810
5811         return 0;
5812 }
5813
5814 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
5815                                         struct kvm_guest_debug *dbg)
5816 {
5817         unsigned long rflags;
5818         int i, r;
5819
5820         if (dbg->control & (KVM_GUESTDBG_INJECT_DB | KVM_GUESTDBG_INJECT_BP)) {
5821                 r = -EBUSY;
5822                 if (vcpu->arch.exception.pending)
5823                         goto out;
5824                 if (dbg->control & KVM_GUESTDBG_INJECT_DB)
5825                         kvm_queue_exception(vcpu, DB_VECTOR);
5826                 else
5827                         kvm_queue_exception(vcpu, BP_VECTOR);
5828         }
5829
5830         /*
5831          * Read rflags as long as potentially injected trace flags are still
5832          * filtered out.
5833          */
5834         rflags = kvm_get_rflags(vcpu);
5835
5836         vcpu->guest_debug = dbg->control;
5837         if (!(vcpu->guest_debug & KVM_GUESTDBG_ENABLE))
5838                 vcpu->guest_debug = 0;
5839
5840         if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) {
5841                 for (i = 0; i < KVM_NR_DB_REGS; ++i)
5842                         vcpu->arch.eff_db[i] = dbg->arch.debugreg[i];
5843                 vcpu->arch.guest_debug_dr7 = dbg->arch.debugreg[7];
5844         } else {
5845                 for (i = 0; i < KVM_NR_DB_REGS; i++)
5846                         vcpu->arch.eff_db[i] = vcpu->arch.db[i];
5847         }
5848         kvm_update_dr7(vcpu);
5849
5850         if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
5851                 vcpu->arch.singlestep_rip = kvm_rip_read(vcpu) +
5852                         get_segment_base(vcpu, VCPU_SREG_CS);
5853
5854         /*
5855          * Trigger an rflags update that will inject or remove the trace
5856          * flags.
5857          */
5858         kvm_set_rflags(vcpu, rflags);
5859
5860         kvm_x86_ops->update_db_bp_intercept(vcpu);
5861
5862         r = 0;
5863
5864 out:
5865
5866         return r;
5867 }
5868
5869 /*
5870  * Translate a guest virtual address to a guest physical address.
5871  */
5872 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
5873                                     struct kvm_translation *tr)
5874 {
5875         unsigned long vaddr = tr->linear_address;
5876         gpa_t gpa;
5877         int idx;
5878
5879         idx = srcu_read_lock(&vcpu->kvm->srcu);
5880         gpa = kvm_mmu_gva_to_gpa_system(vcpu, vaddr, NULL);
5881         srcu_read_unlock(&vcpu->kvm->srcu, idx);
5882         tr->physical_address = gpa;
5883         tr->valid = gpa != UNMAPPED_GVA;
5884         tr->writeable = 1;
5885         tr->usermode = 0;
5886
5887         return 0;
5888 }
5889
5890 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
5891 {
5892         struct i387_fxsave_struct *fxsave =
5893                         &vcpu->arch.guest_fpu.state->fxsave;
5894
5895         memcpy(fpu->fpr, fxsave->st_space, 128);
5896         fpu->fcw = fxsave->cwd;
5897         fpu->fsw = fxsave->swd;
5898         fpu->ftwx = fxsave->twd;
5899         fpu->last_opcode = fxsave->fop;
5900         fpu->last_ip = fxsave->rip;
5901         fpu->last_dp = fxsave->rdp;
5902         memcpy(fpu->xmm, fxsave->xmm_space, sizeof fxsave->xmm_space);
5903
5904         return 0;
5905 }
5906
5907 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
5908 {
5909         struct i387_fxsave_struct *fxsave =
5910                         &vcpu->arch.guest_fpu.state->fxsave;
5911
5912         memcpy(fxsave->st_space, fpu->fpr, 128);
5913         fxsave->cwd = fpu->fcw;
5914         fxsave->swd = fpu->fsw;
5915         fxsave->twd = fpu->ftwx;
5916         fxsave->fop = fpu->last_opcode;
5917         fxsave->rip = fpu->last_ip;
5918         fxsave->rdp = fpu->last_dp;
5919         memcpy(fxsave->xmm_space, fpu->xmm, sizeof fxsave->xmm_space);
5920
5921         return 0;
5922 }
5923
5924 int fx_init(struct kvm_vcpu *vcpu)
5925 {
5926         int err;
5927
5928         err = fpu_alloc(&vcpu->arch.guest_fpu);
5929         if (err)
5930                 return err;
5931
5932         fpu_finit(&vcpu->arch.guest_fpu);
5933
5934         /*
5935          * Ensure guest xcr0 is valid for loading
5936          */
5937         vcpu->arch.xcr0 = XSTATE_FP;
5938
5939         vcpu->arch.cr0 |= X86_CR0_ET;
5940
5941         return 0;
5942 }
5943 EXPORT_SYMBOL_GPL(fx_init);
5944
5945 static void fx_free(struct kvm_vcpu *vcpu)
5946 {
5947         fpu_free(&vcpu->arch.guest_fpu);
5948 }
5949
5950 void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
5951 {
5952         if (vcpu->guest_fpu_loaded)
5953                 return;
5954
5955         /*
5956          * Restore all possible states in the guest, and assume the host
5957          * would use all available bits.  The guest xcr0 will be loaded
5958          * later.
5959          */
5960         kvm_put_guest_xcr0(vcpu);
5961         vcpu->guest_fpu_loaded = 1;
5962         __kernel_fpu_begin();
5963         fpu_restore_checking(&vcpu->arch.guest_fpu);
5964         trace_kvm_fpu(1);
5965 }
5966
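/*
 * Write the guest FPU registers back into guest_fpu and return the FPU to
 * the host.  KVM_REQ_DEACTIVATE_FPU is requested so that, before the next
 * entry, vendor code can deactivate the guest FPU again (typically by
 * re-arming the CR0.TS/#NM trap).
 */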
5967 void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
5968 {
5969         kvm_put_guest_xcr0(vcpu);
5970
5971         if (!vcpu->guest_fpu_loaded)
5972                 return;
5973
5974         vcpu->guest_fpu_loaded = 0;
5975         fpu_save_init(&vcpu->arch.guest_fpu);
5976         __kernel_fpu_end();
5977         ++vcpu->stat.fpu_reload;
5978         kvm_make_request(KVM_REQ_DEACTIVATE_FPU, vcpu);
5979         trace_kvm_fpu(0);
5980 }
5981
5982 void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
5983 {
5984         kvmclock_reset(vcpu);
5985
5986         free_cpumask_var(vcpu->arch.wbinvd_dirty_mask);
5987         fx_free(vcpu);
5988         kvm_x86_ops->vcpu_free(vcpu);
5989 }
5990
5991 struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
5992                                                 unsigned int id)
5993 {
5994         if (check_tsc_unstable() && atomic_read(&kvm->online_vcpus) != 0)
5995                 printk_once(KERN_WARNING
5996                 "kvm: SMP vm created on host with unstable TSC; "
5997                 "guest TSC will not be reliable\n");
5998         return kvm_x86_ops->vcpu_create(kvm, id);
5999 }
6000
6001 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
6002 {
6003         int r;
6004
6005         vcpu->arch.mtrr_state.have_fixed = 1;
6006         r = vcpu_load(vcpu);
6007         if (r)
6008                 return r;
6009         r = kvm_vcpu_reset(vcpu);
6010         if (r == 0)
6011                 r = kvm_mmu_setup(vcpu);
6012         vcpu_put(vcpu);
6013
6014         return r;
6015 }
6016
6017 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
6018 {
6019         int r;
6020         vcpu->arch.apf.msr_val = 0;
6021
6022         r = vcpu_load(vcpu);
6023         BUG_ON(r);
6024         kvm_mmu_unload(vcpu);
6025         vcpu_put(vcpu);
6026
6027         fx_free(vcpu);
6028         kvm_x86_ops->vcpu_free(vcpu);
6029 }
6030
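/*
 * Common x86 vCPU reset: clear pending NMIs and debug registers, drop the
 * async-PF and steal-time MSR state, reset kvmclock and the PMU, then hand
 * off to kvm_x86_ops->vcpu_reset() for vendor-specific work.
 */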
6031 static int kvm_vcpu_reset(struct kvm_vcpu *vcpu)
6032 {
6033         atomic_set(&vcpu->arch.nmi_queued, 0);
6034         vcpu->arch.nmi_pending = 0;
6035         vcpu->arch.nmi_injected = false;
6036
6037         memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db));
6038         vcpu->arch.dr6 = DR6_FIXED_1;
6039         vcpu->arch.dr7 = DR7_FIXED_1;
6040         kvm_update_dr7(vcpu);
6041
6042         kvm_make_request(KVM_REQ_EVENT, vcpu);
6043         vcpu->arch.apf.msr_val = 0;
6044         vcpu->arch.st.msr_val = 0;
6045
6046         kvmclock_reset(vcpu);
6047
6048         kvm_clear_async_pf_completion_queue(vcpu);
6049         kvm_async_pf_hash_reset(vcpu);
6050         vcpu->arch.apf.halted = false;
6051
6052         kvm_pmu_reset(vcpu);
6053
6054         return kvm_x86_ops->vcpu_reset(vcpu);
6055 }
6056
6057 int kvm_arch_hardware_enable(void *garbage)
6058 {
6059         struct kvm *kvm;
6060         struct kvm_vcpu *vcpu;
6061         int i;
6062         int ret;
6063         u64 local_tsc;
6064         u64 max_tsc = 0;
6065         bool stable, backwards_tsc = false;
6066
6067         kvm_shared_msr_cpu_online();
6068         ret = kvm_x86_ops->hardware_enable(garbage);
6069         if (ret != 0)
6070                 return ret;
6071
6072         local_tsc = native_read_tsc();
6073         stable = !check_tsc_unstable();
6074         list_for_each_entry(kvm, &vm_list, vm_list) {
6075                 kvm_for_each_vcpu(i, vcpu, kvm) {
6076                         if (!stable && vcpu->cpu == smp_processor_id())
6077                                 set_bit(KVM_REQ_CLOCK_UPDATE, &vcpu->requests);
6078                         if (stable && vcpu->arch.last_host_tsc > local_tsc) {
6079                                 backwards_tsc = true;
6080                                 if (vcpu->arch.last_host_tsc > max_tsc)
6081                                         max_tsc = vcpu->arch.last_host_tsc;
6082                         }
6083                 }
6084         }
6085
6086         /*
6087          * Sometimes, even reliable TSCs go backwards.  This happens on
6088          * platforms that reset TSC during suspend or hibernate actions, but
6089          * maintain synchronization.  We must compensate.  Fortunately, we can
6090          * detect that condition here, which happens early in CPU bringup,
6091          * before any KVM threads can be running.  Unfortunately, we can't
6092          * bring the TSCs fully up to date with real time, as we aren't yet far
6093          * enough into CPU bringup that we know how much real time has actually
6094          * elapsed; our helper function, get_kernel_ns() will be using boot
6095          * elapsed; our helper function, get_kernel_ns(), will be using boot
6096          *
6097          * So we simply find the maximum observed TSC above, then record the
6098          * adjustment to TSC in each VCPU.  When the VCPU later gets loaded,
6099          * the adjustment will be applied.  Note that we accumulate
6100          * adjustments, in case multiple suspend cycles happen before some VCPU
6101          * gets a chance to run again.  In the event that no KVM threads get a
6102          * chance to run, we will miss the entire elapsed period, as we'll have
6103          * reset last_host_tsc, so VCPUs will not have the TSC adjusted and may
6104          * lose cycle time.  This isn't too big a deal, since the loss will be
6105          * uniform across all VCPUs (not to mention the scenario is extremely
6106          * unlikely). It is possible that a second hibernate recovery happens
6107          * much faster than a first, causing the observed TSC here to be
6108          * smaller; this would require additional padding adjustment, which is
6109          * why we set last_host_tsc to the local tsc observed here.
6110          *
6111          * N.B. - this code below runs only on platforms with reliable TSC,
6112          * as that is the only way backwards_tsc is set above.  Also note
6113          * that this runs for ALL vcpus, which is not a bug; all VCPUs should
6114          * have the same delta_cyc adjustment applied if backwards_tsc
6115          * is detected.  Note further, this adjustment is only done once,
6116          * as we reset last_host_tsc on all VCPUs to stop this from being
6117          * called multiple times (one for each physical CPU bringup).
6118          *
6119          * Platforms with unreliable TSCs don't have to deal with this, they
6120          * will be compensated by the logic in vcpu_load, which sets the TSC to
6121          * catchup mode.  This will catchup all VCPUs to real time, but cannot
6122          * guarantee that they stay in perfect synchronization.
6123          */
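        /*
         * Illustrative example (numbers are made up): if the host TSC was
         * reset by S4 and now reads local_tsc = 1,000,000 while some VCPU
         * recorded last_host_tsc = 5,000,000, then delta_cyc = 4,000,000
         * cycles is added to every VCPU's tsc_offset_adjustment below, so
         * the guest never observes its TSC jumping backwards.
         */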
6124         if (backwards_tsc) {
6125                 u64 delta_cyc = max_tsc - local_tsc;
6126                 list_for_each_entry(kvm, &vm_list, vm_list) {
6127                         kvm_for_each_vcpu(i, vcpu, kvm) {
6128                                 vcpu->arch.tsc_offset_adjustment += delta_cyc;
6129                                 vcpu->arch.last_host_tsc = local_tsc;
6130                         }
6131
6132                         /*
6133                          * We have to disable TSC offset matching here: if a
6134                          * VM was being booted while the host was entering S4
6135                          * suspend, you may run into problems.  Solving this
6136                          * issue is left as an exercise to the reader.
6137                          */
6138                         kvm->arch.last_tsc_nsec = 0;
6139                         kvm->arch.last_tsc_write = 0;
6140                 }
6141
6142         }
6143         return 0;
6144 }
6145
6146 void kvm_arch_hardware_disable(void *garbage)
6147 {
6148         kvm_x86_ops->hardware_disable(garbage);
6149         drop_user_return_notifiers(garbage);
6150 }
6151
6152 int kvm_arch_hardware_setup(void)
6153 {
6154         return kvm_x86_ops->hardware_setup();
6155 }
6156
6157 void kvm_arch_hardware_unsetup(void)
6158 {
6159         kvm_x86_ops->hardware_unsetup();
6160 }
6161
6162 void kvm_arch_check_processor_compat(void *rtn)
6163 {
6164         kvm_x86_ops->check_processor_compatibility(rtn);
6165 }
6166
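/*
 * True when this vCPU's local APIC model matches the VM's irqchip
 * configuration: an in-kernel irqchip requires an in-kernel APIC, and a
 * userspace irqchip requires that no in-kernel APIC was created.
 */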
6167 bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu)
6168 {
6169         return irqchip_in_kernel(vcpu->kvm) == (vcpu->arch.apic != NULL);
6170 }
6171
6172 struct static_key kvm_no_apic_vcpu __read_mostly;
6173
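/*
 * Architecture-specific vCPU construction.  Resources are acquired in the
 * order: pio_data page, MMU, optional in-kernel LAPIC, MCE banks, wbinvd
 * dirty mask; the fail_* labels below unwind them in reverse order.
 */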
6174 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
6175 {
6176         struct page *page;
6177         struct kvm *kvm;
6178         int r;
6179
6180         BUG_ON(vcpu->kvm == NULL);
6181         kvm = vcpu->kvm;
6182
6183         vcpu->arch.emulate_ctxt.ops = &emulate_ops;
6184         if (!irqchip_in_kernel(kvm) || kvm_vcpu_is_bsp(vcpu))
6185                 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
6186         else
6187                 vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED;
6188
6189         page = alloc_page(GFP_KERNEL | __GFP_ZERO);
6190         if (!page) {
6191                 r = -ENOMEM;
6192                 goto fail;
6193         }
6194         vcpu->arch.pio_data = page_address(page);
6195
6196         kvm_set_tsc_khz(vcpu, max_tsc_khz);
6197
6198         r = kvm_mmu_create(vcpu);
6199         if (r < 0)
6200                 goto fail_free_pio_data;
6201
6202         if (irqchip_in_kernel(kvm)) {
6203                 r = kvm_create_lapic(vcpu);
6204                 if (r < 0)
6205                         goto fail_mmu_destroy;
6206         } else
6207                 static_key_slow_inc(&kvm_no_apic_vcpu);
6208
6209         vcpu->arch.mce_banks = kzalloc(KVM_MAX_MCE_BANKS * sizeof(u64) * 4,
6210                                        GFP_KERNEL);
6211         if (!vcpu->arch.mce_banks) {
6212                 r = -ENOMEM;
6213                 goto fail_free_lapic;
6214         }
6215         vcpu->arch.mcg_cap = KVM_MAX_MCE_BANKS;
6216
6217         if (!zalloc_cpumask_var(&vcpu->arch.wbinvd_dirty_mask, GFP_KERNEL)) {
                     r = -ENOMEM;
6218                 goto fail_free_mce_banks;
             }
6219
6220         kvm_async_pf_hash_reset(vcpu);
6221         kvm_pmu_init(vcpu);
6222
6223         return 0;
6224 fail_free_mce_banks:
6225         kfree(vcpu->arch.mce_banks);
6226 fail_free_lapic:
6227         kvm_free_lapic(vcpu);
6228 fail_mmu_destroy:
6229         kvm_mmu_destroy(vcpu);
6230 fail_free_pio_data:
6231         free_page((unsigned long)vcpu->arch.pio_data);
6232 fail:
6233         return r;
6234 }
6235
6236 void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
6237 {
6238         int idx;
6239
6240         kvm_pmu_destroy(vcpu);
6241         kfree(vcpu->arch.mce_banks);
6242         kvm_free_lapic(vcpu);
6243         idx = srcu_read_lock(&vcpu->kvm->srcu);
6244         kvm_mmu_destroy(vcpu);
6245         srcu_read_unlock(&vcpu->kvm->srcu, idx);
6246         free_page((unsigned long)vcpu->arch.pio_data);
6247         if (!irqchip_in_kernel(vcpu->kvm))
6248                 static_key_slow_dec(&kvm_no_apic_vcpu);
6249 }
6250
6251 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
6252 {
6253         if (type)
6254                 return -EINVAL;
6255
6256         INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
6257         INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);
6258
6259         /* Reserve bit 0 of irq_sources_bitmap for userspace irq source */
6260         set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap);
6261         /* Reserve bit 1 of irq_sources_bitmap for irqfd-resampler */
6262         set_bit(KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
6263                 &kvm->arch.irq_sources_bitmap);
6264
6265         raw_spin_lock_init(&kvm->arch.tsc_write_lock);
6266         mutex_init(&kvm->arch.apic_map_lock);
6267
6268         return 0;
6269 }
6270
6271 static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
6272 {
6273         int r;
6274         r = vcpu_load(vcpu);
6275         BUG_ON(r);
6276         kvm_mmu_unload(vcpu);
6277         vcpu_put(vcpu);
6278 }
6279
6280 static void kvm_free_vcpus(struct kvm *kvm)
6281 {
6282         unsigned int i;
6283         struct kvm_vcpu *vcpu;
6284
6285         /*
6286          * Unpin any mmu pages first.
6287          */
6288         kvm_for_each_vcpu(i, vcpu, kvm) {
6289                 kvm_clear_async_pf_completion_queue(vcpu);
6290                 kvm_unload_vcpu_mmu(vcpu);
6291         }
6292         kvm_for_each_vcpu(i, vcpu, kvm)
6293                 kvm_arch_vcpu_free(vcpu);
6294
6295         mutex_lock(&kvm->lock);
6296         for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
6297                 kvm->vcpus[i] = NULL;
6298
6299         atomic_set(&kvm->online_vcpus, 0);
6300         mutex_unlock(&kvm->lock);
6301 }
6302
6303 void kvm_arch_sync_events(struct kvm *kvm)
6304 {
6305         kvm_free_all_assigned_devices(kvm);
6306         kvm_free_pit(kvm);
6307 }
6308
6309 void kvm_arch_destroy_vm(struct kvm *kvm)
6310 {
6311         kvm_iommu_unmap_guest(kvm);
6312         kfree(kvm->arch.vpic);
6313         kfree(kvm->arch.vioapic);
6314         kvm_free_vcpus(kvm);
6315         if (kvm->arch.apic_access_page)
6316                 put_page(kvm->arch.apic_access_page);
6317         if (kvm->arch.ept_identity_pagetable)
6318                 put_page(kvm->arch.ept_identity_pagetable);
6319         kfree(rcu_dereference_check(kvm->arch.apic_map, 1));
6320 }
6321
6322 void kvm_arch_free_memslot(struct kvm_memory_slot *free,
6323                            struct kvm_memory_slot *dont)
6324 {
6325         int i;
6326
6327         for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) {
6328                 if (!dont || free->arch.rmap[i] != dont->arch.rmap[i]) {
6329                         kvm_kvfree(free->arch.rmap[i]);
6330                         free->arch.rmap[i] = NULL;
6331                 }
6332                 if (i == 0)
6333                         continue;
6334
6335                 if (!dont || free->arch.lpage_info[i - 1] !=
6336                              dont->arch.lpage_info[i - 1]) {
6337                         kvm_kvfree(free->arch.lpage_info[i - 1]);
6338                         free->arch.lpage_info[i - 1] = NULL;
6339                 }
6340         }
6341 }
6342
6343 int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
6344 {
6345         int i;
6346
6347         for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) {
6348                 unsigned long ugfn;
6349                 int lpages;
6350                 int level = i + 1;
6351
6352                 lpages = gfn_to_index(slot->base_gfn + npages - 1,
6353                                       slot->base_gfn, level) + 1;
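                /*
                 * lpages is the number of level-sized frames needed to cover
                 * the slot: e.g. a 1000-page slot needs two 2MB frames
                 * (512 pages each) at level 2, assuming an aligned base_gfn.
                 */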
6354
6355                 slot->arch.rmap[i] =
6356                         kvm_kvzalloc(lpages * sizeof(*slot->arch.rmap[i]));
6357                 if (!slot->arch.rmap[i])
6358                         goto out_free;
6359                 if (i == 0)
6360                         continue;
6361
6362                 slot->arch.lpage_info[i - 1] = kvm_kvzalloc(lpages *
6363                                         sizeof(*slot->arch.lpage_info[i - 1]));
6364                 if (!slot->arch.lpage_info[i - 1])
6365                         goto out_free;
6366
6367                 if (slot->base_gfn & (KVM_PAGES_PER_HPAGE(level) - 1))
6368                         slot->arch.lpage_info[i - 1][0].write_count = 1;
6369                 if ((slot->base_gfn + npages) & (KVM_PAGES_PER_HPAGE(level) - 1))
6370                         slot->arch.lpage_info[i - 1][lpages - 1].write_count = 1;
6371                 ugfn = slot->userspace_addr >> PAGE_SHIFT;
6372                 /*
6373                  * If the gfn and userspace address are not aligned wrt each
6374                  * other, or if explicitly asked to, disable large page
6375                  * support for this slot
6376                  */
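                /*
                 * Illustrative example: with 2MB pages (512 gfns per frame),
                 * base_gfn = 0x201 and ugfn = 0x400 differ in their low 9
                 * bits, so no guest huge page can be backed by a host huge
                 * page and write_count is set for every entry below.
                 */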
6377                 if ((slot->base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE(level) - 1) ||
6378                     !kvm_largepages_enabled()) {
6379                         unsigned long j;
6380
6381                         for (j = 0; j < lpages; ++j)
6382                                 slot->arch.lpage_info[i - 1][j].write_count = 1;
6383                 }
6384         }
6385
6386         return 0;
6387
6388 out_free:
6389         for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) {
6390                 kvm_kvfree(slot->arch.rmap[i]);
6391                 slot->arch.rmap[i] = NULL;
6392                 if (i == 0)
6393                         continue;
6394
6395                 kvm_kvfree(slot->arch.lpage_info[i - 1]);
6396                 slot->arch.lpage_info[i - 1] = NULL;
6397         }
6398         return -ENOMEM;
6399 }
6400
6401 int kvm_arch_prepare_memory_region(struct kvm *kvm,
6402                                 struct kvm_memory_slot *memslot,
6403                                 struct kvm_memory_slot old,
6404                                 struct kvm_userspace_memory_region *mem,
6405                                 int user_alloc)
6406 {
6407         int npages = memslot->npages;
6408         int map_flags = MAP_PRIVATE | MAP_ANONYMOUS;
6409
6410         /* Prevent internal slot pages from being moved by fork()/COW. */
6411         if (memslot->id >= KVM_MEMORY_SLOTS)
6412                 map_flags = MAP_SHARED | MAP_ANONYMOUS;
6413
6414         /* To keep backward compatibility with older userspace,
6415          * x86 needs to handle the !user_alloc case.
6416          */
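        /*
         * On that legacy path the kernel, not userspace, owns the backing
         * memory: a newly created slot gets an anonymous mapping here, and
         * kvm_arch_commit_memory_region() unmaps it again when the slot is
         * deleted.
         */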
6417         if (!user_alloc) {
6418                 if (npages && !old.npages) {
6419                         unsigned long userspace_addr;
6420
6421                         userspace_addr = vm_mmap(NULL, 0,
6422                                                  npages * PAGE_SIZE,
6423                                                  PROT_READ | PROT_WRITE,
6424                                                  map_flags,
6425                                                  0);
6426
6427                         if (IS_ERR((void *)userspace_addr))
6428                                 return PTR_ERR((void *)userspace_addr);
6429
6430                         memslot->userspace_addr = userspace_addr;
6431                 }
6432         }
6433
6435         return 0;
6436 }
6437
6438 void kvm_arch_commit_memory_region(struct kvm *kvm,
6439                                 struct kvm_userspace_memory_region *mem,
6440                                 struct kvm_memory_slot old,
6441                                 int user_alloc)
6442 {
6443
6444         int nr_mmu_pages = 0, npages = mem->memory_size >> PAGE_SHIFT;
6445
6446         if (!user_alloc && !old.user_alloc && old.npages && !npages) {
6447                 int ret;
6448
6449                 ret = vm_munmap(old.userspace_addr,
6450                                 old.npages * PAGE_SIZE);
6451                 if (ret < 0)
6452                         printk(KERN_WARNING
6453                                "kvm_vm_ioctl_set_memory_region: "
6454                                "failed to munmap memory\n");
6455         }
6456
6457         if (!kvm->arch.n_requested_mmu_pages)
6458                 nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);
6459
6460         spin_lock(&kvm->mmu_lock);
6461         if (nr_mmu_pages)
6462                 kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
6463         kvm_mmu_slot_remove_write_access(kvm, mem->slot);
6464         spin_unlock(&kvm->mmu_lock);
6465         /*
6466          * If a memory slot is created or moved, we need to clear all
6467          * mmio sptes.
6468          */
6469         if (npages && old.base_gfn != mem->guest_phys_addr >> PAGE_SHIFT) {
6470                 kvm_mmu_zap_all(kvm);
6471                 kvm_reload_remote_mmus(kvm);
6472         }
6473 }
6474
6475 void kvm_arch_flush_shadow_all(struct kvm *kvm)
6476 {
6477         kvm_mmu_zap_all(kvm);
6478         kvm_reload_remote_mmus(kvm);
6479 }
6480
6481 void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
6482                                    struct kvm_memory_slot *slot)
6483 {
6484         kvm_arch_flush_shadow_all(kvm);
6485 }
6486
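/*
 * A vCPU has work to do when it is RUNNABLE and not halted on an async page
 * fault, when completed async page faults are queued, when a SIPI or NMI is
 * pending, or when an interrupt is both allowed and pending.
 */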
6487 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
6488 {
6489         return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
6490                 !vcpu->arch.apf.halted)
6491                 || !list_empty_careful(&vcpu->async_pf.done)
6492                 || vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED
6493                 || atomic_read(&vcpu->arch.nmi_queued) ||
6494                 (kvm_arch_interrupt_allowed(vcpu) &&
6495                  kvm_cpu_has_interrupt(vcpu));
6496 }
6497
6498 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
6499 {
6500         return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
6501 }
6502
6503 int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu)
6504 {
6505         return kvm_x86_ops->interrupt_allowed(vcpu);
6506 }
6507
6508 bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip)
6509 {
6510         unsigned long current_rip = kvm_rip_read(vcpu) +
6511                 get_segment_base(vcpu, VCPU_SREG_CS);
6512
6513         return current_rip == linear_rip;
6514 }
6515 EXPORT_SYMBOL_GPL(kvm_is_linear_rip);
6516
6517 unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu)
6518 {
6519         unsigned long rflags;
6520
6521         rflags = kvm_x86_ops->get_rflags(vcpu);
6522         if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
6523                 rflags &= ~X86_EFLAGS_TF;
6524         return rflags;
6525 }
6526 EXPORT_SYMBOL_GPL(kvm_get_rflags);
6527
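/*
 * While KVM_GUESTDBG_SINGLESTEP is active the TF bit belongs to the host:
 * kvm_get_rflags() above hides it from the guest, and here it is re-asserted
 * as long as the guest has not yet moved past the rip being single-stepped.
 */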
6528 void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
6529 {
6530         if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP &&
6531             kvm_is_linear_rip(vcpu, vcpu->arch.singlestep_rip))
6532                 rflags |= X86_EFLAGS_TF;
6533         kvm_x86_ops->set_rflags(vcpu, rflags);
6534         kvm_make_request(KVM_REQ_EVENT, vcpu);
6535 }
6536 EXPORT_SYMBOL_GPL(kvm_set_rflags);
6537
6538 void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work)
6539 {
6540         int r;
6541
6542         if ((vcpu->arch.mmu.direct_map != work->arch.direct_map) ||
6543               is_error_page(work->page))
6544                 return;
6545
6546         r = kvm_mmu_reload(vcpu);
6547         if (unlikely(r))
6548                 return;
6549
6550         if (!vcpu->arch.mmu.direct_map &&
6551               work->arch.cr3 != vcpu->arch.mmu.get_cr3(vcpu))
6552                 return;
6553
6554         vcpu->arch.mmu.page_fault(vcpu, work->gva, 0, true);
6555 }
6556
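/*
 * Async page fault gfn tracking: vcpu->arch.apf.gfns is a small
 * open-addressed hash table using linear probing, with ~0 marking an empty
 * slot and roundup_pow_of_two(ASYNC_PF_PER_VCPU) entries.  Deletion must
 * re-home any entry whose probe chain runs through the vacated slot, which
 * is what the cyclic-interval check in kvm_del_async_pf_gfn() is for.
 */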
6557 static inline u32 kvm_async_pf_hash_fn(gfn_t gfn)
6558 {
6559         return hash_32(gfn & 0xffffffff, order_base_2(ASYNC_PF_PER_VCPU));
6560 }
6561
6562 static inline u32 kvm_async_pf_next_probe(u32 key)
6563 {
6564         return (key + 1) & (roundup_pow_of_two(ASYNC_PF_PER_VCPU) - 1);
6565 }
6566
6567 static void kvm_add_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
6568 {
6569         u32 key = kvm_async_pf_hash_fn(gfn);
6570
6571         while (vcpu->arch.apf.gfns[key] != ~0)
6572                 key = kvm_async_pf_next_probe(key);
6573
6574         vcpu->arch.apf.gfns[key] = gfn;
6575 }
6576
6577 static u32 kvm_async_pf_gfn_slot(struct kvm_vcpu *vcpu, gfn_t gfn)
6578 {
6579         int i;
6580         u32 key = kvm_async_pf_hash_fn(gfn);
6581
6582         for (i = 0; i < roundup_pow_of_two(ASYNC_PF_PER_VCPU) &&
6583                      (vcpu->arch.apf.gfns[key] != gfn &&
6584                       vcpu->arch.apf.gfns[key] != ~0); i++)
6585                 key = kvm_async_pf_next_probe(key);
6586
6587         return key;
6588 }
6589
6590 bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
6591 {
6592         return vcpu->arch.apf.gfns[kvm_async_pf_gfn_slot(vcpu, gfn)] == gfn;
6593 }
6594
6595 static void kvm_del_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
6596 {
6597         u32 i, j, k;
6598
6599         i = j = kvm_async_pf_gfn_slot(vcpu, gfn);
6600         while (true) {
6601                 vcpu->arch.apf.gfns[i] = ~0;
6602                 do {
6603                         j = kvm_async_pf_next_probe(j);
6604                         if (vcpu->arch.apf.gfns[j] == ~0)
6605                                 return;
6606                         k = kvm_async_pf_hash_fn(vcpu->arch.apf.gfns[j]);
6607                         /*
6608                          * k lies cyclically in ]i,j]
6609                          * |    i.k.j |
6610                          * |....j i.k.| or  |.k..j i...|
6611                          */
6612                 } while ((i <= j) ? (i < k && k <= j) : (i < k || k <= j));
6613                 vcpu->arch.apf.gfns[i] = vcpu->arch.apf.gfns[j];
6614                 i = j;
6615         }
6616 }
6617
6618 static int apf_put_user(struct kvm_vcpu *vcpu, u32 val)
6619 {
6620
6621         return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apf.data, &val,
6622                                       sizeof(val));
6623 }
6624
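/*
 * Async page fault notifications reuse the guest #PF vector: the "address"
 * injected below is an opaque token rather than a real fault address, and
 * the reason word written through apf_put_user() into the MSR-registered
 * area tells the guest whether the page is not yet present or has become
 * ready (a token of ~0 broadcasts a wakeup to all waiters).
 */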
6625 void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
6626                                      struct kvm_async_pf *work)
6627 {
6628         struct x86_exception fault;
6629
6630         trace_kvm_async_pf_not_present(work->arch.token, work->gva);
6631         kvm_add_async_pf_gfn(vcpu, work->arch.gfn);
6632
6633         if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) ||
6634             (vcpu->arch.apf.send_user_only &&
6635              kvm_x86_ops->get_cpl(vcpu) == 0))
6636                 kvm_make_request(KVM_REQ_APF_HALT, vcpu);
6637         else if (!apf_put_user(vcpu, KVM_PV_REASON_PAGE_NOT_PRESENT)) {
6638                 fault.vector = PF_VECTOR;
6639                 fault.error_code_valid = true;
6640                 fault.error_code = 0;
6641                 fault.nested_page_fault = false;
6642                 fault.address = work->arch.token;
6643                 kvm_inject_page_fault(vcpu, &fault);
6644         }
6645 }
6646
6647 void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
6648                                  struct kvm_async_pf *work)
6649 {
6650         struct x86_exception fault;
6651
6652         trace_kvm_async_pf_ready(work->arch.token, work->gva);
6653         if (is_error_page(work->page))
6654                 work->arch.token = ~0; /* broadcast wakeup */
6655         else
6656                 kvm_del_async_pf_gfn(vcpu, work->arch.gfn);
6657
6658         if ((vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) &&
6659             !apf_put_user(vcpu, KVM_PV_REASON_PAGE_READY)) {
6660                 fault.vector = PF_VECTOR;
6661                 fault.error_code_valid = true;
6662                 fault.error_code = 0;
6663                 fault.nested_page_fault = false;
6664                 fault.address = work->arch.token;
6665                 kvm_inject_page_fault(vcpu, &fault);
6666         }
6667         vcpu->arch.apf.halted = false;
6668         vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
6669 }
6670
6671 bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
6672 {
6673         if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED))
6674                 return true;
6675         else
6676                 return !kvm_event_needs_reinjection(vcpu) &&
6677                         kvm_x86_ops->interrupt_allowed(vcpu);
6678 }
6679
6680 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit);
6681 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_inj_virq);
6682 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_page_fault);
6683 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_msr);
6684 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_cr);
6685 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmrun);
6686 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit);
6687 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit_inject);
6688 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intr_vmexit);
6689 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_invlpga);
6690 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_skinit);
6691 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intercepts);