/*
 * Core of Xen paravirt_ops implementation.
 *
 * This file contains the xen_paravirt_ops structure itself, and the
 * implementations for:
 * - privileged instructions
 * - interrupt flags
 * - segment operations
 * - booting and setup
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */

#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/preempt.h>
#include <linux/hardirq.h>
#include <linux/percpu.h>
#include <linux/delay.h>
#include <linux/start_kernel.h>
#include <linux/sched.h>
#include <linux/kprobes.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/page-flags.h>
#include <linux/highmem.h>
#include <linux/console.h>
#include <linux/pci.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/edd.h>

#include <xen/xen.h>
#include <xen/events.h>
#include <xen/interface/xen.h>
#include <xen/interface/version.h>
#include <xen/interface/physdev.h>
#include <xen/interface/vcpu.h>
#include <xen/interface/memory.h>
#include <xen/interface/xen-mca.h>
#include <xen/features.h>
#include <xen/page.h>
#include <xen/hvm.h>
#include <xen/hvc-console.h>
#include <xen/acpi.h>

#include <asm/paravirt.h>
#include <asm/apic.h>
#include <asm/page.h>
#include <asm/xen/pci.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>
#include <asm/fixmap.h>
#include <asm/processor.h>
#include <asm/proto.h>
#include <asm/msr-index.h>
#include <asm/traps.h>
#include <asm/setup.h>
#include <asm/desc.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/reboot.h>
#include <asm/stackprotector.h>
#include <asm/hypervisor.h>
#include <asm/mwait.h>
#include <asm/pci_x86.h>
#include <asm/pat.h>

#ifdef CONFIG_ACPI
#include <linux/acpi.h>
#include <asm/acpi.h>
#include <acpi/pdc_intel.h>
#include <acpi/processor.h>
#include <xen/interface/platform.h>
#endif

#include "xen-ops.h"
#include "mmu.h"
#include "smp.h"
#include "multicalls.h"

EXPORT_SYMBOL_GPL(hypercall_page);

/*
 * Pointer to the xen_vcpu_info structure or
 * &HYPERVISOR_shared_info->vcpu_info[cpu]. See xen_hvm_init_shared_info
 * and xen_vcpu_setup for details. By default it points to
 * shared_info->vcpu_info, but if the hypervisor supports
 * VCPUOP_register_vcpu_info then it can point to xen_vcpu_info. The
 * pointer is used in __xen_evtchn_do_upcall to acknowledge pending
 * events. More subtly, it is also used by the patched versions of the
 * irq enable/disable primitives, e.g. xen_irq_enable_direct and
 * xen_iret in PV mode.
 *
 * The desire to be able to do those mask/unmask operations as a single
 * instruction, by using the per-cpu offset held in %gs, is the real
 * reason vcpu info is in a per-cpu pointer and the original reason for
 * this hypercall.
 */
DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu);

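/*
 * Illustration (a sketch, not taken from the actual generated code):
 * with the vcpu info resident in the per-cpu area, masking events can
 * compile down to a single %gs-relative store, roughly
 *
 *	movb $1, %gs:offset_of(xen_vcpu_info.evtchn_upcall_mask)
 *
 * whereas going through the xen_vcpu pointer would need a load first.
 */
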
/*
 * Per CPU pages used if hypervisor supports VCPUOP_register_vcpu_info
 * hypercall. This can be used both in PV and PVHVM mode. The structure
 * overrides the default per_cpu(xen_vcpu, cpu) value.
 */
DEFINE_PER_CPU(struct vcpu_info, xen_vcpu_info);

enum xen_domain_type xen_domain_type = XEN_NATIVE;
EXPORT_SYMBOL_GPL(xen_domain_type);

unsigned long *machine_to_phys_mapping = (void *)MACH2PHYS_VIRT_START;
EXPORT_SYMBOL(machine_to_phys_mapping);
unsigned long machine_to_phys_nr;
EXPORT_SYMBOL(machine_to_phys_nr);

struct start_info *xen_start_info;
EXPORT_SYMBOL_GPL(xen_start_info);

struct shared_info xen_dummy_shared_info;

void *xen_initial_gdt;

RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
__read_mostly int xen_have_vector_callback;
EXPORT_SYMBOL_GPL(xen_have_vector_callback);

/*
 * Point at some empty memory to start with. We map the real shared_info
 * page as soon as fixmap is up and running.
 */
struct shared_info *HYPERVISOR_shared_info = &xen_dummy_shared_info;

/*
 * Flag to determine whether vcpu info placement is available on all
 * VCPUs.  We assume it is to start with, and then set it to zero on
 * the first failure.  This is because it can succeed on some VCPUs
 * and not others, since it can involve hypervisor memory allocation,
 * or because the guest failed to guarantee all the appropriate
 * constraints on all VCPUs (i.e. buffer can't cross a page boundary).
 *
 * Note that any particular CPU may be using a placed vcpu structure,
 * but we can only optimise if they all are.
 *
 * 0: not available, 1: available
 */
static int have_vcpu_info_placement = 1;

struct tls_descs {
        struct desc_struct desc[3];
};

/*
 * Updating the 3 TLS descriptors in the GDT on every task switch is
 * surprisingly expensive, so we avoid updating them if they haven't
 * changed.  Since Xen writes different descriptors than the ones
 * passed in the update_descriptor hypercall, we keep shadow copies to
 * compare against.
 */
static DEFINE_PER_CPU(struct tls_descs, shadow_tls_desc);

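/*
 * Note: HYPERVISOR_shared_info->vcpu_info[] only has room for
 * MAX_VIRT_CPUS entries, so without vcpu info placement there is
 * nowhere to keep per-vcpu state for CPUs beyond that limit; they get
 * clamped away below (see also xen_vcpu_setup).
 */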
static void clamp_max_cpus(void)
{
#ifdef CONFIG_SMP
        if (setup_max_cpus > MAX_VIRT_CPUS)
                setup_max_cpus = MAX_VIRT_CPUS;
#endif
}

static void xen_vcpu_setup(int cpu)
{
        struct vcpu_register_vcpu_info info;
        int err;
        struct vcpu_info *vcpup;

        BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info);

        /*
         * This path is called twice on PVHVM - first during bootup via
         * smp_init -> xen_hvm_cpu_notify, and then if the VCPU is being
         * hotplugged: cpu_up -> xen_hvm_cpu_notify.
         * As we can only do the VCPUOP_register_vcpu_info hypercall once,
         * let's not overwrite its result.
         *
         * For PV it is called during restore (xen_vcpu_restore) and bootup
         * (xen_setup_vcpu_info_placement). The hotplug mechanism does not
         * use this function.
         */
        if (xen_hvm_domain()) {
                if (per_cpu(xen_vcpu, cpu) == &per_cpu(xen_vcpu_info, cpu))
                        return;
        }
        if (cpu < MAX_VIRT_CPUS)
                per_cpu(xen_vcpu, cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu];

        if (!have_vcpu_info_placement) {
                if (cpu >= MAX_VIRT_CPUS)
                        clamp_max_cpus();
                return;
        }

        vcpup = &per_cpu(xen_vcpu_info, cpu);
        info.mfn = arbitrary_virt_to_mfn(vcpup);
        info.offset = offset_in_page(vcpup);

        /*
         * Check to see if the hypervisor will put the vcpu_info
         * structure where we want it, which allows direct access via
         * a percpu variable.
         * N.B. This hypercall can _only_ be called once per CPU.
         * Subsequent calls will error out with -EINVAL. This is because
         * the hypervisor has no unregister variant and the hypercall
         * does not allow info.mfn and info.offset to be overwritten.
         */
        err = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, cpu, &info);

        if (err) {
                printk(KERN_DEBUG "register_vcpu_info failed: err=%d\n", err);
                have_vcpu_info_placement = 0;
                clamp_max_cpus();
        } else {
                /*
                 * This cpu is using the registered vcpu info, even if
                 * later ones fail to.
                 */
                per_cpu(xen_vcpu, cpu) = vcpup;
        }
}

/*
 * On restore, set the vcpu placement up again.
 * If it fails, then we're in a bad state, since
 * we can't back out from using it...
 */
void xen_vcpu_restore(void)
{
        int cpu;

        for_each_possible_cpu(cpu) {
                bool other_cpu = (cpu != smp_processor_id());
                bool is_up = HYPERVISOR_vcpu_op(VCPUOP_is_up, cpu, NULL);

                if (other_cpu && is_up &&
                    HYPERVISOR_vcpu_op(VCPUOP_down, cpu, NULL))
                        BUG();

                xen_setup_runstate_info(cpu);

                if (have_vcpu_info_placement)
                        xen_vcpu_setup(cpu);

                if (other_cpu && is_up &&
                    HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL))
                        BUG();
        }
}

static void __init xen_banner(void)
{
        unsigned version = HYPERVISOR_xen_version(XENVER_version, NULL);
        struct xen_extraversion extra;

        HYPERVISOR_xen_version(XENVER_extraversion, &extra);

        printk(KERN_INFO "Booting paravirtualized kernel on %s\n",
               pv_info.name);
        printk(KERN_INFO "Xen version: %d.%d%s%s\n",
               version >> 16, version & 0xffff, extra.extraversion,
               xen_feature(XENFEAT_mmu_pt_update_preserve_ad) ? " (preserve-AD)" : "");
}

/* Check if running on Xen version (major, minor) or later */
bool
xen_running_on_version_or_later(unsigned int major, unsigned int minor)
{
        unsigned int version;

        if (!xen_domain())
                return false;

        version = HYPERVISOR_xen_version(XENVER_version, NULL);
        if ((((version >> 16) == major) && ((version & 0xffff) >= minor)) ||
            ((version >> 16) > major))
                return true;
        return false;
}

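/*
 * Example: XENVER_version packs the major version into the upper 16
 * bits of the result and the minor into the lower 16, so Xen 4.2
 * reports 0x00040002 and xen_running_on_version_or_later(4, 2)
 * returns true for 4.2 and anything newer.
 */
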
#define CPUID_THERM_POWER_LEAF 6
#define APERFMPERF_PRESENT 0

static __read_mostly unsigned int cpuid_leaf1_edx_mask = ~0;
static __read_mostly unsigned int cpuid_leaf1_ecx_mask = ~0;

static __read_mostly unsigned int cpuid_leaf1_ecx_set_mask;
static __read_mostly unsigned int cpuid_leaf5_ecx_val;
static __read_mostly unsigned int cpuid_leaf5_edx_val;

static void xen_cpuid(unsigned int *ax, unsigned int *bx,
                      unsigned int *cx, unsigned int *dx)
{
        unsigned maskebx = ~0;
        unsigned maskecx = ~0;
        unsigned maskedx = ~0;
        unsigned setecx = 0;

        /*
         * Mask out inconvenient features, to try and disable as many
         * unsupported kernel subsystems as possible.
         */
        switch (*ax) {
        case 1:
                maskecx = cpuid_leaf1_ecx_mask;
                setecx = cpuid_leaf1_ecx_set_mask;
                maskedx = cpuid_leaf1_edx_mask;
                break;

        case CPUID_MWAIT_LEAF:
                /* Synthesize the values... */
                *ax = 0;
                *bx = 0;
                *cx = cpuid_leaf5_ecx_val;
                *dx = cpuid_leaf5_edx_val;
                return;

        case CPUID_THERM_POWER_LEAF:
                /* Disabling APERFMPERF for kernel usage */
                maskecx = ~(1 << APERFMPERF_PRESENT);
                break;

        case 0xb:
                /* Suppress extended topology stuff */
                maskebx = 0;
                break;
        }

        asm(XEN_EMULATE_PREFIX "cpuid"
                : "=a" (*ax),
                  "=b" (*bx),
                  "=c" (*cx),
                  "=d" (*dx)
                : "0" (*ax), "2" (*cx));

        *bx &= maskebx;
        *cx &= maskecx;
        *cx |= setecx;
        *dx &= maskedx;
}

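/*
 * Background note: XEN_EMULATE_PREFIX is a magic byte sequence (a ud2a
 * instruction followed by the ASCII string "xen") that deliberately
 * faults into the hypervisor, which recognises the pattern and
 * emulates the cpuid that follows it. That gives Xen a chance to
 * filter the feature bits a PV guest sees before the masks above are
 * applied.
 */
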
static bool __init xen_check_mwait(void)
{
#ifdef CONFIG_ACPI
        struct xen_platform_op op = {
                .cmd                    = XENPF_set_processor_pminfo,
                .u.set_pminfo.id        = -1,
                .u.set_pminfo.type      = XEN_PM_PDC,
        };
        uint32_t buf[3];
        unsigned int ax, bx, cx, dx;
        unsigned int mwait_mask;

        /*
         * We need to determine whether it is OK to expose the MWAIT
         * capability to the kernel to harvest deeper than C3 states from ACPI
         * _CST using the processor_harvest_xen.c module. For this to work, we
         * need to gather the MWAIT_LEAF values (which the cstate.c code
         * checks against). The hypervisor won't expose the MWAIT flag because
         * it would break backwards compatibility, so we will find out directly
         * from the hardware and hypercall.
         */
        if (!xen_initial_domain())
                return false;

        /*
         * When running on a hypervisor earlier than Xen 4.2, do not
         * expose MWAIT, to avoid the risk of loading the native ACPI
         * pad driver.
         */
        if (!xen_running_on_version_or_later(4, 2))
                return false;

        ax = 1;
        cx = 0;

        native_cpuid(&ax, &bx, &cx, &dx);

        mwait_mask = (1 << (X86_FEATURE_EST % 32)) |
                     (1 << (X86_FEATURE_MWAIT % 32));

        if ((cx & mwait_mask) != mwait_mask)
                return false;

        /*
         * We need to emulate the MWAIT_LEAF and for that we need both
         * ecx and edx. The hypercall provides only partial information.
         */

        ax = CPUID_MWAIT_LEAF;
        bx = 0;
        cx = 0;
        dx = 0;

        native_cpuid(&ax, &bx, &cx, &dx);

        /*
         * Ask the hypervisor whether to clear ACPI_PDC_C_C2C3_FFH. If so,
         * don't expose MWAIT_LEAF and let ACPI pick the IOPORT version of C3.
         */
        buf[0] = ACPI_PDC_REVISION_ID;
        buf[1] = 1;
        buf[2] = (ACPI_PDC_C_CAPABILITY_SMP | ACPI_PDC_EST_CAPABILITY_SWSMP);

        set_xen_guest_handle(op.u.set_pminfo.pdc, buf);

        if ((HYPERVISOR_dom0_op(&op) == 0) &&
            (buf[2] & (ACPI_PDC_C_C1_FFH | ACPI_PDC_C_C2C3_FFH))) {
                cpuid_leaf5_ecx_val = cx;
                cpuid_leaf5_edx_val = dx;
        }
        return true;
#else
        return false;
#endif
}

static void __init xen_init_cpuid_mask(void)
{
        unsigned int ax, bx, cx, dx;
        unsigned int xsave_mask;

        cpuid_leaf1_edx_mask =
                ~((1 << X86_FEATURE_MTRR) |  /* disable MTRR */
                  (1 << X86_FEATURE_ACC));   /* thermal monitoring */

        if (!xen_initial_domain())
                cpuid_leaf1_edx_mask &=
                        ~((1 << X86_FEATURE_APIC) |  /* disable local APIC */
                          (1 << X86_FEATURE_ACPI));  /* disable ACPI */

        cpuid_leaf1_ecx_mask &= ~(1 << (X86_FEATURE_X2APIC % 32));

        ax = 1;
        cx = 0;
        xen_cpuid(&ax, &bx, &cx, &dx);

        xsave_mask =
                (1 << (X86_FEATURE_XSAVE % 32)) |
                (1 << (X86_FEATURE_OSXSAVE % 32));

        /* Xen will set CR4.OSXSAVE if supported and not disabled by force */
        if ((cx & xsave_mask) != xsave_mask)
                cpuid_leaf1_ecx_mask &= ~xsave_mask; /* disable XSAVE & OSXSAVE */
        if (xen_check_mwait())
                cpuid_leaf1_ecx_set_mask = (1 << (X86_FEATURE_MWAIT % 32));
}

static void xen_set_debugreg(int reg, unsigned long val)
{
        HYPERVISOR_set_debugreg(reg, val);
}

static unsigned long xen_get_debugreg(int reg)
{
        return HYPERVISOR_get_debugreg(reg);
}

static void xen_end_context_switch(struct task_struct *next)
{
        xen_mc_flush();
        paravirt_end_context_switch(next);
}

static unsigned long xen_store_tr(void)
{
        return 0;
}

/*
 * Set the page permissions for a particular virtual address.  If the
 * address is a vmalloc mapping (or other non-linear mapping), then
 * find the linear mapping of the page and also set its protections to
 * match.
 */
static void set_aliased_prot(void *v, pgprot_t prot)
{
        int level;
        pte_t *ptep;
        pte_t pte;
        unsigned long pfn;
        struct page *page;

        ptep = lookup_address((unsigned long)v, &level);
        BUG_ON(ptep == NULL);

        pfn = pte_pfn(*ptep);
        page = pfn_to_page(pfn);

        pte = pfn_pte(pfn, prot);

        if (HYPERVISOR_update_va_mapping((unsigned long)v, pte, 0))
                BUG();

        if (!PageHighMem(page)) {
                void *av = __va(PFN_PHYS(pfn));

                if (av != v)
                        if (HYPERVISOR_update_va_mapping((unsigned long)av, pte, 0))
                                BUG();
        } else
                kmap_flush_unused();
}

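/*
 * Why the aliasing matters for the LDT helpers below: Xen refuses to
 * use a page as a descriptor table while any writable mapping of that
 * page exists anywhere, so both the vmalloc alias and the linear-map
 * alias have to be flipped to read-only (and back) together.
 */
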
static void xen_alloc_ldt(struct desc_struct *ldt, unsigned entries)
{
        const unsigned entries_per_page = PAGE_SIZE / LDT_ENTRY_SIZE;
        int i;

        for (i = 0; i < entries; i += entries_per_page)
                set_aliased_prot(ldt + i, PAGE_KERNEL_RO);
}

static void xen_free_ldt(struct desc_struct *ldt, unsigned entries)
{
        const unsigned entries_per_page = PAGE_SIZE / LDT_ENTRY_SIZE;
        int i;

        for (i = 0; i < entries; i += entries_per_page)
                set_aliased_prot(ldt + i, PAGE_KERNEL);
}

static void xen_set_ldt(const void *addr, unsigned entries)
{
        struct mmuext_op *op;
        struct multicall_space mcs = xen_mc_entry(sizeof(*op));

        trace_xen_cpu_set_ldt(addr, entries);

        op = mcs.args;
        op->cmd = MMUEXT_SET_LDT;
        op->arg1.linear_addr = (unsigned long)addr;
        op->arg2.nr_ents = entries;

        MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);

        xen_mc_issue(PARAVIRT_LAZY_CPU);
}

static void xen_load_gdt(const struct desc_ptr *dtr)
{
        unsigned long va = dtr->address;
        unsigned int size = dtr->size + 1;
        unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
        unsigned long frames[pages];
        int f;

        /*
         * A GDT can be up to 64k in size, which corresponds to 8192
         * 8-byte entries, or 16 4k pages.
         */

        BUG_ON(size > 65536);
        BUG_ON(va & ~PAGE_MASK);

        for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) {
                int level;
                pte_t *ptep;
                unsigned long pfn, mfn;
                void *virt;

                /*
                 * The GDT is per-cpu and is in the percpu data area.
                 * That can be virtually mapped, so we need to do a
                 * page-walk to get the underlying MFN for the
                 * hypercall.  The page can also be in the kernel's
                 * linear range, so we need to make that mapping
                 * read-only as well.
                 */
                ptep = lookup_address(va, &level);
                BUG_ON(ptep == NULL);

                pfn = pte_pfn(*ptep);
                mfn = pfn_to_mfn(pfn);
                virt = __va(PFN_PHYS(pfn));

                frames[f] = mfn;

                make_lowmem_page_readonly((void *)va);
                make_lowmem_page_readonly(virt);
        }

        if (HYPERVISOR_set_gdt(frames, size / sizeof(struct desc_struct)))
                BUG();
}

/*
 * load_gdt for early boot, when the gdt is only mapped once
 */
static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
{
        unsigned long va = dtr->address;
        unsigned int size = dtr->size + 1;
        unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
        unsigned long frames[pages];
        int f;

        /*
         * A GDT can be up to 64k in size, which corresponds to 8192
         * 8-byte entries, or 16 4k pages.
         */

        BUG_ON(size > 65536);
        BUG_ON(va & ~PAGE_MASK);

        for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) {
                pte_t pte;
                unsigned long pfn, mfn;

                pfn = virt_to_pfn(va);
                mfn = pfn_to_mfn(pfn);

                pte = pfn_pte(pfn, PAGE_KERNEL_RO);

                if (HYPERVISOR_update_va_mapping((unsigned long)va, pte, 0))
                        BUG();

                frames[f] = mfn;
        }

        if (HYPERVISOR_set_gdt(frames, size / sizeof(struct desc_struct)))
                BUG();
}

static inline bool desc_equal(const struct desc_struct *d1,
                              const struct desc_struct *d2)
{
        return d1->a == d2->a && d1->b == d2->b;
}

static void load_TLS_descriptor(struct thread_struct *t,
                                unsigned int cpu, unsigned int i)
{
        struct desc_struct *shadow = &per_cpu(shadow_tls_desc, cpu).desc[i];
        struct desc_struct *gdt;
        xmaddr_t maddr;
        struct multicall_space mc;

        if (desc_equal(shadow, &t->tls_array[i]))
                return;

        *shadow = t->tls_array[i];

        gdt = get_cpu_gdt_table(cpu);
        maddr = arbitrary_virt_to_machine(&gdt[GDT_ENTRY_TLS_MIN + i]);
        mc = __xen_mc_entry(0);

        MULTI_update_descriptor(mc.mc, maddr.maddr, t->tls_array[i]);
}

static void xen_load_tls(struct thread_struct *t, unsigned int cpu)
{
        /*
         * XXX sleazy hack: If we're being called in a lazy-cpu zone
         * and lazy gs handling is enabled, it means we're in a
         * context switch, and %gs has just been saved.  This means we
         * can zero it out to prevent faults on exit from the
         * hypervisor if the next process has no %gs.  Either way, it
         * has been saved, and the new value will get loaded properly.
         * This will go away as soon as Xen has been modified to not
         * save/restore %gs for normal hypercalls.
         *
         * On x86_64, this hack is not used for %gs, because gs points
         * to KERNEL_GS_BASE (and uses it for PDA references), so we
         * must not zero %gs on x86_64.
         *
         * For x86_64, we need to zero %fs, otherwise we may get an
         * exception between the new %fs descriptor being loaded and
         * %fs being effectively cleared at __switch_to().
         */
        if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_CPU) {
#ifdef CONFIG_X86_32
                lazy_load_gs(0);
#else
                loadsegment(fs, 0);
#endif
        }

        xen_mc_batch();

        load_TLS_descriptor(t, cpu, 0);
        load_TLS_descriptor(t, cpu, 1);
        load_TLS_descriptor(t, cpu, 2);

        xen_mc_issue(PARAVIRT_LAZY_CPU);
}

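/*
 * Note on the batching above: xen_mc_batch()/xen_mc_issue() collect
 * the MULTI_update_descriptor requests queued by load_TLS_descriptor()
 * and submit them as one multicall, so a task switch pays for at most
 * a single hypercall (and none at all when the shadow copies show the
 * descriptors are unchanged).
 */
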
#ifdef CONFIG_X86_64
static void xen_load_gs_index(unsigned int idx)
{
        if (HYPERVISOR_set_segment_base(SEGBASE_GS_USER_SEL, idx))
                BUG();
}
#endif

static void xen_write_ldt_entry(struct desc_struct *dt, int entrynum,
                                const void *ptr)
{
        xmaddr_t mach_lp = arbitrary_virt_to_machine(&dt[entrynum]);
        u64 entry = *(u64 *)ptr;

        trace_xen_cpu_write_ldt_entry(dt, entrynum, entry);

        preempt_disable();

        xen_mc_flush();
        if (HYPERVISOR_update_descriptor(mach_lp.maddr, entry))
                BUG();

        preempt_enable();
}

static int cvt_gate_to_trap(int vector, const gate_desc *val,
                            struct trap_info *info)
{
        unsigned long addr;

        if (val->type != GATE_TRAP && val->type != GATE_INTERRUPT)
                return 0;

        info->vector = vector;

        addr = gate_offset(*val);
#ifdef CONFIG_X86_64
        /*
         * Look for known traps using IST, and substitute them
         * appropriately.  The debugger ones are the only ones we care
         * about.  Xen will handle faults like double_fault, so we
         * should never see them.  Warn if there's an unexpected
         * IST-using fault handler.
         */
        if (addr == (unsigned long)debug)
                addr = (unsigned long)xen_debug;
        else if (addr == (unsigned long)int3)
                addr = (unsigned long)xen_int3;
        else if (addr == (unsigned long)stack_segment)
                addr = (unsigned long)xen_stack_segment;
        else if (addr == (unsigned long)double_fault ||
                 addr == (unsigned long)nmi) {
                /* Don't need to handle these */
                return 0;
#ifdef CONFIG_X86_MCE
        } else if (addr == (unsigned long)machine_check) {
                /*
                 * When the Xen hypervisor injects a vMCE into the
                 * guest, use the native MCE handler to handle it.
                 */
                ;
#endif
        } else {
                /* Some other trap using IST? */
                if (WARN_ON(val->ist != 0))
                        return 0;
        }
#endif  /* CONFIG_X86_64 */
        info->address = addr;

        info->cs = gate_segment(*val);
        info->flags = val->dpl;
        /* interrupt gates clear IF */
        if (val->type == GATE_INTERRUPT)
                info->flags |= 1 << 2;

        return 1;
}

/* Locations of each CPU's IDT */
static DEFINE_PER_CPU(struct desc_ptr, idt_desc);

/*
 * Set an IDT entry.  If the entry is part of the current IDT, then
 * also update Xen.
 */
static void xen_write_idt_entry(gate_desc *dt, int entrynum, const gate_desc *g)
{
        unsigned long p = (unsigned long)&dt[entrynum];
        unsigned long start, end;

        trace_xen_cpu_write_idt_entry(dt, entrynum, g);

        preempt_disable();

        start = __this_cpu_read(idt_desc.address);
        end = start + __this_cpu_read(idt_desc.size) + 1;

        xen_mc_flush();

        native_write_idt_entry(dt, entrynum, g);

        if (p >= start && (p + 8) <= end) {
                struct trap_info info[2];

                info[1].address = 0;

                if (cvt_gate_to_trap(entrynum, g, &info[0]))
                        if (HYPERVISOR_set_trap_table(info))
                                BUG();
        }

        preempt_enable();
}

static void xen_convert_trap_info(const struct desc_ptr *desc,
                                  struct trap_info *traps)
{
        unsigned in, out, count;

        count = (desc->size + 1) / sizeof(gate_desc);
        BUG_ON(count > 256);

        for (in = out = 0; in < count; in++) {
                gate_desc *entry = (gate_desc *)(desc->address) + in;

                if (cvt_gate_to_trap(in, entry, &traps[out]))
                        out++;
        }
        traps[out].address = 0;
}

void xen_copy_trap_info(struct trap_info *traps)
{
        const struct desc_ptr *desc = &__get_cpu_var(idt_desc);

        xen_convert_trap_info(desc, traps);
}

/*
 * Load a new IDT into Xen.  In principle this can be per-CPU, so we
 * hold a spinlock to protect the static traps[] array (static because
 * it avoids allocation, and saves stack space).
 */
static void xen_load_idt(const struct desc_ptr *desc)
{
        static DEFINE_SPINLOCK(lock);
        static struct trap_info traps[257];

        trace_xen_cpu_load_idt(desc);

        spin_lock(&lock);

        __get_cpu_var(idt_desc) = *desc;

        xen_convert_trap_info(desc, traps);

        xen_mc_flush();
        if (HYPERVISOR_set_trap_table(traps))
                BUG();

        spin_unlock(&lock);
}

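/*
 * Why traps[] holds 257 entries: the IDT has at most 256 gates, and
 * xen_convert_trap_info() writes one extra terminating entry with
 * address == 0, which is how HYPERVISOR_set_trap_table knows where
 * the table ends.
 */
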
/*
 * Write a GDT descriptor entry.  Ignore LDT descriptors, since
 * they're handled differently.
 */
static void xen_write_gdt_entry(struct desc_struct *dt, int entry,
                                const void *desc, int type)
{
        trace_xen_cpu_write_gdt_entry(dt, entry, desc, type);

        preempt_disable();

        switch (type) {
        case DESC_LDT:
        case DESC_TSS:
                /* ignore */
                break;

        default: {
                xmaddr_t maddr = arbitrary_virt_to_machine(&dt[entry]);

                xen_mc_flush();
                if (HYPERVISOR_update_descriptor(maddr.maddr, *(u64 *)desc))
                        BUG();
        }

        }

        preempt_enable();
}

/*
 * Version of write_gdt_entry for use at early boot-time needed to
 * update an entry as simply as possible.
 */
static void __init xen_write_gdt_entry_boot(struct desc_struct *dt, int entry,
                                            const void *desc, int type)
{
        trace_xen_cpu_write_gdt_entry(dt, entry, desc, type);

        switch (type) {
        case DESC_LDT:
        case DESC_TSS:
                /* ignore */
                break;

        default: {
                xmaddr_t maddr = virt_to_machine(&dt[entry]);

                /* If the hypercall fails, fall back to a plain write. */
                if (HYPERVISOR_update_descriptor(maddr.maddr, *(u64 *)desc))
                        dt[entry] = *(struct desc_struct *)desc;
        }

        }
}

static void xen_load_sp0(struct tss_struct *tss,
                         struct thread_struct *thread)
{
        struct multicall_space mcs;

        mcs = xen_mc_entry(0);
        MULTI_stack_switch(mcs.mc, __KERNEL_DS, thread->sp0);
        xen_mc_issue(PARAVIRT_LAZY_CPU);
}

static void xen_set_iopl_mask(unsigned mask)
{
        struct physdev_set_iopl set_iopl;

        /*
         * Force the change at ring 0. The IOPL field lives in bits
         * 12-13 of EFLAGS, hence the shift and mask below.
         */
        set_iopl.iopl = (mask == 0) ? 1 : (mask >> 12) & 3;
        HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl);
}

static void xen_io_delay(void)
{
}

#ifdef CONFIG_X86_LOCAL_APIC
static unsigned long xen_set_apic_id(unsigned int x)
{
        WARN_ON(1);
        return x;
}

static unsigned int xen_get_apic_id(unsigned long x)
{
        return ((x) >> 24) & 0xFFu;
}

static u32 xen_apic_read(u32 reg)
{
        struct xen_platform_op op = {
                .cmd = XENPF_get_cpuinfo,
                .interface_version = XENPF_INTERFACE_VERSION,
                .u.pcpu_info.xen_cpuid = 0,
        };
        int ret = 0;

        /*
         * Shouldn't need this, as the APIC is turned off for PV and we
         * only get called on the boot processor. But just in case.
         */
        if (!xen_initial_domain() || smp_processor_id())
                return 0;

        if (reg == APIC_LVR)
                return 0x10;

        if (reg != APIC_ID)
                return 0;

        ret = HYPERVISOR_dom0_op(&op);
        if (ret)
                return 0;

        return op.u.pcpu_info.apic_id << 24;
}

static void xen_apic_write(u32 reg, u32 val)
{
        /* Warn to see if there are any stray references */
        WARN_ON(1);
}

static u64 xen_apic_icr_read(void)
{
        return 0;
}

static void xen_apic_icr_write(u32 low, u32 id)
{
        /* Warn to see if there are any stray references */
        WARN_ON(1);
}

static void xen_apic_wait_icr_idle(void)
{
}

static u32 xen_safe_apic_wait_icr_idle(void)
{
        return 0;
}

static void set_xen_basic_apic_ops(void)
{
        apic->read = xen_apic_read;
        apic->write = xen_apic_write;
        apic->icr_read = xen_apic_icr_read;
        apic->icr_write = xen_apic_icr_write;
        apic->wait_icr_idle = xen_apic_wait_icr_idle;
        apic->safe_wait_icr_idle = xen_safe_apic_wait_icr_idle;
        apic->set_apic_id = xen_set_apic_id;
        apic->get_apic_id = xen_get_apic_id;

#ifdef CONFIG_SMP
        apic->send_IPI_allbutself = xen_send_IPI_allbutself;
        apic->send_IPI_mask_allbutself = xen_send_IPI_mask_allbutself;
        apic->send_IPI_mask = xen_send_IPI_mask;
        apic->send_IPI_all = xen_send_IPI_all;
        apic->send_IPI_self = xen_send_IPI_self;
#endif
}

#endif

static void xen_clts(void)
{
        struct multicall_space mcs;

        mcs = xen_mc_entry(0);

        MULTI_fpu_taskswitch(mcs.mc, 0);

        xen_mc_issue(PARAVIRT_LAZY_CPU);
}

static DEFINE_PER_CPU(unsigned long, xen_cr0_value);

static unsigned long xen_read_cr0(void)
{
        unsigned long cr0 = this_cpu_read(xen_cr0_value);

        if (unlikely(cr0 == 0)) {
                cr0 = native_read_cr0();
                this_cpu_write(xen_cr0_value, cr0);
        }

        return cr0;
}

static void xen_write_cr0(unsigned long cr0)
{
        struct multicall_space mcs;

        this_cpu_write(xen_cr0_value, cr0);

        /* Only pay attention to cr0.TS; everything else is ignored. */
        mcs = xen_mc_entry(0);

        MULTI_fpu_taskswitch(mcs.mc, (cr0 & X86_CR0_TS) != 0);

        xen_mc_issue(PARAVIRT_LAZY_CPU);
}

static void xen_write_cr4(unsigned long cr4)
{
        cr4 &= ~X86_CR4_PGE;
        cr4 &= ~X86_CR4_PSE;

        native_write_cr4(cr4);
}

#ifdef CONFIG_X86_64
static inline unsigned long xen_read_cr8(void)
{
        return 0;
}

static inline void xen_write_cr8(unsigned long val)
{
        BUG_ON(val);
}
#endif

static int xen_write_msr_safe(unsigned int msr, unsigned low, unsigned high)
{
        int ret = 0;

        switch (msr) {
#ifdef CONFIG_X86_64
                unsigned which;
                u64 base;

        case MSR_FS_BASE:               which = SEGBASE_FS; goto set;
        case MSR_KERNEL_GS_BASE:        which = SEGBASE_GS_USER; goto set;
        case MSR_GS_BASE:               which = SEGBASE_GS_KERNEL; goto set;

        set:
                /* Segment bases are set via hypercall, not via the MSR. */
                base = ((u64)high << 32) | low;
                if (HYPERVISOR_set_segment_base(which, base) != 0)
                        ret = -EIO;
                break;
#endif

        case MSR_STAR:
        case MSR_CSTAR:
        case MSR_LSTAR:
        case MSR_SYSCALL_MASK:
        case MSR_IA32_SYSENTER_CS:
        case MSR_IA32_SYSENTER_ESP:
        case MSR_IA32_SYSENTER_EIP:
                /*
                 * Fast syscall setup is all done in hypercalls, so
                 * these are all ignored.  Stub them out here to stop
                 * Xen console noise.
                 */
                break;

        case MSR_IA32_CR_PAT:
                if (smp_processor_id() == 0)
                        xen_set_pat(((u64)high << 32) | low);
                break;

        default:
                ret = native_write_msr_safe(msr, low, high);
        }

        return ret;
}

void xen_setup_shared_info(void)
{
        if (!xen_feature(XENFEAT_auto_translated_physmap)) {
                set_fixmap(FIX_PARAVIRT_BOOTMAP,
                           xen_start_info->shared_info);

                HYPERVISOR_shared_info =
                        (struct shared_info *)fix_to_virt(FIX_PARAVIRT_BOOTMAP);
        } else
                HYPERVISOR_shared_info =
                        (struct shared_info *)__va(xen_start_info->shared_info);

#ifndef CONFIG_SMP
        /* In UP this is as good a place as any to set up shared info */
        xen_setup_vcpu_info_placement();
#endif

        xen_setup_mfn_list_list();
}

/* This is called once we have the cpu_possible_mask */
void xen_setup_vcpu_info_placement(void)
{
        int cpu;

        for_each_possible_cpu(cpu)
                xen_vcpu_setup(cpu);

        /*
         * xen_vcpu_setup managed to place the vcpu_info within the
         * percpu area for all cpus, so make use of it.
         */
        if (have_vcpu_info_placement) {
                pv_irq_ops.save_fl = __PV_IS_CALLEE_SAVE(xen_save_fl_direct);
                pv_irq_ops.restore_fl = __PV_IS_CALLEE_SAVE(xen_restore_fl_direct);
                pv_irq_ops.irq_disable = __PV_IS_CALLEE_SAVE(xen_irq_disable_direct);
                pv_irq_ops.irq_enable = __PV_IS_CALLEE_SAVE(xen_irq_enable_direct);
                pv_mmu_ops.read_cr2 = xen_read_cr2_direct;
        }
}

static unsigned xen_patch(u8 type, u16 clobbers, void *insnbuf,
                          unsigned long addr, unsigned len)
{
        char *start, *end, *reloc;
        unsigned ret;

        start = end = reloc = NULL;

#define SITE(op, x)                                                     \
        case PARAVIRT_PATCH(op.x):                                      \
        if (have_vcpu_info_placement) {                                 \
                start = (char *)xen_##x##_direct;                       \
                end = xen_##x##_direct_end;                             \
                reloc = xen_##x##_direct_reloc;                         \
        }                                                               \
        goto patch_site

        switch (type) {
                SITE(pv_irq_ops, irq_enable);
                SITE(pv_irq_ops, irq_disable);
                SITE(pv_irq_ops, save_fl);
                SITE(pv_irq_ops, restore_fl);
#undef SITE

        patch_site:
                if (start == NULL || (end - start) > len)
                        goto default_patch;

                ret = paravirt_patch_insns(insnbuf, len, start, end);

                /*
                 * Note: because reloc is assigned from something that
                 * appears to be an array, gcc assumes it's non-null,
                 * but doesn't know its relationship with start and
                 * end.
                 */
                if (reloc > start && reloc < end) {
                        int reloc_off = reloc - start;
                        long *relocp = (long *)(insnbuf + reloc_off);
                        long delta = start - (char *)addr;

                        *relocp += delta;
                }
                break;

        default_patch:
        default:
                ret = paravirt_patch_default(type, clobbers, insnbuf,
                                             addr, len);
                break;
        }

        return ret;
}

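/*
 * For clarity, SITE(pv_irq_ops, irq_enable) above expands to roughly:
 *
 *      case PARAVIRT_PATCH(pv_irq_ops.irq_enable):
 *              if (have_vcpu_info_placement) {
 *                      start = (char *)xen_irq_enable_direct;
 *                      end = xen_irq_enable_direct_end;
 *                      reloc = xen_irq_enable_direct_reloc;
 *              }
 *              goto patch_site;
 *
 * i.e. each patchable op gets its hand-written "direct" assembly stub
 * copied over the call site when vcpu info placement is in use.
 */
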
static const struct pv_info xen_info __initconst = {
        .paravirt_enabled = 1,
        .shared_kernel_pmd = 0,

#ifdef CONFIG_X86_64
        .extra_user_64bit_cs = FLAT_USER_CS64,
#endif

        .name = "Xen",
};

static const struct pv_init_ops xen_init_ops __initconst = {
        .patch = xen_patch,
};

static const struct pv_cpu_ops xen_cpu_ops __initconst = {
        .cpuid = xen_cpuid,

        .set_debugreg = xen_set_debugreg,
        .get_debugreg = xen_get_debugreg,

        .clts = xen_clts,

        .read_cr0 = xen_read_cr0,
        .write_cr0 = xen_write_cr0,

        .read_cr4 = native_read_cr4,
        .read_cr4_safe = native_read_cr4_safe,
        .write_cr4 = xen_write_cr4,

#ifdef CONFIG_X86_64
        .read_cr8 = xen_read_cr8,
        .write_cr8 = xen_write_cr8,
#endif

        .wbinvd = native_wbinvd,

        .read_msr = native_read_msr_safe,
        .write_msr = xen_write_msr_safe,

        .read_tsc = native_read_tsc,
        .read_pmc = native_read_pmc,

        .read_tscp = native_read_tscp,

        .iret = xen_iret,
        .irq_enable_sysexit = xen_sysexit,
#ifdef CONFIG_X86_64
        .usergs_sysret32 = xen_sysret32,
        .usergs_sysret64 = xen_sysret64,
#endif

        .load_tr_desc = paravirt_nop,
        .set_ldt = xen_set_ldt,
        .load_gdt = xen_load_gdt,
        .load_idt = xen_load_idt,
        .load_tls = xen_load_tls,
#ifdef CONFIG_X86_64
        .load_gs_index = xen_load_gs_index,
#endif

        .alloc_ldt = xen_alloc_ldt,
        .free_ldt = xen_free_ldt,

        .store_idt = native_store_idt,
        .store_tr = xen_store_tr,

        .write_ldt_entry = xen_write_ldt_entry,
        .write_gdt_entry = xen_write_gdt_entry,
        .write_idt_entry = xen_write_idt_entry,
        .load_sp0 = xen_load_sp0,

        .set_iopl_mask = xen_set_iopl_mask,
        .io_delay = xen_io_delay,

        /* Xen takes care of %gs when switching to usermode for us */
        .swapgs = paravirt_nop,

        .start_context_switch = paravirt_start_context_switch,
        .end_context_switch = xen_end_context_switch,
};

static const struct pv_apic_ops xen_apic_ops __initconst = {
#ifdef CONFIG_X86_LOCAL_APIC
        .startup_ipi_hook = paravirt_nop,
#endif
};

static void xen_reboot(int reason)
{
        struct sched_shutdown r = { .reason = reason };

        if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
                BUG();
}

static void xen_restart(char *msg)
{
        xen_reboot(SHUTDOWN_reboot);
}

static void xen_emergency_restart(void)
{
        xen_reboot(SHUTDOWN_reboot);
}

static void xen_machine_halt(void)
{
        xen_reboot(SHUTDOWN_poweroff);
}

static void xen_machine_power_off(void)
{
        if (pm_power_off)
                pm_power_off();
        xen_reboot(SHUTDOWN_poweroff);
}

static void xen_crash_shutdown(struct pt_regs *regs)
{
        xen_reboot(SHUTDOWN_crash);
}

static int
xen_panic_event(struct notifier_block *this, unsigned long event, void *ptr)
{
        xen_reboot(SHUTDOWN_crash);
        return NOTIFY_DONE;
}

static struct notifier_block xen_panic_block = {
        .notifier_call = xen_panic_event,
};

int xen_panic_handler_init(void)
{
        atomic_notifier_chain_register(&panic_notifier_list, &xen_panic_block);
        return 0;
}

static const struct machine_ops xen_machine_ops __initconst = {
        .restart = xen_restart,
        .halt = xen_machine_halt,
        .power_off = xen_machine_power_off,
        .shutdown = xen_machine_halt,
        .crash_shutdown = xen_crash_shutdown,
        .emergency_restart = xen_emergency_restart,
};

static void __init xen_boot_params_init_edd(void)
{
#if IS_ENABLED(CONFIG_EDD)
        struct xen_platform_op op;
        struct edd_info *edd_info;
        u32 *mbr_signature;
        unsigned nr;
        int ret;

        edd_info = boot_params.eddbuf;
        mbr_signature = boot_params.edd_mbr_sig_buffer;

        op.cmd = XENPF_firmware_info;

        op.u.firmware_info.type = XEN_FW_DISK_INFO;
        for (nr = 0; nr < EDDMAXNR; nr++) {
                struct edd_info *info = edd_info + nr;

                op.u.firmware_info.index = nr;
                info->params.length = sizeof(info->params);
                set_xen_guest_handle(op.u.firmware_info.u.disk_info.edd_params,
                                     &info->params);
                ret = HYPERVISOR_dom0_op(&op);
                if (ret)
                        break;

#define C(x) info->x = op.u.firmware_info.u.disk_info.x
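                /*
                 * For clarity: C(device), for instance, expands to
                 * info->device = op.u.firmware_info.u.disk_info.device;
                 * i.e. each field is copied verbatim from the firmware
                 * info returned by the hypervisor.
                 */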
                C(device);
                C(version);
                C(interface_support);
                C(legacy_max_cylinder);
                C(legacy_max_head);
                C(legacy_sectors_per_track);
#undef C
        }
        boot_params.eddbuf_entries = nr;

        op.u.firmware_info.type = XEN_FW_DISK_MBR_SIGNATURE;
        for (nr = 0; nr < EDD_MBR_SIG_MAX; nr++) {
                op.u.firmware_info.index = nr;
                ret = HYPERVISOR_dom0_op(&op);
                if (ret)
                        break;
                mbr_signature[nr] = op.u.firmware_info.u.disk_mbr_signature.mbr_signature;
        }
        boot_params.edd_mbr_sig_buf_entries = nr;
#endif
}

/*
 * Set up the GDT and segment registers for -fstack-protector.  Until
 * we do this, we have to be careful not to call any stack-protected
 * function, which is most of the kernel.
 */
static void __init xen_setup_stackprotector(void)
{
        pv_cpu_ops.write_gdt_entry = xen_write_gdt_entry_boot;
        pv_cpu_ops.load_gdt = xen_load_gdt_boot;

        setup_stack_canary_segment(0);
        switch_to_new_gdt(0);

        pv_cpu_ops.write_gdt_entry = xen_write_gdt_entry;
        pv_cpu_ops.load_gdt = xen_load_gdt;
}

/* First C function to be called on Xen boot */
asmlinkage void __init xen_start_kernel(void)
{
        struct physdev_set_iopl set_iopl;
        int rc;

        if (!xen_start_info)
                return;

        xen_domain_type = XEN_PV_DOMAIN;

        xen_setup_machphys_mapping();

        /* Install Xen paravirt ops */
        pv_info = xen_info;
        pv_init_ops = xen_init_ops;
        pv_cpu_ops = xen_cpu_ops;
        pv_apic_ops = xen_apic_ops;

        x86_init.resources.memory_setup = xen_memory_setup;
        x86_init.oem.arch_setup = xen_arch_setup;
        x86_init.oem.banner = xen_banner;

        xen_init_time_ops();

        /*
         * Set up some pagetable state before starting to set any ptes.
         */

        xen_init_mmu_ops();

        /* Prevent unwanted bits from being set in PTEs. */
        __supported_pte_mask &= ~_PAGE_GLOBAL;
#if 0
        if (!xen_initial_domain())
#endif
                __supported_pte_mask &= ~(_PAGE_PWT | _PAGE_PCD);

        __supported_pte_mask |= _PAGE_IOMAP;

        /*
         * Prevent page tables from being allocated in highmem, even
         * if CONFIG_HIGHPTE is enabled.
         */
        __userpte_alloc_gfp &= ~__GFP_HIGHMEM;

        /* Work out if we support NX */
        x86_configure_nx();

        xen_setup_features();

        /* Get mfn list */
        if (!xen_feature(XENFEAT_auto_translated_physmap))
                xen_build_dynamic_phys_to_machine();

        /*
         * Set up kernel GDT and segment registers, mainly so that
         * -fstack-protector code can be executed.
         */
        xen_setup_stackprotector();

        xen_init_irq_ops();
        xen_init_cpuid_mask();

#ifdef CONFIG_X86_LOCAL_APIC
        /*
         * set up the basic apic ops.
         */
        set_xen_basic_apic_ops();
#endif

        if (xen_feature(XENFEAT_mmu_pt_update_preserve_ad)) {
                pv_mmu_ops.ptep_modify_prot_start = xen_ptep_modify_prot_start;
                pv_mmu_ops.ptep_modify_prot_commit = xen_ptep_modify_prot_commit;
        }

        machine_ops = xen_machine_ops;

        /*
         * The only reliable way to retain the initial address of the
         * percpu gdt_page is to remember it here, so we can go and
         * mark it RW later, when the initial percpu area is freed.
         */
        xen_initial_gdt = &per_cpu(gdt_page, 0);

        xen_smp_init();

#ifdef CONFIG_ACPI_NUMA
        /*
         * The pages we get from Xen are not related to machine pages,
         * so any NUMA information the kernel tries to get from ACPI
         * will be meaningless.  Prevent it from trying.
         */
        acpi_numa = -1;
#endif
#ifdef CONFIG_X86_PAT
        /*
         * For right now disable the PAT. We should remove this once
         * git commit 8eaffa67b43e99ae581622c5133e20b0f48bcef1
         * (xen/pat: Disable PAT support for now) is reverted.
         */
        pat_enabled = 0;
#endif
        /*
         * Don't do the full vcpu_info placement stuff until we have a
         * possible map and a non-dummy shared_info.
         */
        per_cpu(xen_vcpu, 0) = &HYPERVISOR_shared_info->vcpu_info[0];

        local_irq_disable();
        early_boot_irqs_disabled = true;

        xen_raw_console_write("mapping kernel into physical memory\n");
        xen_setup_kernel_pagetable((pgd_t *)xen_start_info->pt_base, xen_start_info->nr_pages);

        /* Allocate and initialize top and mid mfn levels for p2m structure */
        xen_build_mfn_list_list();

        /* keep using Xen gdt for now; no urgent need to change it */

#ifdef CONFIG_X86_32
        pv_info.kernel_rpl = 1;
        if (xen_feature(XENFEAT_supervisor_mode_kernel))
                pv_info.kernel_rpl = 0;
#else
        pv_info.kernel_rpl = 0;
#endif
        /* set the limit of our address space */
        xen_reserve_top();

        /*
         * We used to do this in xen_arch_setup, but that is too late
         * on AMD, where early_cpu_init (run before ->arch_setup())
         * calls early_amd_init, which pokes the 0xcf8 port.
         */
        set_iopl.iopl = 1;
        rc = HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl);
        if (rc != 0)
                xen_raw_printk("physdev_op failed %d\n", rc);

#ifdef CONFIG_X86_32
        /* set up basic CPUID stuff */
        cpu_detect(&new_cpu_data);
        new_cpu_data.hard_math = 1;
        new_cpu_data.wp_works_ok = 1;
        new_cpu_data.x86_capability[0] = cpuid_edx(1);
#endif

        /* Poke various useful things into boot_params */
        boot_params.hdr.type_of_loader = (9 << 4) | 0;  /* loader type 9 is Xen */
        boot_params.hdr.ramdisk_image = xen_start_info->mod_start
                ? __pa(xen_start_info->mod_start) : 0;
        boot_params.hdr.ramdisk_size = xen_start_info->mod_len;
        boot_params.hdr.cmd_line_ptr = __pa(xen_start_info->cmd_line);

        if (!xen_initial_domain()) {
                add_preferred_console("xenboot", 0, NULL);
                add_preferred_console("tty", 0, NULL);
                add_preferred_console("hvc", 0, NULL);
                if (pci_xen)
                        x86_init.pci.arch_init = pci_xen_init;
        } else {
                const struct dom0_vga_console_info *info =
                        (void *)((char *)xen_start_info +
                                 xen_start_info->console.dom0.info_off);
                struct xen_platform_op op = {
                        .cmd = XENPF_firmware_info,
                        .interface_version = XENPF_INTERFACE_VERSION,
                        .u.firmware_info.type = XEN_FW_KBD_SHIFT_FLAGS,
                };

                xen_init_vga(info, xen_start_info->console.dom0.info_size);
                xen_start_info->console.domU.mfn = 0;
                xen_start_info->console.domU.evtchn = 0;

                if (HYPERVISOR_dom0_op(&op) == 0)
                        boot_params.kbd_status = op.u.firmware_info.u.kbd_shift_flags;

                xen_init_apic();

                /* Make sure ACS will be enabled */
                pci_request_acs();

                xen_acpi_sleep_register();

                /* Avoid searching for BIOS MP tables */
                x86_init.mpparse.find_smp_config = x86_init_noop;
                x86_init.mpparse.get_smp_config = x86_init_uint_noop;

                xen_boot_params_init_edd();
        }
#ifdef CONFIG_PCI
        /* PCI BIOS service won't work from a PV guest. */
        pci_probe &= ~PCI_PROBE_BIOS;
#endif
        xen_raw_console_write("about to get started...\n");

        xen_setup_runstate_info(0);

        /* Start the world */
#ifdef CONFIG_X86_32
        i386_start_kernel();
#else
        x86_64_start_reservations((char *)__pa_symbol(&boot_params));
#endif
}

void __ref xen_hvm_init_shared_info(void)
{
        int cpu;
        struct xen_add_to_physmap xatp;
        static struct shared_info *shared_info_page;

        if (!shared_info_page)
                shared_info_page = (struct shared_info *)
                        extend_brk(PAGE_SIZE, PAGE_SIZE);
        xatp.domid = DOMID_SELF;
        xatp.idx = 0;
        xatp.space = XENMAPSPACE_shared_info;
        xatp.gpfn = __pa(shared_info_page) >> PAGE_SHIFT;
        if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp))
                BUG();

        HYPERVISOR_shared_info = (struct shared_info *)shared_info_page;

        /*
         * xen_vcpu is a pointer to the vcpu_info struct in the
         * shared_info page; we use it in the event channel upcall and
         * in some pvclock related functions. We don't need the
         * vcpu_info placement optimizations because we don't use any
         * pv_mmu or pv_irq op on HVM.
         * When xen_hvm_init_shared_info is run at boot time only vcpu 0
         * is online, but it is also run at resume time, when multiple
         * vcpus might be online.
         */
        for_each_online_cpu(cpu) {
                /* Leave it as NULL. */
                if (cpu >= MAX_VIRT_CPUS)
                        continue;
                per_cpu(xen_vcpu, cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu];
        }
}

#ifdef CONFIG_XEN_PVHVM
static void __init init_hvm_pv_info(void)
{
        int major, minor;
        uint32_t eax, ebx, ecx, edx, pages, msr, base;
        u64 pfn;

        base = xen_cpuid_base();
        cpuid(base + 1, &eax, &ebx, &ecx, &edx);

        major = eax >> 16;
        minor = eax & 0xffff;
        printk(KERN_INFO "Xen version %d.%d.\n", major, minor);

        cpuid(base + 2, &pages, &msr, &ecx, &edx);

        pfn = __pa(hypercall_page);
        wrmsr_safe(msr, (u32)pfn, (u32)(pfn >> 32));

        xen_setup_features();

        pv_info.name = "Xen HVM";

        xen_domain_type = XEN_HVM_DOMAIN;
}

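/*
 * Background on the CPUID leaves used above: xen_cpuid_base() locates
 * the Xen hypervisor leaf range. Leaf base+1 returns the hypervisor
 * version (major in the upper 16 bits of eax, minor in the lower 16),
 * and leaf base+2 returns the number of hypercall pages (eax) plus
 * the MSR (ebx) that the guest writes with the guest-physical address
 * of hypercall_page, which is how the hypercall trampoline gets wired
 * up under HVM.
 */
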
static int __cpuinit xen_hvm_cpu_notify(struct notifier_block *self,
                                        unsigned long action, void *hcpu)
{
        int cpu = (long)hcpu;

        switch (action) {
        case CPU_UP_PREPARE:
                xen_vcpu_setup(cpu);
                if (xen_have_vector_callback) {
                        xen_init_lock_cpu(cpu);
                        if (xen_feature(XENFEAT_hvm_safe_pvclock))
                                xen_setup_timer(cpu);
                }
                break;
        default:
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block xen_hvm_cpu_notifier __cpuinitdata = {
        .notifier_call  = xen_hvm_cpu_notify,
};

static void __init xen_hvm_guest_init(void)
{
        init_hvm_pv_info();

        xen_hvm_init_shared_info();

        if (xen_feature(XENFEAT_hvm_callback_vector))
                xen_have_vector_callback = 1;
        xen_hvm_smp_init();
        register_cpu_notifier(&xen_hvm_cpu_notifier);
        xen_unplug_emulated_devices();
        x86_init.irqs.intr_init = xen_init_IRQ;
        xen_hvm_init_time_ops();
        xen_hvm_init_mmu_ops();
}

static bool __init xen_hvm_platform(void)
{
        if (xen_pv_domain())
                return false;

        if (!xen_cpuid_base())
                return false;

        return true;
}

bool xen_hvm_need_lapic(void)
{
        if (xen_pv_domain())
                return false;
        if (!xen_hvm_domain())
                return false;
        if (xen_feature(XENFEAT_hvm_pirqs) && xen_have_vector_callback)
                return false;
        return true;
}
EXPORT_SYMBOL_GPL(xen_hvm_need_lapic);

const struct hypervisor_x86 x86_hyper_xen_hvm __refconst = {
        .name                   = "Xen HVM",
        .detect                 = xen_hvm_platform,
        .init_platform          = xen_hvm_guest_init,
        .x2apic_available       = xen_x2apic_para_available,
};
EXPORT_SYMBOL(x86_hyper_xen_hvm);
#endif