arch/powerpc/kvm/book3s_hv.c (karo-tx-linux.git, blob b38c10e00c168c3b28ca1ceaa50570077b1783f4)
1 /*
2  * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
3  * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
4  *
5  * Authors:
6  *    Paul Mackerras <paulus@au1.ibm.com>
7  *    Alexander Graf <agraf@suse.de>
8  *    Kevin Wolf <mail@kevin-wolf.de>
9  *
10  * Description: KVM functions specific to running on Book 3S
11  * processors in hypervisor mode (specifically POWER7 and later).
12  *
13  * This file is derived from arch/powerpc/kvm/book3s.c,
14  * by Alexander Graf <agraf@suse.de>.
15  *
16  * This program is free software; you can redistribute it and/or modify
17  * it under the terms of the GNU General Public License, version 2, as
18  * published by the Free Software Foundation.
19  */
20
21 #include <linux/kvm_host.h>
22 #include <linux/err.h>
23 #include <linux/slab.h>
24 #include <linux/preempt.h>
25 #include <linux/sched.h>
26 #include <linux/delay.h>
27 #include <linux/export.h>
28 #include <linux/fs.h>
29 #include <linux/anon_inodes.h>
30 #include <linux/cpumask.h>
31 #include <linux/spinlock.h>
32 #include <linux/page-flags.h>
33 #include <linux/srcu.h>
34 #include <linux/miscdevice.h>
35 #include <linux/debugfs.h>
36
37 #include <asm/reg.h>
38 #include <asm/cputable.h>
39 #include <asm/cache.h>
40 #include <asm/cacheflush.h>
41 #include <asm/tlbflush.h>
42 #include <asm/uaccess.h>
43 #include <asm/io.h>
44 #include <asm/kvm_ppc.h>
45 #include <asm/kvm_book3s.h>
46 #include <asm/mmu_context.h>
47 #include <asm/lppaca.h>
48 #include <asm/processor.h>
49 #include <asm/cputhreads.h>
50 #include <asm/page.h>
51 #include <asm/hvcall.h>
52 #include <asm/switch_to.h>
53 #include <asm/smp.h>
54 #include <linux/gfp.h>
55 #include <linux/vmalloc.h>
56 #include <linux/highmem.h>
57 #include <linux/hugetlb.h>
58 #include <linux/module.h>
59
60 #include "book3s.h"
61
62 #define CREATE_TRACE_POINTS
63 #include "trace_hv.h"
64
65 /* #define EXIT_DEBUG */
66 /* #define EXIT_DEBUG_SIMPLE */
67 /* #define EXIT_DEBUG_INT */
68
69 /* Used to indicate that a guest page fault needs to be handled */
70 #define RESUME_PAGE_FAULT       (RESUME_GUEST | RESUME_FLAG_ARCH1)
71
72 /* Used as a "null" value for timebase values */
73 #define TB_NIL  (~(u64)0)
74
75 static DECLARE_BITMAP(default_enabled_hcalls, MAX_HCALL_OPCODE/4 + 1);
76
77 #if defined(CONFIG_PPC_64K_PAGES)
78 #define MPP_BUFFER_ORDER        0
79 #elif defined(CONFIG_PPC_4K_PAGES)
80 #define MPP_BUFFER_ORDER        3
81 #endif
82
83
84 static void kvmppc_end_cede(struct kvm_vcpu *vcpu);
85 static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu);
86
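/*
 * Kick a vcpu: wake it up if it is sleeping on its wait queue, and if
 * it is currently running in the guest on another CPU, poke that CPU
 * with an IPI (via the XICS when the target thread has a real XICS,
 * otherwise an ordinary reschedule IPI) so that it re-examines its
 * state.
 */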
87 static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu)
88 {
89         int me;
90         int cpu = vcpu->cpu;
91         wait_queue_head_t *wqp;
92
93         wqp = kvm_arch_vcpu_wq(vcpu);
94         if (waitqueue_active(wqp)) {
95                 wake_up_interruptible(wqp);
96                 ++vcpu->stat.halt_wakeup;
97         }
98
99         me = get_cpu();
100
101         /* CPU points to the first thread of the core */
102         if (cpu != me && cpu >= 0 && cpu < nr_cpu_ids) {
103 #ifdef CONFIG_PPC_ICP_NATIVE
104                 int real_cpu = cpu + vcpu->arch.ptid;
105                 if (paca[real_cpu].kvm_hstate.xics_phys)
106                         xics_wake_cpu(real_cpu);
107                 else
108 #endif
109                 if (cpu_online(cpu))
110                         smp_send_reschedule(cpu);
111         }
112         put_cpu();
113 }
114
115 /*
116  * We use the vcpu_load/put functions to measure stolen time.
117  * Stolen time is counted as time when either the vcpu is able to
118  * run as part of a virtual core, but the task running the vcore
119  * is preempted or sleeping, or when the vcpu needs something done
120  * in the kernel by the task running the vcpu, but that task is
121  * preempted or sleeping.  Those two things have to be counted
122  * separately, since one of the vcpu tasks will take on the job
123  * of running the core, and the other vcpu tasks in the vcore will
124  * sleep waiting for it to do that, but that sleep shouldn't count
125  * as stolen time.
126  *
127  * Hence we accumulate stolen time when the vcpu can run as part of
128  * a vcore using vc->stolen_tb, and the stolen time when the vcpu
129  * needs its task to do other things in the kernel (for example,
130  * service a page fault) in busy_stolen.  We don't accumulate
131  * stolen time for a vcore when it is inactive, or for a vcpu
132  * when it is in state RUNNING or NOTREADY.  NOTREADY is a bit of
133  * a misnomer; it means that the vcpu task is not executing in
134  * the KVM_VCPU_RUN ioctl, i.e. it is in userspace or elsewhere in
135  * the kernel.  We don't have any way of dividing up that time
136  * between time that the vcpu is genuinely stopped, time that
137  * the task is actively working on behalf of the vcpu, and time
138  * that the task is preempted, so we don't count any of it as
139  * stolen.
140  *
141  * Updates to busy_stolen are protected by arch.tbacct_lock;
142  * updates to vc->stolen_tb are protected by the vcore->stoltb_lock
143  * lock.  The stolen times are measured in units of timebase ticks.
144  * (Note that the != TB_NIL checks below are purely defensive;
145  * they should never fail.)
146  */
147
148 static void kvmppc_core_vcpu_load_hv(struct kvm_vcpu *vcpu, int cpu)
149 {
150         struct kvmppc_vcore *vc = vcpu->arch.vcore;
151         unsigned long flags;
152
153         /*
154          * We can test vc->runner without taking the vcore lock,
155          * because only this task ever sets vc->runner to this
156          * vcpu, and once it is set to this vcpu, only this task
157          * ever sets it to NULL.
158          */
159         if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE) {
160                 spin_lock_irqsave(&vc->stoltb_lock, flags);
161                 if (vc->preempt_tb != TB_NIL) {
162                         vc->stolen_tb += mftb() - vc->preempt_tb;
163                         vc->preempt_tb = TB_NIL;
164                 }
165                 spin_unlock_irqrestore(&vc->stoltb_lock, flags);
166         }
167         spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
168         if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST &&
169             vcpu->arch.busy_preempt != TB_NIL) {
170                 vcpu->arch.busy_stolen += mftb() - vcpu->arch.busy_preempt;
171                 vcpu->arch.busy_preempt = TB_NIL;
172         }
173         spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);
174 }
175
176 static void kvmppc_core_vcpu_put_hv(struct kvm_vcpu *vcpu)
177 {
178         struct kvmppc_vcore *vc = vcpu->arch.vcore;
179         unsigned long flags;
180
181         if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE) {
182                 spin_lock_irqsave(&vc->stoltb_lock, flags);
183                 vc->preempt_tb = mftb();
184                 spin_unlock_irqrestore(&vc->stoltb_lock, flags);
185         }
186         spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
187         if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST)
188                 vcpu->arch.busy_preempt = mftb();
189         spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);
190 }
191
192 static void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr)
193 {
194         vcpu->arch.shregs.msr = msr;
195         kvmppc_end_cede(vcpu);
196 }
197
198 void kvmppc_set_pvr_hv(struct kvm_vcpu *vcpu, u32 pvr)
199 {
200         vcpu->arch.pvr = pvr;
201 }
202
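/*
 * Set the architecture compatibility level for the vcpu's virtual
 * core: translate the requested architecture PVR value into the PCR
 * bits needed to present that level to the guest, reject levels the
 * host CPU cannot provide, and store the result in the vcore under
 * its lock.
 */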
203 int kvmppc_set_arch_compat(struct kvm_vcpu *vcpu, u32 arch_compat)
204 {
205         unsigned long pcr = 0;
206         struct kvmppc_vcore *vc = vcpu->arch.vcore;
207
208         if (arch_compat) {
209                 switch (arch_compat) {
210                 case PVR_ARCH_205:
211                         /*
212                          * If an arch bit is set in PCR, all the defined
213                          * higher-order arch bits also have to be set.
214                          */
215                         pcr = PCR_ARCH_206 | PCR_ARCH_205;
216                         break;
217                 case PVR_ARCH_206:
218                 case PVR_ARCH_206p:
219                         pcr = PCR_ARCH_206;
220                         break;
221                 case PVR_ARCH_207:
222                         break;
223                 default:
224                         return -EINVAL;
225                 }
226
227                 if (!cpu_has_feature(CPU_FTR_ARCH_207S)) {
228                         /* POWER7 can't emulate POWER8 */
229                         if (!(pcr & PCR_ARCH_206))
230                                 return -EINVAL;
231                         pcr &= ~PCR_ARCH_206;
232                 }
233         }
234
235         spin_lock(&vc->lock);
236         vc->arch_compat = arch_compat;
237         vc->pcr = pcr;
238         spin_unlock(&vc->lock);
239
240         return 0;
241 }
242
243 void kvmppc_dump_regs(struct kvm_vcpu *vcpu)
244 {
245         int r;
246
247         pr_err("vcpu %p (%d):\n", vcpu, vcpu->vcpu_id);
248         pr_err("pc  = %.16lx  msr = %.16llx  trap = %x\n",
249                vcpu->arch.pc, vcpu->arch.shregs.msr, vcpu->arch.trap);
250         for (r = 0; r < 16; ++r)
251                 pr_err("r%2d = %.16lx  r%d = %.16lx\n",
252                        r, kvmppc_get_gpr(vcpu, r),
253                        r+16, kvmppc_get_gpr(vcpu, r+16));
254         pr_err("ctr = %.16lx  lr  = %.16lx\n",
255                vcpu->arch.ctr, vcpu->arch.lr);
256         pr_err("srr0 = %.16llx srr1 = %.16llx\n",
257                vcpu->arch.shregs.srr0, vcpu->arch.shregs.srr1);
258         pr_err("sprg0 = %.16llx sprg1 = %.16llx\n",
259                vcpu->arch.shregs.sprg0, vcpu->arch.shregs.sprg1);
260         pr_err("sprg2 = %.16llx sprg3 = %.16llx\n",
261                vcpu->arch.shregs.sprg2, vcpu->arch.shregs.sprg3);
262         pr_err("cr = %.8x  xer = %.16lx  dsisr = %.8x\n",
263                vcpu->arch.cr, vcpu->arch.xer, vcpu->arch.shregs.dsisr);
264         pr_err("dar = %.16llx\n", vcpu->arch.shregs.dar);
265         pr_err("fault dar = %.16lx dsisr = %.8x\n",
266                vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
267         pr_err("SLB (%d entries):\n", vcpu->arch.slb_max);
268         for (r = 0; r < vcpu->arch.slb_max; ++r)
269                 pr_err("  ESID = %.16llx VSID = %.16llx\n",
270                        vcpu->arch.slb[r].orige, vcpu->arch.slb[r].origv);
271         pr_err("lpcr = %.16lx sdr1 = %.16lx last_inst = %.8x\n",
272                vcpu->arch.vcore->lpcr, vcpu->kvm->arch.sdr1,
273                vcpu->arch.last_inst);
274 }
275
276 struct kvm_vcpu *kvmppc_find_vcpu(struct kvm *kvm, int id)
277 {
278         int r;
279         struct kvm_vcpu *v, *ret = NULL;
280
281         mutex_lock(&kvm->lock);
282         kvm_for_each_vcpu(r, v, kvm) {
283                 if (v->vcpu_id == id) {
284                         ret = v;
285                         break;
286                 }
287         }
288         mutex_unlock(&kvm->lock);
289         return ret;
290 }
291
292 static void init_vpa(struct kvm_vcpu *vcpu, struct lppaca *vpa)
293 {
294         vpa->__old_status |= LPPACA_OLD_SHARED_PROC;
295         vpa->yield_count = cpu_to_be32(1);
296 }
297
298 static int set_vpa(struct kvm_vcpu *vcpu, struct kvmppc_vpa *v,
299                    unsigned long addr, unsigned long len)
300 {
301         /* check address is cacheline aligned */
302         if (addr & (L1_CACHE_BYTES - 1))
303                 return -EINVAL;
304         spin_lock(&vcpu->arch.vpa_update_lock);
305         if (v->next_gpa != addr || v->len != len) {
306                 v->next_gpa = addr;
307                 v->len = addr ? len : 0;
308                 v->update_pending = 1;
309         }
310         spin_unlock(&vcpu->arch.vpa_update_lock);
311         return 0;
312 }
313
314 /* Length for a per-processor buffer is passed in at offset 4 in the buffer */
315 struct reg_vpa {
316         u32 dummy;
317         union {
318                 __be16 hword;
319                 __be32 word;
320         } length;
321 };
322
323 static int vpa_is_registered(struct kvmppc_vpa *vpap)
324 {
325         if (vpap->update_pending)
326                 return vpap->next_gpa != 0;
327         return vpap->pinned_addr != NULL;
328 }
329
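/*
 * Handle the H_REGISTER_VPA hypercall for the target vcpu.  Validates
 * the alignment and length of the area being registered, enforces the
 * ordering rules (a VPA must be registered before a DTL or SLB shadow
 * buffer, and may not be deregistered while either of those is still
 * registered), and records the new address and length with
 * update_pending set so that kvmppc_update_vpas() will pin or unpin
 * the area later.
 */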
330 static unsigned long do_h_register_vpa(struct kvm_vcpu *vcpu,
331                                        unsigned long flags,
332                                        unsigned long vcpuid, unsigned long vpa)
333 {
334         struct kvm *kvm = vcpu->kvm;
335         unsigned long len, nb;
336         void *va;
337         struct kvm_vcpu *tvcpu;
338         int err;
339         int subfunc;
340         struct kvmppc_vpa *vpap;
341
342         tvcpu = kvmppc_find_vcpu(kvm, vcpuid);
343         if (!tvcpu)
344                 return H_PARAMETER;
345
346         subfunc = (flags >> H_VPA_FUNC_SHIFT) & H_VPA_FUNC_MASK;
347         if (subfunc == H_VPA_REG_VPA || subfunc == H_VPA_REG_DTL ||
348             subfunc == H_VPA_REG_SLB) {
349                 /* Registering new area - address must be cache-line aligned */
350                 if ((vpa & (L1_CACHE_BYTES - 1)) || !vpa)
351                         return H_PARAMETER;
352
353                 /* convert logical addr to kernel addr and read length */
354                 va = kvmppc_pin_guest_page(kvm, vpa, &nb);
355                 if (va == NULL)
356                         return H_PARAMETER;
357                 if (subfunc == H_VPA_REG_VPA)
358                         len = be16_to_cpu(((struct reg_vpa *)va)->length.hword);
359                 else
360                         len = be32_to_cpu(((struct reg_vpa *)va)->length.word);
361                 kvmppc_unpin_guest_page(kvm, va, vpa, false);
362
363                 /* Check length */
364                 if (len > nb || len < sizeof(struct reg_vpa))
365                         return H_PARAMETER;
366         } else {
367                 vpa = 0;
368                 len = 0;
369         }
370
371         err = H_PARAMETER;
372         vpap = NULL;
373         spin_lock(&tvcpu->arch.vpa_update_lock);
374
375         switch (subfunc) {
376         case H_VPA_REG_VPA:             /* register VPA */
377                 if (len < sizeof(struct lppaca))
378                         break;
379                 vpap = &tvcpu->arch.vpa;
380                 err = 0;
381                 break;
382
383         case H_VPA_REG_DTL:             /* register DTL */
384                 if (len < sizeof(struct dtl_entry))
385                         break;
386                 len -= len % sizeof(struct dtl_entry);
387
388                 /* Check that they have previously registered a VPA */
389                 err = H_RESOURCE;
390                 if (!vpa_is_registered(&tvcpu->arch.vpa))
391                         break;
392
393                 vpap = &tvcpu->arch.dtl;
394                 err = 0;
395                 break;
396
397         case H_VPA_REG_SLB:             /* register SLB shadow buffer */
398                 /* Check that they have previously registered a VPA */
399                 err = H_RESOURCE;
400                 if (!vpa_is_registered(&tvcpu->arch.vpa))
401                         break;
402
403                 vpap = &tvcpu->arch.slb_shadow;
404                 err = 0;
405                 break;
406
407         case H_VPA_DEREG_VPA:           /* deregister VPA */
408                 /* Check they don't still have a DTL or SLB buf registered */
409                 err = H_RESOURCE;
410                 if (vpa_is_registered(&tvcpu->arch.dtl) ||
411                     vpa_is_registered(&tvcpu->arch.slb_shadow))
412                         break;
413
414                 vpap = &tvcpu->arch.vpa;
415                 err = 0;
416                 break;
417
418         case H_VPA_DEREG_DTL:           /* deregister DTL */
419                 vpap = &tvcpu->arch.dtl;
420                 err = 0;
421                 break;
422
423         case H_VPA_DEREG_SLB:           /* deregister SLB shadow buffer */
424                 vpap = &tvcpu->arch.slb_shadow;
425                 err = 0;
426                 break;
427         }
428
429         if (vpap) {
430                 vpap->next_gpa = vpa;
431                 vpap->len = len;
432                 vpap->update_pending = 1;
433         }
434
435         spin_unlock(&tvcpu->arch.vpa_update_lock);
436
437         return err;
438 }
439
440 static void kvmppc_update_vpa(struct kvm_vcpu *vcpu, struct kvmppc_vpa *vpap)
441 {
442         struct kvm *kvm = vcpu->kvm;
443         void *va;
444         unsigned long nb;
445         unsigned long gpa;
446
447         /*
448          * We need to pin the page pointed to by vpap->next_gpa,
449          * but we can't call kvmppc_pin_guest_page under the lock
450          * as it does get_user_pages() and down_read().  So we
451          * have to drop the lock, pin the page, then get the lock
452          * again and check that a new area didn't get registered
453          * in the meantime.
454          */
455         for (;;) {
456                 gpa = vpap->next_gpa;
457                 spin_unlock(&vcpu->arch.vpa_update_lock);
458                 va = NULL;
459                 nb = 0;
460                 if (gpa)
461                         va = kvmppc_pin_guest_page(kvm, gpa, &nb);
462                 spin_lock(&vcpu->arch.vpa_update_lock);
463                 if (gpa == vpap->next_gpa)
464                         break;
465                 /* sigh... unpin that one and try again */
466                 if (va)
467                         kvmppc_unpin_guest_page(kvm, va, gpa, false);
468         }
469
470         vpap->update_pending = 0;
471         if (va && nb < vpap->len) {
472                 /*
473                  * If it's now too short, it must be that userspace
474                  * has changed the mappings underlying guest memory,
475                  * so unregister the region.
476                  */
477                 kvmppc_unpin_guest_page(kvm, va, gpa, false);
478                 va = NULL;
479         }
480         if (vpap->pinned_addr)
481                 kvmppc_unpin_guest_page(kvm, vpap->pinned_addr, vpap->gpa,
482                                         vpap->dirty);
483         vpap->gpa = gpa;
484         vpap->pinned_addr = va;
485         vpap->dirty = false;
486         if (va)
487                 vpap->pinned_end = va + vpap->len;
488 }
489
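/*
 * Process any pending VPA, DTL or SLB-shadow (re)registrations for
 * this vcpu, pinning the newly registered areas and unpinning the old
 * ones.
 */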
490 static void kvmppc_update_vpas(struct kvm_vcpu *vcpu)
491 {
492         if (!(vcpu->arch.vpa.update_pending ||
493               vcpu->arch.slb_shadow.update_pending ||
494               vcpu->arch.dtl.update_pending))
495                 return;
496
497         spin_lock(&vcpu->arch.vpa_update_lock);
498         if (vcpu->arch.vpa.update_pending) {
499                 kvmppc_update_vpa(vcpu, &vcpu->arch.vpa);
500                 if (vcpu->arch.vpa.pinned_addr)
501                         init_vpa(vcpu, vcpu->arch.vpa.pinned_addr);
502         }
503         if (vcpu->arch.dtl.update_pending) {
504                 kvmppc_update_vpa(vcpu, &vcpu->arch.dtl);
505                 vcpu->arch.dtl_ptr = vcpu->arch.dtl.pinned_addr;
506                 vcpu->arch.dtl_index = 0;
507         }
508         if (vcpu->arch.slb_shadow.update_pending)
509                 kvmppc_update_vpa(vcpu, &vcpu->arch.slb_shadow);
510         spin_unlock(&vcpu->arch.vpa_update_lock);
511 }
512
513 /*
514  * Return the accumulated stolen time for the vcore up until `now'.
515  * The caller should hold the vcore lock.
516  */
517 static u64 vcore_stolen_time(struct kvmppc_vcore *vc, u64 now)
518 {
519         u64 p;
520         unsigned long flags;
521
522         spin_lock_irqsave(&vc->stoltb_lock, flags);
523         p = vc->stolen_tb;
524         if (vc->vcore_state != VCORE_INACTIVE &&
525             vc->preempt_tb != TB_NIL)
526                 p += now - vc->preempt_tb;
527         spin_unlock_irqrestore(&vc->stoltb_lock, flags);
528         return p;
529 }
530
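/*
 * Write an entry into the guest's dispatch trace log, if one is
 * registered, charging it with the stolen time accumulated since the
 * last entry, then advance the ring pointer and the dtl_idx count in
 * the VPA.
 */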
531 static void kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu,
532                                     struct kvmppc_vcore *vc)
533 {
534         struct dtl_entry *dt;
535         struct lppaca *vpa;
536         unsigned long stolen;
537         unsigned long core_stolen;
538         u64 now;
539
540         dt = vcpu->arch.dtl_ptr;
541         vpa = vcpu->arch.vpa.pinned_addr;
542         now = mftb();
543         core_stolen = vcore_stolen_time(vc, now);
544         stolen = core_stolen - vcpu->arch.stolen_logged;
545         vcpu->arch.stolen_logged = core_stolen;
546         spin_lock_irq(&vcpu->arch.tbacct_lock);
547         stolen += vcpu->arch.busy_stolen;
548         vcpu->arch.busy_stolen = 0;
549         spin_unlock_irq(&vcpu->arch.tbacct_lock);
550         if (!dt || !vpa)
551                 return;
552         memset(dt, 0, sizeof(struct dtl_entry));
553         dt->dispatch_reason = 7;
554         dt->processor_id = cpu_to_be16(vc->pcpu + vcpu->arch.ptid);
555         dt->timebase = cpu_to_be64(now + vc->tb_offset);
556         dt->enqueue_to_dispatch_time = cpu_to_be32(stolen);
557         dt->srr0 = cpu_to_be64(kvmppc_get_pc(vcpu));
558         dt->srr1 = cpu_to_be64(vcpu->arch.shregs.msr);
559         ++dt;
560         if (dt == vcpu->arch.dtl.pinned_end)
561                 dt = vcpu->arch.dtl.pinned_addr;
562         vcpu->arch.dtl_ptr = dt;
563         /* order writing *dt vs. writing vpa->dtl_idx */
564         smp_wmb();
565         vpa->dtl_idx = cpu_to_be64(++vcpu->arch.dtl_index);
566         vcpu->arch.dtl.dirty = true;
567 }
568
569 static bool kvmppc_power8_compatible(struct kvm_vcpu *vcpu)
570 {
571         if (vcpu->arch.vcore->arch_compat >= PVR_ARCH_207)
572                 return true;
573         if ((!vcpu->arch.vcore->arch_compat) &&
574             cpu_has_feature(CPU_FTR_ARCH_207S))
575                 return true;
576         return false;
577 }
578
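/*
 * Handle H_SET_MODE for the CIABR and DAWR/DAWRX resources on
 * POWER8-compatible guests, refusing values that would place a
 * breakpoint or watchpoint on hypervisor state.  Other resources
 * return H_TOO_HARD so the hypercall is passed on.
 */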
579 static int kvmppc_h_set_mode(struct kvm_vcpu *vcpu, unsigned long mflags,
580                              unsigned long resource, unsigned long value1,
581                              unsigned long value2)
582 {
583         switch (resource) {
584         case H_SET_MODE_RESOURCE_SET_CIABR:
585                 if (!kvmppc_power8_compatible(vcpu))
586                         return H_P2;
587                 if (value2)
588                         return H_P4;
589                 if (mflags)
590                         return H_UNSUPPORTED_FLAG_START;
591                 /* Guests can't breakpoint the hypervisor */
592                 if ((value1 & CIABR_PRIV) == CIABR_PRIV_HYPER)
593                         return H_P3;
594                 vcpu->arch.ciabr  = value1;
595                 return H_SUCCESS;
596         case H_SET_MODE_RESOURCE_SET_DAWR:
597                 if (!kvmppc_power8_compatible(vcpu))
598                         return H_P2;
599                 if (mflags)
600                         return H_UNSUPPORTED_FLAG_START;
601                 if (value2 & DABRX_HYP)
602                         return H_P4;
603                 vcpu->arch.dawr  = value1;
604                 vcpu->arch.dawrx = value2;
605                 return H_SUCCESS;
606         default:
607                 return H_TOO_HARD;
608         }
609 }
610
611 static int kvm_arch_vcpu_yield_to(struct kvm_vcpu *target)
612 {
613         struct kvmppc_vcore *vcore = target->arch.vcore;
614
615         /*
616          * We expect to have been called by the real mode handler
617          * (kvmppc_rm_h_confer()) which would have directly returned
618          * H_SUCCESS if the source vcore wasn't idle (e.g. if it may
619          * have useful work to do and should not confer) so we don't
620          * recheck that here.
621          */
622
623         spin_lock(&vcore->lock);
624         if (target->arch.state == KVMPPC_VCPU_RUNNABLE &&
625             vcore->vcore_state != VCORE_INACTIVE)
626                 target = vcore->runner;
627         spin_unlock(&vcore->lock);
628
629         return kvm_vcpu_yield_to(target);
630 }
631
632 static int kvmppc_get_yield_count(struct kvm_vcpu *vcpu)
633 {
634         int yield_count = 0;
635         struct lppaca *lppaca;
636
637         spin_lock(&vcpu->arch.vpa_update_lock);
638         lppaca = (struct lppaca *)vcpu->arch.vpa.pinned_addr;
639         if (lppaca)
640                 yield_count = be32_to_cpu(lppaca->yield_count);
641         spin_unlock(&vcpu->arch.vpa_update_lock);
642         return yield_count;
643 }
644
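/*
 * Handle hypercalls that the real-mode handlers have passed up to the
 * host kernel.  Returns RESUME_GUEST when the hcall has been handled
 * here (with the result placed in GPR3), or RESUME_HOST (or an error)
 * when it must be completed by userspace.
 */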
645 int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
646 {
647         unsigned long req = kvmppc_get_gpr(vcpu, 3);
648         unsigned long target, ret = H_SUCCESS;
649         int yield_count;
650         struct kvm_vcpu *tvcpu;
651         int idx, rc;
652
653         if (req <= MAX_HCALL_OPCODE &&
654             !test_bit(req/4, vcpu->kvm->arch.enabled_hcalls))
655                 return RESUME_HOST;
656
657         switch (req) {
658         case H_CEDE:
659                 break;
660         case H_PROD:
661                 target = kvmppc_get_gpr(vcpu, 4);
662                 tvcpu = kvmppc_find_vcpu(vcpu->kvm, target);
663                 if (!tvcpu) {
664                         ret = H_PARAMETER;
665                         break;
666                 }
667                 tvcpu->arch.prodded = 1;
668                 smp_mb();
669                 if (vcpu->arch.ceded) {
670                         if (waitqueue_active(&vcpu->wq)) {
671                                 wake_up_interruptible(&vcpu->wq);
672                                 vcpu->stat.halt_wakeup++;
673                         }
674                 }
675                 break;
676         case H_CONFER:
677                 target = kvmppc_get_gpr(vcpu, 4);
678                 if (target == -1)
679                         break;
680                 tvcpu = kvmppc_find_vcpu(vcpu->kvm, target);
681                 if (!tvcpu) {
682                         ret = H_PARAMETER;
683                         break;
684                 }
685                 yield_count = kvmppc_get_gpr(vcpu, 5);
686                 if (kvmppc_get_yield_count(tvcpu) != yield_count)
687                         break;
688                 kvm_arch_vcpu_yield_to(tvcpu);
689                 break;
690         case H_REGISTER_VPA:
691                 ret = do_h_register_vpa(vcpu, kvmppc_get_gpr(vcpu, 4),
692                                         kvmppc_get_gpr(vcpu, 5),
693                                         kvmppc_get_gpr(vcpu, 6));
694                 break;
695         case H_RTAS:
696                 if (list_empty(&vcpu->kvm->arch.rtas_tokens))
697                         return RESUME_HOST;
698
699                 idx = srcu_read_lock(&vcpu->kvm->srcu);
700                 rc = kvmppc_rtas_hcall(vcpu);
701                 srcu_read_unlock(&vcpu->kvm->srcu, idx);
702
703                 if (rc == -ENOENT)
704                         return RESUME_HOST;
705                 else if (rc == 0)
706                         break;
707
708                 /* Send the error out to userspace via KVM_RUN */
709                 return rc;
710         case H_LOGICAL_CI_LOAD:
711                 ret = kvmppc_h_logical_ci_load(vcpu);
712                 if (ret == H_TOO_HARD)
713                         return RESUME_HOST;
714                 break;
715         case H_LOGICAL_CI_STORE:
716                 ret = kvmppc_h_logical_ci_store(vcpu);
717                 if (ret == H_TOO_HARD)
718                         return RESUME_HOST;
719                 break;
720         case H_SET_MODE:
721                 ret = kvmppc_h_set_mode(vcpu, kvmppc_get_gpr(vcpu, 4),
722                                         kvmppc_get_gpr(vcpu, 5),
723                                         kvmppc_get_gpr(vcpu, 6),
724                                         kvmppc_get_gpr(vcpu, 7));
725                 if (ret == H_TOO_HARD)
726                         return RESUME_HOST;
727                 break;
728         case H_XIRR:
729         case H_CPPR:
730         case H_EOI:
731         case H_IPI:
732         case H_IPOLL:
733         case H_XIRR_X:
734                 if (kvmppc_xics_enabled(vcpu)) {
735                         ret = kvmppc_xics_hcall(vcpu, req);
736                         break;
737                 } /* fallthrough */
738         default:
739                 return RESUME_HOST;
740         }
741         kvmppc_set_gpr(vcpu, 3, ret);
742         vcpu->arch.hcall_needed = 0;
743         return RESUME_GUEST;
744 }
745
746 static int kvmppc_hcall_impl_hv(unsigned long cmd)
747 {
748         switch (cmd) {
749         case H_CEDE:
750         case H_PROD:
751         case H_CONFER:
752         case H_REGISTER_VPA:
753         case H_SET_MODE:
754         case H_LOGICAL_CI_LOAD:
755         case H_LOGICAL_CI_STORE:
756 #ifdef CONFIG_KVM_XICS
757         case H_XIRR:
758         case H_CPPR:
759         case H_EOI:
760         case H_IPI:
761         case H_IPOLL:
762         case H_XIRR_X:
763 #endif
764                 return 1;
765         }
766
767         /* See if it's in the real-mode table */
768         return kvmppc_hcall_impl_hv_realmode(cmd);
769 }
770
771 static int kvmppc_emulate_debug_inst(struct kvm_run *run,
772                                         struct kvm_vcpu *vcpu)
773 {
774         u32 last_inst;
775
776         if (kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst) !=
777                                         EMULATE_DONE) {
778                 /*
779                  * Fetch failed, so return to guest and
780                  * try executing it again.
781                  */
782                 return RESUME_GUEST;
783         }
784
785         if (last_inst == KVMPPC_INST_SW_BREAKPOINT) {
786                 run->exit_reason = KVM_EXIT_DEBUG;
787                 run->debug.arch.address = kvmppc_get_pc(vcpu);
788                 return RESUME_HOST;
789         } else {
790                 kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
791                 return RESUME_GUEST;
792         }
793 }
794
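/*
 * Decode the trap that caused the guest to exit and decide what to do
 * next: resume the guest, handle a guest page fault, or go back to
 * userspace with an appropriate exit reason.
 */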
795 static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
796                                  struct task_struct *tsk)
797 {
798         int r = RESUME_HOST;
799
800         vcpu->stat.sum_exits++;
801
802         run->exit_reason = KVM_EXIT_UNKNOWN;
803         run->ready_for_interrupt_injection = 1;
804         switch (vcpu->arch.trap) {
805         /* We're good on these - the host merely wanted to get our attention */
806         case BOOK3S_INTERRUPT_HV_DECREMENTER:
807                 vcpu->stat.dec_exits++;
808                 r = RESUME_GUEST;
809                 break;
810         case BOOK3S_INTERRUPT_EXTERNAL:
811         case BOOK3S_INTERRUPT_H_DOORBELL:
812                 vcpu->stat.ext_intr_exits++;
813                 r = RESUME_GUEST;
814                 break;
815         /* HMI is hypervisor interrupt and host has handled it. Resume guest.*/
816         case BOOK3S_INTERRUPT_HMI:
817         case BOOK3S_INTERRUPT_PERFMON:
818                 r = RESUME_GUEST;
819                 break;
820         case BOOK3S_INTERRUPT_MACHINE_CHECK:
821                 /*
822                  * Deliver a machine check interrupt to the guest.
823                  * We have to do this, even if the host has handled the
824                  * machine check, because machine checks use SRR0/1 and
825                  * the interrupt might have trashed guest state in them.
826                  */
827                 kvmppc_book3s_queue_irqprio(vcpu,
828                                             BOOK3S_INTERRUPT_MACHINE_CHECK);
829                 r = RESUME_GUEST;
830                 break;
831         case BOOK3S_INTERRUPT_PROGRAM:
832         {
833                 ulong flags;
834                 /*
835                  * Normally program interrupts are delivered directly
836                  * to the guest by the hardware, but we can get here
837                  * as a result of a hypervisor emulation interrupt
838                  * (e40) getting turned into a 700 by BML RTAS.
839                  */
840                 flags = vcpu->arch.shregs.msr & 0x1f0000ull;
841                 kvmppc_core_queue_program(vcpu, flags);
842                 r = RESUME_GUEST;
843                 break;
844         }
845         case BOOK3S_INTERRUPT_SYSCALL:
846         {
847                 /* hcall - punt to userspace */
848                 int i;
849
850         /* Hypercalls made with MSR_PR set have already been handled
851          * in real mode and never reach here.
852          */
853
854                 run->papr_hcall.nr = kvmppc_get_gpr(vcpu, 3);
855                 for (i = 0; i < 9; ++i)
856                         run->papr_hcall.args[i] = kvmppc_get_gpr(vcpu, 4 + i);
857                 run->exit_reason = KVM_EXIT_PAPR_HCALL;
858                 vcpu->arch.hcall_needed = 1;
859                 r = RESUME_HOST;
860                 break;
861         }
862         /*
863          * We get these next two if the guest accesses a page which it thinks
864          * it has mapped but which is not actually present, either because
865          * it is for an emulated I/O device or because the corresponding
866          * host page has been paged out.  Any other HDSI/HISI interrupts
867          * have been handled already.
868          */
869         case BOOK3S_INTERRUPT_H_DATA_STORAGE:
870                 r = RESUME_PAGE_FAULT;
871                 break;
872         case BOOK3S_INTERRUPT_H_INST_STORAGE:
873                 vcpu->arch.fault_dar = kvmppc_get_pc(vcpu);
874                 vcpu->arch.fault_dsisr = 0;
875                 r = RESUME_PAGE_FAULT;
876                 break;
877         /*
878          * This occurs if the guest executes an illegal instruction.
879          * If guest debug is disabled, generate a program interrupt
880          * to the guest.  If guest debug is enabled, check whether the
881          * instruction is a software breakpoint and return to the guest
882          * or the host accordingly.
883          */
884         case BOOK3S_INTERRUPT_H_EMUL_ASSIST:
885                 if (vcpu->arch.emul_inst != KVM_INST_FETCH_FAILED)
886                         vcpu->arch.last_inst = kvmppc_need_byteswap(vcpu) ?
887                                 swab32(vcpu->arch.emul_inst) :
888                                 vcpu->arch.emul_inst;
889                 if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) {
890                         r = kvmppc_emulate_debug_inst(run, vcpu);
891                 } else {
892                         kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
893                         r = RESUME_GUEST;
894                 }
895                 break;
896         /*
897          * This occurs if the guest (kernel or userspace) does something that
898          * is prohibited by HFSCR.  We just generate a program interrupt to
899          * the guest.
900          */
901         case BOOK3S_INTERRUPT_H_FAC_UNAVAIL:
902                 kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
903                 r = RESUME_GUEST;
904                 break;
905         default:
906                 kvmppc_dump_regs(vcpu);
907                 printk(KERN_EMERG "trap=0x%x | pc=0x%lx | msr=0x%llx\n",
908                         vcpu->arch.trap, kvmppc_get_pc(vcpu),
909                         vcpu->arch.shregs.msr);
910                 run->hw.hardware_exit_reason = vcpu->arch.trap;
911                 r = RESUME_HOST;
912                 break;
913         }
914
915         return r;
916 }
917
918 static int kvm_arch_vcpu_ioctl_get_sregs_hv(struct kvm_vcpu *vcpu,
919                                             struct kvm_sregs *sregs)
920 {
921         int i;
922
923         memset(sregs, 0, sizeof(struct kvm_sregs));
924         sregs->pvr = vcpu->arch.pvr;
925         for (i = 0; i < vcpu->arch.slb_max; i++) {
926                 sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige;
927                 sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv;
928         }
929
930         return 0;
931 }
932
933 static int kvm_arch_vcpu_ioctl_set_sregs_hv(struct kvm_vcpu *vcpu,
934                                             struct kvm_sregs *sregs)
935 {
936         int i, j;
937
938         /* Only accept the same PVR as the host's, since we can't spoof it */
939         if (sregs->pvr != vcpu->arch.pvr)
940                 return -EINVAL;
941
942         j = 0;
943         for (i = 0; i < vcpu->arch.slb_nr; i++) {
944                 if (sregs->u.s.ppc64.slb[i].slbe & SLB_ESID_V) {
945                         vcpu->arch.slb[j].orige = sregs->u.s.ppc64.slb[i].slbe;
946                         vcpu->arch.slb[j].origv = sregs->u.s.ppc64.slb[i].slbv;
947                         ++j;
948                 }
949         }
950         vcpu->arch.slb_max = j;
951
952         return 0;
953 }
954
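/*
 * Update the vcore's LPCR value, restricted to the bits that userspace
 * is allowed to change, and keep each vcpu's intr_msr MSR_LE bit in
 * step with the new LPCR_ILE setting.
 */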
955 static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr,
956                 bool preserve_top32)
957 {
958         struct kvm *kvm = vcpu->kvm;
959         struct kvmppc_vcore *vc = vcpu->arch.vcore;
960         u64 mask;
961
962         mutex_lock(&kvm->lock);
963         spin_lock(&vc->lock);
964         /*
965          * If ILE (interrupt little-endian) has changed, update the
966          * MSR_LE bit in the intr_msr for each vcpu in this vcore.
967          */
968         if ((new_lpcr & LPCR_ILE) != (vc->lpcr & LPCR_ILE)) {
969                 struct kvm_vcpu *vcpu;
970                 int i;
971
972                 kvm_for_each_vcpu(i, vcpu, kvm) {
973                         if (vcpu->arch.vcore != vc)
974                                 continue;
975                         if (new_lpcr & LPCR_ILE)
976                                 vcpu->arch.intr_msr |= MSR_LE;
977                         else
978                                 vcpu->arch.intr_msr &= ~MSR_LE;
979                 }
980         }
981
982         /*
983          * Userspace can only modify DPFD (default prefetch depth),
984          * ILE (interrupt little-endian) and TC (translation control).
985          * On POWER8 userspace can also modify AIL (alt. interrupt loc.)
986          */
987         mask = LPCR_DPFD | LPCR_ILE | LPCR_TC;
988         if (cpu_has_feature(CPU_FTR_ARCH_207S))
989                 mask |= LPCR_AIL;
990
991         /* Broken 32-bit version of LPCR must not clear top bits */
992         if (preserve_top32)
993                 mask &= 0xFFFFFFFF;
994         vc->lpcr = (vc->lpcr & ~mask) | (new_lpcr & mask);
995         spin_unlock(&vc->lock);
996         mutex_unlock(&kvm->lock);
997 }
998
999 static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
1000                                  union kvmppc_one_reg *val)
1001 {
1002         int r = 0;
1003         long int i;
1004
1005         switch (id) {
1006         case KVM_REG_PPC_DEBUG_INST:
1007                 *val = get_reg_val(id, KVMPPC_INST_SW_BREAKPOINT);
1008                 break;
1009         case KVM_REG_PPC_HIOR:
1010                 *val = get_reg_val(id, 0);
1011                 break;
1012         case KVM_REG_PPC_DABR:
1013                 *val = get_reg_val(id, vcpu->arch.dabr);
1014                 break;
1015         case KVM_REG_PPC_DABRX:
1016                 *val = get_reg_val(id, vcpu->arch.dabrx);
1017                 break;
1018         case KVM_REG_PPC_DSCR:
1019                 *val = get_reg_val(id, vcpu->arch.dscr);
1020                 break;
1021         case KVM_REG_PPC_PURR:
1022                 *val = get_reg_val(id, vcpu->arch.purr);
1023                 break;
1024         case KVM_REG_PPC_SPURR:
1025                 *val = get_reg_val(id, vcpu->arch.spurr);
1026                 break;
1027         case KVM_REG_PPC_AMR:
1028                 *val = get_reg_val(id, vcpu->arch.amr);
1029                 break;
1030         case KVM_REG_PPC_UAMOR:
1031                 *val = get_reg_val(id, vcpu->arch.uamor);
1032                 break;
1033         case KVM_REG_PPC_MMCR0 ... KVM_REG_PPC_MMCRS:
1034                 i = id - KVM_REG_PPC_MMCR0;
1035                 *val = get_reg_val(id, vcpu->arch.mmcr[i]);
1036                 break;
1037         case KVM_REG_PPC_PMC1 ... KVM_REG_PPC_PMC8:
1038                 i = id - KVM_REG_PPC_PMC1;
1039                 *val = get_reg_val(id, vcpu->arch.pmc[i]);
1040                 break;
1041         case KVM_REG_PPC_SPMC1 ... KVM_REG_PPC_SPMC2:
1042                 i = id - KVM_REG_PPC_SPMC1;
1043                 *val = get_reg_val(id, vcpu->arch.spmc[i]);
1044                 break;
1045         case KVM_REG_PPC_SIAR:
1046                 *val = get_reg_val(id, vcpu->arch.siar);
1047                 break;
1048         case KVM_REG_PPC_SDAR:
1049                 *val = get_reg_val(id, vcpu->arch.sdar);
1050                 break;
1051         case KVM_REG_PPC_SIER:
1052                 *val = get_reg_val(id, vcpu->arch.sier);
1053                 break;
1054         case KVM_REG_PPC_IAMR:
1055                 *val = get_reg_val(id, vcpu->arch.iamr);
1056                 break;
1057         case KVM_REG_PPC_PSPB:
1058                 *val = get_reg_val(id, vcpu->arch.pspb);
1059                 break;
1060         case KVM_REG_PPC_DPDES:
1061                 *val = get_reg_val(id, vcpu->arch.vcore->dpdes);
1062                 break;
1063         case KVM_REG_PPC_DAWR:
1064                 *val = get_reg_val(id, vcpu->arch.dawr);
1065                 break;
1066         case KVM_REG_PPC_DAWRX:
1067                 *val = get_reg_val(id, vcpu->arch.dawrx);
1068                 break;
1069         case KVM_REG_PPC_CIABR:
1070                 *val = get_reg_val(id, vcpu->arch.ciabr);
1071                 break;
1072         case KVM_REG_PPC_CSIGR:
1073                 *val = get_reg_val(id, vcpu->arch.csigr);
1074                 break;
1075         case KVM_REG_PPC_TACR:
1076                 *val = get_reg_val(id, vcpu->arch.tacr);
1077                 break;
1078         case KVM_REG_PPC_TCSCR:
1079                 *val = get_reg_val(id, vcpu->arch.tcscr);
1080                 break;
1081         case KVM_REG_PPC_PID:
1082                 *val = get_reg_val(id, vcpu->arch.pid);
1083                 break;
1084         case KVM_REG_PPC_ACOP:
1085                 *val = get_reg_val(id, vcpu->arch.acop);
1086                 break;
1087         case KVM_REG_PPC_WORT:
1088                 *val = get_reg_val(id, vcpu->arch.wort);
1089                 break;
1090         case KVM_REG_PPC_VPA_ADDR:
1091                 spin_lock(&vcpu->arch.vpa_update_lock);
1092                 *val = get_reg_val(id, vcpu->arch.vpa.next_gpa);
1093                 spin_unlock(&vcpu->arch.vpa_update_lock);
1094                 break;
1095         case KVM_REG_PPC_VPA_SLB:
1096                 spin_lock(&vcpu->arch.vpa_update_lock);
1097                 val->vpaval.addr = vcpu->arch.slb_shadow.next_gpa;
1098                 val->vpaval.length = vcpu->arch.slb_shadow.len;
1099                 spin_unlock(&vcpu->arch.vpa_update_lock);
1100                 break;
1101         case KVM_REG_PPC_VPA_DTL:
1102                 spin_lock(&vcpu->arch.vpa_update_lock);
1103                 val->vpaval.addr = vcpu->arch.dtl.next_gpa;
1104                 val->vpaval.length = vcpu->arch.dtl.len;
1105                 spin_unlock(&vcpu->arch.vpa_update_lock);
1106                 break;
1107         case KVM_REG_PPC_TB_OFFSET:
1108                 *val = get_reg_val(id, vcpu->arch.vcore->tb_offset);
1109                 break;
1110         case KVM_REG_PPC_LPCR:
1111         case KVM_REG_PPC_LPCR_64:
1112                 *val = get_reg_val(id, vcpu->arch.vcore->lpcr);
1113                 break;
1114         case KVM_REG_PPC_PPR:
1115                 *val = get_reg_val(id, vcpu->arch.ppr);
1116                 break;
1117 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1118         case KVM_REG_PPC_TFHAR:
1119                 *val = get_reg_val(id, vcpu->arch.tfhar);
1120                 break;
1121         case KVM_REG_PPC_TFIAR:
1122                 *val = get_reg_val(id, vcpu->arch.tfiar);
1123                 break;
1124         case KVM_REG_PPC_TEXASR:
1125                 *val = get_reg_val(id, vcpu->arch.texasr);
1126                 break;
1127         case KVM_REG_PPC_TM_GPR0 ... KVM_REG_PPC_TM_GPR31:
1128                 i = id - KVM_REG_PPC_TM_GPR0;
1129                 *val = get_reg_val(id, vcpu->arch.gpr_tm[i]);
1130                 break;
1131         case KVM_REG_PPC_TM_VSR0 ... KVM_REG_PPC_TM_VSR63:
1132         {
1133                 int j;
1134                 i = id - KVM_REG_PPC_TM_VSR0;
1135                 if (i < 32)
1136                         for (j = 0; j < TS_FPRWIDTH; j++)
1137                                 val->vsxval[j] = vcpu->arch.fp_tm.fpr[i][j];
1138                 else {
1139                         if (cpu_has_feature(CPU_FTR_ALTIVEC))
1140                                 val->vval = vcpu->arch.vr_tm.vr[i-32];
1141                         else
1142                                 r = -ENXIO;
1143                 }
1144                 break;
1145         }
1146         case KVM_REG_PPC_TM_CR:
1147                 *val = get_reg_val(id, vcpu->arch.cr_tm);
1148                 break;
1149         case KVM_REG_PPC_TM_LR:
1150                 *val = get_reg_val(id, vcpu->arch.lr_tm);
1151                 break;
1152         case KVM_REG_PPC_TM_CTR:
1153                 *val = get_reg_val(id, vcpu->arch.ctr_tm);
1154                 break;
1155         case KVM_REG_PPC_TM_FPSCR:
1156                 *val = get_reg_val(id, vcpu->arch.fp_tm.fpscr);
1157                 break;
1158         case KVM_REG_PPC_TM_AMR:
1159                 *val = get_reg_val(id, vcpu->arch.amr_tm);
1160                 break;
1161         case KVM_REG_PPC_TM_PPR:
1162                 *val = get_reg_val(id, vcpu->arch.ppr_tm);
1163                 break;
1164         case KVM_REG_PPC_TM_VRSAVE:
1165                 *val = get_reg_val(id, vcpu->arch.vrsave_tm);
1166                 break;
1167         case KVM_REG_PPC_TM_VSCR:
1168                 if (cpu_has_feature(CPU_FTR_ALTIVEC))
1169                         *val = get_reg_val(id, vcpu->arch.vr_tm.vscr.u[3]);
1170                 else
1171                         r = -ENXIO;
1172                 break;
1173         case KVM_REG_PPC_TM_DSCR:
1174                 *val = get_reg_val(id, vcpu->arch.dscr_tm);
1175                 break;
1176         case KVM_REG_PPC_TM_TAR:
1177                 *val = get_reg_val(id, vcpu->arch.tar_tm);
1178                 break;
1179 #endif
1180         case KVM_REG_PPC_ARCH_COMPAT:
1181                 *val = get_reg_val(id, vcpu->arch.vcore->arch_compat);
1182                 break;
1183         default:
1184                 r = -EINVAL;
1185                 break;
1186         }
1187
1188         return r;
1189 }
1190
1191 static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
1192                                  union kvmppc_one_reg *val)
1193 {
1194         int r = 0;
1195         long int i;
1196         unsigned long addr, len;
1197
1198         switch (id) {
1199         case KVM_REG_PPC_HIOR:
1200                 /* Only allow this to be set to zero */
1201                 if (set_reg_val(id, *val))
1202                         r = -EINVAL;
1203                 break;
1204         case KVM_REG_PPC_DABR:
1205                 vcpu->arch.dabr = set_reg_val(id, *val);
1206                 break;
1207         case KVM_REG_PPC_DABRX:
1208                 vcpu->arch.dabrx = set_reg_val(id, *val) & ~DABRX_HYP;
1209                 break;
1210         case KVM_REG_PPC_DSCR:
1211                 vcpu->arch.dscr = set_reg_val(id, *val);
1212                 break;
1213         case KVM_REG_PPC_PURR:
1214                 vcpu->arch.purr = set_reg_val(id, *val);
1215                 break;
1216         case KVM_REG_PPC_SPURR:
1217                 vcpu->arch.spurr = set_reg_val(id, *val);
1218                 break;
1219         case KVM_REG_PPC_AMR:
1220                 vcpu->arch.amr = set_reg_val(id, *val);
1221                 break;
1222         case KVM_REG_PPC_UAMOR:
1223                 vcpu->arch.uamor = set_reg_val(id, *val);
1224                 break;
1225         case KVM_REG_PPC_MMCR0 ... KVM_REG_PPC_MMCRS:
1226                 i = id - KVM_REG_PPC_MMCR0;
1227                 vcpu->arch.mmcr[i] = set_reg_val(id, *val);
1228                 break;
1229         case KVM_REG_PPC_PMC1 ... KVM_REG_PPC_PMC8:
1230                 i = id - KVM_REG_PPC_PMC1;
1231                 vcpu->arch.pmc[i] = set_reg_val(id, *val);
1232                 break;
1233         case KVM_REG_PPC_SPMC1 ... KVM_REG_PPC_SPMC2:
1234                 i = id - KVM_REG_PPC_SPMC1;
1235                 vcpu->arch.spmc[i] = set_reg_val(id, *val);
1236                 break;
1237         case KVM_REG_PPC_SIAR:
1238                 vcpu->arch.siar = set_reg_val(id, *val);
1239                 break;
1240         case KVM_REG_PPC_SDAR:
1241                 vcpu->arch.sdar = set_reg_val(id, *val);
1242                 break;
1243         case KVM_REG_PPC_SIER:
1244                 vcpu->arch.sier = set_reg_val(id, *val);
1245                 break;
1246         case KVM_REG_PPC_IAMR:
1247                 vcpu->arch.iamr = set_reg_val(id, *val);
1248                 break;
1249         case KVM_REG_PPC_PSPB:
1250                 vcpu->arch.pspb = set_reg_val(id, *val);
1251                 break;
1252         case KVM_REG_PPC_DPDES:
1253                 vcpu->arch.vcore->dpdes = set_reg_val(id, *val);
1254                 break;
1255         case KVM_REG_PPC_DAWR:
1256                 vcpu->arch.dawr = set_reg_val(id, *val);
1257                 break;
1258         case KVM_REG_PPC_DAWRX:
1259                 vcpu->arch.dawrx = set_reg_val(id, *val) & ~DAWRX_HYP;
1260                 break;
1261         case KVM_REG_PPC_CIABR:
1262                 vcpu->arch.ciabr = set_reg_val(id, *val);
1263                 /* Don't allow setting breakpoints in hypervisor code */
1264                 if ((vcpu->arch.ciabr & CIABR_PRIV) == CIABR_PRIV_HYPER)
1265                         vcpu->arch.ciabr &= ~CIABR_PRIV;        /* disable */
1266                 break;
1267         case KVM_REG_PPC_CSIGR:
1268                 vcpu->arch.csigr = set_reg_val(id, *val);
1269                 break;
1270         case KVM_REG_PPC_TACR:
1271                 vcpu->arch.tacr = set_reg_val(id, *val);
1272                 break;
1273         case KVM_REG_PPC_TCSCR:
1274                 vcpu->arch.tcscr = set_reg_val(id, *val);
1275                 break;
1276         case KVM_REG_PPC_PID:
1277                 vcpu->arch.pid = set_reg_val(id, *val);
1278                 break;
1279         case KVM_REG_PPC_ACOP:
1280                 vcpu->arch.acop = set_reg_val(id, *val);
1281                 break;
1282         case KVM_REG_PPC_WORT:
1283                 vcpu->arch.wort = set_reg_val(id, *val);
1284                 break;
1285         case KVM_REG_PPC_VPA_ADDR:
1286                 addr = set_reg_val(id, *val);
1287                 r = -EINVAL;
1288                 if (!addr && (vcpu->arch.slb_shadow.next_gpa ||
1289                               vcpu->arch.dtl.next_gpa))
1290                         break;
1291                 r = set_vpa(vcpu, &vcpu->arch.vpa, addr, sizeof(struct lppaca));
1292                 break;
1293         case KVM_REG_PPC_VPA_SLB:
1294                 addr = val->vpaval.addr;
1295                 len = val->vpaval.length;
1296                 r = -EINVAL;
1297                 if (addr && !vcpu->arch.vpa.next_gpa)
1298                         break;
1299                 r = set_vpa(vcpu, &vcpu->arch.slb_shadow, addr, len);
1300                 break;
1301         case KVM_REG_PPC_VPA_DTL:
1302                 addr = val->vpaval.addr;
1303                 len = val->vpaval.length;
1304                 r = -EINVAL;
1305                 if (addr && (len < sizeof(struct dtl_entry) ||
1306                              !vcpu->arch.vpa.next_gpa))
1307                         break;
1308                 len -= len % sizeof(struct dtl_entry);
1309                 r = set_vpa(vcpu, &vcpu->arch.dtl, addr, len);
1310                 break;
1311         case KVM_REG_PPC_TB_OFFSET:
1312                 /* round up to multiple of 2^24 */
1313                 vcpu->arch.vcore->tb_offset =
1314                         ALIGN(set_reg_val(id, *val), 1UL << 24);
1315                 break;
1316         case KVM_REG_PPC_LPCR:
1317                 kvmppc_set_lpcr(vcpu, set_reg_val(id, *val), true);
1318                 break;
1319         case KVM_REG_PPC_LPCR_64:
1320                 kvmppc_set_lpcr(vcpu, set_reg_val(id, *val), false);
1321                 break;
1322         case KVM_REG_PPC_PPR:
1323                 vcpu->arch.ppr = set_reg_val(id, *val);
1324                 break;
1325 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1326         case KVM_REG_PPC_TFHAR:
1327                 vcpu->arch.tfhar = set_reg_val(id, *val);
1328                 break;
1329         case KVM_REG_PPC_TFIAR:
1330                 vcpu->arch.tfiar = set_reg_val(id, *val);
1331                 break;
1332         case KVM_REG_PPC_TEXASR:
1333                 vcpu->arch.texasr = set_reg_val(id, *val);
1334                 break;
1335         case KVM_REG_PPC_TM_GPR0 ... KVM_REG_PPC_TM_GPR31:
1336                 i = id - KVM_REG_PPC_TM_GPR0;
1337                 vcpu->arch.gpr_tm[i] = set_reg_val(id, *val);
1338                 break;
1339         case KVM_REG_PPC_TM_VSR0 ... KVM_REG_PPC_TM_VSR63:
1340         {
1341                 int j;
1342                 i = id - KVM_REG_PPC_TM_VSR0;
1343                 if (i < 32)
1344                         for (j = 0; j < TS_FPRWIDTH; j++)
1345                                 vcpu->arch.fp_tm.fpr[i][j] = val->vsxval[j];
1346                 else
1347                         if (cpu_has_feature(CPU_FTR_ALTIVEC))
1348                                 vcpu->arch.vr_tm.vr[i-32] = val->vval;
1349                         else
1350                                 r = -ENXIO;
1351                 break;
1352         }
1353         case KVM_REG_PPC_TM_CR:
1354                 vcpu->arch.cr_tm = set_reg_val(id, *val);
1355                 break;
1356         case KVM_REG_PPC_TM_LR:
1357                 vcpu->arch.lr_tm = set_reg_val(id, *val);
1358                 break;
1359         case KVM_REG_PPC_TM_CTR:
1360                 vcpu->arch.ctr_tm = set_reg_val(id, *val);
1361                 break;
1362         case KVM_REG_PPC_TM_FPSCR:
1363                 vcpu->arch.fp_tm.fpscr = set_reg_val(id, *val);
1364                 break;
1365         case KVM_REG_PPC_TM_AMR:
1366                 vcpu->arch.amr_tm = set_reg_val(id, *val);
1367                 break;
1368         case KVM_REG_PPC_TM_PPR:
1369                 vcpu->arch.ppr_tm = set_reg_val(id, *val);
1370                 break;
1371         case KVM_REG_PPC_TM_VRSAVE:
1372                 vcpu->arch.vrsave_tm = set_reg_val(id, *val);
1373                 break;
1374         case KVM_REG_PPC_TM_VSCR:
1375                 if (cpu_has_feature(CPU_FTR_ALTIVEC))
1376                         vcpu->arch.vr_tm.vscr.u[3] = set_reg_val(id, *val);
1377                 else
1378                         r = -ENXIO;
1379                 break;
1380         case KVM_REG_PPC_TM_DSCR:
1381                 vcpu->arch.dscr_tm = set_reg_val(id, *val);
1382                 break;
1383         case KVM_REG_PPC_TM_TAR:
1384                 vcpu->arch.tar_tm = set_reg_val(id, *val);
1385                 break;
1386 #endif
1387         case KVM_REG_PPC_ARCH_COMPAT:
1388                 r = kvmppc_set_arch_compat(vcpu, set_reg_val(id, *val));
1389                 break;
1390         default:
1391                 r = -EINVAL;
1392                 break;
1393         }
1394
1395         return r;
1396 }
1397
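/*
 * Allocate and initialize a virtual core structure for the given core
 * number, including (on POWER8-class CPUs) the pages used for the
 * micro-partition prefetch (MPP) buffer.
 */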
1398 static struct kvmppc_vcore *kvmppc_vcore_create(struct kvm *kvm, int core)
1399 {
1400         struct kvmppc_vcore *vcore;
1401
1402         vcore = kzalloc(sizeof(struct kvmppc_vcore), GFP_KERNEL);
1403
1404         if (vcore == NULL)
1405                 return NULL;
1406
1407         INIT_LIST_HEAD(&vcore->runnable_threads);
1408         spin_lock_init(&vcore->lock);
1409         spin_lock_init(&vcore->stoltb_lock);
1410         init_waitqueue_head(&vcore->wq);
1411         vcore->preempt_tb = TB_NIL;
1412         vcore->lpcr = kvm->arch.lpcr;
1413         vcore->first_vcpuid = core * threads_per_subcore;
1414         vcore->kvm = kvm;
1415
1416         vcore->mpp_buffer_is_valid = false;
1417
1418         if (cpu_has_feature(CPU_FTR_ARCH_207S))
1419                 vcore->mpp_buffer = (void *)__get_free_pages(
1420                         GFP_KERNEL|__GFP_ZERO,
1421                         MPP_BUFFER_ORDER);
1422
1423         return vcore;
1424 }
1425
1426 #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
1427 static struct debugfs_timings_element {
1428         const char *name;
1429         size_t offset;
1430 } timings[] = {
1431         {"rm_entry",    offsetof(struct kvm_vcpu, arch.rm_entry)},
1432         {"rm_intr",     offsetof(struct kvm_vcpu, arch.rm_intr)},
1433         {"rm_exit",     offsetof(struct kvm_vcpu, arch.rm_exit)},
1434         {"guest",       offsetof(struct kvm_vcpu, arch.guest_time)},
1435         {"cede",        offsetof(struct kvm_vcpu, arch.cede_time)},
1436 };
1437
1438 #define N_TIMINGS       (sizeof(timings) / sizeof(timings[0]))
1439
1440 struct debugfs_timings_state {
1441         struct kvm_vcpu *vcpu;
1442         unsigned int    buflen;
1443         char            buf[N_TIMINGS * 100];
1444 };
1445
1446 static int debugfs_timings_open(struct inode *inode, struct file *file)
1447 {
1448         struct kvm_vcpu *vcpu = inode->i_private;
1449         struct debugfs_timings_state *p;
1450
1451         p = kzalloc(sizeof(*p), GFP_KERNEL);
1452         if (!p)
1453                 return -ENOMEM;
1454
1455         kvm_get_kvm(vcpu->kvm);
1456         p->vcpu = vcpu;
1457         file->private_data = p;
1458
1459         return nonseekable_open(inode, file);
1460 }
1461
1462 static int debugfs_timings_release(struct inode *inode, struct file *file)
1463 {
1464         struct debugfs_timings_state *p = file->private_data;
1465
1466         kvm_put_kvm(p->vcpu->kvm);
1467         kfree(p);
1468         return 0;
1469 }
1470
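/*
 * Format and return the per-vcpu exit timing accumulators.  Each
 * accumulator is sampled with a seqcount-style protocol: an odd
 * seqcount means an update is in progress, so the reader retries
 * (up to 1000 times, with a 1us delay) until it sees the same even
 * count before and after copying the structure, and reports the
 * entry as "stuck" otherwise.  Each output line has the form
 *
 *	<name>: <samples> <total> <min> <max>
 *
 * where <samples> is the seqcount divided by two and the times are
 * in nanoseconds.  The buffer is formatted once on the first read
 * and served from p->buf for subsequent reads.
 */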
1471 static ssize_t debugfs_timings_read(struct file *file, char __user *buf,
1472                                     size_t len, loff_t *ppos)
1473 {
1474         struct debugfs_timings_state *p = file->private_data;
1475         struct kvm_vcpu *vcpu = p->vcpu;
1476         char *s, *buf_end;
1477         struct kvmhv_tb_accumulator tb;
1478         u64 count;
1479         loff_t pos;
1480         ssize_t n;
1481         int i, loops;
1482         bool ok;
1483
1484         if (!p->buflen) {
1485                 s = p->buf;
1486                 buf_end = s + sizeof(p->buf);
1487                 for (i = 0; i < N_TIMINGS; ++i) {
1488                         struct kvmhv_tb_accumulator *acc;
1489
1490                         acc = (struct kvmhv_tb_accumulator *)
1491                                 ((unsigned long)vcpu + timings[i].offset);
1492                         ok = false;
1493                         for (loops = 0; loops < 1000; ++loops) {
1494                                 count = acc->seqcount;
1495                                 if (!(count & 1)) {
1496                                         smp_rmb();
1497                                         tb = *acc;
1498                                         smp_rmb();
1499                                         if (count == acc->seqcount) {
1500                                                 ok = true;
1501                                                 break;
1502                                         }
1503                                 }
1504                                 udelay(1);
1505                         }
1506                         if (!ok)
1507                                 snprintf(s, buf_end - s, "%s: stuck\n",
1508                                         timings[i].name);
1509                         else
1510                                 snprintf(s, buf_end - s,
1511                                         "%s: %llu %llu %llu %llu\n",
1512                                         timings[i].name, count / 2,
1513                                         tb_to_ns(tb.tb_total),
1514                                         tb_to_ns(tb.tb_min),
1515                                         tb_to_ns(tb.tb_max));
1516                         s += strlen(s);
1517                 }
1518                 p->buflen = s - p->buf;
1519         }
1520
1521         pos = *ppos;
1522         if (pos >= p->buflen)
1523                 return 0;
1524         if (len > p->buflen - pos)
1525                 len = p->buflen - pos;
1526         n = copy_to_user(buf, p->buf + pos, len);
1527         if (n) {
1528                 if (n == len)
1529                         return -EFAULT;
1530                 len -= n;
1531         }
1532         *ppos = pos + len;
1533         return len;
1534 }
1535
1536 static ssize_t debugfs_timings_write(struct file *file, const char __user *buf,
1537                                      size_t len, loff_t *ppos)
1538 {
1539         return -EACCES;
1540 }
1541
1542 static const struct file_operations debugfs_timings_ops = {
1543         .owner   = THIS_MODULE,
1544         .open    = debugfs_timings_open,
1545         .release = debugfs_timings_release,
1546         .read    = debugfs_timings_read,
1547         .write   = debugfs_timings_write,
1548         .llseek  = generic_file_llseek,
1549 };
1550
1551 /* Create a debugfs directory for the vcpu */
1552 static void debugfs_vcpu_init(struct kvm_vcpu *vcpu, unsigned int id)
1553 {
1554         char buf[16];
1555         struct kvm *kvm = vcpu->kvm;
1556
1557         snprintf(buf, sizeof(buf), "vcpu%u", id);
1558         if (IS_ERR_OR_NULL(kvm->arch.debugfs_dir))
1559                 return;
1560         vcpu->arch.debugfs_dir = debugfs_create_dir(buf, kvm->arch.debugfs_dir);
1561         if (IS_ERR_OR_NULL(vcpu->arch.debugfs_dir))
1562                 return;
1563         vcpu->arch.debugfs_timings =
1564                 debugfs_create_file("timings", 0444, vcpu->arch.debugfs_dir,
1565                                     vcpu, &debugfs_timings_ops);
1566 }
1567
1568 #else /* CONFIG_KVM_BOOK3S_HV_EXIT_TIMING */
1569 static void debugfs_vcpu_init(struct kvm_vcpu *vcpu, unsigned int id)
1570 {
1571 }
1572 #endif /* CONFIG_KVM_BOOK3S_HV_EXIT_TIMING */
1573
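/*
 * Create a vcpu for the HV case.  Vcpu ids are grouped into virtual
 * cores of threads_per_subcore threads: id / threads_per_subcore
 * selects the vcore, which is created on first use under kvm->lock,
 * and ptid is the vcpu's thread offset within that vcore.  The PVR
 * is fixed to the host PVR since it cannot be spoofed in HV mode.
 */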
1574 static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm,
1575                                                    unsigned int id)
1576 {
1577         struct kvm_vcpu *vcpu;
1578         int err = -EINVAL;
1579         int core;
1580         struct kvmppc_vcore *vcore;
1581
1582         core = id / threads_per_subcore;
1583         if (core >= KVM_MAX_VCORES)
1584                 goto out;
1585
1586         err = -ENOMEM;
1587         vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
1588         if (!vcpu)
1589                 goto out;
1590
1591         err = kvm_vcpu_init(vcpu, kvm, id);
1592         if (err)
1593                 goto free_vcpu;
1594
1595         vcpu->arch.shared = &vcpu->arch.shregs;
1596 #ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
1597         /*
1598          * The shared struct is never shared on HV,
1599          * so we can always use host endianness
1600          */
1601 #ifdef __BIG_ENDIAN__
1602         vcpu->arch.shared_big_endian = true;
1603 #else
1604         vcpu->arch.shared_big_endian = false;
1605 #endif
1606 #endif
1607         vcpu->arch.mmcr[0] = MMCR0_FC;
1608         vcpu->arch.ctrl = CTRL_RUNLATCH;
1609         /* default to host PVR, since we can't spoof it */
1610         kvmppc_set_pvr_hv(vcpu, mfspr(SPRN_PVR));
1611         spin_lock_init(&vcpu->arch.vpa_update_lock);
1612         spin_lock_init(&vcpu->arch.tbacct_lock);
1613         vcpu->arch.busy_preempt = TB_NIL;
1614         vcpu->arch.intr_msr = MSR_SF | MSR_ME;
1615
1616         kvmppc_mmu_book3s_hv_init(vcpu);
1617
1618         vcpu->arch.state = KVMPPC_VCPU_NOTREADY;
1619
1620         init_waitqueue_head(&vcpu->arch.cpu_run);
1621
1622         mutex_lock(&kvm->lock);
1623         vcore = kvm->arch.vcores[core];
1624         if (!vcore) {
1625                 vcore = kvmppc_vcore_create(kvm, core);
1626                 kvm->arch.vcores[core] = vcore;
1627                 kvm->arch.online_vcores++;
1628         }
1629         mutex_unlock(&kvm->lock);
1630
1631         if (!vcore)
1632                 goto free_vcpu;
1633
1634         spin_lock(&vcore->lock);
1635         ++vcore->num_threads;
1636         spin_unlock(&vcore->lock);
1637         vcpu->arch.vcore = vcore;
1638         vcpu->arch.ptid = vcpu->vcpu_id - vcore->first_vcpuid;
1639
1640         vcpu->arch.cpu_type = KVM_CPU_3S_64;
1641         kvmppc_sanity_check(vcpu);
1642
1643         debugfs_vcpu_init(vcpu, id);
1644
1645         return vcpu;
1646
1647 free_vcpu:
1648         kmem_cache_free(kvm_vcpu_cache, vcpu);
1649 out:
1650         return ERR_PTR(err);
1651 }
1652
1653 static void unpin_vpa(struct kvm *kvm, struct kvmppc_vpa *vpa)
1654 {
1655         if (vpa->pinned_addr)
1656                 kvmppc_unpin_guest_page(kvm, vpa->pinned_addr, vpa->gpa,
1657                                         vpa->dirty);
1658 }
1659
1660 static void kvmppc_core_vcpu_free_hv(struct kvm_vcpu *vcpu)
1661 {
1662         spin_lock(&vcpu->arch.vpa_update_lock);
1663         unpin_vpa(vcpu->kvm, &vcpu->arch.dtl);
1664         unpin_vpa(vcpu->kvm, &vcpu->arch.slb_shadow);
1665         unpin_vpa(vcpu->kvm, &vcpu->arch.vpa);
1666         spin_unlock(&vcpu->arch.vpa_update_lock);
1667         kvm_vcpu_uninit(vcpu);
1668         kmem_cache_free(kvm_vcpu_cache, vcpu);
1669 }
1670
1671 static int kvmppc_core_check_requests_hv(struct kvm_vcpu *vcpu)
1672 {
1673         /* Indicate we want to get back into the guest */
1674         return 1;
1675 }
1676
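/*
 * Arm an hrtimer to fire when the guest decrementer would expire.
 * If the decrementer has already gone negative, queue the exception
 * right away; otherwise convert the remaining timebase ticks to
 * nanoseconds as (dec_expires - now) * NSEC_PER_SEC / tb_ticks_per_sec.
 */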
1677 static void kvmppc_set_timer(struct kvm_vcpu *vcpu)
1678 {
1679         unsigned long dec_nsec, now;
1680
1681         now = get_tb();
1682         if (now > vcpu->arch.dec_expires) {
1683                 /* decrementer has already gone negative */
1684                 kvmppc_core_queue_dec(vcpu);
1685                 kvmppc_core_prepare_to_enter(vcpu);
1686                 return;
1687         }
1688         dec_nsec = (vcpu->arch.dec_expires - now) * NSEC_PER_SEC
1689                    / tb_ticks_per_sec;
1690         hrtimer_start(&vcpu->arch.dec_timer, ktime_set(0, dec_nsec),
1691                       HRTIMER_MODE_REL);
1692         vcpu->arch.timer_running = 1;
1693 }
1694
1695 static void kvmppc_end_cede(struct kvm_vcpu *vcpu)
1696 {
1697         vcpu->arch.ceded = 0;
1698         if (vcpu->arch.timer_running) {
1699                 hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
1700                 vcpu->arch.timer_running = 0;
1701         }
1702 }
1703
1704 extern void __kvmppc_vcore_entry(void);
1705
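/*
 * Take a vcpu off its vcore's runnable list.  The time since the
 * last stolen-time snapshot is charged to busy_stolen, the current
 * timebase is recorded in busy_preempt, and the vcpu moves to the
 * BUSY_IN_HOST state.
 */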
1706 static void kvmppc_remove_runnable(struct kvmppc_vcore *vc,
1707                                    struct kvm_vcpu *vcpu)
1708 {
1709         u64 now;
1710
1711         if (vcpu->arch.state != KVMPPC_VCPU_RUNNABLE)
1712                 return;
1713         spin_lock_irq(&vcpu->arch.tbacct_lock);
1714         now = mftb();
1715         vcpu->arch.busy_stolen += vcore_stolen_time(vc, now) -
1716                 vcpu->arch.stolen_logged;
1717         vcpu->arch.busy_preempt = now;
1718         vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
1719         spin_unlock_irq(&vcpu->arch.tbacct_lock);
1720         --vc->n_runnable;
1721         list_del(&vcpu->arch.run_list);
1722 }
1723
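/*
 * Claim a napping sibling hardware thread for guest use.  Setting
 * hwthread_req tells the thread to stay out of the kernel if it
 * wakes; we then wait (roughly 10ms, given 10000 iterations of
 * udelay(1)) for a thread that is currently in the kernel to go
 * back to nap before giving up with -EBUSY.
 */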
1724 static int kvmppc_grab_hwthread(int cpu)
1725 {
1726         struct paca_struct *tpaca;
1727         long timeout = 10000;
1728
1729         tpaca = &paca[cpu];
1730
1731         /* Ensure the thread won't go into the kernel if it wakes */
1732         tpaca->kvm_hstate.hwthread_req = 1;
1733         tpaca->kvm_hstate.kvm_vcpu = NULL;
1734
1735         /*
1736          * If the thread is already executing in the kernel (e.g. handling
1737          * a stray interrupt), wait for it to get back to nap mode.
1738          * The smp_mb() is to ensure that our setting of hwthread_req
1739          * is visible before we look at hwthread_state, so if this
1740          * races with the code at system_reset_pSeries and the thread
1741          * misses our setting of hwthread_req, we are sure to see its
1742          * setting of hwthread_state, and vice versa.
1743          */
1744         smp_mb();
1745         while (tpaca->kvm_hstate.hwthread_state == KVM_HWTHREAD_IN_KERNEL) {
1746                 if (--timeout <= 0) {
1747                         pr_err("KVM: couldn't grab cpu %d\n", cpu);
1748                         return -EBUSY;
1749                 }
1750                 udelay(1);
1751         }
1752         return 0;
1753 }
1754
1755 static void kvmppc_release_hwthread(int cpu)
1756 {
1757         struct paca_struct *tpaca;
1758
1759         tpaca = &paca[cpu];
1760         tpaca->kvm_hstate.hwthread_req = 0;
1761         tpaca->kvm_hstate.kvm_vcpu = NULL;
1762 }
1763
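/*
 * Point a hardware thread's PACA at this vcpu and its vcore and kick
 * the thread so it enters the guest.  The target cpu is the vcore's
 * physical cpu plus the vcpu's ptid; vcpu->cpu is set to the primary
 * thread so that IPIs for this vcpu are directed there.  Secondary
 * threads woken this way are counted in vc->n_woken so we can later
 * wait for them to nap again.
 */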
1764 static void kvmppc_start_thread(struct kvm_vcpu *vcpu)
1765 {
1766         int cpu;
1767         struct paca_struct *tpaca;
1768         struct kvmppc_vcore *vc = vcpu->arch.vcore;
1769
1770         if (vcpu->arch.timer_running) {
1771                 hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
1772                 vcpu->arch.timer_running = 0;
1773         }
1774         cpu = vc->pcpu + vcpu->arch.ptid;
1775         tpaca = &paca[cpu];
1776         tpaca->kvm_hstate.kvm_vcpu = vcpu;
1777         tpaca->kvm_hstate.kvm_vcore = vc;
1778         tpaca->kvm_hstate.ptid = vcpu->arch.ptid;
1779         vcpu->cpu = vc->pcpu;
1780         smp_wmb();
1781 #if defined(CONFIG_PPC_ICP_NATIVE) && defined(CONFIG_SMP)
1782         if (cpu != smp_processor_id()) {
1783                 xics_wake_cpu(cpu);
1784                 if (vcpu->arch.ptid)
1785                         ++vc->n_woken;
1786         }
1787 #endif
1788 }
1789
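/*
 * Wait, at low HMT priority, until every secondary thread we woke
 * has finished writing its state and gone back to nap (nap_count
 * catches up with n_woken).  Gives up with an error message after
 * about a million iterations rather than spinning forever.
 */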
1790 static void kvmppc_wait_for_nap(struct kvmppc_vcore *vc)
1791 {
1792         int i;
1793
1794         HMT_low();
1795         i = 0;
1796         while (vc->nap_count < vc->n_woken) {
1797                 if (++i >= 1000000) {
1798                         pr_err("kvmppc_wait_for_nap timeout %d %d\n",
1799                                vc->nap_count, vc->n_woken);
1800                         break;
1801                 }
1802                 cpu_relax();
1803         }
1804         HMT_medium();
1805 }
1806
1807 /*
1808  * Check that we are on the primary thread of a subcore and that any
1809  * other threads in this subcore are offline.  Then grab those threads
1810  * so they can't enter the kernel.
1811  */
1812 static int on_primary_thread(void)
1813 {
1814         int cpu = smp_processor_id();
1815         int thr;
1816
1817         /* Are we on a primary subcore? */
1818         if (cpu_thread_in_subcore(cpu))
1819                 return 0;
1820
1821         thr = 0;
1822         while (++thr < threads_per_subcore)
1823                 if (cpu_online(cpu + thr))
1824                         return 0;
1825
1826         /* Grab all hw threads so they can't go into the kernel */
1827         for (thr = 1; thr < threads_per_subcore; ++thr) {
1828                 if (kvmppc_grab_hwthread(cpu + thr)) {
1829                         /* Couldn't grab one; let the others go */
1830                         do {
1831                                 kvmppc_release_hwthread(cpu + thr);
1832                         } while (--thr > 0);
1833                         return 0;
1834                 }
1835         }
1836         return 1;
1837 }
1838
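/*
 * Micro-partition prefetch (POWER8).  On guest exit, the logmpp
 * instruction logs the current L2 cache contents into the vcore's
 * mpp_buffer; on the next guest entry the buffer is handed back to
 * the prefetch engine via SPRN_MPPR so those lines can be fetched
 * again.  The buffer address is masked to the alignment the hardware
 * expects (PPC_MPPE_ADDRESS_MASK).
 */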
1839 static void kvmppc_start_saving_l2_cache(struct kvmppc_vcore *vc)
1840 {
1841         phys_addr_t phy_addr, mpp_addr;
1842
1843         phy_addr = (phys_addr_t)virt_to_phys(vc->mpp_buffer);
1844         mpp_addr = phy_addr & PPC_MPPE_ADDRESS_MASK;
1845
1846         mtspr(SPRN_MPPR, mpp_addr | PPC_MPPR_FETCH_ABORT);
1847         logmpp(mpp_addr | PPC_LOGMPP_LOG_L2);
1848
1849         vc->mpp_buffer_is_valid = true;
1850 }
1851
1852 static void kvmppc_start_restoring_l2_cache(const struct kvmppc_vcore *vc)
1853 {
1854         phys_addr_t phy_addr, mpp_addr;
1855
1856         phy_addr = virt_to_phys(vc->mpp_buffer);
1857         mpp_addr = phy_addr & PPC_MPPE_ADDRESS_MASK;
1858
1859         /* We must abort any in-progress save operations to ensure
1860          * the table is valid so that the prefetch engine knows when to
1861          * stop prefetching. */
1862         logmpp(mpp_addr | PPC_LOGMPP_LOG_ABORT);
1863         mtspr(SPRN_MPPR, mpp_addr | PPC_MPPR_FETCH_WHOLE_TABLE);
1864 }
1865
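/*
 * Drop from the runnable list any vcpu whose task has a signal
 * pending, and any vcpu with a VPA, SLB shadow or DTL update
 * pending.  In the latter case arch.ret is set to RESUME_GUEST so
 * that when the run task re-enters kvmppc_run_vcpu() the pending
 * update is applied before the next guest entry.
 */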
1866 static void prepare_threads(struct kvmppc_vcore *vc)
1867 {
1868         struct kvm_vcpu *vcpu, *vnext;
1869
1870         list_for_each_entry_safe(vcpu, vnext, &vc->runnable_threads,
1871                                  arch.run_list) {
1872                 if (signal_pending(vcpu->arch.run_task))
1873                         vcpu->arch.ret = -EINTR;
1874                 else if (vcpu->arch.vpa.update_pending ||
1875                          vcpu->arch.slb_shadow.update_pending ||
1876                          vcpu->arch.dtl.update_pending)
1877                         vcpu->arch.ret = RESUME_GUEST;
1878                 else
1879                         continue;
1880                 kvmppc_remove_runnable(vc, vcpu);
1881                 wake_up(&vcpu->arch.cpu_run);
1882         }
1883 }
1884
1885 /*
1886  * Run a set of guest threads on a physical core.
1887  * Called with vc->lock held.
1888  */
1889 static void kvmppc_run_core(struct kvmppc_vcore *vc)
1890 {
1891         struct kvm_vcpu *vcpu, *vnext;
1892         long ret;
1893         u64 now;
1894         int i;
1895         int srcu_idx;
1896
1897         /*
1898          * Remove from the list any threads that have a signal pending
1899          * or need a VPA update done
1900          */
1901         prepare_threads(vc);
1902
1903         /* if the runner is no longer runnable, let the caller pick a new one */
1904         if (vc->runner->arch.state != KVMPPC_VCPU_RUNNABLE)
1905                 return;
1906
1907         /*
1908          * Initialize *vc.
1909          */
1910         vc->n_woken = 0;
1911         vc->nap_count = 0;
1912         vc->entry_exit_count = 0;
1913         vc->preempt_tb = TB_NIL;
1914         vc->in_guest = 0;
1915         vc->napping_threads = 0;
1916         vc->conferring_threads = 0;
1917
1918         /*
1919          * Make sure we are running on primary threads, and that secondary
1920          * threads are offline.  Also check that the number of threads in this
1921          * guest does not exceed the number of hardware threads per subcore.
1922          */
1923         if ((threads_per_core > 1) &&
1924             ((vc->num_threads > threads_per_subcore) || !on_primary_thread())) {
1925                 list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
1926                         vcpu->arch.ret = -EBUSY;
1927                 goto out;
1928         }
1929
1930
1931         vc->pcpu = smp_processor_id();
1932         list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
1933                 kvmppc_start_thread(vcpu);
1934                 kvmppc_create_dtl_entry(vcpu, vc);
1935                 trace_kvm_guest_enter(vcpu);
1936         }
1937
1938         /* Set this explicitly in case thread 0 doesn't have a vcpu */
1939         get_paca()->kvm_hstate.kvm_vcore = vc;
1940         get_paca()->kvm_hstate.ptid = 0;
1941
1942         vc->vcore_state = VCORE_RUNNING;
1943         preempt_disable();
1944
1945         trace_kvmppc_run_core(vc, 0);
1946
1947         spin_unlock(&vc->lock);
1948
1949         kvm_guest_enter();
1950
1951         srcu_idx = srcu_read_lock(&vc->kvm->srcu);
1952
1953         if (vc->mpp_buffer_is_valid)
1954                 kvmppc_start_restoring_l2_cache(vc);
1955
1956         __kvmppc_vcore_entry();
1957
1958         spin_lock(&vc->lock);
1959
1960         if (vc->mpp_buffer)
1961                 kvmppc_start_saving_l2_cache(vc);
1962
1963         /* disable sending of IPIs on virtual external irqs */
1964         list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
1965                 vcpu->cpu = -1;
1966         /* wait for secondary threads to finish writing their state to memory */
1967         if (vc->nap_count < vc->n_woken)
1968                 kvmppc_wait_for_nap(vc);
1969         for (i = 0; i < threads_per_subcore; ++i)
1970                 kvmppc_release_hwthread(vc->pcpu + i);
1971         /* prevent other vcpu threads from doing kvmppc_start_thread() now */
1972         vc->vcore_state = VCORE_EXITING;
1973         spin_unlock(&vc->lock);
1974
1975         srcu_read_unlock(&vc->kvm->srcu, srcu_idx);
1976
1977         /* make sure updates to secondary vcpu structs are visible now */
1978         smp_mb();
1979         kvm_guest_exit();
1980
1981         preempt_enable();
1982         cond_resched();
1983
1984         spin_lock(&vc->lock);
1985         now = get_tb();
1986         list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
1987                 /* cancel pending dec exception if dec is positive */
1988                 if (now < vcpu->arch.dec_expires &&
1989                     kvmppc_core_pending_dec(vcpu))
1990                         kvmppc_core_dequeue_dec(vcpu);
1991
1992                 trace_kvm_guest_exit(vcpu);
1993
1994                 ret = RESUME_GUEST;
1995                 if (vcpu->arch.trap)
1996                         ret = kvmppc_handle_exit_hv(vcpu->arch.kvm_run, vcpu,
1997                                                     vcpu->arch.run_task);
1998
1999                 vcpu->arch.ret = ret;
2000                 vcpu->arch.trap = 0;
2001
2002                 if (vcpu->arch.ceded) {
2003                         if (!is_kvmppc_resume_guest(ret))
2004                                 kvmppc_end_cede(vcpu);
2005                         else
2006                                 kvmppc_set_timer(vcpu);
2007                 }
2008         }
2009
2010  out:
2011         vc->vcore_state = VCORE_INACTIVE;
2012         list_for_each_entry_safe(vcpu, vnext, &vc->runnable_threads,
2013                                  arch.run_list) {
2014                 if (!is_kvmppc_resume_guest(vcpu->arch.ret)) {
2015                         kvmppc_remove_runnable(vc, vcpu);
2016                         wake_up(&vcpu->arch.cpu_run);
2017                 }
2018         }
2019
2020         trace_kvmppc_run_core(vc, 1);
2021 }
2022
2023 /*
2024  * Wait for some other vcpu thread to execute us, and
2025  * wake us up when we need to handle something in the host.
2026  */
2027 static void kvmppc_wait_for_exec(struct kvm_vcpu *vcpu, int wait_state)
2028 {
2029         DEFINE_WAIT(wait);
2030
2031         prepare_to_wait(&vcpu->arch.cpu_run, &wait, wait_state);
2032         if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE)
2033                 schedule();
2034         finish_wait(&vcpu->arch.cpu_run, &wait);
2035 }
2036
2037 /*
2038  * All the vcpus in this vcore are idle, so wait for a decrementer
2039  * or external interrupt to one of the vcpus.  vc->lock is held.
2040  */
2041 static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
2042 {
2043         struct kvm_vcpu *vcpu;
2044         int do_sleep = 1;
2045
2046         DEFINE_WAIT(wait);
2047
2048         prepare_to_wait(&vc->wq, &wait, TASK_INTERRUPTIBLE);
2049
2050         /*
2051          * Check one last time for pending exceptions and ceded state after
2052          * we put ourselves on the wait queue
2053          */
2054         list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
2055                 if (vcpu->arch.pending_exceptions || !vcpu->arch.ceded) {
2056                         do_sleep = 0;
2057                         break;
2058                 }
2059         }
2060
2061         if (!do_sleep) {
2062                 finish_wait(&vc->wq, &wait);
2063                 return;
2064         }
2065
2066         vc->vcore_state = VCORE_SLEEPING;
2067         trace_kvmppc_vcore_blocked(vc, 0);
2068         spin_unlock(&vc->lock);
2069         schedule();
2070         finish_wait(&vc->wq, &wait);
2071         spin_lock(&vc->lock);
2072         vc->vcore_state = VCORE_INACTIVE;
2073         trace_kvmppc_vcore_blocked(vc, 1);
2074 }
2075
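/*
 * Per-task side of running a vcpu.  Each userspace vcpu task adds
 * its vcpu to the vcore's runnable list; while the vcore is idle,
 * one of the tasks becomes the "runner" and calls kvmppc_run_core()
 * on behalf of the whole core while the others sleep in
 * kvmppc_wait_for_exec().  If every runnable vcpu has ceded and has
 * no pending exceptions, the vcore sleeps in kvmppc_vcore_blocked()
 * until an interrupt or another request arrives.
 */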
2076 static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
2077 {
2078         int n_ceded;
2079         struct kvmppc_vcore *vc;
2080         struct kvm_vcpu *v, *vn;
2081
2082         trace_kvmppc_run_vcpu_enter(vcpu);
2083
2084         kvm_run->exit_reason = 0;
2085         vcpu->arch.ret = RESUME_GUEST;
2086         vcpu->arch.trap = 0;
2087         kvmppc_update_vpas(vcpu);
2088
2089         /*
2090          * Synchronize with other threads in this virtual core
2091          */
2092         vc = vcpu->arch.vcore;
2093         spin_lock(&vc->lock);
2094         vcpu->arch.ceded = 0;
2095         vcpu->arch.run_task = current;
2096         vcpu->arch.kvm_run = kvm_run;
2097         vcpu->arch.stolen_logged = vcore_stolen_time(vc, mftb());
2098         vcpu->arch.state = KVMPPC_VCPU_RUNNABLE;
2099         vcpu->arch.busy_preempt = TB_NIL;
2100         list_add_tail(&vcpu->arch.run_list, &vc->runnable_threads);
2101         ++vc->n_runnable;
2102
2103         /*
2104          * This happens the first time this is called for a vcpu.
2105          * If the vcore is already running, we may be able to start
2106          * this thread straight away and have it join in.
2107          */
2108         if (!signal_pending(current)) {
2109                 if (vc->vcore_state == VCORE_RUNNING &&
2110                     VCORE_EXIT_COUNT(vc) == 0) {
2111                         kvmppc_create_dtl_entry(vcpu, vc);
2112                         kvmppc_start_thread(vcpu);
2113                         trace_kvm_guest_enter(vcpu);
2114                 } else if (vc->vcore_state == VCORE_SLEEPING) {
2115                         wake_up(&vc->wq);
2116                 }
2117
2118         }
2119
2120         while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE &&
2121                !signal_pending(current)) {
2122                 if (vc->vcore_state != VCORE_INACTIVE) {
2123                         spin_unlock(&vc->lock);
2124                         kvmppc_wait_for_exec(vcpu, TASK_INTERRUPTIBLE);
2125                         spin_lock(&vc->lock);
2126                         continue;
2127                 }
2128                 list_for_each_entry_safe(v, vn, &vc->runnable_threads,
2129                                          arch.run_list) {
2130                         kvmppc_core_prepare_to_enter(v);
2131                         if (signal_pending(v->arch.run_task)) {
2132                                 kvmppc_remove_runnable(vc, v);
2133                                 v->stat.signal_exits++;
2134                                 v->arch.kvm_run->exit_reason = KVM_EXIT_INTR;
2135                                 v->arch.ret = -EINTR;
2136                                 wake_up(&v->arch.cpu_run);
2137                         }
2138                 }
2139                 if (!vc->n_runnable || vcpu->arch.state != KVMPPC_VCPU_RUNNABLE)
2140                         break;
2141                 vc->runner = vcpu;
2142                 n_ceded = 0;
2143                 list_for_each_entry(v, &vc->runnable_threads, arch.run_list) {
2144                         if (!v->arch.pending_exceptions)
2145                                 n_ceded += v->arch.ceded;
2146                         else
2147                                 v->arch.ceded = 0;
2148                 }
2149                 if (n_ceded == vc->n_runnable)
2150                         kvmppc_vcore_blocked(vc);
2151                 else
2152                         kvmppc_run_core(vc);
2153                 vc->runner = NULL;
2154         }
2155
2156         while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE &&
2157                (vc->vcore_state == VCORE_RUNNING ||
2158                 vc->vcore_state == VCORE_EXITING)) {
2159                 spin_unlock(&vc->lock);
2160                 kvmppc_wait_for_exec(vcpu, TASK_UNINTERRUPTIBLE);
2161                 spin_lock(&vc->lock);
2162         }
2163
2164         if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) {
2165                 kvmppc_remove_runnable(vc, vcpu);
2166                 vcpu->stat.signal_exits++;
2167                 kvm_run->exit_reason = KVM_EXIT_INTR;
2168                 vcpu->arch.ret = -EINTR;
2169         }
2170
2171         if (vc->n_runnable && vc->vcore_state == VCORE_INACTIVE) {
2172                 /* Wake up some vcpu to run the core */
2173                 v = list_first_entry(&vc->runnable_threads,
2174                                      struct kvm_vcpu, arch.run_list);
2175                 wake_up(&v->arch.cpu_run);
2176         }
2177
2178         trace_kvmppc_run_vcpu_exit(vcpu, kvm_run);
2179         spin_unlock(&vc->lock);
2180         return vcpu->arch.ret;
2181 }
2182
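/*
 * Top-level KVM_RUN entry point for HV guests.  On the first run the
 * hashed page table and VRMA are set up; FP, Altivec and VSX state is
 * flushed to the thread struct so the real-mode entry code can save
 * it.  We then loop in kvmppc_run_vcpu(), handling PAPR hypercalls
 * made by the guest kernel and guest page faults (RESUME_PAGE_FAULT)
 * in the host until the exit is one userspace has to handle.
 */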
2183 static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
2184 {
2185         int r;
2186         int srcu_idx;
2187
2188         if (!vcpu->arch.sane) {
2189                 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
2190                 return -EINVAL;
2191         }
2192
2193         kvmppc_core_prepare_to_enter(vcpu);
2194
2195         /* No need to go into the guest when all we'll do is come back out */
2196         if (signal_pending(current)) {
2197                 run->exit_reason = KVM_EXIT_INTR;
2198                 return -EINTR;
2199         }
2200
2201         atomic_inc(&vcpu->kvm->arch.vcpus_running);
2202         /* Order vcpus_running vs. hpte_setup_done, see kvmppc_alloc_reset_hpt */
2203         smp_mb();
2204
2205         /* On the first time here, set up HTAB and VRMA */
2206         if (!vcpu->kvm->arch.hpte_setup_done) {
2207                 r = kvmppc_hv_setup_htab_rma(vcpu);
2208                 if (r)
2209                         goto out;
2210         }
2211
2212         flush_fp_to_thread(current);
2213         flush_altivec_to_thread(current);
2214         flush_vsx_to_thread(current);
2215         vcpu->arch.wqp = &vcpu->arch.vcore->wq;
2216         vcpu->arch.pgdir = current->mm->pgd;
2217         vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
2218
2219         do {
2220                 r = kvmppc_run_vcpu(run, vcpu);
2221
2222                 if (run->exit_reason == KVM_EXIT_PAPR_HCALL &&
2223                     !(vcpu->arch.shregs.msr & MSR_PR)) {
2224                         trace_kvm_hcall_enter(vcpu);
2225                         r = kvmppc_pseries_do_hcall(vcpu);
2226                         trace_kvm_hcall_exit(vcpu, r);
2227                         kvmppc_core_prepare_to_enter(vcpu);
2228                 } else if (r == RESUME_PAGE_FAULT) {
2229                         srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
2230                         r = kvmppc_book3s_hv_page_fault(run, vcpu,
2231                                 vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
2232                         srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
2233                 }
2234         } while (is_kvmppc_resume_guest(r));
2235
2236  out:
2237         vcpu->arch.state = KVMPPC_VCPU_NOTREADY;
2238         atomic_dec(&vcpu->kvm->arch.vcpus_running);
2239         return r;
2240 }
2241
2242 static void kvmppc_add_seg_page_size(struct kvm_ppc_one_seg_page_size **sps,
2243                                      int linux_psize)
2244 {
2245         struct mmu_psize_def *def = &mmu_psize_defs[linux_psize];
2246
2247         if (!def->shift)
2248                 return;
2249         (*sps)->page_shift = def->shift;
2250         (*sps)->slb_enc = def->sllp;
2251         (*sps)->enc[0].page_shift = def->shift;
2252         (*sps)->enc[0].pte_enc = def->penc[linux_psize];
2253         /*
2254          * Add 16MB MPSS support if host supports it
2255          */
2256         if (linux_psize != MMU_PAGE_16M && def->penc[MMU_PAGE_16M] != -1) {
2257                 (*sps)->enc[1].page_shift = 24;
2258                 (*sps)->enc[1].pte_enc = def->penc[MMU_PAGE_16M];
2259         }
2260         (*sps)++;
2261 }
2262
2263 static int kvm_vm_ioctl_get_smmu_info_hv(struct kvm *kvm,
2264                                          struct kvm_ppc_smmu_info *info)
2265 {
2266         struct kvm_ppc_one_seg_page_size *sps;
2267
2268         info->flags = KVM_PPC_PAGE_SIZES_REAL;
2269         if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
2270                 info->flags |= KVM_PPC_1T_SEGMENTS;
2271         info->slb_size = mmu_slb_size;
2272
2273         /* We only support these sizes for now, and no multi-size segments */
2274         sps = &info->sps[0];
2275         kvmppc_add_seg_page_size(&sps, MMU_PAGE_4K);
2276         kvmppc_add_seg_page_size(&sps, MMU_PAGE_64K);
2277         kvmppc_add_seg_page_size(&sps, MMU_PAGE_16M);
2278
2279         return 0;
2280 }
2281
2282 /*
2283  * Get (and clear) the dirty memory log for a memory slot.
2284  */
2285 static int kvm_vm_ioctl_get_dirty_log_hv(struct kvm *kvm,
2286                                          struct kvm_dirty_log *log)
2287 {
2288         struct kvm_memory_slot *memslot;
2289         int r;
2290         unsigned long n;
2291
2292         mutex_lock(&kvm->slots_lock);
2293
2294         r = -EINVAL;
2295         if (log->slot >= KVM_USER_MEM_SLOTS)
2296                 goto out;
2297
2298         memslot = id_to_memslot(kvm->memslots, log->slot);
2299         r = -ENOENT;
2300         if (!memslot->dirty_bitmap)
2301                 goto out;
2302
2303         n = kvm_dirty_bitmap_bytes(memslot);
2304         memset(memslot->dirty_bitmap, 0, n);
2305
2306         r = kvmppc_hv_get_dirty_log(kvm, memslot, memslot->dirty_bitmap);
2307         if (r)
2308                 goto out;
2309
2310         r = -EFAULT;
2311         if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
2312                 goto out;
2313
2314         r = 0;
2315 out:
2316         mutex_unlock(&kvm->slots_lock);
2317         return r;
2318 }
2319
2320 static void kvmppc_core_free_memslot_hv(struct kvm_memory_slot *free,
2321                                         struct kvm_memory_slot *dont)
2322 {
2323         if (!dont || free->arch.rmap != dont->arch.rmap) {
2324                 vfree(free->arch.rmap);
2325                 free->arch.rmap = NULL;
2326         }
2327 }
2328
2329 static int kvmppc_core_create_memslot_hv(struct kvm_memory_slot *slot,
2330                                          unsigned long npages)
2331 {
2332         slot->arch.rmap = vzalloc(npages * sizeof(*slot->arch.rmap));
2333         if (!slot->arch.rmap)
2334                 return -ENOMEM;
2335
2336         return 0;
2337 }
2338
2339 static int kvmppc_core_prepare_memory_region_hv(struct kvm *kvm,
2340                                         struct kvm_memory_slot *memslot,
2341                                         struct kvm_userspace_memory_region *mem)
2342 {
2343         return 0;
2344 }
2345
2346 static void kvmppc_core_commit_memory_region_hv(struct kvm *kvm,
2347                                 struct kvm_userspace_memory_region *mem,
2348                                 const struct kvm_memory_slot *old)
2349 {
2350         unsigned long npages = mem->memory_size >> PAGE_SHIFT;
2351         struct kvm_memory_slot *memslot;
2352
2353         if (npages && old->npages) {
2354                 /*
2355                  * If modifying a memslot, reset all the rmap dirty bits.
2356                  * If this is a new memslot, we don't need to do anything
2357                  * since the rmap array starts out as all zeroes,
2358                  * i.e. no pages are dirty.
2359                  */
2360                 memslot = id_to_memslot(kvm->memslots, mem->slot);
2361                 kvmppc_hv_get_dirty_log(kvm, memslot, NULL);
2362         }
2363 }
2364
2365 /*
2366  * Update LPCR values in kvm->arch and in vcores.
2367  * Caller must hold kvm->lock.
2368  */
2369 void kvmppc_update_lpcr(struct kvm *kvm, unsigned long lpcr, unsigned long mask)
2370 {
2371         long int i;
2372         u32 cores_done = 0;
2373
2374         if ((kvm->arch.lpcr & mask) == lpcr)
2375                 return;
2376
2377         kvm->arch.lpcr = (kvm->arch.lpcr & ~mask) | lpcr;
2378
2379         for (i = 0; i < KVM_MAX_VCORES; ++i) {
2380                 struct kvmppc_vcore *vc = kvm->arch.vcores[i];
2381                 if (!vc)
2382                         continue;
2383                 spin_lock(&vc->lock);
2384                 vc->lpcr = (vc->lpcr & ~mask) | lpcr;
2385                 spin_unlock(&vc->lock);
2386                 if (++cores_done >= kvm->arch.online_vcores)
2387                         break;
2388         }
2389 }
2390
2391 static void kvmppc_mmu_destroy_hv(struct kvm_vcpu *vcpu)
2392 {
2393         return;
2394 }
2395
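/*
 * First-run setup of the guest hashed page table and the virtual
 * real mode area (VRMA).  The HPT is allocated if userspace has not
 * already done so, then the memslot covering guest physical address
 * 0 is located and the base page size of its backing VMA (4k, 64k or
 * 16M) determines the SLB encoding used for the VRMA.  The VRMASD
 * field of the LPCR is updated accordingly and HPTEs are created for
 * the VRMA before hpte_setup_done is set.
 */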
2396 static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
2397 {
2398         int err = 0;
2399         struct kvm *kvm = vcpu->kvm;
2400         unsigned long hva;
2401         struct kvm_memory_slot *memslot;
2402         struct vm_area_struct *vma;
2403         unsigned long lpcr = 0, senc;
2404         unsigned long psize, porder;
2405         int srcu_idx;
2406
2407         mutex_lock(&kvm->lock);
2408         if (kvm->arch.hpte_setup_done)
2409                 goto out;       /* another vcpu beat us to it */
2410
2411         /* Allocate hashed page table (if not done already) and reset it */
2412         if (!kvm->arch.hpt_virt) {
2413                 err = kvmppc_alloc_hpt(kvm, NULL);
2414                 if (err) {
2415                         pr_err("KVM: Couldn't alloc HPT\n");
2416                         goto out;
2417                 }
2418         }
2419
2420         /* Look up the memslot for guest physical address 0 */
2421         srcu_idx = srcu_read_lock(&kvm->srcu);
2422         memslot = gfn_to_memslot(kvm, 0);
2423
2424         /* We must have some memory at 0 by now */
2425         err = -EINVAL;
2426         if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
2427                 goto out_srcu;
2428
2429         /* Look up the VMA for the start of this memory slot */
2430         hva = memslot->userspace_addr;
2431         down_read(&current->mm->mmap_sem);
2432         vma = find_vma(current->mm, hva);
2433         if (!vma || vma->vm_start > hva || (vma->vm_flags & VM_IO))
2434                 goto up_out;
2435
2436         psize = vma_kernel_pagesize(vma);
2437         porder = __ilog2(psize);
2438
2439         up_read(&current->mm->mmap_sem);
2440
2441         /* We can handle 4k, 64k or 16M pages in the VRMA */
2442         err = -EINVAL;
2443         if (!(psize == 0x1000 || psize == 0x10000 ||
2444               psize == 0x1000000))
2445                 goto out_srcu;
2446
2447         /* Update VRMASD field in the LPCR */
2448         senc = slb_pgsize_encoding(psize);
2449         kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T |
2450                 (VRMA_VSID << SLB_VSID_SHIFT_1T);
2451         /* the -4 is to account for senc values starting at 0x10 */
2452         lpcr = senc << (LPCR_VRMASD_SH - 4);
2453
2454         /* Create HPTEs in the hash page table for the VRMA */
2455         kvmppc_map_vrma(vcpu, memslot, porder);
2456
2457         kvmppc_update_lpcr(kvm, lpcr, LPCR_VRMASD);
2458
2459         /* Order updates to kvm->arch.lpcr etc. vs. hpte_setup_done */
2460         smp_wmb();
2461         kvm->arch.hpte_setup_done = 1;
2462         err = 0;
2463  out_srcu:
2464         srcu_read_unlock(&kvm->srcu, srcu_idx);
2465  out:
2466         mutex_unlock(&kvm->lock);
2467         return err;
2468
2469  up_out:
2470         up_read(&current->mm->mmap_sem);
2471         goto out_srcu;
2472 }
2473
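/*
 * VM-level initialization.  A logical partition ID is allocated and,
 * since LPIDs can be reused without a TLB flush at teardown, every
 * core is flagged to flush its TLB before first running this guest.
 * The LPCR is set up for virtual RMA mode (VPM0/VPM1), the default
 * hcall set is enabled, kvm_hv_vm_activated() keeps secondary CPU
 * threads from coming online while HV guests exist, and a per-VM
 * debugfs directory named after the creating pid is created.
 */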
2474 static int kvmppc_core_init_vm_hv(struct kvm *kvm)
2475 {
2476         unsigned long lpcr, lpid;
2477         char buf[32];
2478
2479         /* Allocate the guest's logical partition ID */
2480
2481         lpid = kvmppc_alloc_lpid();
2482         if ((long)lpid < 0)
2483                 return -ENOMEM;
2484         kvm->arch.lpid = lpid;
2485
2486         /*
2487          * Since we don't flush the TLB when tearing down a VM,
2488          * and this lpid might have previously been used,
2489          * make sure we flush on each core before running the new VM.
2490          */
2491         cpumask_setall(&kvm->arch.need_tlb_flush);
2492
2493         /* Start out with the default set of hcalls enabled */
2494         memcpy(kvm->arch.enabled_hcalls, default_enabled_hcalls,
2495                sizeof(kvm->arch.enabled_hcalls));
2496
2497         kvm->arch.host_sdr1 = mfspr(SPRN_SDR1);
2498
2499         /* Init LPCR for virtual RMA mode */
2500         kvm->arch.host_lpid = mfspr(SPRN_LPID);
2501         kvm->arch.host_lpcr = lpcr = mfspr(SPRN_LPCR);
2502         lpcr &= LPCR_PECE | LPCR_LPES;
2503         lpcr |= (4UL << LPCR_DPFD_SH) | LPCR_HDICE |
2504                 LPCR_VPM0 | LPCR_VPM1;
2505         kvm->arch.vrma_slb_v = SLB_VSID_B_1T |
2506                 (VRMA_VSID << SLB_VSID_SHIFT_1T);
2507         /* On POWER8 turn on online bit to enable PURR/SPURR */
2508         if (cpu_has_feature(CPU_FTR_ARCH_207S))
2509                 lpcr |= LPCR_ONL;
2510         kvm->arch.lpcr = lpcr;
2511
2512         /*
2513          * Track that we now have a HV mode VM active. This blocks secondary
2514          * CPU threads from coming online.
2515          */
2516         kvm_hv_vm_activated();
2517
2518         /*
2519          * Create a debugfs directory for the VM
2520          */
2521         snprintf(buf, sizeof(buf), "vm%d", current->pid);
2522         kvm->arch.debugfs_dir = debugfs_create_dir(buf, kvm_debugfs_dir);
2523         if (!IS_ERR_OR_NULL(kvm->arch.debugfs_dir))
2524                 kvmppc_mmu_debugfs_init(kvm);
2525
2526         return 0;
2527 }
2528
2529 static void kvmppc_free_vcores(struct kvm *kvm)
2530 {
2531         long int i;
2532
2533         for (i = 0; i < KVM_MAX_VCORES; ++i) {
2534                 if (kvm->arch.vcores[i] && kvm->arch.vcores[i]->mpp_buffer) {
2535                         struct kvmppc_vcore *vc = kvm->arch.vcores[i];
2536                         free_pages((unsigned long)vc->mpp_buffer,
2537                                    MPP_BUFFER_ORDER);
2538                 }
2539                 kfree(kvm->arch.vcores[i]);
2540         }
2541         kvm->arch.online_vcores = 0;
2542 }
2543
2544 static void kvmppc_core_destroy_vm_hv(struct kvm *kvm)
2545 {
2546         debugfs_remove_recursive(kvm->arch.debugfs_dir);
2547
2548         kvm_hv_vm_deactivated();
2549
2550         kvmppc_free_vcores(kvm);
2551
2552         kvmppc_free_hpt(kvm);
2553 }
2554
2555 /* We don't need to emulate any privileged instructions or dcbz */
2556 static int kvmppc_core_emulate_op_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
2557                                      unsigned int inst, int *advance)
2558 {
2559         return EMULATE_FAIL;
2560 }
2561
2562 static int kvmppc_core_emulate_mtspr_hv(struct kvm_vcpu *vcpu, int sprn,
2563                                         ulong spr_val)
2564 {
2565         return EMULATE_FAIL;
2566 }
2567
2568 static int kvmppc_core_emulate_mfspr_hv(struct kvm_vcpu *vcpu, int sprn,
2569                                         ulong *spr_val)
2570 {
2571         return EMULATE_FAIL;
2572 }
2573
2574 static int kvmppc_core_check_processor_compat_hv(void)
2575 {
2576         if (!cpu_has_feature(CPU_FTR_HVMODE) ||
2577             !cpu_has_feature(CPU_FTR_ARCH_206))
2578                 return -EIO;
2579         return 0;
2580 }
2581
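/*
 * VM ioctls specific to HV guests: KVM_PPC_ALLOCATE_HTAB takes a
 * pointer to a u32 HPT order and returns the order actually used,
 * and KVM_PPC_GET_HTAB_FD returns a file descriptor through which
 * the HPT contents can be read or written (used when migrating a
 * guest).  Illustrative userspace use, with an arbitrary order value:
 *
 *	u32 order = 24;			(a 2^24-byte, i.e. 16MB, HPT)
 *	ioctl(vm_fd, KVM_PPC_ALLOCATE_HTAB, &order);
 *
 * On return, order holds the order of the table that was allocated.
 */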
2582 static long kvm_arch_vm_ioctl_hv(struct file *filp,
2583                                  unsigned int ioctl, unsigned long arg)
2584 {
2585         struct kvm *kvm __maybe_unused = filp->private_data;
2586         void __user *argp = (void __user *)arg;
2587         long r;
2588
2589         switch (ioctl) {
2590
2591         case KVM_PPC_ALLOCATE_HTAB: {
2592                 u32 htab_order;
2593
2594                 r = -EFAULT;
2595                 if (get_user(htab_order, (u32 __user *)argp))
2596                         break;
2597                 r = kvmppc_alloc_reset_hpt(kvm, &htab_order);
2598                 if (r)
2599                         break;
2600                 r = -EFAULT;
2601                 if (put_user(htab_order, (u32 __user *)argp))
2602                         break;
2603                 r = 0;
2604                 break;
2605         }
2606
2607         case KVM_PPC_GET_HTAB_FD: {
2608                 struct kvm_get_htab_fd ghf;
2609
2610                 r = -EFAULT;
2611                 if (copy_from_user(&ghf, argp, sizeof(ghf)))
2612                         break;
2613                 r = kvm_vm_ioctl_get_htab_fd(kvm, &ghf);
2614                 break;
2615         }
2616
2617         default:
2618                 r = -ENOTTY;
2619         }
2620
2621         return r;
2622 }
2623
2624 /*
2625  * List of hcall numbers to enable by default.
2626  * For compatibility with old userspace, we enable by default
2627  * all hcalls that were implemented before the hcall-enabling
2628  * facility was added.  Note this list should not include H_RTAS.
2629  */
2630 static unsigned int default_hcall_list[] = {
2631         H_REMOVE,
2632         H_ENTER,
2633         H_READ,
2634         H_PROTECT,
2635         H_BULK_REMOVE,
2636         H_GET_TCE,
2637         H_PUT_TCE,
2638         H_SET_DABR,
2639         H_SET_XDABR,
2640         H_CEDE,
2641         H_PROD,
2642         H_CONFER,
2643         H_REGISTER_VPA,
2644 #ifdef CONFIG_KVM_XICS
2645         H_EOI,
2646         H_CPPR,
2647         H_IPI,
2648         H_IPOLL,
2649         H_XIRR,
2650         H_XIRR_X,
2651 #endif
2652         0
2653 };
2654
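/*
 * Populate the bitmap of hcalls enabled by default.  PAPR hcall
 * numbers are multiples of 4, so bit (hcall / 4) is used for each
 * hcall; a warning is emitted if an entry in the table above is not
 * actually implemented by this module.
 */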
2655 static void init_default_hcalls(void)
2656 {
2657         int i;
2658         unsigned int hcall;
2659
2660         for (i = 0; default_hcall_list[i]; ++i) {
2661                 hcall = default_hcall_list[i];
2662                 WARN_ON(!kvmppc_hcall_impl_hv(hcall));
2663                 __set_bit(hcall / 4, default_enabled_hcalls);
2664         }
2665 }
2666
2667 static struct kvmppc_ops kvm_ops_hv = {
2668         .get_sregs = kvm_arch_vcpu_ioctl_get_sregs_hv,
2669         .set_sregs = kvm_arch_vcpu_ioctl_set_sregs_hv,
2670         .get_one_reg = kvmppc_get_one_reg_hv,
2671         .set_one_reg = kvmppc_set_one_reg_hv,
2672         .vcpu_load   = kvmppc_core_vcpu_load_hv,
2673         .vcpu_put    = kvmppc_core_vcpu_put_hv,
2674         .set_msr     = kvmppc_set_msr_hv,
2675         .vcpu_run    = kvmppc_vcpu_run_hv,
2676         .vcpu_create = kvmppc_core_vcpu_create_hv,
2677         .vcpu_free   = kvmppc_core_vcpu_free_hv,
2678         .check_requests = kvmppc_core_check_requests_hv,
2679         .get_dirty_log  = kvm_vm_ioctl_get_dirty_log_hv,
2680         .flush_memslot  = kvmppc_core_flush_memslot_hv,
2681         .prepare_memory_region = kvmppc_core_prepare_memory_region_hv,
2682         .commit_memory_region  = kvmppc_core_commit_memory_region_hv,
2683         .unmap_hva = kvm_unmap_hva_hv,
2684         .unmap_hva_range = kvm_unmap_hva_range_hv,
2685         .age_hva  = kvm_age_hva_hv,
2686         .test_age_hva = kvm_test_age_hva_hv,
2687         .set_spte_hva = kvm_set_spte_hva_hv,
2688         .mmu_destroy  = kvmppc_mmu_destroy_hv,
2689         .free_memslot = kvmppc_core_free_memslot_hv,
2690         .create_memslot = kvmppc_core_create_memslot_hv,
2691         .init_vm =  kvmppc_core_init_vm_hv,
2692         .destroy_vm = kvmppc_core_destroy_vm_hv,
2693         .get_smmu_info = kvm_vm_ioctl_get_smmu_info_hv,
2694         .emulate_op = kvmppc_core_emulate_op_hv,
2695         .emulate_mtspr = kvmppc_core_emulate_mtspr_hv,
2696         .emulate_mfspr = kvmppc_core_emulate_mfspr_hv,
2697         .fast_vcpu_kick = kvmppc_fast_vcpu_kick_hv,
2698         .arch_vm_ioctl  = kvm_arch_vm_ioctl_hv,
2699         .hcall_implemented = kvmppc_hcall_impl_hv,
2700 };
2701
2702 static int kvmppc_book3s_init_hv(void)
2703 {
2704         int r;
2705         /*
2706          * FIXME!! Do we need to check on all CPUs?
2707          */
2708         r = kvmppc_core_check_processor_compat_hv();
2709         if (r < 0)
2710                 return -ENODEV;
2711
2712         kvm_ops_hv.owner = THIS_MODULE;
2713         kvmppc_hv_ops = &kvm_ops_hv;
2714
2715         init_default_hcalls();
2716
2717         r = kvmppc_mmu_hv_init();
2718         return r;
2719 }
2720
2721 static void kvmppc_book3s_exit_hv(void)
2722 {
2723         kvmppc_hv_ops = NULL;
2724 }
2725
2726 module_init(kvmppc_book3s_init_hv);
2727 module_exit(kvmppc_book3s_exit_hv);
2728 MODULE_LICENSE("GPL");
2729 MODULE_ALIAS_MISCDEV(KVM_MINOR);
2730 MODULE_ALIAS("devname:kvm");