/*
 * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *    Alexander Graf <agraf@suse.de>
 *    Kevin Wolf <mail@kevin-wolf.de>
 *
 * Description:
 * This file is derived from arch/powerpc/kvm/44x.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kvm_host.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/miscdevice.h>

#include <asm/reg.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu_context.h>
#include <asm/page.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>

#include "book3s.h"
#include "trace.h"

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

/* #define EXIT_DEBUG */

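/*
 * Per-vcpu counters exposed through debugfs; each entry names a file
 * and the vcpu->stat field backing it.
 */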
struct kvm_stats_debugfs_item debugfs_entries[] = {
        { "exits",       VCPU_STAT(sum_exits) },
        { "mmio",        VCPU_STAT(mmio_exits) },
        { "sig",         VCPU_STAT(signal_exits) },
        { "sysc",        VCPU_STAT(syscall_exits) },
        { "inst_emu",    VCPU_STAT(emulated_inst_exits) },
        { "dec",         VCPU_STAT(dec_exits) },
        { "ext_intr",    VCPU_STAT(ext_intr_exits) },
        { "queue_intr",  VCPU_STAT(queue_intr) },
        { "halt_wakeup", VCPU_STAT(halt_wakeup) },
        { "pf_storage",  VCPU_STAT(pf_storage) },
        { "sp_storage",  VCPU_STAT(sp_storage) },
        { "pf_instruc",  VCPU_STAT(pf_instruc) },
        { "sp_instruc",  VCPU_STAT(sp_instruc) },
        { "ld",          VCPU_STAT(ld) },
        { "ld_slow",     VCPU_STAT(ld_slow) },
        { "st",          VCPU_STAT(st) },
        { "st_slow",     VCPU_STAT(st_slow) },
        { NULL }
};

void kvmppc_core_load_host_debugstate(struct kvm_vcpu *vcpu)
{
}

void kvmppc_core_load_guest_debugstate(struct kvm_vcpu *vcpu)
{
}

static inline unsigned long kvmppc_interrupt_offset(struct kvm_vcpu *vcpu)
{
        if (!is_kvmppc_hv_enabled(vcpu->kvm))
                return to_book3s(vcpu)->hior;
        return 0;
}

static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
                        unsigned long pending_now, unsigned long old_pending)
{
        if (is_kvmppc_hv_enabled(vcpu->kvm))
                return;
        if (pending_now)
                vcpu->arch.shared->int_pending = 1;
        else if (old_pending)
                vcpu->arch.shared->int_pending = 0;
}

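/*
 * With the shared-page paravirtual interface a guest flags a critical
 * section by writing its stack pointer (r1) into shared->critical.
 * While that value still matches r1 and the guest is in supervisor
 * mode, interrupt delivery must be held off.  HV guests never use
 * this protocol, so they are never considered critical here.
 */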
static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
{
        ulong crit_raw;
        ulong crit_r1;
        bool crit;

        if (is_kvmppc_hv_enabled(vcpu->kvm))
                return false;

        crit_raw = vcpu->arch.shared->critical;
        crit_r1 = kvmppc_get_gpr(vcpu, 1);

        /* Truncate crit indicators in 32 bit mode */
        if (!(vcpu->arch.shared->msr & MSR_SF)) {
                crit_raw &= 0xffffffff;
                crit_r1 &= 0xffffffff;
        }

        /* Critical section when crit == r1 */
        crit = (crit_raw == crit_r1);
        /* ... and we're in supervisor mode */
        crit = crit && !(vcpu->arch.shared->msr & MSR_PR);

        return crit;
}

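/*
 * Deliver an interrupt immediately: stash PC and MSR in SRR0/SRR1,
 * branch to the vector (offset by HIOR for PR guests) and let the MMU
 * backend compute the MSR value for interrupt entry.
 */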
void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags)
{
        vcpu->arch.shared->srr0 = kvmppc_get_pc(vcpu);
        vcpu->arch.shared->srr1 = vcpu->arch.shared->msr | flags;
        kvmppc_set_pc(vcpu, kvmppc_interrupt_offset(vcpu) + vec);
        vcpu->arch.mmu.reset_msr(vcpu);
}

static int kvmppc_book3s_vec2irqprio(unsigned int vec)
{
        unsigned int prio;

        switch (vec) {
        case 0x100: prio = BOOK3S_IRQPRIO_SYSTEM_RESET;         break;
        case 0x200: prio = BOOK3S_IRQPRIO_MACHINE_CHECK;        break;
        case 0x300: prio = BOOK3S_IRQPRIO_DATA_STORAGE;         break;
        case 0x380: prio = BOOK3S_IRQPRIO_DATA_SEGMENT;         break;
        case 0x400: prio = BOOK3S_IRQPRIO_INST_STORAGE;         break;
        case 0x480: prio = BOOK3S_IRQPRIO_INST_SEGMENT;         break;
        case 0x500: prio = BOOK3S_IRQPRIO_EXTERNAL;             break;
        case 0x501: prio = BOOK3S_IRQPRIO_EXTERNAL_LEVEL;       break;
        case 0x600: prio = BOOK3S_IRQPRIO_ALIGNMENT;            break;
        case 0x700: prio = BOOK3S_IRQPRIO_PROGRAM;              break;
        case 0x800: prio = BOOK3S_IRQPRIO_FP_UNAVAIL;           break;
        case 0x900: prio = BOOK3S_IRQPRIO_DECREMENTER;          break;
        case 0xc00: prio = BOOK3S_IRQPRIO_SYSCALL;              break;
        case 0xd00: prio = BOOK3S_IRQPRIO_DEBUG;                break;
        case 0xf20: prio = BOOK3S_IRQPRIO_ALTIVEC;              break;
        case 0xf40: prio = BOOK3S_IRQPRIO_VSX;                  break;
        default:    prio = BOOK3S_IRQPRIO_MAX;                  break;
        }

        return prio;
}

void kvmppc_book3s_dequeue_irqprio(struct kvm_vcpu *vcpu,
                                   unsigned int vec)
{
        unsigned long old_pending = vcpu->arch.pending_exceptions;

        clear_bit(kvmppc_book3s_vec2irqprio(vec),
                  &vcpu->arch.pending_exceptions);

        kvmppc_update_int_pending(vcpu, vcpu->arch.pending_exceptions,
                                  old_pending);
}

void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec)
{
        vcpu->stat.queue_intr++;

        set_bit(kvmppc_book3s_vec2irqprio(vec),
                &vcpu->arch.pending_exceptions);
#ifdef EXIT_DEBUG
        printk(KERN_INFO "Queueing interrupt %x\n", vec);
#endif
}
EXPORT_SYMBOL_GPL(kvmppc_book3s_queue_irqprio);

void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags)
{
        /* might as well deliver this straight away */
        kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_PROGRAM, flags);
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_program);

void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
{
        kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_dec);

int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
{
        return test_bit(BOOK3S_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}
EXPORT_SYMBOL_GPL(kvmppc_core_pending_dec);

void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
{
        kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
}
EXPORT_SYMBOL_GPL(kvmppc_core_dequeue_dec);

void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
                                struct kvm_interrupt *irq)
{
        unsigned int vec = BOOK3S_INTERRUPT_EXTERNAL;

        if (irq->irq == KVM_INTERRUPT_SET_LEVEL)
                vec = BOOK3S_INTERRUPT_EXTERNAL_LEVEL;

        kvmppc_book3s_queue_irqprio(vcpu, vec);
}

void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu)
{
        kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL);
        kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL_LEVEL);
}

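/*
 * Try to deliver the exception of the given priority.  Decrementer and
 * external interrupts are gated on MSR_EE and on the guest not being in
 * a critical section; everything else is delivered unconditionally.
 * Returns 1 if the interrupt was injected, 0 otherwise.
 */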
int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu, unsigned int priority)
{
        int deliver = 1;
        int vec = 0;
        bool crit = kvmppc_critical_section(vcpu);

        switch (priority) {
        case BOOK3S_IRQPRIO_DECREMENTER:
                deliver = (vcpu->arch.shared->msr & MSR_EE) && !crit;
                vec = BOOK3S_INTERRUPT_DECREMENTER;
                break;
        case BOOK3S_IRQPRIO_EXTERNAL:
        case BOOK3S_IRQPRIO_EXTERNAL_LEVEL:
                deliver = (vcpu->arch.shared->msr & MSR_EE) && !crit;
                vec = BOOK3S_INTERRUPT_EXTERNAL;
                break;
        case BOOK3S_IRQPRIO_SYSTEM_RESET:
                vec = BOOK3S_INTERRUPT_SYSTEM_RESET;
                break;
        case BOOK3S_IRQPRIO_MACHINE_CHECK:
                vec = BOOK3S_INTERRUPT_MACHINE_CHECK;
                break;
        case BOOK3S_IRQPRIO_DATA_STORAGE:
                vec = BOOK3S_INTERRUPT_DATA_STORAGE;
                break;
        case BOOK3S_IRQPRIO_INST_STORAGE:
                vec = BOOK3S_INTERRUPT_INST_STORAGE;
                break;
        case BOOK3S_IRQPRIO_DATA_SEGMENT:
                vec = BOOK3S_INTERRUPT_DATA_SEGMENT;
                break;
        case BOOK3S_IRQPRIO_INST_SEGMENT:
                vec = BOOK3S_INTERRUPT_INST_SEGMENT;
                break;
        case BOOK3S_IRQPRIO_ALIGNMENT:
                vec = BOOK3S_INTERRUPT_ALIGNMENT;
                break;
        case BOOK3S_IRQPRIO_PROGRAM:
                vec = BOOK3S_INTERRUPT_PROGRAM;
                break;
        case BOOK3S_IRQPRIO_VSX:
                vec = BOOK3S_INTERRUPT_VSX;
                break;
        case BOOK3S_IRQPRIO_ALTIVEC:
                vec = BOOK3S_INTERRUPT_ALTIVEC;
                break;
        case BOOK3S_IRQPRIO_FP_UNAVAIL:
                vec = BOOK3S_INTERRUPT_FP_UNAVAIL;
                break;
        case BOOK3S_IRQPRIO_SYSCALL:
                vec = BOOK3S_INTERRUPT_SYSCALL;
                break;
        case BOOK3S_IRQPRIO_DEBUG:
                vec = BOOK3S_INTERRUPT_TRACE;
                break;
        case BOOK3S_IRQPRIO_PERFORMANCE_MONITOR:
                vec = BOOK3S_INTERRUPT_PERFMON;
                break;
        default:
                deliver = 0;
                printk(KERN_ERR "KVM: Unknown interrupt: 0x%x\n", priority);
                break;
        }

#if 0
        printk(KERN_INFO "Deliver interrupt 0x%x? %x\n", vec, deliver);
#endif

        if (deliver)
                kvmppc_inject_interrupt(vcpu, vec, 0);

        return deliver;
}

/*
 * This function determines if an irqprio should be cleared once issued.
 */
static bool clear_irqprio(struct kvm_vcpu *vcpu, unsigned int priority)
{
        switch (priority) {
        case BOOK3S_IRQPRIO_DECREMENTER:
                /* DEC interrupts get cleared by mtdec */
                return false;
        case BOOK3S_IRQPRIO_EXTERNAL_LEVEL:
                /* External interrupts get cleared by userspace */
                return false;
        }

        return true;
}

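/*
 * Scan pending_exceptions in priority order and deliver the first
 * deliverable exception, clearing its bit unless clear_irqprio() says
 * the source is cleared elsewhere (mtdec or userspace).
 */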
int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
{
        unsigned long *pending = &vcpu->arch.pending_exceptions;
        unsigned long old_pending = vcpu->arch.pending_exceptions;
        unsigned int priority;

#ifdef EXIT_DEBUG
        if (vcpu->arch.pending_exceptions)
                printk(KERN_EMERG "KVM: Check pending: %lx\n", vcpu->arch.pending_exceptions);
#endif
        priority = __ffs(*pending);
        while (priority < BOOK3S_IRQPRIO_MAX) {
                if (kvmppc_book3s_irqprio_deliver(vcpu, priority) &&
                    clear_irqprio(vcpu, priority)) {
                        clear_bit(priority, &vcpu->arch.pending_exceptions);
                        break;
                }

                priority = find_next_bit(pending,
                                         BITS_PER_BYTE * sizeof(*pending),
                                         priority + 1);
        }

        /* Tell the guest about our interrupt status */
        kvmppc_update_int_pending(vcpu, *pending, old_pending);

        return 0;
}
EXPORT_SYMBOL_GPL(kvmppc_core_prepare_to_enter);

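/*
 * Translate a guest frame number to a host pfn.  The paravirtual magic
 * page is special-cased: accesses to it resolve to the page backing
 * vcpu->arch.shared rather than going through the memslots.
 */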
pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn, bool writing,
                        bool *writable)
{
        ulong mp_pa = vcpu->arch.magic_page_pa;

        if (!(vcpu->arch.shared->msr & MSR_SF))
                mp_pa = (uint32_t)mp_pa;

        /* Magic page override */
        if (unlikely(mp_pa) &&
            unlikely(((gfn << PAGE_SHIFT) & KVM_PAM) ==
                     ((mp_pa & PAGE_MASK) & KVM_PAM))) {
                ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK;
                pfn_t pfn;

                pfn = (pfn_t)virt_to_phys((void *)shared_page) >> PAGE_SHIFT;
                get_page(pfn_to_page(pfn));
                if (writable)
                        *writable = true;
                return pfn;
        }

        return gfn_to_pfn_prot(vcpu->kvm, gfn, writing, writable);
}
EXPORT_SYMBOL_GPL(kvmppc_gfn_to_pfn);

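/*
 * Translate an effective address via the guest MMU.  With translation
 * off (MSR_IR/MSR_DR clear) the access is in real mode, so the
 * effective address is used directly, masked to KVM_PAM.
 */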
static int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, bool data,
                        bool iswrite, struct kvmppc_pte *pte)
{
        int relocated = (vcpu->arch.shared->msr & (data ? MSR_DR : MSR_IR));
        int r;

        if (relocated) {
                r = vcpu->arch.mmu.xlate(vcpu, eaddr, pte, data, iswrite);
        } else {
                pte->eaddr = eaddr;
                pte->raddr = eaddr & KVM_PAM;
                pte->vpage = VSID_REAL | eaddr >> 12;
                pte->may_read = true;
                pte->may_write = true;
                pte->may_execute = true;
                r = 0;
        }

        return r;
}

static hva_t kvmppc_bad_hva(void)
{
        return PAGE_OFFSET;
}

static hva_t kvmppc_pte_to_hva(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte,
                               bool read)
{
        hva_t hpage;

        if (read && !pte->may_read)
                goto err;

        if (!read && !pte->may_write)
                goto err;

        hpage = gfn_to_hva(vcpu->kvm, pte->raddr >> PAGE_SHIFT);
        if (kvm_is_error_hva(hpage))
                goto err;

        return hpage | (pte->raddr & ~PAGE_MASK);
err:
        return kvmppc_bad_hva();
}

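/*
 * Guest store/load helpers for instruction emulation.  On success
 * *eaddr is updated to the translated real address; accesses that do
 * not hit guest memory are handed back as EMULATE_DO_MMIO.
 */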
int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
              bool data)
{
        struct kvmppc_pte pte;

        vcpu->stat.st++;

        if (kvmppc_xlate(vcpu, *eaddr, data, true, &pte))
                return -ENOENT;

        *eaddr = pte.raddr;

        if (!pte.may_write)
                return -EPERM;

        if (kvm_write_guest(vcpu->kvm, pte.raddr, ptr, size))
                return EMULATE_DO_MMIO;

        return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvmppc_st);

int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
              bool data)
{
        struct kvmppc_pte pte;
        hva_t hva = *eaddr;

        vcpu->stat.ld++;

        if (kvmppc_xlate(vcpu, *eaddr, data, false, &pte))
                goto nopte;

        *eaddr = pte.raddr;

        hva = kvmppc_pte_to_hva(vcpu, &pte, true);
        if (kvm_is_error_hva(hva))
                goto mmio;

        if (copy_from_user(ptr, (void __user *)hva, size)) {
                printk(KERN_INFO "kvmppc_ld at 0x%lx failed\n", hva);
                goto mmio;
        }

        return EMULATE_DONE;

nopte:
        return -ENOENT;
mmio:
        return EMULATE_DO_MMIO;
}
EXPORT_SYMBOL_GPL(kvmppc_ld);

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
        return 0;
}

int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu)
{
        return 0;
}

void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        return vcpu->kvm->arch.kvm_ops->get_sregs(vcpu, sregs);
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        return vcpu->kvm->arch.kvm_ops->set_sregs(vcpu, sregs);
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        int i;

        regs->pc = kvmppc_get_pc(vcpu);
        regs->cr = kvmppc_get_cr(vcpu);
        regs->ctr = kvmppc_get_ctr(vcpu);
        regs->lr = kvmppc_get_lr(vcpu);
        regs->xer = kvmppc_get_xer(vcpu);
        regs->msr = vcpu->arch.shared->msr;
        regs->srr0 = vcpu->arch.shared->srr0;
        regs->srr1 = vcpu->arch.shared->srr1;
        regs->pid = vcpu->arch.pid;
        regs->sprg0 = vcpu->arch.shared->sprg0;
        regs->sprg1 = vcpu->arch.shared->sprg1;
        regs->sprg2 = vcpu->arch.shared->sprg2;
        regs->sprg3 = vcpu->arch.shared->sprg3;
        regs->sprg4 = vcpu->arch.shared->sprg4;
        regs->sprg5 = vcpu->arch.shared->sprg5;
        regs->sprg6 = vcpu->arch.shared->sprg6;
        regs->sprg7 = vcpu->arch.shared->sprg7;

        for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
                regs->gpr[i] = kvmppc_get_gpr(vcpu, i);

        return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        int i;

        kvmppc_set_pc(vcpu, regs->pc);
        kvmppc_set_cr(vcpu, regs->cr);
        kvmppc_set_ctr(vcpu, regs->ctr);
        kvmppc_set_lr(vcpu, regs->lr);
        kvmppc_set_xer(vcpu, regs->xer);
        kvmppc_set_msr(vcpu, regs->msr);
        vcpu->arch.shared->srr0 = regs->srr0;
        vcpu->arch.shared->srr1 = regs->srr1;
        vcpu->arch.shared->sprg0 = regs->sprg0;
        vcpu->arch.shared->sprg1 = regs->sprg1;
        vcpu->arch.shared->sprg2 = regs->sprg2;
        vcpu->arch.shared->sprg3 = regs->sprg3;
        vcpu->arch.shared->sprg4 = regs->sprg4;
        vcpu->arch.shared->sprg5 = regs->sprg5;
        vcpu->arch.shared->sprg6 = regs->sprg6;
        vcpu->arch.shared->sprg7 = regs->sprg7;

        for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
                kvmppc_set_gpr(vcpu, i, regs->gpr[i]);

        return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        return -ENOTSUPP;
}

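/*
 * ONE_REG accessors.  The HV/PR backend gets first pick through its
 * get/set_one_reg hook; any register it rejects with -EINVAL is
 * handled by the common code below.
 */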
int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
        int r;
        union kvmppc_one_reg val;
        int size;
        long int i;

        size = one_reg_size(reg->id);
        if (size > sizeof(val))
                return -EINVAL;

        r = vcpu->kvm->arch.kvm_ops->get_one_reg(vcpu, reg->id, &val);
        if (r == -EINVAL) {
                r = 0;
                switch (reg->id) {
                case KVM_REG_PPC_DAR:
                        val = get_reg_val(reg->id, vcpu->arch.shared->dar);
                        break;
                case KVM_REG_PPC_DSISR:
                        val = get_reg_val(reg->id, vcpu->arch.shared->dsisr);
                        break;
                case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
                        i = reg->id - KVM_REG_PPC_FPR0;
                        val = get_reg_val(reg->id, VCPU_FPR(vcpu, i));
                        break;
                case KVM_REG_PPC_FPSCR:
                        val = get_reg_val(reg->id, vcpu->arch.fp.fpscr);
                        break;
#ifdef CONFIG_ALTIVEC
                case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
                        if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
                                r = -ENXIO;
                                break;
                        }
                        val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0];
                        break;
                case KVM_REG_PPC_VSCR:
                        if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
                                r = -ENXIO;
                                break;
                        }
                        val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]);
                        break;
                case KVM_REG_PPC_VRSAVE:
                        val = get_reg_val(reg->id, vcpu->arch.vrsave);
                        break;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
                case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
                        if (cpu_has_feature(CPU_FTR_VSX)) {
                                long int i = reg->id - KVM_REG_PPC_VSR0;
                                val.vsxval[0] = vcpu->arch.fp.fpr[i][0];
                                val.vsxval[1] = vcpu->arch.fp.fpr[i][1];
                        } else {
                                r = -ENXIO;
                        }
                        break;
#endif /* CONFIG_VSX */
                case KVM_REG_PPC_DEBUG_INST: {
                        u32 opcode = INS_TW;
                        r = copy_to_user((u32 __user *)(long)reg->addr,
                                         &opcode, sizeof(u32));
                        break;
                }
#ifdef CONFIG_KVM_XICS
                case KVM_REG_PPC_ICP_STATE:
                        if (!vcpu->arch.icp) {
                                r = -ENXIO;
                                break;
                        }
                        val = get_reg_val(reg->id, kvmppc_xics_get_icp(vcpu));
                        break;
#endif /* CONFIG_KVM_XICS */
                default:
                        r = -EINVAL;
                        break;
                }
        }
        if (r)
                return r;

        if (copy_to_user((char __user *)(unsigned long)reg->addr, &val, size))
                r = -EFAULT;

        return r;
}

int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
        int r;
        union kvmppc_one_reg val;
        int size;
        long int i;

        size = one_reg_size(reg->id);
        if (size > sizeof(val))
                return -EINVAL;

        if (copy_from_user(&val, (char __user *)(unsigned long)reg->addr, size))
                return -EFAULT;

        r = vcpu->kvm->arch.kvm_ops->set_one_reg(vcpu, reg->id, &val);
        if (r == -EINVAL) {
                r = 0;
                switch (reg->id) {
                case KVM_REG_PPC_DAR:
                        vcpu->arch.shared->dar = set_reg_val(reg->id, val);
                        break;
                case KVM_REG_PPC_DSISR:
                        vcpu->arch.shared->dsisr = set_reg_val(reg->id, val);
                        break;
                case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
                        i = reg->id - KVM_REG_PPC_FPR0;
                        VCPU_FPR(vcpu, i) = set_reg_val(reg->id, val);
                        break;
                case KVM_REG_PPC_FPSCR:
                        vcpu->arch.fp.fpscr = set_reg_val(reg->id, val);
                        break;
#ifdef CONFIG_ALTIVEC
                case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
                        if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
                                r = -ENXIO;
                                break;
                        }
                        vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;
                        break;
                case KVM_REG_PPC_VSCR:
                        if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
                                r = -ENXIO;
                                break;
                        }
                        vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val);
                        break;
                case KVM_REG_PPC_VRSAVE:
                        if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
                                r = -ENXIO;
                                break;
                        }
                        vcpu->arch.vrsave = set_reg_val(reg->id, val);
                        break;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
                case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
                        if (cpu_has_feature(CPU_FTR_VSX)) {
                                long int i = reg->id - KVM_REG_PPC_VSR0;
                                vcpu->arch.fp.fpr[i][0] = val.vsxval[0];
                                vcpu->arch.fp.fpr[i][1] = val.vsxval[1];
                        } else {
                                r = -ENXIO;
                        }
                        break;
#endif /* CONFIG_VSX */
#ifdef CONFIG_KVM_XICS
                case KVM_REG_PPC_ICP_STATE:
                        if (!vcpu->arch.icp) {
                                r = -ENXIO;
                                break;
                        }
                        r = kvmppc_xics_set_icp(vcpu,
                                                set_reg_val(reg->id, val));
                        break;
#endif /* CONFIG_KVM_XICS */
                default:
                        r = -EINVAL;
                        break;
                }
        }

        return r;
}

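/*
 * The wrappers below simply dispatch through kvm_ops, which points at
 * either the HV or the PR implementation for this VM.
 */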
void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
        vcpu->kvm->arch.kvm_ops->vcpu_load(vcpu, cpu);
}

void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
{
        vcpu->kvm->arch.kvm_ops->vcpu_put(vcpu);
}

void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
{
        vcpu->kvm->arch.kvm_ops->set_msr(vcpu, msr);
}
EXPORT_SYMBOL_GPL(kvmppc_set_msr);

int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
        return vcpu->kvm->arch.kvm_ops->vcpu_run(kvm_run, vcpu);
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                  struct kvm_translation *tr)
{
        return 0;
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
                                        struct kvm_guest_debug *dbg)
{
        return -EINVAL;
}

void kvmppc_decrementer_func(unsigned long data)
{
        struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;

        kvmppc_core_queue_dec(vcpu);
        kvm_vcpu_kick(vcpu);
}

struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
{
        return kvm->arch.kvm_ops->vcpu_create(kvm, id);
}

void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
{
        vcpu->kvm->arch.kvm_ops->vcpu_free(vcpu);
}

int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
{
        return vcpu->kvm->arch.kvm_ops->check_requests(vcpu);
}

int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
        return kvm->arch.kvm_ops->get_dirty_log(kvm, log);
}

void kvmppc_core_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
                              struct kvm_memory_slot *dont)
{
        kvm->arch.kvm_ops->free_memslot(free, dont);
}

int kvmppc_core_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
                               unsigned long npages)
{
        return kvm->arch.kvm_ops->create_memslot(slot, npages);
}

void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
        kvm->arch.kvm_ops->flush_memslot(kvm, memslot);
}

int kvmppc_core_prepare_memory_region(struct kvm *kvm,
                                struct kvm_memory_slot *memslot,
                                struct kvm_userspace_memory_region *mem)
{
        return kvm->arch.kvm_ops->prepare_memory_region(kvm, memslot, mem);
}

void kvmppc_core_commit_memory_region(struct kvm *kvm,
                                struct kvm_userspace_memory_region *mem,
                                const struct kvm_memory_slot *old)
{
        kvm->arch.kvm_ops->commit_memory_region(kvm, mem, old);
}

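/*
 * MMU notifier hooks: forward host-side page table events (unmap,
 * aging, pte updates) to the backend so stale guest mappings are torn
 * down.
 */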
int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
{
        return kvm->arch.kvm_ops->unmap_hva(kvm, hva);
}
EXPORT_SYMBOL_GPL(kvm_unmap_hva);

int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
{
        return kvm->arch.kvm_ops->unmap_hva_range(kvm, start, end);
}

int kvm_age_hva(struct kvm *kvm, unsigned long hva)
{
        return kvm->arch.kvm_ops->age_hva(kvm, hva);
}

int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
{
        return kvm->arch.kvm_ops->test_age_hva(kvm, hva);
}

void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
        kvm->arch.kvm_ops->set_spte_hva(kvm, hva, pte);
}

void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
{
        vcpu->kvm->arch.kvm_ops->mmu_destroy(vcpu);
}

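/*
 * VM-wide init/teardown.  The TCE table and RTAS token lists live in
 * common code for 64-bit, so they are set up here before the backend's
 * init_vm hook runs.
 */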
int kvmppc_core_init_vm(struct kvm *kvm)
{
#ifdef CONFIG_PPC64
        INIT_LIST_HEAD(&kvm->arch.spapr_tce_tables);
        INIT_LIST_HEAD(&kvm->arch.rtas_tokens);
#endif

        return kvm->arch.kvm_ops->init_vm(kvm);
}

void kvmppc_core_destroy_vm(struct kvm *kvm)
{
        kvm->arch.kvm_ops->destroy_vm(kvm);

#ifdef CONFIG_PPC64
        kvmppc_rtas_tokens_free(kvm);
        WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
#endif
}

int kvmppc_core_check_processor_compat(void)
{
        /*
         * We always return 0 for book3s. We check
         * for compatibility while loading the HV
         * or PR module.
         */
        return 0;
}

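/*
 * On 32-bit, PR KVM is built into this module, so its init/exit hooks
 * are called from here; on 64-bit the HV and PR backends are loaded as
 * separate modules.
 */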
static int kvmppc_book3s_init(void)
{
        int r;

        r = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
        if (r)
                return r;
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
        r = kvmppc_book3s_init_pr();
#endif
        return r;
}

static void kvmppc_book3s_exit(void)
{
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
        kvmppc_book3s_exit_pr();
#endif
        kvm_exit();
}

module_init(kvmppc_book3s_init);
module_exit(kvmppc_book3s_exit);

/* On 32-bit this is our one and only kernel module */
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");
#endif