/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * Derived from book3s_rmhandlers.S and other files, which are:
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/ptrace.h>
#include <asm/hvcall.h>
#include <asm/asm-offsets.h>
#include <asm/exception-64s.h>
#include <asm/kvm_book3s_asm.h>
#include <asm/mmu-hash64.h>

#ifdef __LITTLE_ENDIAN__
#error Need to fix lppaca and SLB shadow accesses in little endian mode
#endif

/* Values in HSTATE_NAPPING(r13) */
#define NAPPING_CEDE	1
#define NAPPING_NOVCPU	2
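/*
 * HSTATE_NAPPING(r13) records why this hardware thread went to nap:
 * NAPPING_CEDE means the vcpu executed H_CEDE, NAPPING_NOVCPU means
 * the thread napped because it had no vcpu to run.  The wakeup path
 * in kvm_start_guest uses this value to pick the right resume route.
 */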
/*
 * Call kvmppc_hv_entry in real mode.
 * Must be called with interrupts hard-disabled.
 *
 * LR = return address to continue at after eventually re-enabling MMU
 */
_GLOBAL(kvmppc_hv_entry_trampoline)
	std	r0, PPC_LR_STKOFF(r1)
	LOAD_REG_ADDR(r5, kvmppc_call_hv_entry)
	mtmsrd	r0,1		/* clear RI in MSR */
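	/*
	 * With MSR_RI clear, an interrupt taken here would be
	 * unrecoverable: we are about to load SRR0/SRR1 for the rfid
	 * into real mode, so flag that they can no longer be used to
	 * recover from an interrupt.
	 */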
	ld	r4, HSTATE_KVM_VCPU(r13)

	/* Back from guest - restore host state and return to caller */

	/* Restore host DABR and DABRX */
	ld	r5,HSTATE_DABR(r13)
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)

	/* Reload the host's PMU registers */
	ld	r3, PACALPPACAPTR(r13)	/* is the host using the PMU? */
	lbz	r4, LPPACA_PMCINUSE(r3)
	beq	23f			/* skip if not */
	lwz	r3, HSTATE_PMC(r13)
	lwz	r4, HSTATE_PMC + 4(r13)
	lwz	r5, HSTATE_PMC + 8(r13)
	lwz	r6, HSTATE_PMC + 12(r13)
	lwz	r8, HSTATE_PMC + 16(r13)
	lwz	r9, HSTATE_PMC + 20(r13)
	lwz	r10, HSTATE_PMC + 24(r13)
	lwz	r11, HSTATE_PMC + 28(r13)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	ld	r3, HSTATE_MMCR(r13)
	ld	r4, HSTATE_MMCR + 8(r13)
	ld	r5, HSTATE_MMCR + 16(r13)

	/*
	 * Reload DEC.  HDEC interrupts were disabled when
	 * we reloaded the host's LPCR value.
	 */
	ld	r3, HSTATE_DECEXP(r13)

	/*
	 * For external and machine check interrupts, we need
	 * to call the Linux handler to process the interrupt.
	 * We do that by jumping to absolute address 0x500 for
	 * external interrupts, or the machine_check_fwnmi label
	 * for machine checks (since firmware might have patched
	 * the vector area at 0x200).  The [h]rfid at the end of the
	 * handler will return to the book3s_hv_interrupts.S code.
	 * For other interrupts we do the rfid to get back
	 * to the book3s_hv_interrupts.S code here.
	 */
	ld	r8, 112+PPC_LR_STKOFF(r1)
	ld	r7, HSTATE_HOST_MSR(r13)
	cmpwi	cr1, r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* RFI into the highmem handler, or branch to interrupt handler */
	mtmsrd	r6, 1			/* Clear RI in MSR */
	beqa	0x500			/* external interrupt (PPC970) */
	beq	cr1, 13f		/* machine check */

	/* On POWER7, we have external interrupts set to use HSRR0/1 */
11:	mtspr	SPRN_HSRR0, r8

13:	b	machine_check_fwnmi

kvmppc_primary_no_guest:
	/* We handle this much like a ceded vcpu */
	/* set our bit in napping_threads */
	ld	r5, HSTATE_KVM_VCORE(r13)
	lbz	r7, HSTATE_PTID(r13)
	addi	r6, r5, VCORE_NAPPING_THREADS
	/* order napping_threads update vs testing entry_exit_count */
	lwz	r7, VCORE_ENTRY_EXIT(r5)
	bge	kvm_novcpu_exit	/* another thread already exiting */
	li	r3, NAPPING_NOVCPU
	stb	r3, HSTATE_NAPPING(r13)
	stb	r3, HSTATE_HWTHREAD_REQ(r13)

kvm_novcpu_wakeup:
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r5, HSTATE_KVM_VCORE(r13)
	stb	r0, HSTATE_NAPPING(r13)
	stb	r0, HSTATE_HWTHREAD_REQ(r13)

	/* check the wake reason */
	bl	kvmppc_check_wake_reason

	/* see if any other thread is already exiting */
	lwz	r0, VCORE_ENTRY_EXIT(r5)

	/* clear our bit in napping_threads */
	lbz	r7, HSTATE_PTID(r13)
	addi	r6, r5, VCORE_NAPPING_THREADS

	/* See if the wake reason means we need to exit */

	/* Got an IPI but other vcpus aren't yet exiting, must be a latecomer */
	ld	r4, HSTATE_KVM_VCPU(r13)

/*
 * We come in here when wakened from nap mode.
 * Relocation is off and most register values are lost.
 * r13 points to the PACA.
 */
	.globl	kvm_start_guest
kvm_start_guest:
	li	r0,KVM_HWTHREAD_IN_KVM
	stb	r0,HSTATE_HWTHREAD_STATE(r13)

	/* NV GPR values from power7_idle() will no longer be valid */
	stb	r0,PACA_NAPSTATELOST(r13)

	/* were we napping due to cede? */
	lbz	r0,HSTATE_NAPPING(r13)
	cmpwi	r0,NAPPING_CEDE
	cmpwi	r0,NAPPING_NOVCPU
	beq	kvm_novcpu_wakeup

	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,STACK_FRAME_OVERHEAD
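	/*
	 * We arrive here with no kernel stack of our own, so run on
	 * the emergency stack that the PACA always provides.
	 */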
	/*
	 * We weren't napping due to cede, so this must be a secondary
	 * thread being woken up to run a guest, or being woken up due
	 * to a stray IPI.  (Or due to some machine check or hypervisor
	 * maintenance interrupt while the core is in KVM.)
	 */

	/* Check the wake reason in SRR1 to see why we got here */
	bl	kvmppc_check_wake_reason

	/* get vcpu pointer, NULL if we have no vcpu to run */
	ld	r4,HSTATE_KVM_VCPU(r13)

	/* if we have no vcpu to run, go back to sleep */

	/* Set HSTATE_DSCR(r13) to something sensible */
	LOAD_REG_ADDR(r6, dscr_default)
	std	r6, HSTATE_DSCR(r13)

	/* Back from the guest, go back to nap */
	/* Clear our vcpu pointer so we don't come back in early */
	std	r0, HSTATE_KVM_VCPU(r13)

	/* increment the nap count and then go to nap mode */
	ld	r4, HSTATE_KVM_VCORE(r13)
	addi	r4, r4, VCORE_NAP_COUNT
	lwsync				/* make previous updates visible */

	li	r0, KVM_HWTHREAD_IN_NAP
	stb	r0, HSTATE_HWTHREAD_STATE(r13)
	rlwimi	r4, r3, 0, LPCR_PECE0 | LPCR_PECE1
	std	r0, HSTATE_SCRATCH0(r13)
	ld	r0, HSTATE_SCRATCH0(r13)

/******************************************************************************
 *                                                                            *
 *                               Entry code                                   *
 *                                                                            *
 *****************************************************************************/

	.global	kvmppc_hv_entry
kvmppc_hv_entry:

	/*
	 * R4 = vcpu pointer (or NULL)
	 * all other volatile GPRS = free
	 */
	std	r0, PPC_LR_STKOFF(r1)

	/* Save R1 in the PACA */
	std	r1, HSTATE_HOST_R1(r13)

	li	r6, KVM_GUEST_MODE_HOST_HV
	stb	r6, HSTATE_IN_GUEST(r13)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)

	/*
	 * POWER7 host -> guest partition switch code.
	 * We don't have to lock against concurrent tlbies,
	 * but we do have to coordinate across hardware threads.
	 */
	/* Increment entry count iff exit count is zero. */
	ld	r5,HSTATE_KVM_VCORE(r13)
	addi	r9,r5,VCORE_ENTRY_EXIT
	cmpwi	r3,0x100		/* any threads starting to exit? */
	bge	secondary_too_late	/* if so we're too late to the party */

	/* Primary thread switches to guest partition. */
	ld	r9,VCORE_KVM(r5)	/* pointer to struct kvm */
	lbz	r6,HSTATE_PTID(r13)
	li	r0,LPID_RSVD		/* switch to reserved LPID */
	mtspr	SPRN_SDR1,r6		/* switch to partition page table */

	/* See if we need to flush the TLB */
	lhz	r6,PACAPACAINDEX(r13)	/* test_bit(cpu, need_tlb_flush) */
	clrldi	r7,r6,64-6		/* extract bit number (6 bits) */
	srdi	r6,r6,6			/* doubleword number */
	sldi	r6,r6,3			/* address offset */
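	/*
	 * The three instructions above open-code test_bit(cpu, bitmap):
	 * roughly, in C,
	 *	bit  = cpu & 63;		(clrldi)
	 *	off  = (cpu >> 6) * 8;		(srdi; sldi)
	 * The doubleword at that offset is then loaded and the bit
	 * tested and cleared below.
	 */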
	addi	r6,r6,KVM_NEED_FLUSH	/* dword in kvm->arch.need_tlb_flush */

23:	ldarx	r7,0,r6			/* if set, clear the bit */

	/* Flush the TLB of any entries for this LPID */
	/* use arch 2.07S as a proxy for POWER8 */
	li	r6,512			/* POWER8 has 512 sets */
	li	r6,128			/* POWER7 has 128 sets */
ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_207S)
	li	r7,0x800		/* IS field = 0b10 */
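	/*
	 * IS = 0b10 in the RB value asks tlbiel to invalidate the whole
	 * congruence class (set) it selects; iterating r6 times while
	 * stepping the set index therefore flushes the entire TLB for
	 * this LPID, 128 sets on POWER7 or 512 on POWER8.
	 */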
	/* Add timebase offset onto timebase */
22:	ld	r8,VCORE_TB_OFFSET(r5)
	mftb	r6		/* current host timebase */
	mtspr	SPRN_TBU40,r8	/* update upper 40 bits */
	mftb	r7		/* check if lower 24 bits overflowed */
	addis	r8,r8,0x100	/* if so, increment upper 40 bits */
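	/*
	 * TBU40 can only set the upper 40 bits of the timebase, so the
	 * write can race with a carry out of the low 24 bits.  Reading
	 * the timebase again detects the wrap; the addis of 0x100 adds
	 * 2^24, i.e. 1 in the lowest bit of the 40-bit upper field.
	 */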
	/* Load guest PCR value to select appropriate compat mode */
37:	ld	r7, VCORE_PCR(r5)

	/* DPDES is shared between threads */
	ld	r8, VCORE_DPDES(r5)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	stb	r0,VCORE_IN_GUEST(r5)	/* signal secondaries to continue */

	/* Secondary threads wait for primary to have done partition switch */
20:	lbz	r0,VCORE_IN_GUEST(r5)

	/* Set LPCR and RMOR. */
10:	ld	r8,VCORE_LPCR(r5)

	/* Check if HDEC expires soon */
	cmpwi	r3,512		/* 1 microsecond */
	li	r12,BOOK3S_INTERRUPT_HV_DECREMENTER

	/*
	 * PPC970 host -> guest partition switch code.
	 * We have to lock against concurrent tlbies,
	 * using native_tlbie_lock to lock against host tlbies
	 * and kvm->arch.tlbie_lock to lock against guest tlbies.
	 * We also have to invalidate the TLB since its
	 * entries aren't tagged with the LPID.
	 */
30:	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r9,VCORE_KVM(r5)	/* pointer to struct kvm */

	/* first take native_tlbie_lock */
	.tc	native_tlbie_lock[TC],native_tlbie_lock
	ld	r3,toc_tlbie_lock@toc(2)
#ifdef __BIG_ENDIAN__
	lwz	r8,PACA_LOCK_TOKEN(r13)
#else
	lwz	r8,PACAPACAINDEX(r13)
#endif
	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r7,VCORE_LPCR(r5)	/* use vcore->lpcr to store HID4 */
	rotldi	r0,r0,HID4_LPID5_SH	/* all lpid bits in HID4 = 1 */
	mtspr	SPRN_HID4,r0		/* switch to reserved LPID */
	stw	r0,0(r3)		/* drop native_tlbie_lock */

	/* invalidate the whole TLB */

	/* Take the guest's tlbie_lock */
	addi	r3,r9,KVM_TLBIE_LOCK
	mtspr	SPRN_SDR1,r6		/* switch to partition page table */

	/* Set up HID4 with the guest's LPID etc. */

	/* drop the guest's tlbie_lock */

	/* Check if HDEC expires soon */
	li	r12,BOOK3S_INTERRUPT_HV_DECREMENTER

	/* Enable HDEC interrupts */
	rldimi	r0,r3, HID0_HDICE_SH, 64-HID0_HDICE_SH-1

	/* Do we have a guest vcpu to run? */
	beq	kvmppc_primary_no_guest

	/* Load up guest SLB entries */
	lwz	r5,VCPU_SLB_MAX(r4)
1:	ld	r8,VCPU_SLB_E(r6)
	addi	r6,r6,VCPU_SLB_SIZE

	/* Increment yield count if they have a VPA */
	lwz	r5, LPPACA_YIELDCOUNT(r3)
	stw	r5, LPPACA_YIELDCOUNT(r3)
	stb	r6, VCPU_VPA_DIRTY(r4)

	/* Save purr/spurr */
	std	r5,HSTATE_PURR(r13)
	std	r6,HSTATE_SPURR(r13)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* Set partition DABR */
	/* Do this before re-enabling PMU to avoid P7 DABR corruption bug */
BEGIN_FTR_SECTION_NESTED(89)
END_FTR_SECTION_NESTED(CPU_FTR_ARCH_206, CPU_FTR_ARCH_206, 89)
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)

	/* Load guest PMU registers */
	/* R4 is live here (vcpu pointer) */
	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
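	/*
	 * MMCR0_FC freezes all counters, and the freeze stays in force
	 * while the individual PMCs and MMCRs are swapped below, so no
	 * events are counted against a half-switched PMU.
	 */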
	lwz	r3, VCPU_PMC(r4)	/* always load up guest PMU registers */
	lwz	r5, VCPU_PMC + 4(r4)	/* to prevent information leak */
	lwz	r6, VCPU_PMC + 8(r4)
	lwz	r7, VCPU_PMC + 12(r4)
	lwz	r8, VCPU_PMC + 16(r4)
	lwz	r9, VCPU_PMC + 20(r4)
	lwz	r10, VCPU_PMC + 24(r4)
	lwz	r11, VCPU_PMC + 28(r4)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	ld	r5, VCPU_MMCR + 8(r4)
	ld	r6, VCPU_MMCR + 16(r4)
	ld	r5, VCPU_MMCR + 24(r4)
	lwz	r7, VCPU_PMC + 24(r4)
	lwz	r8, VCPU_PMC + 28(r4)
	ld	r9, VCPU_MMCR + 32(r4)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

	/* Load up FP, VMX and VSX registers */

	ld	r14, VCPU_GPR(R14)(r4)
	ld	r15, VCPU_GPR(R15)(r4)
	ld	r16, VCPU_GPR(R16)(r4)
	ld	r17, VCPU_GPR(R17)(r4)
	ld	r18, VCPU_GPR(R18)(r4)
	ld	r19, VCPU_GPR(R19)(r4)
	ld	r20, VCPU_GPR(R20)(r4)
	ld	r21, VCPU_GPR(R21)(r4)
	ld	r22, VCPU_GPR(R22)(r4)
	ld	r23, VCPU_GPR(R23)(r4)
	ld	r24, VCPU_GPR(R24)(r4)
	ld	r25, VCPU_GPR(R25)(r4)
	ld	r26, VCPU_GPR(R26)(r4)
	ld	r27, VCPU_GPR(R27)(r4)
	ld	r28, VCPU_GPR(R28)(r4)
	ld	r29, VCPU_GPR(R29)(r4)
	ld	r30, VCPU_GPR(R30)(r4)
	ld	r31, VCPU_GPR(R31)(r4)

	/* Switch DSCR to guest value */
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* Skip next section on POWER7 or PPC970 */
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
	/* Turn on TM so we can access TFHAR/TFIAR/TEXASR */
	rldimi	r8, r0, MSR_TM_LG, 63-MSR_TM_LG

	/* Load up POWER8-specific registers */
	lwz	r6, VCPU_PSPB(r4)
	ld	r6, VCPU_DAWRX(r4)
	ld	r7, VCPU_CIABR(r4)
	ld	r5, VCPU_TFHAR(r4)
	ld	r6, VCPU_TFIAR(r4)
	ld	r7, VCPU_TEXASR(r4)
	ld	r8, VCPU_EBBHR(r4)
	mtspr	SPRN_TEXASR, r7
	ld	r5, VCPU_EBBRR(r4)
	ld	r6, VCPU_BESCR(r4)
	ld	r7, VCPU_CSIGR(r4)
	ld	r5, VCPU_TCSCR(r4)
	lwz	r7, VCPU_GUEST_PID(r4)

	/*
	 * Set the decrementer to the guest decrementer.
	 */
	ld	r8,VCPU_DEC_EXPIRES(r4)

	ld	r5, VCPU_SPRG0(r4)
	ld	r6, VCPU_SPRG1(r4)
	ld	r7, VCPU_SPRG2(r4)
	ld	r8, VCPU_SPRG3(r4)

	/* Load up DAR and DSISR */
	lwz	r6, VCPU_DSISR(r4)

	/* Restore AMR and UAMOR, set AMOR to all 1s */
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* Restore state of CTRL run bit; assume 1 on entry */

kvmppc_cede_reentry:		/* r4 = vcpu, r13 = paca */
deliver_guest_interrupt:
	/* r11 = vcpu->arch.msr & ~MSR_HV */
	rldicl	r11, r11, 63 - MSR_HV_LG, 1
	rotldi	r11, r11, 1 + MSR_HV_LG
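	/*
	 * The rldicl/rotldi pair above clears just the MSR_HV bit:
	 * rotate so HV sits at the top, clear it with the mask, then
	 * rotate it back to its original position.
	 */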
	/* Check if we can deliver an external or decrementer interrupt now */
	ld	r0, VCPU_PENDING_EXC(r4)
	rldicl	r0, r0, 64 - BOOK3S_IRQPRIO_EXTERNAL_LEVEL, 63
	andi.	r8, r11, MSR_EE
	/* Insert EXTERNAL_LEVEL bit into LPCR at the MER bit position */
	rldimi	r8, r0, LPCR_MER_SH, 63 - LPCR_MER_SH
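	/*
	 * LPCR_MER (Mediated External Request) makes the guest take an
	 * external interrupt as soon as it sets MSR_EE, so the pending
	 * level-triggered external interrupt gets delivered even though
	 * the guest currently has interrupts disabled.
	 */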
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
	li	r0, BOOK3S_INTERRUPT_EXTERNAL
	li	r0, BOOK3S_INTERRUPT_DECREMENTER

12:	mtspr	SPRN_SRR0, r10
	li	r11,(MSR_ME << 1) | 1	/* synthesize MSR_SF | MSR_ME */
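	/*
	 * li can only load a 16-bit immediate and MSR_SF is bit 63, so
	 * the value is built shifted right by one with the low bit
	 * standing in for MSR_SF; a rotate left by 63 (in code elided
	 * here) then yields MSR_SF | MSR_ME.
	 */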
	/*
	 * R10: value for HSRR0
	 * R11: value for HSRR1
	 */
	stb	r0,VCPU_CEDED(r4)	/* cancel cede */

	/* Activate guest mode, so faults get handled by KVM */
	li	r9, KVM_GUEST_MODE_GUEST_HV
	stb	r9, HSTATE_IN_GUEST(r13)
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	ld	r1, VCPU_GPR(R1)(r4)
	ld	r2, VCPU_GPR(R2)(r4)
	ld	r3, VCPU_GPR(R3)(r4)
	ld	r5, VCPU_GPR(R5)(r4)
	ld	r6, VCPU_GPR(R6)(r4)
	ld	r7, VCPU_GPR(R7)(r4)
	ld	r8, VCPU_GPR(R8)(r4)
	ld	r9, VCPU_GPR(R9)(r4)
	ld	r10, VCPU_GPR(R10)(r4)
	ld	r11, VCPU_GPR(R11)(r4)
	ld	r12, VCPU_GPR(R12)(r4)
	ld	r13, VCPU_GPR(R13)(r4)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
	ld	r0, VCPU_GPR(R0)(r4)
	ld	r4, VCPU_GPR(R4)(r4)

/******************************************************************************
 *                                                                            *
 *                               Exit code                                    *
 *                                                                            *
 *****************************************************************************/

/*
 * We come here from the first-level interrupt handlers.
 */
	.globl	kvmppc_interrupt_hv
kvmppc_interrupt_hv:
	/*
	 * R12 = interrupt vector
	 * guest CR, R12 saved in shadow VCPU SCRATCH1/0
	 * guest R13 saved in SPRN_SCRATCH0
	 */
	/* abuse host_r2 as third scratch area; we get r2 from PACATOC(r13) */
	std	r9, HSTATE_HOST_R2(r13)
	lbz	r9, HSTATE_IN_GUEST(r13)
	cmpwi	r9, KVM_GUEST_MODE_HOST_HV
	beq	kvmppc_bad_host_intr
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
	cmpwi	r9, KVM_GUEST_MODE_GUEST
	ld	r9, HSTATE_HOST_R2(r13)
	beq	kvmppc_interrupt_pr
#endif

	/* We're now back in the host but in guest MMU context */
	li	r9, KVM_GUEST_MODE_HOST_HV
	stb	r9, HSTATE_IN_GUEST(r13)

	ld	r9, HSTATE_KVM_VCPU(r13)
	std	r0, VCPU_GPR(R0)(r9)
	std	r1, VCPU_GPR(R1)(r9)
	std	r2, VCPU_GPR(R2)(r9)
	std	r3, VCPU_GPR(R3)(r9)
	std	r4, VCPU_GPR(R4)(r9)
	std	r5, VCPU_GPR(R5)(r9)
	std	r6, VCPU_GPR(R6)(r9)
	std	r7, VCPU_GPR(R7)(r9)
	std	r8, VCPU_GPR(R8)(r9)
	ld	r0, HSTATE_HOST_R2(r13)
	std	r0, VCPU_GPR(R9)(r9)
	std	r10, VCPU_GPR(R10)(r9)
	std	r11, VCPU_GPR(R11)(r9)
	ld	r3, HSTATE_SCRATCH0(r13)
	lwz	r4, HSTATE_SCRATCH1(r13)
	std	r3, VCPU_GPR(R12)(r9)
	ld	r3, HSTATE_CFAR(r13)
	std	r3, VCPU_CFAR(r9)
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
	ld	r4, HSTATE_PPR(r13)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	/* Restore R1/R2 so we can handle faults */
	ld	r1, HSTATE_HOST_R1(r13)
	std	r10, VCPU_SRR0(r9)
	std	r11, VCPU_SRR1(r9)
	andi.	r0, r12, 2		/* need to read HSRR0/1? */
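	/*
	 * The first-level handlers pass the vector with bit 1 set when
	 * the interrupt used HSRR0/1 rather than SRR0/1, so testing
	 * that bit tells us which pair holds the guest PC and MSR.
	 */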
	mfspr	r10, SPRN_HSRR0
	mfspr	r11, SPRN_HSRR1
1:	std	r10, VCPU_PC(r9)
	std	r11, VCPU_MSR(r9)

	std	r3, VCPU_GPR(R13)(r9)
	stw	r12,VCPU_TRAP(r9)

	/* Save HEIR (HV emulation assist reg) in last_inst
	   if this is an HEI (HV emulation interrupt, e40) */
	li	r3,KVM_INST_FETCH_FAILED
	cmpwi	r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
11:	stw	r3,VCPU_LAST_INST(r9)

	/* these are volatile across C function calls */

	/* If this is a page table miss then see if it's theirs or ours */
	cmpwi	r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
	cmpwi	r12, BOOK3S_INTERRUPT_H_INST_STORAGE
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* See if this is a leftover HDEC interrupt */
	cmpwi	r12,BOOK3S_INTERRUPT_HV_DECREMENTER

	/* See if this is an hcall we can handle in real mode */
	cmpwi	r12,BOOK3S_INTERRUPT_SYSCALL
	beq	hcall_try_real_mode

	/* Only handle external interrupts here on arch 206 and later */
	b	ext_interrupt_to_host
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)

	/* External interrupt ? */
	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
	bne+	ext_interrupt_to_host

	/* External interrupt, first check for host_ipi. If this is
	 * set, we know the host wants us out so let's do it now
	 */
	bgt	ext_interrupt_to_host

	/* Check if any CPU is heading out to the host, if so head out too */
	ld	r5, HSTATE_KVM_VCORE(r13)
	lwz	r0, VCORE_ENTRY_EXIT(r5)
	bge	ext_interrupt_to_host

	/* Return to guest after delivering any pending interrupt */
	b	deliver_guest_interrupt

ext_interrupt_to_host:

guest_exit_cont:		/* r9 = vcpu, r12 = trap, r13 = paca */
	/* Save more register state */
	std	r6, VCPU_DAR(r9)
	stw	r7, VCPU_DSISR(r9)

	/* don't overwrite fault_dar/fault_dsisr if HDSI */
	cmpwi	r12,BOOK3S_INTERRUPT_H_DATA_STORAGE
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
	std	r6, VCPU_FAULT_DAR(r9)
	stw	r7, VCPU_FAULT_DSISR(r9)

	/* See if it is a machine check */
	cmpwi	r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	beq	machine_check_realmode

	/* Save guest CTRL register, set runlatch to 1 */
6:	mfspr	r6,SPRN_CTRLF
	stw	r6,VCPU_CTRL(r9)

	/* Read the guest SLB and save it away */
	lwz	r0,VCPU_SLB_NR(r9)	/* number of entries in SLB */
	andis.	r0,r8,SLB_ESID_V@h
	add	r8,r8,r6		/* put index in */
	std	r8,VCPU_SLB_E(r7)
	std	r3,VCPU_SLB_V(r7)
	addi	r7,r7,VCPU_SLB_SIZE
	stw	r5,VCPU_SLB_MAX(r9)
	/*
	 * Save the guest PURR/SPURR
	 */
	ld	r8,VCPU_SPURR(r9)
	std	r5,VCPU_PURR(r9)
	std	r6,VCPU_SPURR(r9)

	/*
	 * Restore host PURR/SPURR and add guest times
	 * so that the time in the guest gets accounted.
	 */
	ld	r3,HSTATE_PURR(r13)
	ld	r4,HSTATE_SPURR(r13)
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_201)
	std	r5,VCPU_DEC_EXPIRES(r9)
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
	/* Turn on TM so we can access TFHAR/TFIAR/TEXASR */
	rldimi	r8, r0, MSR_TM_LG, 63-MSR_TM_LG

	/* Save POWER8-specific registers */
	std	r5, VCPU_IAMR(r9)
	stw	r6, VCPU_PSPB(r9)
	std	r7, VCPU_FSCR(r9)
	std	r6, VCPU_VTB(r9)
	std	r7, VCPU_TAR(r9)
	mfspr	r5, SPRN_TFHAR
	mfspr	r6, SPRN_TFIAR
	mfspr	r7, SPRN_TEXASR
	mfspr	r8, SPRN_EBBHR
	std	r5, VCPU_TFHAR(r9)
	std	r6, VCPU_TFIAR(r9)
	std	r7, VCPU_TEXASR(r9)
	std	r8, VCPU_EBBHR(r9)
	mfspr	r5, SPRN_EBBRR
	mfspr	r6, SPRN_BESCR
	mfspr	r7, SPRN_CSIGR
	std	r5, VCPU_EBBRR(r9)
	std	r6, VCPU_BESCR(r9)
	std	r7, VCPU_CSIGR(r9)
	std	r8, VCPU_TACR(r9)
	mfspr	r5, SPRN_TCSCR
	std	r5, VCPU_TCSCR(r9)
	std	r6, VCPU_ACOP(r9)
	stw	r7, VCPU_GUEST_PID(r9)
	std	r8, VCPU_WORT(r9)

	/* Save and reset AMR and UAMOR before turning on the MMU */
	std	r6,VCPU_UAMOR(r9)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* Switch DSCR back to host value */
	ld	r7, HSTATE_DSCR(r13)
	std	r8, VCPU_DSCR(r9)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* Save non-volatile GPRs */
	std	r14, VCPU_GPR(R14)(r9)
	std	r15, VCPU_GPR(R15)(r9)
	std	r16, VCPU_GPR(R16)(r9)
	std	r17, VCPU_GPR(R17)(r9)
	std	r18, VCPU_GPR(R18)(r9)
	std	r19, VCPU_GPR(R19)(r9)
	std	r20, VCPU_GPR(R20)(r9)
	std	r21, VCPU_GPR(R21)(r9)
	std	r22, VCPU_GPR(R22)(r9)
	std	r23, VCPU_GPR(R23)(r9)
	std	r24, VCPU_GPR(R24)(r9)
	std	r25, VCPU_GPR(R25)(r9)
	std	r26, VCPU_GPR(R26)(r9)
	std	r27, VCPU_GPR(R27)(r9)
	std	r28, VCPU_GPR(R28)(r9)
	std	r29, VCPU_GPR(R29)(r9)
	std	r30, VCPU_GPR(R30)(r9)
	std	r31, VCPU_GPR(R31)(r9)

	mfspr	r3, SPRN_SPRG0
	mfspr	r4, SPRN_SPRG1
	mfspr	r5, SPRN_SPRG2
	mfspr	r6, SPRN_SPRG3
	std	r3, VCPU_SPRG0(r9)
	std	r4, VCPU_SPRG1(r9)
	std	r5, VCPU_SPRG2(r9)
	std	r6, VCPU_SPRG3(r9)

	/* Increment yield count if they have a VPA */
	ld	r8, VCPU_VPA(r9)	/* do they have a VPA? */
	lwz	r3, LPPACA_YIELDCOUNT(r8)
	stw	r3, LPPACA_YIELDCOUNT(r8)
	stb	r3, VCPU_VPA_DIRTY(r9)

	/* Save PMU registers if requested */
	/* r8 and cr0.eq are live here */
	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mfspr	r4, SPRN_MMCR0		/* save MMCR0 */
	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
	mfspr	r6, SPRN_MMCRA
	/* On P7, clear MMCRA in order to disable SDAR updates */
	mtspr	SPRN_MMCRA, r7
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
	beq	21f			/* if no VPA, save PMU stuff anyway */
	lbz	r7, LPPACA_PMCINUSE(r8)
	cmpwi	r7, 0			/* did they ask for PMU stuff to be saved? */
	std	r3, VCPU_MMCR(r9)	/* if not, set saved MMCR0 to FC */
21:	mfspr	r5, SPRN_MMCR1
	std	r4, VCPU_MMCR(r9)
	std	r5, VCPU_MMCR + 8(r9)
	std	r6, VCPU_MMCR + 16(r9)
	std	r7, VCPU_SIAR(r9)
	std	r8, VCPU_SDAR(r9)
	mfspr	r10, SPRN_PMC7
	mfspr	r11, SPRN_PMC8
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	stw	r3, VCPU_PMC(r9)
	stw	r4, VCPU_PMC + 4(r9)
	stw	r5, VCPU_PMC + 8(r9)
	stw	r6, VCPU_PMC + 12(r9)
	stw	r7, VCPU_PMC + 16(r9)
	stw	r8, VCPU_PMC + 20(r9)
	stw	r10, VCPU_PMC + 24(r9)
	stw	r11, VCPU_PMC + 28(r9)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	mfspr	r4, SPRN_MMCR2
	mfspr	r6, SPRN_SPMC1
	mfspr	r7, SPRN_SPMC2
	mfspr	r8, SPRN_MMCRS
	std	r4, VCPU_MMCR + 24(r9)
	std	r5, VCPU_SIER(r9)
	stw	r6, VCPU_PMC + 24(r9)
	stw	r7, VCPU_PMC + 28(r9)
	std	r8, VCPU_MMCR + 32(r9)
	mtspr	SPRN_MMCRS, r4
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
hdec_soon:			/* r12 = trap, r13 = paca */
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	/*
	 * POWER7 guest -> host partition switch code.
	 * We don't have to lock against tlbies but we do
	 * have to coordinate the hardware threads.
	 */
	/* Increment the threads-exiting-guest count in the 0xff00
	   bits of vcore->entry_exit_count */
	ld	r5,HSTATE_KVM_VCORE(r13)
	addi	r6,r5,VCORE_ENTRY_EXIT

	/*
	 * At this point we have an interrupt that we have to pass
	 * up to the kernel or qemu; we can't handle it in real mode.
	 * Thus we have to do a partition switch, so we have to
	 * collect the other threads, if we are the first thread
	 * to take an interrupt.  To do this, we set the HDEC to 0,
	 * which causes an HDEC interrupt in all threads within 2ns
	 * because the HDEC register is shared between all 4 threads.
	 * However, we don't need to bother if this is an HDEC
	 * interrupt, since the other threads will already be on their
	 * way here in that case.
	 */
	cmpwi	r3,0x100	/* Are we the first here? */
	cmpwi	r12,BOOK3S_INTERRUPT_HV_DECREMENTER

	/*
	 * Send an IPI to any napping threads, since an HDEC interrupt
	 * doesn't wake CPUs up from nap.
	 */
	lwz	r3,VCORE_NAPPING_THREADS(r5)
	lbz	r4,HSTATE_PTID(r13)
	andc.	r3,r3,r0		/* no sense IPI'ing ourselves */
	mulli	r4,r4,PACA_SIZE		/* get paca for thread 0 */
	ld	r8,HSTATE_XICS_PHYS(r6)	/* get thread's XICS reg addr */
	stbcix	r0,r7,r8		/* trigger the IPI */
	addi	r6,r6,PACA_SIZE

	/* Secondary threads wait for primary to do partition switch */
43:	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r4,VCORE_KVM(r5)	/* pointer to struct kvm */
	lbz	r3,HSTATE_PTID(r13)
13:	lbz	r3,VCORE_IN_GUEST(r5)

	/* Primary thread waits for all the secondaries to exit guest */
15:	lwz	r3,VCORE_ENTRY_EXIT(r5)

	/* Primary thread switches back to host partition */
	ld	r6,KVM_HOST_SDR1(r4)
	lwz	r7,KVM_HOST_LPID(r4)
	li	r8,LPID_RSVD		/* switch to reserved LPID */
	mtspr	SPRN_SDR1,r6		/* switch to partition page table */

	/* DPDES is shared between threads */
	mfspr	r7, SPRN_DPDES
	std	r7, VCORE_DPDES(r5)
	/* clear DPDES so we don't get guest doorbells in the host */
	mtspr	SPRN_DPDES, r8
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

	/* Subtract timebase offset from timebase */
	ld	r8,VCORE_TB_OFFSET(r5)
	mftb	r6			/* current host timebase */
	mtspr	SPRN_TBU40,r8		/* update upper 40 bits */
	mftb	r7			/* check if lower 24 bits overflowed */
	addis	r8,r8,0x100		/* if so, increment upper 40 bits */

17:	ld	r0, VCORE_PCR(r5)

	/* Signal secondary CPUs to continue */
	stb	r0,VCORE_IN_GUEST(r5)
	lis	r8,0x7fff		/* MAX_INT@h */

16:	ld	r8,KVM_HOST_LPCR(r4)

	/*
	 * PPC970 guest -> host partition switch code.
	 * We have to lock against concurrent tlbies, and
	 * we have to flush the whole TLB.
	 */
32:	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r4,VCORE_KVM(r5)	/* pointer to struct kvm */

	/* Take the guest's tlbie_lock */
#ifdef __BIG_ENDIAN__
	lwz	r8,PACA_LOCK_TOKEN(r13)
#else
	lwz	r8,PACAPACAINDEX(r13)
#endif
	addi	r3,r4,KVM_TLBIE_LOCK
	ld	r7,KVM_HOST_LPCR(r4)	/* use kvm->arch.host_lpcr for HID4 */
	rotldi	r0,r0,HID4_LPID5_SH	/* all lpid bits in HID4 = 1 */
	mtspr	SPRN_HID4,r0		/* switch to reserved LPID */
	stw	r0,0(r3)		/* drop guest tlbie_lock */

	/* invalidate the whole TLB */

	/* take native_tlbie_lock */
	ld	r3,toc_tlbie_lock@toc(2)
	ld	r6,KVM_HOST_SDR1(r4)
	mtspr	SPRN_SDR1,r6		/* switch to host page table */

	/* Set up host HID4 value */
	stw	r0,0(r3)		/* drop native_tlbie_lock */
	lis	r8,0x7fff		/* MAX_INT@h */

	/* Disable HDEC interrupts */
	rldimi	r0,r3, HID0_HDICE_SH, 64-HID0_HDICE_SH-1

	/* load host SLB entries */
33:	ld	r8,PACA_SLBSHADOWPTR(r13)
	.rept	SLB_NUM_BOLTED
	ld	r5,SLBSHADOW_SAVEAREA(r8)
	ld	r6,SLBSHADOW_SAVEAREA+8(r8)
	andis.	r7,r5,SLB_ESID_V@h
	.endr

	/* Unset guest mode */
	li	r0, KVM_GUEST_MODE_NONE
	stb	r0, HSTATE_IN_GUEST(r13)
	ld	r0, 112+PPC_LR_STKOFF(r1)

/*
 * Check whether an HDSI is an HPTE not found fault or something else.
 * If it is an HPTE not found fault that is due to the guest accessing
 * a page that they have mapped but which we have paged out, then
 * we continue on with the guest exit path.  In all other cases,
 * reflect the HDSI to the guest as a DSI.
 */
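/*
 * In outline, the HDSI handling below is roughly this C sketch
 * (illustrative only; the exact argument layout of
 * kvmppc_hpte_hv_fault is assumed):
 *
 *	if (!(hdsisr & (DSISR_NOHPTE | DSISR_PROTFAULT)))
 *		goto reflect_as_dsi;
 *	slb_v = (msr & MSR_DR) ? slbfee(hdar) : kvm->arch.vrma_slb_v;
 *	r = kvmppc_hpte_hv_fault(vcpu, hdar, slb_v, hdsisr, 1);
 *	if (r == 0)  retry the instruction;
 *	if (r == -1) exit to the kernel;
 *	if (r == -2) fetch the instruction word for MMIO emulation;
 *	else         reflect_as_dsi;
 */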
	mfspr	r6, SPRN_HDSISR
	/* HPTE not found fault or protection fault? */
	andis.	r0, r6, (DSISR_NOHPTE | DSISR_PROTFAULT)@h
	beq	1f			/* if not, send it to the guest */
	andi.	r0, r11, MSR_DR		/* data relocation enabled? */
	PPC_SLBFEE_DOT(R5, R0)		/* if so, look up SLB */
	bne	1f			/* if no SLB entry found */
4:	std	r4, VCPU_FAULT_DAR(r9)
	stw	r6, VCPU_FAULT_DSISR(r9)

	/* Search the hash table. */
	mr	r3, r9			/* vcpu pointer */
	li	r7, 1			/* data fault */
	bl	.kvmppc_hpte_hv_fault
	ld	r9, HSTATE_KVM_VCPU(r13)
	ld	r11, VCPU_MSR(r9)
	li	r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
	cmpdi	r3, 0			/* retry the instruction */
	cmpdi	r3, -1			/* handle in kernel mode */
	cmpdi	r3, -2			/* MMIO emulation; need instr word */

	/* Synthesize a DSI for the guest */
	ld	r4, VCPU_FAULT_DAR(r9)
1:	mtspr	SPRN_DAR, r4
	mtspr	SPRN_DSISR, r6
	mtspr	SPRN_SRR0, r10
	mtspr	SPRN_SRR1, r11
	li	r10, BOOK3S_INTERRUPT_DATA_STORAGE
	li	r11, (MSR_ME << 1) | 1	/* synthesize MSR_SF | MSR_ME */

fast_interrupt_c_return:
6:	ld	r7, VCPU_CTR(r9)
	lwz	r8, VCPU_XER(r9)

3:	ld	r5, VCPU_KVM(r9)	/* not relocated, use VRMA */
	ld	r5, KVM_VRMA_SLB_V(r5)

	/* If this is for emulated MMIO, load the instruction word */
2:	li	r8, KVM_INST_FETCH_FAILED	/* In case lwz faults */

	/* Set guest mode to 'jump over instruction' so if lwz faults
	 * we'll just continue at the next IP. */
	li	r0, KVM_GUEST_MODE_SKIP
	stb	r0, HSTATE_IN_GUEST(r13)

	/* Do the access with MSR:DR enabled */
	ori	r4, r3, MSR_DR		/* Enable paging for data */

	/* Store the result */
	stw	r8, VCPU_LAST_INST(r9)

	/* Unset guest mode. */
	li	r0, KVM_GUEST_MODE_HOST_HV
	stb	r0, HSTATE_IN_GUEST(r13)

/*
 * Similarly for an HISI, reflect it to the guest as an ISI unless
 * it is an HPTE not found fault for a page that we have paged out.
 */
	andis.	r0, r11, SRR1_ISI_NOPT@h
	andi.	r0, r11, MSR_IR		/* instruction relocation enabled? */
	PPC_SLBFEE_DOT(R5, R0)		/* if so, look up SLB */
	bne	1f			/* if no SLB entry found */

	/* Search the hash table. */
	mr	r3, r9			/* vcpu pointer */
	li	r7, 0			/* instruction fault */
	bl	.kvmppc_hpte_hv_fault
	ld	r9, HSTATE_KVM_VCPU(r13)
	ld	r11, VCPU_MSR(r9)
	li	r12, BOOK3S_INTERRUPT_H_INST_STORAGE
	cmpdi	r3, 0			/* retry the instruction */
	beq	fast_interrupt_c_return
	cmpdi	r3, -1			/* handle in kernel mode */

	/* Synthesize an ISI for the guest */
1:	mtspr	SPRN_SRR0, r10
	mtspr	SPRN_SRR1, r11
	li	r10, BOOK3S_INTERRUPT_INST_STORAGE
	li	r11, (MSR_ME << 1) | 1	/* synthesize MSR_SF | MSR_ME */
	b	fast_interrupt_c_return

3:	ld	r6, VCPU_KVM(r9)	/* not relocated, use VRMA */
	ld	r5, KVM_VRMA_SLB_V(r6)

/*
 * Try to handle an hcall in real mode.
 * Returns to the guest if we handle it, or continues on up to
 * the kernel if we can't (i.e. if we don't have a handler for
 * it, or if the handler returns H_TOO_HARD).
 */
	.globl	hcall_try_real_mode
hcall_try_real_mode:
	ld	r3,VCPU_GPR(R3)(r9)
	/* sc 1 from userspace - reflect to guest syscall */
	bne	sc_1_fast_return
	cmpldi	r3,hcall_real_table_end - hcall_real_table
	LOAD_REG_ADDR(r4, hcall_real_table)
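	/*
	 * hcall numbers are multiples of 4 and the table entries are
	 * 32-bit offsets from hcall_real_table, so the hcall number
	 * itself is the byte index.  Roughly, in C (illustrative
	 * sketch only):
	 *
	 *	if (req >= table_size || !hcall_real_table[req / 4])
	 *		goto hcall_real_fallback;  // punt to virtual mode
	 *	handler = (void *)hcall_real_table
	 *			+ hcall_real_table[req / 4];
	 *	vcpu->arch.gpr[3] = handler(vcpu, ...);
	 */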
	mr	r3,r9		/* get vcpu pointer */
	ld	r4,VCPU_GPR(R4)(r9)
	beq	hcall_real_fallback
	ld	r4,HSTATE_KVM_VCPU(r13)
	std	r3,VCPU_GPR(R3)(r4)

	li	r10, BOOK3S_INTERRUPT_SYSCALL
	li	r11, (MSR_ME << 1) | 1	/* synthesize MSR_SF | MSR_ME */

	/* We've attempted a real mode hcall, but it has been punted back
	 * to userspace.  We need to restore some clobbered volatiles
	 * before resuming the pass-it-to-qemu path */
hcall_real_fallback:
	li	r12,BOOK3S_INTERRUPT_SYSCALL
	ld	r9, HSTATE_KVM_VCPU(r13)

	.globl	hcall_real_table
hcall_real_table:
	.long	0		/* 0 - unused */
	.long	.kvmppc_h_remove - hcall_real_table
	.long	.kvmppc_h_enter - hcall_real_table
	.long	.kvmppc_h_read - hcall_real_table
	.long	0		/* 0x10 - H_CLEAR_MOD */
	.long	0		/* 0x14 - H_CLEAR_REF */
	.long	.kvmppc_h_protect - hcall_real_table
	.long	0		/* 0x1c - H_GET_TCE */
	.long	.kvmppc_h_put_tce - hcall_real_table
	.long	0		/* 0x24 - H_SET_SPRG0 */
	.long	.kvmppc_h_set_dabr - hcall_real_table
#ifdef CONFIG_KVM_XICS
	.long	.kvmppc_rm_h_eoi - hcall_real_table
	.long	.kvmppc_rm_h_cppr - hcall_real_table
	.long	.kvmppc_rm_h_ipi - hcall_real_table
	.long	0		/* 0x70 - H_IPOLL */
	.long	.kvmppc_rm_h_xirr - hcall_real_table
#else
	.long	0		/* 0x64 - H_EOI */
	.long	0		/* 0x68 - H_CPPR */
	.long	0		/* 0x6c - H_IPI */
	.long	0		/* 0x70 - H_IPOLL */
	.long	0		/* 0x74 - H_XIRR */
#endif
	.long	.kvmppc_h_cede - hcall_real_table
	.long	.kvmppc_h_bulk_remove - hcall_real_table
hcall_real_table_end:
_GLOBAL(kvmppc_h_set_dabr)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	std	r4,VCPU_DABR(r3)
	/* Work around P7 bug where DABR can get corrupted on mtspr */
1:	mtspr	SPRN_DABR,r4

_GLOBAL(kvmppc_h_cede)
	std	r11,VCPU_MSR(r3)
	stb	r0,VCPU_CEDED(r3)
	sync			/* order setting ceded vs. testing prodded */
	lbz	r5,VCPU_PRODDED(r3)
	bne	kvm_cede_prodded
	li	r0,0		/* set trap to 0 to say hcall is handled */
	stw	r0,VCPU_TRAP(r3)
	std	r0,VCPU_GPR(R3)(r3)
	b	kvm_cede_exit	/* just send it up to host on 970 */
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)

	/*
	 * Set our bit in the bitmask of napping threads unless all the
	 * other threads are already napping, in which case we send this
	 * up to the host.
	 */
	ld	r5,HSTATE_KVM_VCORE(r13)
	lbz	r6,HSTATE_PTID(r13)
	lwz	r8,VCORE_ENTRY_EXIT(r5)
	addi	r6,r5,VCORE_NAPPING_THREADS
	stb	r0,HSTATE_NAPPING(r13)
	/* order napping_threads update vs testing entry_exit_count */
	lwz	r7,VCORE_ENTRY_EXIT(r5)
	bge	33f		/* another thread already exiting */

	/*
	 * Although not specifically required by the architecture, POWER7
	 * preserves the following registers in nap mode, even if an SMT mode
	 * switch occurs: SLB entries, PURR, SPURR, AMOR, UAMOR, AMR, SPRG0-3,
	 * DAR, DSISR, DABR, DABRX, DSCR, PMCx, MMCRx, SIAR, SDAR.
	 */

	/* Save non-volatile GPRs */
	std	r14, VCPU_GPR(R14)(r3)
	std	r15, VCPU_GPR(R15)(r3)
	std	r16, VCPU_GPR(R16)(r3)
	std	r17, VCPU_GPR(R17)(r3)
	std	r18, VCPU_GPR(R18)(r3)
	std	r19, VCPU_GPR(R19)(r3)
	std	r20, VCPU_GPR(R20)(r3)
	std	r21, VCPU_GPR(R21)(r3)
	std	r22, VCPU_GPR(R22)(r3)
	std	r23, VCPU_GPR(R23)(r3)
	std	r24, VCPU_GPR(R24)(r3)
	std	r25, VCPU_GPR(R25)(r3)
	std	r26, VCPU_GPR(R26)(r3)
	std	r27, VCPU_GPR(R27)(r3)
	std	r28, VCPU_GPR(R28)(r3)
	std	r29, VCPU_GPR(R29)(r3)
	std	r30, VCPU_GPR(R30)(r3)
	std	r31, VCPU_GPR(R31)(r3)

	/*
	 * Take a nap until a decrementer or external or doorbell interrupt
	 * occurs, with PECE1, PECE0 and PECEDP set in LPCR
	 */
	stb	r0,HSTATE_HWTHREAD_REQ(r13)
	ori	r5,r5,LPCR_PECE0 | LPCR_PECE1
	oris	r5,r5,LPCR_PECEDP@h
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	std	r0, HSTATE_SCRATCH0(r13)
	ld	r0, HSTATE_SCRATCH0(r13)

	/* get vcpu pointer */
	ld	r4, HSTATE_KVM_VCPU(r13)

	/* Woken by external or decrementer interrupt */
	ld	r1, HSTATE_HOST_R1(r13)

	/* load up FP state */

	ld	r14, VCPU_GPR(R14)(r4)
	ld	r15, VCPU_GPR(R15)(r4)
	ld	r16, VCPU_GPR(R16)(r4)
	ld	r17, VCPU_GPR(R17)(r4)
	ld	r18, VCPU_GPR(R18)(r4)
	ld	r19, VCPU_GPR(R19)(r4)
	ld	r20, VCPU_GPR(R20)(r4)
	ld	r21, VCPU_GPR(R21)(r4)
	ld	r22, VCPU_GPR(R22)(r4)
	ld	r23, VCPU_GPR(R23)(r4)
	ld	r24, VCPU_GPR(R24)(r4)
	ld	r25, VCPU_GPR(R25)(r4)
	ld	r26, VCPU_GPR(R26)(r4)
	ld	r27, VCPU_GPR(R27)(r4)
	ld	r28, VCPU_GPR(R28)(r4)
	ld	r29, VCPU_GPR(R29)(r4)
	ld	r30, VCPU_GPR(R30)(r4)
	ld	r31, VCPU_GPR(R31)(r4)

	/* Check the wake reason in SRR1 to see why we got here */
	bl	kvmppc_check_wake_reason

	/* clear our bit in vcore->napping_threads */
34:	ld	r5,HSTATE_KVM_VCORE(r13)
	lbz	r7,HSTATE_PTID(r13)
	addi	r6,r5,VCORE_NAPPING_THREADS
	stb	r0,HSTATE_NAPPING(r13)

	/* See if the wake reason means we need to exit */
	stw	r12, VCPU_TRAP(r4)

	/* see if any other thread is already exiting */
	lwz	r0,VCORE_ENTRY_EXIT(r5)
	b	kvmppc_cede_reentry	/* if not go back to guest */

	/* the case where the vcpu had already been prodded when it ceded */
kvm_cede_prodded:
	stb	r0,VCPU_PRODDED(r3)
	sync			/* order testing prodded vs. clearing ceded */
	stb	r0,VCPU_CEDED(r3)

	/* we've ceded but we want to give control to the host */
kvm_cede_exit:
	b	hcall_real_fallback

	/* Try to handle a machine check in real mode */
machine_check_realmode:
	mr	r3, r9		/* get vcpu pointer */
	bl	.kvmppc_realmode_machine_check
	cmpdi	r3, 0		/* continue exiting from guest? */
	ld	r9, HSTATE_KVM_VCPU(r13)
	li	r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	/* If not, deliver a machine check.  SRR0/1 are already set */
	li	r10, BOOK3S_INTERRUPT_MACHINE_CHECK
	li	r11, (MSR_ME << 1) | 1	/* synthesize MSR_SF | MSR_ME */
	b	fast_interrupt_c_return

/*
 * Check the reason we woke from nap, and take appropriate action.
 * Returns:
 *	0 if nothing needs to be done
 *	1 if something happened that needs to be handled by the host
 *	-1 if there was a guest wakeup (IPI)
 *
 * Also sets r12 to the interrupt vector for any interrupt that needs
 * to be handled now by the host (0x500 for external interrupt), or zero.
 */
kvmppc_check_wake_reason:
	rlwinm	r6, r6, 45-31, 0xf	/* extract wake reason field (P8) */
	rlwinm	r6, r6, 45-31, 0xe	/* P7 wake reason field is 3 bits */
ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_207S)
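	/*
	 * SRR1 carries the nap wake reason in a small field; rotating
	 * the low word left by 14 brings it down to the least
	 * significant bits.  P8 uses a 4-bit field (mask 0xf); on P7
	 * the field is 3 bits, one bit position higher, hence 0xe.
	 */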
	cmpwi	r6, 8			/* was it an external interrupt? */
	li	r12, BOOK3S_INTERRUPT_EXTERNAL
	beq	kvmppc_read_intr	/* if so, see what it was */
	cmpwi	r6, 6			/* was it the decrementer? */
	cmpwi	r6, 5			/* privileged doorbell? */
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	li	r3, 1			/* anything else, return 1 */

/*
 * Determine what sort of external interrupt is pending (if any).
 * Returns:
 *	0 if no interrupt is pending
 *	1 if an interrupt is pending that needs to be handled by the host
 *	-1 if there was a guest wakeup IPI (which has now been cleared)
 */
kvmppc_read_intr:
	/* see if a host IPI is pending */
	lbz	r0, HSTATE_HOST_IPI(r13)

	/* Now read the interrupt from the ICP */
	ld	r6, HSTATE_XICS_PHYS(r13)
	rlwinm.	r3, r0, 0, 0xffffff
	beq	1f			/* if nothing pending in the ICP */
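	/*
	 * The XIRR value tested above is 32 bits: the top byte is the
	 * CPPR (current processor priority) and the low 24 bits are
	 * the XISR interrupt source; a zero XISR means nothing is
	 * pending.
	 */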
	/* We found something in the ICP...
	 *
	 * If it's not an IPI, stash it in the PACA and return to
	 * the host, we don't (yet) handle directing real external
	 * interrupts directly to the guest
	 */
	cmpwi	r3, XICS_IPI		/* if there is, is it an IPI? */

	/* It's an IPI, clear the MFRR and EOI it */
	stbcix	r3, r6, r8		/* clear the IPI */
	stwcix	r0, r6, r7		/* EOI it */
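	/*
	 * Writing 0xff to the MFRR sets the IPI priority to "least
	 * favored", i.e. no IPI pending, and writing the XIRR value
	 * back performs the EOI on the ICP.
	 */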
	/* We need to re-check host IPI now in case it got set in the
	 * meantime. If it's clear, we bounce the interrupt to the
	 * guest
	 */
	lbz	r0, HSTATE_HOST_IPI(r13)

	/* OK, it's an IPI for us */

42:	/* It's not an IPI and it's for the host, stash it in the PACA
	 * before exit, it will be picked up by the host ICP driver
	 */
	stw	r0, HSTATE_SAVED_XIRR(r13)

43:	/* We raced with the host, we need to resend that IPI, bummer */
	stbcix	r0, r6, r8		/* set the IPI */

/*
 * Save away FP, VMX and VSX registers.
 * N.B. r30 and r31 are volatile across this function,
 * thus it is not callable from C.
 */
#ifdef CONFIG_ALTIVEC
	oris	r8,r8,MSR_VEC@h
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	oris	r8,r8,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
	addi	r3,r3,VCPU_FPRS
#ifdef CONFIG_ALTIVEC
	addi	r3,r31,VCPU_VRS
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	mfspr	r6,SPRN_VRSAVE
	stw	r6,VCPU_VRSAVE(r3)

/*
 * Load up FP, VMX and VSX registers
 * N.B. r30 and r31 are volatile across this function,
 * thus it is not callable from C.
 */
#ifdef CONFIG_ALTIVEC
	oris	r8,r8,MSR_VEC@h
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	oris	r8,r8,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
	addi	r3,r4,VCPU_FPRS
#ifdef CONFIG_ALTIVEC
	addi	r3,r31,VCPU_VRS
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	lwz	r7,VCPU_VRSAVE(r4)
	mtspr	SPRN_VRSAVE,r7

/*
 * We come here if we get any exception or interrupt while we are
 * executing host real mode code while in guest MMU context.
 * For now just spin, but we should do something better.
 */
kvmppc_bad_host_intr: