/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * Derived from book3s_rmhandlers.S and other files, which are:
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */
#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/ptrace.h>
#include <asm/hvcall.h>
#include <asm/asm-offsets.h>
#include <asm/exception-64s.h>
#include <asm/kvm_book3s_asm.h>
#include <asm/mmu-hash64.h>
#define VCPU_GPRS_TM(reg) (((reg) * ULONG_SIZE) + VCPU_GPR_TM)

/* Values in HSTATE_NAPPING(r13) */
#define NAPPING_CEDE	1
#define NAPPING_NOVCPU	2
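/*
 * NAPPING_CEDE means the vcpu on this thread executed the H_CEDE hcall;
 * NAPPING_NOVCPU means the thread is napping because it has no vcpu
 * to run.
 */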
/*
 * Call kvmppc_hv_entry in real mode.
 * Must be called with interrupts hard-disabled.
 *
 * LR = return address to continue at after eventually re-enabling MMU
 */
_GLOBAL_TOC(kvmppc_hv_entry_trampoline)
	std	r0, PPC_LR_STKOFF(r1)
	LOAD_REG_ADDR(r5, kvmppc_call_hv_entry)
	mtmsrd	r0,1		/* clear RI in MSR */
	ld	r4, HSTATE_KVM_VCPU(r13)

	/* Back from guest - restore host state and return to caller */

	/* Restore host DABR and DABRX */
	ld	r5,HSTATE_DABR(r13)
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
	ld	r3,PACA_SPRG_VDSO(r13)
	mtspr	SPRN_SPRG_VDSO_WRITE,r3

	/* Reload the host's PMU registers */
	ld	r3, PACALPPACAPTR(r13)	/* is the host using the PMU? */
	lbz	r4, LPPACA_PMCINUSE(r3)
	beq	23f			/* skip if not */
	ld	r3, HSTATE_MMCR0(r13)
	andi.	r4, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO
END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
	lwz	r3, HSTATE_PMC1(r13)
	lwz	r4, HSTATE_PMC2(r13)
	lwz	r5, HSTATE_PMC3(r13)
	lwz	r6, HSTATE_PMC4(r13)
	lwz	r8, HSTATE_PMC5(r13)
	lwz	r9, HSTATE_PMC6(r13)
	ld	r3, HSTATE_MMCR0(r13)
	ld	r4, HSTATE_MMCR1(r13)
	ld	r5, HSTATE_MMCRA(r13)
	ld	r6, HSTATE_SIAR(r13)
	ld	r7, HSTATE_SDAR(r13)
	ld	r8, HSTATE_MMCR2(r13)
	ld	r9, HSTATE_SIER(r13)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

	/*
	 * Reload DEC.  HDEC interrupts were disabled when
	 * we reloaded the host's LPCR value.
	 */
	ld	r3, HSTATE_DECEXP(r13)
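	/*
	 * HSTATE_DECEXP holds the timebase value at which the host
	 * decrementer is due to expire; the new DEC value is that
	 * expiry time minus the current timebase.
	 */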
	/*
	 * For external and machine check interrupts, we need
	 * to call the Linux handler to process the interrupt.
	 * We do that by jumping to absolute address 0x500 for
	 * external interrupts, or the machine_check_fwnmi label
	 * for machine checks (since firmware might have patched
	 * the vector area at 0x200).  The [h]rfid at the end of the
	 * handler will return to the book3s_hv_interrupts.S code.
	 * For other interrupts we do the rfid to get back
	 * to the book3s_hv_interrupts.S code here.
	 */
	ld	r8, 112+PPC_LR_STKOFF(r1)
	ld	r7, HSTATE_HOST_MSR(r13)
	cmpwi	cr1, r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
	cmpwi	cr2, r12, BOOK3S_INTERRUPT_HMI
	beq	cr2, 14f		/* HMI check */

	/* RFI into the highmem handler, or branch to interrupt handler */
	mtmsrd	r6, 1			/* Clear RI in MSR */
	beq	cr1, 13f		/* machine check */

	/* On POWER7, we have external interrupts set to use HSRR0/1 */
11:	mtspr	SPRN_HSRR0, r8
13:	b	machine_check_fwnmi
14:	mtspr	SPRN_HSRR0, r8
	b	hmi_exception_after_realmode
kvmppc_primary_no_guest:
	/* We handle this much like a ceded vcpu */
	/* set our bit in napping_threads */
	ld	r5, HSTATE_KVM_VCORE(r13)
	lbz	r7, HSTATE_PTID(r13)
	addi	r6, r5, VCORE_NAPPING_THREADS
	/* order napping_threads update vs testing entry_exit_count */
	lwz	r7, VCORE_ENTRY_EXIT(r5)
	bge	kvm_novcpu_exit	/* another thread already exiting */
	li	r3, NAPPING_NOVCPU
	stb	r3, HSTATE_NAPPING(r13)
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r5, HSTATE_KVM_VCORE(r13)
	stb	r0, HSTATE_NAPPING(r13)
	stb	r0, HSTATE_HWTHREAD_REQ(r13)

	/* check the wake reason */
	bl	kvmppc_check_wake_reason

	/* see if any other thread is already exiting */
	lwz	r0, VCORE_ENTRY_EXIT(r5)

	/* clear our bit in napping_threads */
	lbz	r7, HSTATE_PTID(r13)
	addi	r6, r5, VCORE_NAPPING_THREADS

	/* See if the wake reason means we need to exit */

	/* Got an IPI but other vcpus aren't yet exiting, must be a latecomer */
	ld	r4, HSTATE_KVM_VCPU(r13)
	beq	kvmppc_primary_no_guest

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r4, VCPU_TB_RMENTRY
	bl	kvmhv_start_timing
/*
 * We come in here when wakened from nap mode.
 * Relocation is off and most register values are lost.
 * r13 points to the PACA.
 */
	.globl	kvm_start_guest
kvm_start_guest:

	/* Set runlatch bit the minute you wake up from nap */

	li	r0,KVM_HWTHREAD_IN_KVM
	stb	r0,HSTATE_HWTHREAD_STATE(r13)

	/* NV GPR values from power7_idle() will no longer be valid */
	stb	r0,PACA_NAPSTATELOST(r13)

	/* were we napping due to cede? */
	lbz	r0,HSTATE_NAPPING(r13)
	cmpwi	r0,NAPPING_CEDE
	cmpwi	r0,NAPPING_NOVCPU
	beq	kvm_novcpu_wakeup

	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,STACK_FRAME_OVERHEAD
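	/*
	 * There is no usable kernel stack at this point, so use the
	 * emergency stack saved in the PACA as a scratch stack.
	 */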
	/*
	 * We weren't napping due to cede, so this must be a secondary
	 * thread being woken up to run a guest, or being woken up due
	 * to a stray IPI.  (Or due to some machine check or hypervisor
	 * maintenance interrupt while the core is in KVM.)
	 */

	/* Check the wake reason in SRR1 to see why we got here */
	bl	kvmppc_check_wake_reason

	/* get vcpu pointer, NULL if we have no vcpu to run */
	ld	r4,HSTATE_KVM_VCPU(r13)
	/* if we have no vcpu to run, go back to sleep */

kvm_secondary_got_guest:

	/* Set HSTATE_DSCR(r13) to something sensible */
	ld	r6, PACA_DSCR(r13)
	std	r6, HSTATE_DSCR(r13)
	/* Back from the guest, go back to nap */
	/* Clear our vcpu pointer so we don't come back in early */
	std	r0, HSTATE_KVM_VCPU(r13)
	/*
	 * Make sure we clear HSTATE_KVM_VCPU(r13) before incrementing
	 * the nap_count, because once the increment to nap_count is
	 * visible we could be given another vcpu.
	 */

	/* increment the nap count and then go to nap mode */
	ld	r4, HSTATE_KVM_VCORE(r13)
	addi	r4, r4, VCORE_NAP_COUNT

	/*
	 * At this point we have finished executing in the guest.
	 * We need to wait for hwthread_req to become zero, since
	 * we may not turn on the MMU while hwthread_req is non-zero.
	 * While waiting we also need to check if we get given a vcpu to run.
	 */
	lbz	r3, HSTATE_HWTHREAD_REQ(r13)
	li	r0, KVM_HWTHREAD_IN_KERNEL
	stb	r0, HSTATE_HWTHREAD_STATE(r13)
	/* need to recheck hwthread_req after a barrier, to avoid race */
	lbz	r3, HSTATE_HWTHREAD_REQ(r13)

	/*
	 * We jump to power7_wakeup_loss, which will return to the caller
	 * of power7_nap in the powernv cpu offline loop.  The value we
	 * put in r3 becomes the return value for power7_nap.
	 */
	rlwimi	r4, r3, 0, LPCR_PECE0 | LPCR_PECE1
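	/*
	 * LPCR_PECE0 and LPCR_PECE1 are the power-saving exit-cause
	 * enable bits for external and decrementer interrupts
	 * respectively; they control which interrupts can wake the
	 * thread from nap.
	 */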
	ld	r4, HSTATE_KVM_VCPU(r13)
	b	kvm_secondary_got_guest

54:	li	r0, KVM_HWTHREAD_IN_KVM
	stb	r0, HSTATE_HWTHREAD_STATE(r13)

/******************************************************************************
 *                                                                            *
 *                               Entry code                                   *
 *                                                                            *
 *****************************************************************************/
	.global	kvmppc_hv_entry
kvmppc_hv_entry:
	/*
	 * R4 = vcpu pointer (or NULL)
	 * all other volatile GPRS = free
	 */
	std	r0, PPC_LR_STKOFF(r1)

	/* Save R1 in the PACA */
	std	r1, HSTATE_HOST_R1(r13)

	li	r6, KVM_GUEST_MODE_HOST_HV
	stb	r6, HSTATE_IN_GUEST(r13)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	/* Store initial timestamp */
	addi	r3, r4, VCPU_TB_RMENTRY
	bl	kvmhv_start_timing
	/*
	 * POWER7/POWER8 host -> guest partition switch code.
	 * We don't have to lock against concurrent tlbies,
	 * but we do have to coordinate across hardware threads.
	 */
	/* Increment entry count iff exit count is zero. */
	ld	r5,HSTATE_KVM_VCORE(r13)
	addi	r9,r5,VCORE_ENTRY_EXIT
	cmpwi	r3,0x100		/* any threads starting to exit? */
	bge	secondary_too_late	/* if so we're too late to the party */

	/* Primary thread switches to guest partition. */
	ld	r9,VCORE_KVM(r5)	/* pointer to struct kvm */
	lbz	r6,HSTATE_PTID(r13)
	li	r0,LPID_RSVD		/* switch to reserved LPID */
	mtspr	SPRN_SDR1,r6		/* switch to partition page table */

	/* See if we need to flush the TLB */
	lhz	r6,PACAPACAINDEX(r13)	/* test_bit(cpu, need_tlb_flush) */
	clrldi	r7,r6,64-6		/* extract bit number (6 bits) */
	srdi	r6,r6,6			/* doubleword number */
	sldi	r6,r6,3			/* address offset */
	addi	r6,r6,KVM_NEED_FLUSH	/* dword in kvm->arch.need_tlb_flush */
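	/*
	 * Worked example: for cpu 70, the bit number is 70 & 63 = 6,
	 * the doubleword index is 70 >> 6 = 1, and the byte offset
	 * into the need_tlb_flush bitmap is 1 << 3 = 8.
	 */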
23:	ldarx	r7,0,r6			/* if set, clear the bit */

	/* Flush the TLB of any entries for this LPID */
	/* use arch 2.07S as a proxy for POWER8 */
	li	r6,512			/* POWER8 has 512 sets */
	li	r6,128			/* POWER7 has 128 sets */
ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_207S)
	li	r7,0x800		/* IS field = 0b10 */

	/* Add timebase offset onto timebase */
22:	ld	r8,VCORE_TB_OFFSET(r5)
	mftb	r6			/* current host timebase */
	mtspr	SPRN_TBU40,r8		/* update upper 40 bits */
	mftb	r7			/* check if lower 24 bits overflowed */
	addis	r8,r8,0x100		/* if so, increment upper 40 bits */
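	/*
	 * Writing TBU40 replaces the upper 40 bits of the timebase
	 * while the lower 24 bits keep counting.  If the lower 24 bits
	 * wrapped between the two mftb reads, the carry was lost, so
	 * add 2^24 to the upper bits (addis with 0x100 adds
	 * 0x100 << 16 = 0x1000000) and write TBU40 again.
	 */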
	/* Load guest PCR value to select appropriate compat mode */
37:	ld	r7, VCORE_PCR(r5)

	/* DPDES is shared between threads */
	ld	r8, VCORE_DPDES(r5)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

	stb	r0,VCORE_IN_GUEST(r5)	/* signal secondaries to continue */

	/* Secondary threads wait for primary to have done partition switch */
20:	lbz	r0,VCORE_IN_GUEST(r5)

10:	ld	r8,VCORE_LPCR(r5)

	/* Check if HDEC expires soon */
	cmpwi	r3,512		/* 1 microsecond */
	li	r12,BOOK3S_INTERRUPT_HV_DECREMENTER

	/* Do we have a guest vcpu to run? */
	beq	kvmppc_primary_no_guest

	/* Load up guest SLB entries */
	lwz	r5,VCPU_SLB_MAX(r4)
1:	ld	r8,VCPU_SLB_E(r6)
	addi	r6,r6,VCPU_SLB_SIZE
	/* Increment yield count if they have a VPA */
	li	r6, LPPACA_YIELDCOUNT
	stb	r6, VCPU_VPA_DIRTY(r4)

	/* Save purr/spurr */
	std	r5,HSTATE_PURR(r13)
	std	r6,HSTATE_SPURR(r13)

	/* Set partition DABR */
	/* Do this before re-enabling PMU to avoid P7 DABR corruption bug */
	lwz	r5,VCPU_DABRX(r4)
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
END_FTR_SECTION_IFCLR(CPU_FTR_TM)

	/* Turn on TM/FP/VSX/VMX so we can restore them. */
	oris	r5, r5, (MSR_VEC | MSR_VSX)@h

	/*
	 * The user may change these outside of a transaction, so they must
	 * always be context switched.
	 */
	ld	r5, VCPU_TFHAR(r4)
	ld	r6, VCPU_TFIAR(r4)
	ld	r7, VCPU_TEXASR(r4)
	mtspr	SPRN_TEXASR, r7

	rldicl.	r5, r5, 64 - MSR_TS_S_LG, 62
	beq	skip_tm	/* TM not active in guest */
	/*
	 * Make sure the failure summary is set, otherwise we'll program check
	 * when we trechkpt.  It's possible that this might not have been set
	 * on a kvmppc_set_one_reg() call, but we shouldn't let that crash
	 * the host.
	 */
	oris	r7, r7, (TEXASR_FS)@h
	mtspr	SPRN_TEXASR, r7
	/*
	 * We need to load up the checkpointed state for the guest.
	 * We need to do this early as it will blow away any GPRs, VSRs and
	 * some SPRs.
	 */
	addi	r3, r31, VCPU_FPRS_TM
	addi	r3, r31, VCPU_VRS_TM
	lwz	r7, VCPU_VRSAVE_TM(r4)
	mtspr	SPRN_VRSAVE, r7

	ld	r5, VCPU_LR_TM(r4)
	lwz	r6, VCPU_CR_TM(r4)
	ld	r7, VCPU_CTR_TM(r4)
	ld	r8, VCPU_AMR_TM(r4)
	ld	r9, VCPU_TAR_TM(r4)

	/*
	 * Load up PPR and DSCR values but don't put them in the actual SPRs
	 * till the last moment to avoid running with userspace PPR and DSCR
	 * for too long.
	 */
	ld	r29, VCPU_DSCR_TM(r4)
	ld	r30, VCPU_PPR_TM(r4)

	std	r2, PACATMSCRATCH(r13)	/* Save TOC */

	/* Clear the MSR RI since r1, r13 are all going to be foobar. */

	/* Load GPRs r0-r28 */
	reg = 0
	.rept	29
	ld	reg, VCPU_GPRS_TM(reg)(r31)
	reg = reg + 1
	.endr

	/* Load final GPRs */
	ld	29, VCPU_GPRS_TM(29)(r31)
	ld	30, VCPU_GPRS_TM(30)(r31)
	ld	31, VCPU_GPRS_TM(31)(r31)
	/* TM checkpointed state is now setup.  All GPRs are now volatile. */

	/* Now let's get back the state we need. */
	ld	r29, HSTATE_DSCR(r13)
	ld	r4, HSTATE_KVM_VCPU(r13)
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r2, PACATMSCRATCH(r13)

	/* Set the MSR RI since we have our registers back. */

	/* Load guest PMU registers */
	/* R4 is live here (vcpu pointer) */
	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
	andi.	r5, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO
END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
	lwz	r3, VCPU_PMC(r4)	/* always load up guest PMU registers */
	lwz	r5, VCPU_PMC + 4(r4)	/* to prevent information leak */
	lwz	r6, VCPU_PMC + 8(r4)
	lwz	r7, VCPU_PMC + 12(r4)
	lwz	r8, VCPU_PMC + 16(r4)
	lwz	r9, VCPU_PMC + 20(r4)
	ld	r5, VCPU_MMCR + 8(r4)
	ld	r6, VCPU_MMCR + 16(r4)
	ld	r5, VCPU_MMCR + 24(r4)
	lwz	r7, VCPU_PMC + 24(r4)
	lwz	r8, VCPU_PMC + 28(r4)
	ld	r9, VCPU_MMCR + 32(r4)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

	/* Load up FP, VMX and VSX registers */

	ld	r14, VCPU_GPR(R14)(r4)
	ld	r15, VCPU_GPR(R15)(r4)
	ld	r16, VCPU_GPR(R16)(r4)
	ld	r17, VCPU_GPR(R17)(r4)
	ld	r18, VCPU_GPR(R18)(r4)
	ld	r19, VCPU_GPR(R19)(r4)
	ld	r20, VCPU_GPR(R20)(r4)
	ld	r21, VCPU_GPR(R21)(r4)
	ld	r22, VCPU_GPR(R22)(r4)
	ld	r23, VCPU_GPR(R23)(r4)
	ld	r24, VCPU_GPR(R24)(r4)
	ld	r25, VCPU_GPR(R25)(r4)
	ld	r26, VCPU_GPR(R26)(r4)
	ld	r27, VCPU_GPR(R27)(r4)
	ld	r28, VCPU_GPR(R28)(r4)
	ld	r29, VCPU_GPR(R29)(r4)
	ld	r30, VCPU_GPR(R30)(r4)
	ld	r31, VCPU_GPR(R31)(r4)
	/* Switch DSCR to guest value */

	/* Skip next section on POWER7 */
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
	/* Turn on TM so we can access TFHAR/TFIAR/TEXASR */
	rldimi	r8, r0, MSR_TM_LG, 63-MSR_TM_LG

	/* Load up POWER8-specific registers */
	lwz	r6, VCPU_PSPB(r4)
	ld	r6, VCPU_DAWRX(r4)
	ld	r7, VCPU_CIABR(r4)
	ld	r8, VCPU_EBBHR(r4)
	ld	r5, VCPU_EBBRR(r4)
	ld	r6, VCPU_BESCR(r4)
	ld	r7, VCPU_CSIGR(r4)
	ld	r5, VCPU_TCSCR(r4)
	lwz	r7, VCPU_GUEST_PID(r4)
	/*
	 * Set the decrementer to the guest decrementer.
	 */
	ld	r8,VCPU_DEC_EXPIRES(r4)
	/* r8 is a host timebase value here, convert to guest TB */
	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r6,VCORE_TB_OFFSET(r5)

	ld	r5, VCPU_SPRG0(r4)
	ld	r6, VCPU_SPRG1(r4)
	ld	r7, VCPU_SPRG2(r4)
	ld	r8, VCPU_SPRG3(r4)

	/* Load up DAR and DSISR */
	lwz	r6, VCPU_DSISR(r4)

	/* Restore AMR and UAMOR, set AMOR to all 1s */

	/* Restore state of CTRL run bit; assume 1 on entry */
kvmppc_cede_reentry:		/* r4 = vcpu, r13 = paca */

deliver_guest_interrupt:
	/* r11 = vcpu->arch.msr & ~MSR_HV */
	rldicl	r11, r11, 63 - MSR_HV_LG, 1
	rotldi	r11, r11, 1 + MSR_HV_LG
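	/*
	 * The rldicl rotates the HV bit up to the top and masks it off
	 * (the mask starts at bit 1); the rotldi then rotates the value
	 * back into place.  Net effect: MSR_HV is cleared without
	 * needing a scratch register for the mask.
	 */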
	/* Check if we can deliver an external or decrementer interrupt now */
	ld	r0, VCPU_PENDING_EXC(r4)
	rldicl	r0, r0, 64 - BOOK3S_IRQPRIO_EXTERNAL_LEVEL, 63
	andi.	r8, r11, MSR_EE
	/* Insert EXTERNAL_LEVEL bit into LPCR at the MER bit position */
	rldimi	r8, r0, LPCR_MER_SH, 63 - LPCR_MER_SH
	li	r0, BOOK3S_INTERRUPT_EXTERNAL
	li	r0, BOOK3S_INTERRUPT_DECREMENTER
12:	mtspr	SPRN_SRR0, r10
	bl	kvmppc_msr_interrupt

	/*
	 * R10: value for HSRR0
	 * R11: value for HSRR1
	 */
	stb	r0,VCPU_CEDED(r4)	/* cancel cede */
	/* Activate guest mode, so faults get handled by KVM */
	li	r9, KVM_GUEST_MODE_GUEST_HV
	stb	r9, HSTATE_IN_GUEST(r13)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	/* Accumulate timing */
	addi	r3, r4, VCPU_TB_GUEST
	bl	kvmhv_accumulate_time

END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
	ld	r1, VCPU_GPR(R1)(r4)
	ld	r2, VCPU_GPR(R2)(r4)
	ld	r3, VCPU_GPR(R3)(r4)
	ld	r5, VCPU_GPR(R5)(r4)
	ld	r6, VCPU_GPR(R6)(r4)
	ld	r7, VCPU_GPR(R7)(r4)
	ld	r8, VCPU_GPR(R8)(r4)
	ld	r9, VCPU_GPR(R9)(r4)
	ld	r10, VCPU_GPR(R10)(r4)
	ld	r11, VCPU_GPR(R11)(r4)
	ld	r12, VCPU_GPR(R12)(r4)
	ld	r13, VCPU_GPR(R13)(r4)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
	ld	r0, VCPU_GPR(R0)(r4)
	ld	r4, VCPU_GPR(R4)(r4)
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r4, VCPU_TB_RMEXIT
	bl	kvmhv_accumulate_time
11:	b	kvmhv_switch_to_host

	ld	r4, HSTATE_KVM_VCPU(r13)
	addi	r3, r4, VCPU_TB_RMEXIT
	bl	kvmhv_accumulate_time
/******************************************************************************
 *                                                                            *
 *                               Exit code                                    *
 *                                                                            *
 *****************************************************************************/

/*
 * We come here from the first-level interrupt handlers.
 */
	.globl	kvmppc_interrupt_hv
kvmppc_interrupt_hv:
	/*
	 * R12 = interrupt vector
	 * guest CR, R12 saved in shadow VCPU SCRATCH1/0
	 * guest R13 saved in SPRN_SCRATCH0
	 */
	std	r9, HSTATE_SCRATCH2(r13)
	lbz	r9, HSTATE_IN_GUEST(r13)
	cmpwi	r9, KVM_GUEST_MODE_HOST_HV
	beq	kvmppc_bad_host_intr
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
	cmpwi	r9, KVM_GUEST_MODE_GUEST
	ld	r9, HSTATE_SCRATCH2(r13)
	beq	kvmppc_interrupt_pr

	/* We're now back in the host but in guest MMU context */
	li	r9, KVM_GUEST_MODE_HOST_HV
	stb	r9, HSTATE_IN_GUEST(r13)

	ld	r9, HSTATE_KVM_VCPU(r13)
	std	r0, VCPU_GPR(R0)(r9)
	std	r1, VCPU_GPR(R1)(r9)
	std	r2, VCPU_GPR(R2)(r9)
	std	r3, VCPU_GPR(R3)(r9)
	std	r4, VCPU_GPR(R4)(r9)
	std	r5, VCPU_GPR(R5)(r9)
	std	r6, VCPU_GPR(R6)(r9)
	std	r7, VCPU_GPR(R7)(r9)
	std	r8, VCPU_GPR(R8)(r9)
	ld	r0, HSTATE_SCRATCH2(r13)
	std	r0, VCPU_GPR(R9)(r9)
	std	r10, VCPU_GPR(R10)(r9)
	std	r11, VCPU_GPR(R11)(r9)
	ld	r3, HSTATE_SCRATCH0(r13)
	lwz	r4, HSTATE_SCRATCH1(r13)
	std	r3, VCPU_GPR(R12)(r9)
	ld	r3, HSTATE_CFAR(r13)
	std	r3, VCPU_CFAR(r9)
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
	ld	r4, HSTATE_PPR(r13)
	std	r4, VCPU_PPR(r9)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
	/* Restore R1/R2 so we can handle faults */
	ld	r1, HSTATE_HOST_R1(r13)

	mfspr	r10, SPRN_SRR0
	mfspr	r11, SPRN_SRR1
	std	r10, VCPU_SRR0(r9)
	std	r11, VCPU_SRR1(r9)
	andi.	r0, r12, 2		/* need to read HSRR0/1? */
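	/*
	 * The first-level handlers flag interrupts delivered through
	 * HSRR0/1 (rather than SRR0/1) by setting bit 1 in the trap
	 * number in r12, e.g. 0x502 instead of 0x500.
	 */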
	mfspr	r10, SPRN_HSRR0
	mfspr	r11, SPRN_HSRR1
1:	std	r10, VCPU_PC(r9)
	std	r11, VCPU_MSR(r9)

	std	r3, VCPU_GPR(R13)(r9)
	stw	r12,VCPU_TRAP(r9)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r9, VCPU_TB_RMINTR
	bl	kvmhv_accumulate_time
	ld	r5, VCPU_GPR(R5)(r9)
	ld	r6, VCPU_GPR(R6)(r9)
	ld	r7, VCPU_GPR(R7)(r9)
	ld	r8, VCPU_GPR(R8)(r9)

	/*
	 * Save HEIR (HV emulation assist reg) in emul_inst
	 * if this is an HEI (HV emulation interrupt, e40)
	 */
	li	r3,KVM_INST_FETCH_FAILED
	stw	r3,VCPU_LAST_INST(r9)
	cmpwi	r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST
11:	stw	r3,VCPU_HEIR(r9)

	/* these are volatile across C function calls */
	std	r3, VCPU_CTR(r9)
	stw	r4, VCPU_XER(r9)
	/* If this is a page table miss then see if it's theirs or ours */
	cmpwi	r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
	cmpwi	r12, BOOK3S_INTERRUPT_H_INST_STORAGE

	/* See if this is a leftover HDEC interrupt */
	cmpwi	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	bge	fast_guest_return

	/* See if this is an hcall we can handle in real mode */
	cmpwi	r12,BOOK3S_INTERRUPT_SYSCALL
	beq	hcall_try_real_mode

	/* External interrupt ? */
	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
	bne+	guest_exit_cont

	/*
	 * External interrupt, first check for host_ipi.  If this is
	 * set, we know the host wants us out so let's do it now.
	 */

	/* Check if any CPU is heading out to the host, if so head out too */
	ld	r5, HSTATE_KVM_VCORE(r13)
	lwz	r0, VCORE_ENTRY_EXIT(r5)
	blt	deliver_guest_interrupt
guest_exit_cont:		/* r9 = vcpu, r12 = trap, r13 = paca */
	/* Save more register state */
	std	r6, VCPU_DAR(r9)
	stw	r7, VCPU_DSISR(r9)
	/* don't overwrite fault_dar/fault_dsisr if HDSI */
	cmpwi	r12,BOOK3S_INTERRUPT_H_DATA_STORAGE
	std	r6, VCPU_FAULT_DAR(r9)
	stw	r7, VCPU_FAULT_DSISR(r9)

	/* See if it is a machine check */
	cmpwi	r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	beq	machine_check_realmode
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r9, VCPU_TB_RMEXIT
	bl	kvmhv_accumulate_time

	/* Save guest CTRL register, set runlatch to 1 */
6:	mfspr	r6,SPRN_CTRLF
	stw	r6,VCPU_CTRL(r9)

	/* Read the guest SLB and save it away */
	lwz	r0,VCPU_SLB_NR(r9)	/* number of entries in SLB */
	andis.	r0,r8,SLB_ESID_V@h
	add	r8,r8,r6		/* put index in */
	std	r8,VCPU_SLB_E(r7)
	std	r3,VCPU_SLB_V(r7)
	addi	r7,r7,VCPU_SLB_SIZE
	stw	r5,VCPU_SLB_MAX(r9)
	/*
	 * Save the guest PURR/SPURR
	 */
	ld	r8,VCPU_SPURR(r9)
	std	r5,VCPU_PURR(r9)
	std	r6,VCPU_SPURR(r9)

	/*
	 * Restore host PURR/SPURR and add guest times
	 * so that the time in the guest gets accounted.
	 */
	ld	r3,HSTATE_PURR(r13)
	ld	r4,HSTATE_SPURR(r13)

	/* r5 is a guest timebase value here, convert to host TB */
	ld	r3,HSTATE_KVM_VCORE(r13)
	ld	r4,VCORE_TB_OFFSET(r3)
	std	r5,VCPU_DEC_EXPIRES(r9)

END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
	/* Save POWER8-specific registers */
	std	r5, VCPU_IAMR(r9)
	stw	r6, VCPU_PSPB(r9)
	std	r7, VCPU_FSCR(r9)
	std	r6, VCPU_VTB(r9)
	std	r7, VCPU_TAR(r9)
	mfspr	r8, SPRN_EBBHR
	std	r8, VCPU_EBBHR(r9)
	mfspr	r5, SPRN_EBBRR
	mfspr	r6, SPRN_BESCR
	mfspr	r7, SPRN_CSIGR
	std	r5, VCPU_EBBRR(r9)
	std	r6, VCPU_BESCR(r9)
	std	r7, VCPU_CSIGR(r9)
	std	r8, VCPU_TACR(r9)
	mfspr	r5, SPRN_TCSCR
	std	r5, VCPU_TCSCR(r9)
	std	r6, VCPU_ACOP(r9)
	stw	r7, VCPU_GUEST_PID(r9)
	std	r8, VCPU_WORT(r9)
	/* Save and reset AMR and UAMOR before turning on the MMU */
	std	r6,VCPU_UAMOR(r9)

	/* Switch DSCR back to host value */
	ld	r7, HSTATE_DSCR(r13)
	std	r8, VCPU_DSCR(r9)
	/* Save non-volatile GPRs */
	std	r14, VCPU_GPR(R14)(r9)
	std	r15, VCPU_GPR(R15)(r9)
	std	r16, VCPU_GPR(R16)(r9)
	std	r17, VCPU_GPR(R17)(r9)
	std	r18, VCPU_GPR(R18)(r9)
	std	r19, VCPU_GPR(R19)(r9)
	std	r20, VCPU_GPR(R20)(r9)
	std	r21, VCPU_GPR(R21)(r9)
	std	r22, VCPU_GPR(R22)(r9)
	std	r23, VCPU_GPR(R23)(r9)
	std	r24, VCPU_GPR(R24)(r9)
	std	r25, VCPU_GPR(R25)(r9)
	std	r26, VCPU_GPR(R26)(r9)
	std	r27, VCPU_GPR(R27)(r9)
	std	r28, VCPU_GPR(R28)(r9)
	std	r29, VCPU_GPR(R29)(r9)
	std	r30, VCPU_GPR(R30)(r9)
	std	r31, VCPU_GPR(R31)(r9)

	mfspr	r3, SPRN_SPRG0
	mfspr	r4, SPRN_SPRG1
	mfspr	r5, SPRN_SPRG2
	mfspr	r6, SPRN_SPRG3
	std	r3, VCPU_SPRG0(r9)
	std	r4, VCPU_SPRG1(r9)
	std	r5, VCPU_SPRG2(r9)
	std	r6, VCPU_SPRG3(r9)
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
END_FTR_SECTION_IFCLR(CPU_FTR_TM)
	rldimi	r8, r0, MSR_TM_LG, 63-MSR_TM_LG

	rldicl.	r5, r5, 64 - MSR_TS_S_LG, 62
	beq	1f	/* TM not active in guest. */

	li	r3, TM_CAUSE_KVM_RESCHED

	/* Clear the MSR RI since r1, r13 are all going to be foobar. */

	/* All GPRs are volatile at this point. */

	/* Temporarily store r13 and r9 so we have some regs to play with */
	std	r9, PACATMSCRATCH(r13)
	ld	r9, HSTATE_KVM_VCPU(r13)

	/* Get a few more GPRs free. */
	std	r29, VCPU_GPRS_TM(29)(r9)
	std	r30, VCPU_GPRS_TM(30)(r9)
	std	r31, VCPU_GPRS_TM(31)(r9)
	/* Save away PPR and DSCR soon so don't run with user values. */
	mfspr	r30, SPRN_DSCR
	ld	r29, HSTATE_DSCR(r13)
	mtspr	SPRN_DSCR, r29

	/* Save all but r9, r13 & r29-r31 */
	reg = 0
	.rept	29
	.if (reg != 9) && (reg != 13)
	std	reg, VCPU_GPRS_TM(reg)(r9)
	.endif
	reg = reg + 1
	.endr

	/* ... now save r13 */
	std	r4, VCPU_GPRS_TM(13)(r9)
	/* ... and save r9 */
	ld	r4, PACATMSCRATCH(r13)
	std	r4, VCPU_GPRS_TM(9)(r9)
	/* Reload stack pointer and TOC. */
	ld	r1, HSTATE_HOST_R1(r13)

	/* Set MSR RI now we have r1 and r13 back. */

	/* Save away checkpointed SPRs. */
	std	r31, VCPU_PPR_TM(r9)
	std	r30, VCPU_DSCR_TM(r9)
	std	r5, VCPU_LR_TM(r9)
	stw	r6, VCPU_CR_TM(r9)
	std	r7, VCPU_CTR_TM(r9)
	std	r8, VCPU_AMR_TM(r9)
	std	r10, VCPU_TAR_TM(r9)

	/* Restore r12 as trap number. */
	lwz	r12, VCPU_TRAP(r9)

	addi	r3, r9, VCPU_FPRS_TM
	addi	r3, r9, VCPU_VRS_TM
	mfspr	r6, SPRN_VRSAVE
	stw	r6, VCPU_VRSAVE_TM(r9)
	/*
	 * We need to save these SPRs after the treclaim so that the software
	 * error code is recorded correctly in the TEXASR.  Also the user may
	 * change these outside of a transaction, so they must always be
	 * context switched.
	 */
	mfspr	r5, SPRN_TFHAR
	mfspr	r6, SPRN_TFIAR
	mfspr	r7, SPRN_TEXASR
	std	r5, VCPU_TFHAR(r9)
	std	r6, VCPU_TFIAR(r9)
	std	r7, VCPU_TEXASR(r9)
	/* Increment yield count if they have a VPA */
	ld	r8, VCPU_VPA(r9)	/* do they have a VPA? */
	li	r4, LPPACA_YIELDCOUNT
	stb	r3, VCPU_VPA_DIRTY(r9)

	/* Save PMU registers if requested */
	/* r8 and cr0.eq are live here */
	/*
	 * POWER8 seems to have a hardware bug where setting
	 * MMCR0[PMAE] along with MMCR0[PMC1CE] and/or MMCR0[PMCjCE]
	 * when some counters are already negative doesn't seem
	 * to cause a performance monitor alert (and hence interrupt).
	 * The effect of this is that when saving the PMU state,
	 * if there is no PMU alert pending when we read MMCR0
	 * before freezing the counters, but one becomes pending
	 * before we read the counters, we lose it.
	 * To work around this, we need a way to freeze the counters
	 * before reading MMCR0.  Normally, freezing the counters
	 * is done by writing MMCR0 (to set MMCR0[FC]) which
	 * unavoidably writes MMCR0[PMAO] as well.  On POWER8,
	 * we can also freeze the counters using MMCR2, by writing
	 * 1s to all the counter freeze condition bits (there are
	 * 9 bits each for 6 counters).
	 */
	li	r3, -1			/* set all freeze bits */
	mfspr	r10, SPRN_MMCR2
	mtspr	SPRN_MMCR2, r3
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mfspr	r4, SPRN_MMCR0		/* save MMCR0 */
	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
	mfspr	r6, SPRN_MMCRA
	/* Clear MMCRA in order to disable SDAR updates */
	mtspr	SPRN_MMCRA, r7
	beq	21f			/* if no VPA, save PMU stuff anyway */
	lbz	r7, LPPACA_PMCINUSE(r8)
	cmpwi	r7, 0			/* did they ask for PMU stuff to be saved? */
	std	r3, VCPU_MMCR(r9)	/* if not, set saved MMCR0 to FC */
21:	mfspr	r5, SPRN_MMCR1
	std	r4, VCPU_MMCR(r9)
	std	r5, VCPU_MMCR + 8(r9)
	std	r6, VCPU_MMCR + 16(r9)
	std	r10, VCPU_MMCR + 24(r9)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	std	r7, VCPU_SIAR(r9)
	std	r8, VCPU_SDAR(r9)
	stw	r3, VCPU_PMC(r9)
	stw	r4, VCPU_PMC + 4(r9)
	stw	r5, VCPU_PMC + 8(r9)
	stw	r6, VCPU_PMC + 12(r9)
	stw	r7, VCPU_PMC + 16(r9)
	stw	r8, VCPU_PMC + 20(r9)
	mfspr	r6, SPRN_SPMC1
	mfspr	r7, SPRN_SPMC2
	mfspr	r8, SPRN_MMCRS
	std	r5, VCPU_SIER(r9)
	stw	r6, VCPU_PMC + 24(r9)
	stw	r7, VCPU_PMC + 28(r9)
	std	r8, VCPU_MMCR + 32(r9)
	mtspr	SPRN_MMCRS, r4
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
#ifndef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING

kvmhv_do_exit:			/* r12 = trap, r13 = paca */
	/*
	 * POWER7/POWER8 guest -> host partition switch code.
	 * We don't have to lock against tlbies but we do
	 * have to coordinate the hardware threads.
	 */
	/*
	 * Increment the threads-exiting-guest count in the 0xff00
	 * bits of vcore->entry_exit_count.
	 */
	ld	r5,HSTATE_KVM_VCORE(r13)
	addi	r6,r5,VCORE_ENTRY_EXIT
	isync			/* order stwcx. vs. reading napping_threads */

	/*
	 * At this point we have an interrupt that we have to pass
	 * up to the kernel or qemu; we can't handle it in real mode.
	 * Thus we have to do a partition switch, so we have to
	 * collect the other threads, if we are the first thread
	 * to take an interrupt.  To do this, we set the HDEC to 0,
	 * which causes an HDEC interrupt in all threads within 2ns
	 * because the HDEC register is shared between all 4 threads.
	 * However, we don't need to bother if this is an HDEC
	 * interrupt, since the other threads will already be on their
	 * way here in that case.
	 */
	cmpwi	r3,0x100	/* Are we the first here? */
	cmpwi	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	/*
	 * Send an IPI to any napping threads, since an HDEC interrupt
	 * doesn't wake CPUs up from nap.
	 */
	lwz	r3,VCORE_NAPPING_THREADS(r5)
	lbz	r4,HSTATE_PTID(r13)
	andc.	r3,r3,r0		/* no sense IPI'ing ourselves */

	/* Order entry/exit update vs. IPIs */
	mulli	r4,r4,PACA_SIZE		/* get paca for thread 0 */
	ld	r8,HSTATE_XICS_PHYS(r6)	/* get thread's XICS reg addr */
	stbcix	r0,r7,r8		/* trigger the IPI */
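	/*
	 * Writing a priority value to a thread's XICS MFRR register
	 * raises an IPI on that thread; with LPCR[PECE0] set this
	 * wakes the thread even from nap.
	 */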
	addi	r6,r6,PACA_SIZE

#ifndef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING

kvmhv_switch_to_host:
	/* Secondary threads wait for primary to do partition switch */
43:	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r4,VCORE_KVM(r5)	/* pointer to struct kvm */
	lbz	r3,HSTATE_PTID(r13)
13:	lbz	r3,VCORE_IN_GUEST(r5)
	/* Primary thread waits for all the secondaries to exit guest */
15:	lwz	r3,VCORE_ENTRY_EXIT(r5)

	/* Primary thread switches back to host partition */
	ld	r6,KVM_HOST_SDR1(r4)
	lwz	r7,KVM_HOST_LPID(r4)
	li	r8,LPID_RSVD		/* switch to reserved LPID */
	mtspr	SPRN_SDR1,r6		/* switch to partition page table */

	/* DPDES is shared between threads */
	mfspr	r7, SPRN_DPDES
	std	r7, VCORE_DPDES(r5)
	/* clear DPDES so we don't get guest doorbells in the host */
	mtspr	SPRN_DPDES, r8
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

	/* Subtract timebase offset from timebase */
	ld	r8,VCORE_TB_OFFSET(r5)
	mftb	r6			/* current guest timebase */
	mtspr	SPRN_TBU40,r8		/* update upper 40 bits */
	mftb	r7			/* check if lower 24 bits overflowed */
	addis	r8,r8,0x100		/* if so, increment upper 40 bits */
17:	ld	r0, VCORE_PCR(r5)

	/* Signal secondary CPUs to continue */
	stb	r0,VCORE_IN_GUEST(r5)
	lis	r8,0x7fff		/* MAX_INT@h */

16:	ld	r8,KVM_HOST_LPCR(r4)

	/* load host SLB entries */
	ld	r8,PACA_SLBSHADOWPTR(r13)

	.rept	SLB_NUM_BOLTED
	li	r3, SLBSHADOW_SAVEAREA
	andis.	r7,r5,SLB_ESID_V@h
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	/* Finish timing, if we have a vcpu */
	ld	r4, HSTATE_KVM_VCPU(r13)
	bl	kvmhv_accumulate_time

	/* Unset guest mode */
	li	r0, KVM_GUEST_MODE_NONE
	stb	r0, HSTATE_IN_GUEST(r13)

	ld	r0, 112+PPC_LR_STKOFF(r1)
/*
 * Check whether an HDSI is an HPTE not found fault or something else.
 * If it is an HPTE not found fault that is due to the guest accessing
 * a page that they have mapped but which we have paged out, then
 * we continue on with the guest exit path.  In all other cases,
 * reflect the HDSI to the guest as a DSI.
 */
	mfspr	r6, SPRN_HDSISR
	/* HPTE not found fault or protection fault? */
	andis.	r0, r6, (DSISR_NOHPTE | DSISR_PROTFAULT)@h
	beq	1f			/* if not, send it to the guest */
	andi.	r0, r11, MSR_DR		/* data relocation enabled? */
	PPC_SLBFEE_DOT(R5, R0)		/* if so, look up SLB */
	bne	1f			/* if no SLB entry found */
4:	std	r4, VCPU_FAULT_DAR(r9)
	stw	r6, VCPU_FAULT_DSISR(r9)
	/* Search the hash table. */
	mr	r3, r9			/* vcpu pointer */
	li	r7, 1			/* data fault */
	bl	kvmppc_hpte_hv_fault
	ld	r9, HSTATE_KVM_VCPU(r13)
	ld	r11, VCPU_MSR(r9)
	li	r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
	cmpdi	r3, 0			/* retry the instruction */
	cmpdi	r3, -1			/* handle in kernel mode */
	cmpdi	r3, -2			/* MMIO emulation; need instr word */

	/* Synthesize a DSI for the guest */
	ld	r4, VCPU_FAULT_DAR(r9)
1:	mtspr	SPRN_DAR, r4
	mtspr	SPRN_DSISR, r6
	mtspr	SPRN_SRR0, r10
	mtspr	SPRN_SRR1, r11
	li	r10, BOOK3S_INTERRUPT_DATA_STORAGE
	bl	kvmppc_msr_interrupt
fast_interrupt_c_return:
6:	ld	r7, VCPU_CTR(r9)
	lwz	r8, VCPU_XER(r9)
3:	ld	r5, VCPU_KVM(r9)	/* not relocated, use VRMA */
	ld	r5, KVM_VRMA_SLB_V(r5)

	/* If this is for emulated MMIO, load the instruction word */
2:	li	r8, KVM_INST_FETCH_FAILED	/* In case lwz faults */

	/*
	 * Set guest mode to 'jump over instruction' so if lwz faults
	 * we'll just continue at the next IP.
	 */
	li	r0, KVM_GUEST_MODE_SKIP
	stb	r0, HSTATE_IN_GUEST(r13)

	/* Do the access with MSR:DR enabled */
	ori	r4, r3, MSR_DR		/* Enable paging for data */

	/* Store the result */
	stw	r8, VCPU_LAST_INST(r9)

	/* Unset guest mode. */
	li	r0, KVM_GUEST_MODE_HOST_HV
	stb	r0, HSTATE_IN_GUEST(r13)
/*
 * Similarly for an HISI, reflect it to the guest as an ISI unless
 * it is an HPTE not found fault for a page that we have paged out.
 */
	andis.	r0, r11, SRR1_ISI_NOPT@h
	andi.	r0, r11, MSR_IR		/* instruction relocation enabled? */
	PPC_SLBFEE_DOT(R5, R0)		/* if so, look up SLB */
	bne	1f			/* if no SLB entry found */

	/* Search the hash table. */
	mr	r3, r9			/* vcpu pointer */
	li	r7, 0			/* instruction fault */
	bl	kvmppc_hpte_hv_fault
	ld	r9, HSTATE_KVM_VCPU(r13)
	ld	r11, VCPU_MSR(r9)
	li	r12, BOOK3S_INTERRUPT_H_INST_STORAGE
	cmpdi	r3, 0			/* retry the instruction */
	beq	fast_interrupt_c_return
	cmpdi	r3, -1			/* handle in kernel mode */

	/* Synthesize an ISI for the guest */
1:	mtspr	SPRN_SRR0, r10
	mtspr	SPRN_SRR1, r11
	li	r10, BOOK3S_INTERRUPT_INST_STORAGE
	bl	kvmppc_msr_interrupt
	b	fast_interrupt_c_return

3:	ld	r6, VCPU_KVM(r9)	/* not relocated, use VRMA */
	ld	r5, KVM_VRMA_SLB_V(r6)
/*
 * Try to handle an hcall in real mode.
 * Returns to the guest if we handle it, or continues on up to
 * the kernel if we can't (i.e. if we don't have a handler for
 * it, or if the handler returns H_TOO_HARD).
 *
 * r5 - r8 contain hcall args,
 * r9 = vcpu, r10 = pc, r11 = msr, r12 = trap, r13 = paca
 */
hcall_try_real_mode:
	ld	r3,VCPU_GPR(R3)(r9)
	/* sc 1 from userspace - reflect to guest syscall */
	bne	sc_1_fast_return
	cmpldi	r3,hcall_real_table_end - hcall_real_table

	/* See if this hcall is enabled for in-kernel handling */
	srdi	r0, r3, 8		/* r0 = (r3 / 4) >> 6 */
	sldi	r0, r0, 3		/* index into kvm->arch.enabled_hcalls[] */
	ld	r0, KVM_ENABLED_HCALLS(r4)
	rlwinm	r4, r3, 32-2, 0x3f	/* r4 = (r3 / 4) & 0x3f */
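	/*
	 * hcall numbers are multiples of 4, so hcall / 4 is used as the
	 * bit index into the enabled_hcalls bitmap: bits 6 and up pick
	 * the doubleword, the low 6 bits pick the bit within it.  For
	 * example, H_CEDE (0xe0) maps to bit 56 of doubleword 0.
	 */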
	/* Get pointer to handler, if any, and call it */
	LOAD_REG_ADDR(r4, hcall_real_table)
	mr	r3,r9		/* get vcpu pointer */
	ld	r4,VCPU_GPR(R4)(r9)
	beq	hcall_real_fallback
	ld	r4,HSTATE_KVM_VCPU(r13)
	std	r3,VCPU_GPR(R3)(r4)
	li	r10, BOOK3S_INTERRUPT_SYSCALL
	bl	kvmppc_msr_interrupt

	/*
	 * We've attempted a real mode hcall, but it's punted it back
	 * to userspace.  We need to restore some clobbered volatiles
	 * before resuming the pass-it-to-qemu path.
	 */
hcall_real_fallback:
	li	r12,BOOK3S_INTERRUPT_SYSCALL
	ld	r9, HSTATE_KVM_VCPU(r13)
	.globl	hcall_real_table
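/*
 * Each table entry is a 32-bit offset of the handler from
 * hcall_real_table, indexed by hcall number divided by 4; a zero
 * entry means the hcall is not handled in real mode.
 */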
hcall_real_table:
	.long	0		/* 0 - unused */
	.long	DOTSYM(kvmppc_h_remove) - hcall_real_table
	.long	DOTSYM(kvmppc_h_enter) - hcall_real_table
	.long	DOTSYM(kvmppc_h_read) - hcall_real_table
	.long	0		/* 0x10 - H_CLEAR_MOD */
	.long	0		/* 0x14 - H_CLEAR_REF */
	.long	DOTSYM(kvmppc_h_protect) - hcall_real_table
	.long	DOTSYM(kvmppc_h_get_tce) - hcall_real_table
	.long	DOTSYM(kvmppc_h_put_tce) - hcall_real_table
	.long	0		/* 0x24 - H_SET_SPRG0 */
	.long	DOTSYM(kvmppc_h_set_dabr) - hcall_real_table
#ifdef CONFIG_KVM_XICS
	.long	DOTSYM(kvmppc_rm_h_eoi) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_cppr) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_ipi) - hcall_real_table
	.long	0		/* 0x70 - H_IPOLL */
	.long	DOTSYM(kvmppc_rm_h_xirr) - hcall_real_table
#else
	.long	0		/* 0x64 - H_EOI */
	.long	0		/* 0x68 - H_CPPR */
	.long	0		/* 0x6c - H_IPI */
	.long	0		/* 0x70 - H_IPOLL */
	.long	0		/* 0x74 - H_XIRR */
#endif
	.long	DOTSYM(kvmppc_h_cede) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_confer) - hcall_real_table
	.long	DOTSYM(kvmppc_h_bulk_remove) - hcall_real_table
	.long	DOTSYM(kvmppc_h_set_xdabr) - hcall_real_table
	.long	DOTSYM(kvmppc_h_random) - hcall_real_table
	.globl	hcall_real_table_end
hcall_real_table_end:
_GLOBAL(kvmppc_h_set_xdabr)
	andi.	r0, r5, DABRX_USER | DABRX_KERNEL
	li	r0, DABRX_USER | DABRX_KERNEL | DABRX_BTI
6:	li	r3, H_PARAMETER

_GLOBAL(kvmppc_h_set_dabr)
	li	r5, DABRX_USER | DABRX_KERNEL
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	std	r4,VCPU_DABR(r3)
	stw	r5, VCPU_DABRX(r3)
	mtspr	SPRN_DABRX, r5
	/* Work around P7 bug where DABR can get corrupted on mtspr */
1:	mtspr	SPRN_DABR,r4
	/* Emulate H_SET_DABR/X on P8 for the sake of compat mode guests */
2:	rlwimi	r5, r4, 5, DAWRX_DR | DAWRX_DW
	rlwimi	r5, r4, 1, DAWRX_WT
	std	r4, VCPU_DAWR(r3)
	std	r5, VCPU_DAWRX(r3)
	mtspr	SPRN_DAWRX, r5
_GLOBAL(kvmppc_h_cede)		/* r3 = vcpu pointer, r11 = msr, r13 = paca */
	std	r11,VCPU_MSR(r3)
	stb	r0,VCPU_CEDED(r3)
	sync			/* order setting ceded vs. testing prodded */
	lbz	r5,VCPU_PRODDED(r3)
	bne	kvm_cede_prodded
	li	r0,0		/* set trap to 0 to say hcall is handled */
	stw	r0,VCPU_TRAP(r3)
	std	r0,VCPU_GPR(R3)(r3)
	/*
	 * Set our bit in the bitmask of napping threads unless all the
	 * other threads are already napping, in which case we send this
	 * up to the host.
	 */
	ld	r5,HSTATE_KVM_VCORE(r13)
	lbz	r6,HSTATE_PTID(r13)
	lwz	r8,VCORE_ENTRY_EXIT(r5)
	addi	r6,r5,VCORE_NAPPING_THREADS
	/* order napping_threads update vs testing entry_exit_count */
	stb	r0,HSTATE_NAPPING(r13)
	lwz	r7,VCORE_ENTRY_EXIT(r5)
	bge	33f		/* another thread already exiting */

	/*
	 * Although not specifically required by the architecture, POWER7
	 * preserves the following registers in nap mode, even if an SMT mode
	 * switch occurs: SLB entries, PURR, SPURR, AMOR, UAMOR, AMR, SPRG0-3,
	 * DAR, DSISR, DABR, DABRX, DSCR, PMCx, MMCRx, SIAR, SDAR.
	 */
	/* Save non-volatile GPRs */
	std	r14, VCPU_GPR(R14)(r3)
	std	r15, VCPU_GPR(R15)(r3)
	std	r16, VCPU_GPR(R16)(r3)
	std	r17, VCPU_GPR(R17)(r3)
	std	r18, VCPU_GPR(R18)(r3)
	std	r19, VCPU_GPR(R19)(r3)
	std	r20, VCPU_GPR(R20)(r3)
	std	r21, VCPU_GPR(R21)(r3)
	std	r22, VCPU_GPR(R22)(r3)
	std	r23, VCPU_GPR(R23)(r3)
	std	r24, VCPU_GPR(R24)(r3)
	std	r25, VCPU_GPR(R25)(r3)
	std	r26, VCPU_GPR(R26)(r3)
	std	r27, VCPU_GPR(R27)(r3)
	std	r28, VCPU_GPR(R28)(r3)
	std	r29, VCPU_GPR(R29)(r3)
	std	r30, VCPU_GPR(R30)(r3)
	std	r31, VCPU_GPR(R31)(r3)
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	ld	r4, HSTATE_KVM_VCPU(r13)
	addi	r3, r4, VCPU_TB_CEDE
	bl	kvmhv_accumulate_time

	/*
	 * Take a nap until a decrementer or external or doorbell interrupt
	 * occurs, with PECE1, PECE0 and PECEDP set in LPCR.  Also clear the
	 * runlatch bit before napping.
	 */
	mfspr	r0, SPRN_CTRLF
	mtspr	SPRN_CTRLT, r0

	stb	r0,HSTATE_HWTHREAD_REQ(r13)
	ori	r5,r5,LPCR_PECE0 | LPCR_PECE1
	oris	r5,r5,LPCR_PECEDP@h
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	std	r0, HSTATE_SCRATCH0(r13)
	ld	r0, HSTATE_SCRATCH0(r13)

	/* get vcpu pointer */
	ld	r4, HSTATE_KVM_VCPU(r13)

	/* Woken by external or decrementer interrupt */
	ld	r1, HSTATE_HOST_R1(r13)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r4, VCPU_TB_RMINTR
	bl	kvmhv_accumulate_time
	/* load up FP state */

	ld	r14, VCPU_GPR(R14)(r4)
	ld	r15, VCPU_GPR(R15)(r4)
	ld	r16, VCPU_GPR(R16)(r4)
	ld	r17, VCPU_GPR(R17)(r4)
	ld	r18, VCPU_GPR(R18)(r4)
	ld	r19, VCPU_GPR(R19)(r4)
	ld	r20, VCPU_GPR(R20)(r4)
	ld	r21, VCPU_GPR(R21)(r4)
	ld	r22, VCPU_GPR(R22)(r4)
	ld	r23, VCPU_GPR(R23)(r4)
	ld	r24, VCPU_GPR(R24)(r4)
	ld	r25, VCPU_GPR(R25)(r4)
	ld	r26, VCPU_GPR(R26)(r4)
	ld	r27, VCPU_GPR(R27)(r4)
	ld	r28, VCPU_GPR(R28)(r4)
	ld	r29, VCPU_GPR(R29)(r4)
	ld	r30, VCPU_GPR(R30)(r4)
	ld	r31, VCPU_GPR(R31)(r4)
	/* Check the wake reason in SRR1 to see why we got here */
	bl	kvmppc_check_wake_reason

	/* clear our bit in vcore->napping_threads */
34:	ld	r5,HSTATE_KVM_VCORE(r13)
	lbz	r7,HSTATE_PTID(r13)
	addi	r6,r5,VCORE_NAPPING_THREADS
	stb	r0,HSTATE_NAPPING(r13)

	/* See if the wake reason means we need to exit */
	stw	r12, VCPU_TRAP(r4)

	/* see if any other thread is already exiting */
	lwz	r0,VCORE_ENTRY_EXIT(r5)
	b	kvmppc_cede_reentry	/* if not go back to guest */
	/* cede when already previously prodded case */
kvm_cede_prodded:
	stb	r0,VCPU_PRODDED(r3)
	sync			/* order testing prodded vs. clearing ceded */
	stb	r0,VCPU_CEDED(r3)

	/* we've ceded but we want to give control to the host */
kvm_cede_exit:
	b	hcall_real_fallback
	/* Try to handle a machine check in real mode */
machine_check_realmode:
	mr	r3, r9		/* get vcpu pointer */
	bl	kvmppc_realmode_machine_check
	cmpdi	r3, 0		/* Did we handle MCE ? */
	ld	r9, HSTATE_KVM_VCPU(r13)
	li	r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	/*
	 * Deliver unhandled/fatal (e.g. UE) MCE errors to the guest
	 * through a machine check interrupt (set HSRR0 to 0x200).  For
	 * handled (non-fatal) errors, just go back to guest execution
	 * with the current HSRR0 instead of exiting the guest.  This
	 * approach injects a machine check into the guest for fatal
	 * errors, causing the guest to crash.
	 *
	 * The old code used to return to the host for unhandled errors,
	 * which caused the guest to hang with soft lockups and made it
	 * difficult to recover the guest instance.
	 */
	ld	r11, VCPU_MSR(r9)
	bne	2f	/* Continue guest execution. */
	/* If not, deliver a machine check.  SRR0/1 are already set */
	li	r10, BOOK3S_INTERRUPT_MACHINE_CHECK
	ld	r11, VCPU_MSR(r9)
	bl	kvmppc_msr_interrupt
2:	b	fast_interrupt_c_return
/*
 * Check the reason we woke from nap, and take appropriate action.
 * Returns:
 *	0 if nothing needs to be done
 *	1 if something happened that needs to be handled by the host
 *	-1 if there was a guest wakeup (IPI)
 *
 * Also sets r12 to the interrupt vector for any interrupt that needs
 * to be handled now by the host (0x500 for external interrupt), or zero.
 * Modifies r0, r6, r7, r8.
 */
kvmppc_check_wake_reason:
	rlwinm	r6, r6, 45-31, 0xf	/* extract wake reason field (P8) */
	rlwinm	r6, r6, 45-31, 0xe	/* P7 wake reason field is 3 bits */
ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_207S)
	cmpwi	r6, 8			/* was it an external interrupt? */
	li	r12, BOOK3S_INTERRUPT_EXTERNAL
	beq	kvmppc_read_intr	/* if so, see what it was */
	cmpwi	r6, 6			/* was it the decrementer? */
	cmpwi	r6, 5			/* privileged doorbell? */
	cmpwi	r6, 3			/* hypervisor doorbell? */
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	li	r3, 1			/* anything else, return 1 */

	/* hypervisor doorbell */
3:	li	r12, BOOK3S_INTERRUPT_H_DOORBELL
/*
 * Determine what sort of external interrupt is pending (if any).
 * Returns:
 *	0 if no interrupt is pending
 *	1 if an interrupt is pending that needs to be handled by the host
 *	-1 if there was a guest wakeup IPI (which has now been cleared)
 * Modifies r0, r6, r7, r8, returns value in r3.
 */
kvmppc_read_intr:
	/* see if a host IPI is pending */
	lbz	r0, HSTATE_HOST_IPI(r13)

	/* Now read the interrupt from the ICP */
	ld	r6, HSTATE_XICS_PHYS(r13)

	/*
	 * Save XIRR for later.  Since we get it in reverse endian on LE
	 * systems, save it byte reversed and fetch it back in host endian.
	 */
	li	r3, HSTATE_SAVED_XIRR
#ifdef __LITTLE_ENDIAN__
	lwz	r3, HSTATE_SAVED_XIRR(r13)
#endif
	rlwinm.	r3, r3, 0, 0xffffff
	beq	1f			/* if nothing pending in the ICP */
	/*
	 * We found something in the ICP...
	 *
	 * If it's not an IPI, stash it in the PACA and return to
	 * the host; we don't (yet) handle directing real external
	 * interrupts directly to the guest.
	 */
	cmpwi	r3, XICS_IPI		/* if there is, is it an IPI? */

	/* It's an IPI, clear the MFRR and EOI it */
	stbcix	r3, r6, r8		/* clear the IPI */
	stwcix	r0, r6, r7		/* EOI it */
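	/*
	 * The MFRR is cleared by writing 0xff (the least favored
	 * priority) to it; the EOI is done by writing the XIRR value
	 * we read earlier back to the XIRR register.
	 */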
	/*
	 * We need to re-check host IPI now in case it got set in the
	 * meantime.  If it's clear, we bounce the interrupt to the
	 * guest.
	 */
	lbz	r0, HSTATE_HOST_IPI(r13)

	/* OK, it's an IPI for us */

42:	/*
	 * It's not an IPI and it's for the host.  We saved a copy of
	 * XIRR in the PACA earlier; it will be picked up by the host
	 * ICP driver.
	 */

43:	/* We raced with the host, we need to resend that IPI, bummer */
	stbcix	r0, r6, r8		/* set the IPI */
/*
 * Save away FP, VMX and VSX registers.
 *
 * N.B. r30 and r31 are volatile across this function,
 * thus it is not callable from C.
 */
#ifdef CONFIG_ALTIVEC
	oris	r8,r8,MSR_VEC@h
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
	oris	r8,r8,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
	addi	r3,r31,VCPU_FPRS
#ifdef CONFIG_ALTIVEC
	addi	r3,r31,VCPU_VRS
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
	mfspr	r6,SPRN_VRSAVE
	stw	r6,VCPU_VRSAVE(r31)
/*
 * Load up FP, VMX and VSX registers
 *
 * N.B. r30 and r31 are volatile across this function,
 * thus it is not callable from C.
 */
#ifdef CONFIG_ALTIVEC
	oris	r8,r8,MSR_VEC@h
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
	oris	r8,r8,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
	addi	r3,r4,VCPU_FPRS
#ifdef CONFIG_ALTIVEC
	addi	r3,r31,VCPU_VRS
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
	lwz	r7,VCPU_VRSAVE(r31)
	mtspr	SPRN_VRSAVE,r7
/*
 * We come here if we get any exception or interrupt while we are
 * executing host real mode code while in guest MMU context.
 * For now just spin, but we should do something better.
 */
kvmppc_bad_host_intr:
	b	.
/*
 * This mimics the MSR transition on IRQ delivery.  The new guest MSR is taken
 * from VCPU_INTR_MSR and is modified based on the required TM state changes.
 *	r11 has the guest MSR value (in/out)
 *	r9 has a vcpu pointer (in)
 *	r0 is used as a scratch register
 */
kvmppc_msr_interrupt:
	rldicl	r0, r11, 64 - MSR_TS_S_LG, 62
	cmpwi	r0, 2			/* Check if we are in transactional state.. */
	ld	r11, VCPU_INTR_MSR(r9)
	/* ... if transactional, change to suspended */
1:	rldimi	r11, r0, MSR_TS_S_LG, 63 - MSR_TS_T_LG
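	/*
	 * MSR[TS] = 0b10 means transactional; an interrupt taken in
	 * transactional state must be delivered with TS set to
	 * suspended (0b01), since the interrupt itself suspends the
	 * transaction.  The rldimi above inserts the new TS value into
	 * the MSR taken from VCPU_INTR_MSR.
	 */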
/*
 * This works around a hardware bug on POWER8E processors, where
 * writing a 1 to the MMCR0[PMAO] bit doesn't generate a
 * performance monitor interrupt.  Instead, when we need to have
 * an interrupt pending, we have to arrange for a counter to overflow.
 */
	mtspr	SPRN_MMCR2, r3
	lis	r3, (MMCR0_PMXE | MMCR0_FCECE)@h
	ori	r3, r3, MMCR0_PMCjCE | MMCR0_C56RUN
	mtspr	SPRN_MMCR0, r3
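	/*
	 * PMXE enables performance monitor exceptions, FCECE freezes
	 * the counters when an enabled condition or event occurs,
	 * PMCjCE enables the overflow condition on PMC2-6, and C56RUN
	 * lets PMC5 and PMC6 count regardless of the run latch.
	 */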
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING

/*
 * Start timing an activity
 * r3 = pointer to time accumulation struct, r4 = vcpu
 */
kvmhv_start_timing:
	ld	r5, HSTATE_KVM_VCORE(r13)
	lbz	r6, VCORE_IN_GUEST(r5)
	beq	5f			/* if in guest, need to */
	ld	r6, VCORE_TB_OFFSET(r5)	/* subtract timebase offset */
	std	r3, VCPU_CUR_ACTIVITY(r4)
	std	r5, VCPU_ACTIVITY_START(r4)
/*
 * Accumulate time to one activity and start another.
 * r3 = pointer to new time accumulation struct, r4 = vcpu
 */
kvmhv_accumulate_time:
	ld	r5, HSTATE_KVM_VCORE(r13)
	lbz	r8, VCORE_IN_GUEST(r5)
	beq	4f			/* if in guest, need to */
	ld	r8, VCORE_TB_OFFSET(r5)	/* subtract timebase offset */
4:	ld	r5, VCPU_CUR_ACTIVITY(r4)
	ld	r6, VCPU_ACTIVITY_START(r4)
	std	r3, VCPU_CUR_ACTIVITY(r4)
	std	r7, VCPU_ACTIVITY_START(r4)
	ld	r8, TAS_SEQCOUNT(r5)
	std	r8, TAS_SEQCOUNT(r5)
	ld	r7, TAS_TOTAL(r5)
	std	r7, TAS_TOTAL(r5)
3:	std	r3, TAS_MIN(r5)
	std	r8, TAS_SEQCOUNT(r5)
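	/*
	 * The sequence count is incremented before and after the
	 * statistics are updated, so it is odd while an update is in
	 * progress; a reader that sees the same even count before and
	 * after reading the statistics knows it got a consistent
	 * snapshot.
	 */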