/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Generation of main entry point for the guest, exception handling.
 *
 * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 *
 * Copyright (C) 2016 Imagination Technologies Ltd.
 */

#include <linux/kvm_host.h>
#include <asm/msa.h>
#include <asm/setup.h>
#include <asm/uasm.h>
/* Register names */
#define ZERO		0
#define AT		1
#define V0		2
#define V1		3
#define A0		4
#define A1		5

#if _MIPS_SIM == _MIPS_SIM_ABI32
#define T0		8
#define T1		9
#define T2		10
#define T3		11
#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
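
/*
 * t0-t3 map to different GPRs in the blocks above and below because n32/n64
 * repurpose GPRs 8-11 as the argument registers a4-a7, which moves t0-t3 up
 * to GPRs 12-15.
 */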
#if _MIPS_SIM == _MIPS_SIM_ABI64 || _MIPS_SIM == _MIPS_SIM_NABI32
#define T0		12
#define T1		13
#define T2		14
#define T3		15
#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 || _MIPS_SIM == _MIPS_SIM_NABI32 */

#define S0		16
#define S1		17
#define T9		25
#define K0		26
#define K1		27
#define GP		28
#define SP		29
#define RA		31
/* Some CP0 registers */
#define C0_HWRENA	7, 0
#define C0_BADVADDR	8, 0
#define C0_ENTRYHI	10, 0
#define C0_STATUS	12, 0
#define C0_CAUSE	13, 0
#define C0_EPC		14, 0
#define C0_EBASE	15, 1
#define C0_CONFIG3	16, 3
#define C0_CONFIG5	16, 5
#define C0_DDATA_LO	28, 3
#define C0_ERROREPC	30, 0

#define CALLFRAME_SIZ   32
enum label_id {
	label_fpu_1 = 1,
	label_msa_1,
	label_return_to_host,
	label_kernel_asid,
};

UASM_L_LA(_fpu_1)
UASM_L_LA(_msa_1)
UASM_L_LA(_return_to_host)
UASM_L_LA(_kernel_asid)
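
/*
 * The uasm label/reloc pairs above drive branch fixup: a uasm_il_*() branch
 * records a relocation against a label_id, a uasm_l_*() call records where
 * that label ended up, and uasm_resolve_relocs() patches the branch offsets
 * once both are known. A minimal sketch of the pattern used below:
 *
 *	uasm_il_bnez(&p, &r, T0, label_kernel_asid);	// forward branch
 *	...						// fallthrough path
 *	uasm_l_kernel_asid(&l, p);			// branch target
 *	uasm_resolve_relocs(relocs, labels);		// patch offsets
 */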
static void *kvm_mips_build_enter_guest(void *addr);
static void *kvm_mips_build_ret_from_exit(void *addr);
static void *kvm_mips_build_ret_to_guest(void *addr);
static void *kvm_mips_build_ret_to_host(void *addr);
/**
 * kvm_mips_build_vcpu_run() - Assemble function to start running a guest VCPU.
 * @addr:	Address to start writing code.
 *
 * Assemble the start of the vcpu_run function to run a guest VCPU. The function
 * conforms to the following prototype:
 *
 * int vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu);
 *
 * The exit from the guest and return to the caller is handled by the code
 * generated by kvm_mips_build_ret_to_host().
 *
 * Returns:	Next address after end of written function.
 */
void *kvm_mips_build_vcpu_run(void *addr)
{
	u32 *p = addr;
	unsigned int i;

	/*
	 * A0: run
	 * A1: vcpu
	 */

	/* k0/k1 not being used in host kernel context */
	uasm_i_addiu(&p, K1, SP, -(int)sizeof(struct pt_regs));
	for (i = 16; i < 32; ++i) {
		/* Skip t8, t9, k0, k1 (regs 24-27) */
		if (i == 24)
			i = 28;
		UASM_i_SW(&p, i, offsetof(struct pt_regs, regs[i]), K1);
	}
	/* Save hi/lo */
	uasm_i_mflo(&p, V0);
	UASM_i_SW(&p, V0, offsetof(struct pt_regs, lo), K1);
	uasm_i_mfhi(&p, V1);
	UASM_i_SW(&p, V1, offsetof(struct pt_regs, hi), K1);
	/* Save host status */
	uasm_i_mfc0(&p, V0, C0_STATUS);
	UASM_i_SW(&p, V0, offsetof(struct pt_regs, cp0_status), K1);

	/* Save DDATA_LO, will be used to store pointer to vcpu */
	uasm_i_mfc0(&p, V1, C0_DDATA_LO);
	UASM_i_SW(&p, V1, offsetof(struct pt_regs, cp0_epc), K1);

	/* DDATA_LO has pointer to vcpu */
	uasm_i_mtc0(&p, A1, C0_DDATA_LO);

	/* Offset into vcpu->arch */
	uasm_i_addiu(&p, K1, A1, offsetof(struct kvm_vcpu, arch));

	/*
	 * Save the host stack to VCPU, used for exception processing
	 * when we exit from the Guest
	 */
	UASM_i_SW(&p, SP, offsetof(struct kvm_vcpu_arch, host_stack), K1);

	/* Save the kernel gp as well */
	UASM_i_SW(&p, GP, offsetof(struct kvm_vcpu_arch, host_gp), K1);
	/*
	 * Setup status register for running the guest in UM, interrupts
	 * are disabled
	 */
	UASM_i_LA(&p, K0, ST0_EXL | KSU_USER | ST0_BEV);
	uasm_i_mtc0(&p, K0, C0_STATUS);
	uasm_i_ehb(&p);

	/* load up the new EBASE */
	UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, guest_ebase), K1);
	uasm_i_mtc0(&p, K0, C0_EBASE);

	/*
	 * Now that the new EBASE has been loaded, unset BEV, set
	 * interrupt mask as it was but make sure that timer interrupts
	 * are enabled
	 */
	uasm_i_addiu(&p, K0, ZERO, ST0_EXL | KSU_USER | ST0_IE);
	uasm_i_andi(&p, V0, V0, ST0_IM);
	uasm_i_or(&p, K0, K0, V0);
	uasm_i_mtc0(&p, K0, C0_STATUS);
	uasm_i_ehb(&p);

	p = kvm_mips_build_enter_guest(p);

	return p;
}
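
/*
 * Sketch of how the generated code is reached (the actual call site lives in
 * the KVM MIPS core, not in this file): the code assembled above is written
 * into the vcpu's guest exception base area and entered via a function
 * pointer with the documented prototype, roughly:
 *
 *	int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
 *
 *	vcpu_run = vcpu->arch.vcpu_run;
 *	ret = vcpu_run(vcpu->run, vcpu);
 */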
/**
 * kvm_mips_build_enter_guest() - Assemble code to resume guest execution.
 * @addr:	Address to start writing code.
 *
 * Assemble the code to resume guest execution. This code is common between the
 * initial entry into the guest from the host, and returning from the exit
 * handler back to the guest.
 *
 * Returns:	Next address after end of written function.
 */
static void *kvm_mips_build_enter_guest(void *addr)
{
	u32 *p = addr;
	unsigned int i;
	struct uasm_label labels[2];
	struct uasm_reloc relocs[2];
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	/* Set Guest EPC */
	UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, pc), K1);
	uasm_i_mtc0(&p, T0, C0_EPC);
	/* Set the ASID for the Guest Kernel */
	UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, cop0), K1);
	UASM_i_LW(&p, T0, offsetof(struct mips_coproc, reg[MIPS_CP0_STATUS][0]),
		  T0);
	uasm_i_andi(&p, T0, T0, KSU_USER | ST0_ERL | ST0_EXL);
	uasm_i_xori(&p, T0, T0, KSU_USER);
	uasm_il_bnez(&p, &r, T0, label_kernel_asid);
	 uasm_i_addiu(&p, T1, K1,
		      offsetof(struct kvm_vcpu_arch, guest_kernel_asid));
	/* else user */
	uasm_i_addiu(&p, T1, K1,
		     offsetof(struct kvm_vcpu_arch, guest_user_asid));
	uasm_l_kernel_asid(&l, p);
	/* t1: contains the base of the ASID array, need to get the cpu id */
	/* smp_processor_id */
	UASM_i_LW(&p, T2, offsetof(struct thread_info, cpu), GP);
	/* x4 */
	uasm_i_sll(&p, T2, T2, 2);
	UASM_i_ADDU(&p, T3, T1, T2);
	UASM_i_LW(&p, K0, 0, T3);
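
	/*
	 * K0 now holds this CPU's entry of the selected ASID array
	 * (t1[cpu * 4]); it is masked down to the hardware ASID bits below
	 * before being written to EntryHi.
	 */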
#ifdef CONFIG_MIPS_ASID_BITS_VARIABLE
	/* x sizeof(struct cpuinfo_mips)/4 */
	uasm_i_addiu(&p, T3, ZERO, sizeof(struct cpuinfo_mips)/4);
	uasm_i_mul(&p, T2, T2, T3);

	UASM_i_LA_mostly(&p, AT, (long)&cpu_data[0].asid_mask);
	UASM_i_ADDU(&p, AT, AT, T2);
	UASM_i_LW(&p, T2, uasm_rel_lo((long)&cpu_data[0].asid_mask), AT);
	uasm_i_and(&p, K0, K0, T2);
#else
	uasm_i_andi(&p, K0, K0, MIPS_ENTRYHI_ASID);
#endif
	uasm_i_mtc0(&p, K0, C0_ENTRYHI);
	uasm_i_ehb(&p);
	/* Disable RDHWR access */
	uasm_i_mtc0(&p, ZERO, C0_HWRENA);

	/* load the guest context from VCPU and return */
	for (i = 1; i < 32; ++i) {
		/* Guest k0/k1 loaded later */
		if (i == K0 || i == K1)
			continue;
		UASM_i_LW(&p, i, offsetof(struct kvm_vcpu_arch, gprs[i]), K1);
	}

	/* Restore hi/lo */
	UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, hi), K1);
	uasm_i_mthi(&p, K0);

	UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, lo), K1);
	uasm_i_mtlo(&p, K0);

	/* Restore the guest's k0/k1 registers */
	UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, gprs[K0]), K1);
	UASM_i_LW(&p, K1, offsetof(struct kvm_vcpu_arch, gprs[K1]), K1);

	/* Jump to guest */
	uasm_i_eret(&p);

	uasm_resolve_relocs(relocs, labels);

	return p;
}
/**
 * kvm_mips_build_exception() - Assemble first level guest exception handler.
 * @addr:	Address to start writing code.
 *
 * Assemble exception vector code for guest execution. The generated vector will
 * jump to the common exception handler generated by kvm_mips_build_exit().
 *
 * Returns:	Next address after end of written function.
 */
void *kvm_mips_build_exception(void *addr)
{
	u32 *p = addr;

	/* Save guest k0 */
	uasm_i_mtc0(&p, K0, C0_ERROREPC);
	uasm_i_ehb(&p);

	/* Get EBASE */
	uasm_i_mfc0(&p, K0, C0_EBASE);
	/* Get rid of CPUNum */
	uasm_i_srl(&p, K0, K0, 10);
	uasm_i_sll(&p, K0, K0, 10);
	/* Save k1 @ offset 0x3000 */
	UASM_i_SW(&p, K1, 0x3000, K0);

	/* Exception handler is installed @ offset 0x2000 */
	uasm_i_addiu(&p, K0, K0, 0x2000);
	/* Jump to the function */
	uasm_i_jr(&p, K0);
	uasm_i_nop(&p);

	return p;
}
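
/*
 * Rough layout of the guest exception base area assumed by the vector code
 * above (the offsets are the ones hard-coded above; the area itself is
 * allocated by the KVM MIPS core when the vcpu is set up):
 *
 *	base + 0x0000:	exception vectors built by kvm_mips_build_exception()
 *	base + 0x2000:	common exit handler built by kvm_mips_build_exit()
 *	base + 0x3000:	scratch slot used to spill guest k1
 */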
/**
 * kvm_mips_build_exit() - Assemble common guest exit handler.
 * @addr:	Address to start writing code.
 *
 * Assemble the generic guest exit handling code. This is called by the
 * exception vectors (generated by kvm_mips_build_exception()), and calls
 * kvm_mips_handle_exit(), then either resumes the guest or returns to the host
 * depending on the return value.
 *
 * Returns:	Next address after end of written function.
 */
void *kvm_mips_build_exit(void *addr)
{
	u32 *p = addr;
	unsigned int i;
	struct uasm_label labels[3];
	struct uasm_reloc relocs[3];
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	/*
	 * Generic Guest exception handler. We end up here when the guest
	 * does something that causes a trap to kernel mode.
	 */

	/* Get the VCPU pointer from DDATA_LO */
	uasm_i_mfc0(&p, K1, C0_DDATA_LO);
	uasm_i_addiu(&p, K1, K1, offsetof(struct kvm_vcpu, arch));
	/* Start saving Guest context to VCPU */
	for (i = 0; i < 32; ++i) {
		/* Guest k0/k1 saved later */
		if (i == K0 || i == K1)
			continue;
		UASM_i_SW(&p, i, offsetof(struct kvm_vcpu_arch, gprs[i]), K1);
	}

	/* We need to save hi/lo and restore them on the way out */
	uasm_i_mfhi(&p, T0);
	UASM_i_SW(&p, T0, offsetof(struct kvm_vcpu_arch, hi), K1);

	uasm_i_mflo(&p, T0);
	UASM_i_SW(&p, T0, offsetof(struct kvm_vcpu_arch, lo), K1);

	/* Finally save guest k0 to VCPU */
	uasm_i_mfc0(&p, T0, C0_ERROREPC);
	UASM_i_SW(&p, T0, offsetof(struct kvm_vcpu_arch, gprs[K0]), K1);

	/* Get GUEST k1 and save it in VCPU */
	uasm_i_addiu(&p, T1, ZERO, ~0x2ff);
	uasm_i_mfc0(&p, T0, C0_EBASE);
	uasm_i_and(&p, T0, T0, T1);
	UASM_i_LW(&p, T0, 0x3000, T0);
	UASM_i_SW(&p, T0, offsetof(struct kvm_vcpu_arch, gprs[K1]), K1);
	/* Now that context has been saved, we can use other registers */

	/* Restore vcpu */
	uasm_i_mfc0(&p, A1, C0_DDATA_LO);
	uasm_i_move(&p, S1, A1);

	/* Restore run (vcpu->run) */
	UASM_i_LW(&p, A0, offsetof(struct kvm_vcpu, run), A1);
	/* Save pointer to run in s0, will be saved by the compiler */
	uasm_i_move(&p, S0, A0);

	/*
	 * Save Host level EPC, BadVaddr and Cause to VCPU, useful to process
	 * the exception
	 */
	uasm_i_mfc0(&p, K0, C0_EPC);
	UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, pc), K1);

	uasm_i_mfc0(&p, K0, C0_BADVADDR);
	UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, host_cp0_badvaddr),
		  K1);

	uasm_i_mfc0(&p, K0, C0_CAUSE);
	uasm_i_sw(&p, K0, offsetof(struct kvm_vcpu_arch, host_cp0_cause), K1);
	/* Now restore the host state just enough to run the handlers */

	/* Switch EBASE to the one used by Linux */
	/* load up the host EBASE */
	uasm_i_mfc0(&p, V0, C0_STATUS);

	uasm_i_lui(&p, AT, ST0_BEV >> 16);
	uasm_i_or(&p, K0, V0, AT);

	uasm_i_mtc0(&p, K0, C0_STATUS);
	uasm_i_ehb(&p);

	UASM_i_LA_mostly(&p, K0, (long)&ebase);
	UASM_i_LW(&p, K0, uasm_rel_lo((long)&ebase), K0);
	uasm_i_mtc0(&p, K0, C0_EBASE);
	if (raw_cpu_has_fpu) {
		/*
		 * If FPU is enabled, save FCR31 and clear it so that later
		 * ctc1's don't trigger FPE for pending exceptions.
		 */
		uasm_i_lui(&p, AT, ST0_CU1 >> 16);
		uasm_i_and(&p, V1, V0, AT);
		uasm_il_beqz(&p, &r, V1, label_fpu_1);
		 uasm_i_nop(&p);
		uasm_i_cfc1(&p, T0, 31);
		uasm_i_sw(&p, T0, offsetof(struct kvm_vcpu_arch, fpu.fcr31),
			  K1);
		uasm_i_ctc1(&p, ZERO, 31);
		uasm_l_fpu_1(&l, p);
	}
#ifdef CONFIG_CPU_HAS_MSA
	/*
	 * If MSA is enabled, save MSACSR and clear it so that later
	 * instructions don't trigger MSAFPE for pending exceptions.
	 */
	uasm_i_mfc0(&p, T0, C0_CONFIG3);
	uasm_i_ext(&p, T0, T0, 28, 1); /* MIPS_CONF3_MSAP */
	uasm_il_beqz(&p, &r, T0, label_msa_1);
	 uasm_i_nop(&p);
	uasm_i_mfc0(&p, T0, C0_CONFIG5);
	uasm_i_ext(&p, T0, T0, 27, 1); /* MIPS_CONF5_MSAEN */
	uasm_il_beqz(&p, &r, T0, label_msa_1);
	 uasm_i_nop(&p);
	uasm_i_cfcmsa(&p, T0, MSA_CSR);
	uasm_i_sw(&p, T0, offsetof(struct kvm_vcpu_arch, fpu.msacsr),
		  K1);
	uasm_i_ctcmsa(&p, MSA_CSR, ZERO);
	uasm_l_msa_1(&l, p);
#endif
	/* Now that the new EBASE has been loaded, unset BEV and KSU_USER */
	uasm_i_addiu(&p, AT, ZERO, ~(ST0_EXL | KSU_USER | ST0_IE));
	uasm_i_and(&p, V0, V0, AT);
	uasm_i_lui(&p, AT, ST0_CU0 >> 16);
	uasm_i_or(&p, V0, V0, AT);
	uasm_i_mtc0(&p, V0, C0_STATUS);
	uasm_i_ehb(&p);
	/* Load up host GP */
	UASM_i_LW(&p, GP, offsetof(struct kvm_vcpu_arch, host_gp), K1);

	/* Need a stack before we can jump to "C" */
	UASM_i_LW(&p, SP, offsetof(struct kvm_vcpu_arch, host_stack), K1);

	/* Saved host state */
	uasm_i_addiu(&p, SP, SP, -(int)sizeof(struct pt_regs));

	/*
	 * XXXKYMA do we need to load the host ASID, maybe not because the
	 * kernel entries are marked GLOBAL, need to verify
	 */
	/* Restore host DDATA_LO */
	UASM_i_LW(&p, K0, offsetof(struct pt_regs, cp0_epc), SP);
	uasm_i_mtc0(&p, K0, C0_DDATA_LO);

	/* Restore RDHWR access */
	UASM_i_LA_mostly(&p, K0, (long)&hwrena);
	uasm_i_lw(&p, K0, uasm_rel_lo((long)&hwrena), K0);
	uasm_i_mtc0(&p, K0, C0_HWRENA);
	/* Jump to handler */
	/*
	 * XXXKYMA: not sure if this is safe, how large is the stack??
	 * Now jump to the kvm_mips_handle_exit() to see if we can deal
	 * with this in the kernel
	 */
	UASM_i_LA(&p, T9, (unsigned long)kvm_mips_handle_exit);
	uasm_i_jalr(&p, RA, T9);
	 uasm_i_addiu(&p, SP, SP, -CALLFRAME_SIZ);
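
	/*
	 * Note: the addiu above sits in the jalr's delay slot, so the stack
	 * pointer drops by CALLFRAME_SIZ bytes to form the call frame before
	 * kvm_mips_handle_exit() starts executing.
	 */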
	uasm_resolve_relocs(relocs, labels);

	p = kvm_mips_build_ret_from_exit(p);

	return p;
}
/**
 * kvm_mips_build_ret_from_exit() - Assemble guest exit return handler.
 * @addr:	Address to start writing code.
 *
 * Assemble the code to handle the return from kvm_mips_handle_exit(), either
 * resuming the guest or returning to the host depending on the return value.
 *
 * Returns:	Next address after end of written function.
 */
static void *kvm_mips_build_ret_from_exit(void *addr)
{
	u32 *p = addr;
	struct uasm_label labels[2];
	struct uasm_reloc relocs[2];
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	/* Return from handler; make sure interrupts are disabled */
	uasm_i_di(&p, ZERO);
	uasm_i_ehb(&p);

	/*
	 * XXXKYMA: k0/k1 could have been blown away if we processed
	 * an exception while we were handling the exception from the
	 * guest, reload k1
	 */
	uasm_i_move(&p, K1, S1);
	uasm_i_addiu(&p, K1, K1, offsetof(struct kvm_vcpu, arch));

	/*
	 * Check return value, should tell us if we are returning to the
	 * host (handle I/O etc.) or resuming the guest
	 */
	uasm_i_andi(&p, T0, V0, RESUME_HOST);
	uasm_il_bnez(&p, &r, T0, label_return_to_host);
	 uasm_i_nop(&p);
	p = kvm_mips_build_ret_to_guest(p);

	uasm_l_return_to_host(&l, p);
	p = kvm_mips_build_ret_to_host(p);

	uasm_resolve_relocs(relocs, labels);

	return p;
}
/**
 * kvm_mips_build_ret_to_guest() - Assemble code to return to the guest.
 * @addr:	Address to start writing code.
 *
 * Assemble the code to handle return from the guest exit handler
 * (kvm_mips_handle_exit()) back to the guest.
 *
 * Returns:	Next address after end of written function.
 */
static void *kvm_mips_build_ret_to_guest(void *addr)
{
	u32 *p = addr;

	/* Put the saved pointer to vcpu (s1) back into the DDATA_LO Register */
	uasm_i_mtc0(&p, S1, C0_DDATA_LO);

	/* Load up the Guest EBASE to minimize the window where BEV is set */
	UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, guest_ebase), K1);

	/* Switch EBASE back to the one used by KVM */
	uasm_i_mfc0(&p, V1, C0_STATUS);
	uasm_i_lui(&p, AT, ST0_BEV >> 16);
	uasm_i_or(&p, K0, V1, AT);
	uasm_i_mtc0(&p, K0, C0_STATUS);
	uasm_i_ehb(&p);
	uasm_i_mtc0(&p, T0, C0_EBASE);

	/* Setup status register for running guest in UM */
	uasm_i_ori(&p, V1, V1, ST0_EXL | KSU_USER | ST0_IE);
	UASM_i_LA(&p, AT, ~(ST0_CU0 | ST0_MX));
	uasm_i_and(&p, V1, V1, AT);
	uasm_i_mtc0(&p, V1, C0_STATUS);
	uasm_i_ehb(&p);

	p = kvm_mips_build_enter_guest(p);

	return p;
}
/**
 * kvm_mips_build_ret_to_host() - Assemble code to return to the host.
 * @addr:	Address to start writing code.
 *
 * Assemble the code to handle return from the guest exit handler
 * (kvm_mips_handle_exit()) back to the host, i.e. to the caller of the vcpu_run
 * function generated by kvm_mips_build_vcpu_run().
 *
 * Returns:	Next address after end of written function.
 */
static void *kvm_mips_build_ret_to_host(void *addr)
{
	u32 *p = addr;
	unsigned int i;

	/* EBASE is already pointing to Linux */
	UASM_i_LW(&p, K1, offsetof(struct kvm_vcpu_arch, host_stack), K1);
	uasm_i_addiu(&p, K1, K1, -(int)sizeof(struct pt_regs));

	/* Restore host DDATA_LO */
	UASM_i_LW(&p, K0, offsetof(struct pt_regs, cp0_epc), K1);
	uasm_i_mtc0(&p, K0, C0_DDATA_LO);

	/*
	 * r2/v0 is the return code, shift it down by 2 (arithmetic)
	 * to recover the err code
	 */
	uasm_i_sra(&p, K0, V0, 2);
	uasm_i_move(&p, V0, K0);
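
	/*
	 * This relies on kvm_mips_handle_exit() packing any error code above
	 * the RESUME_* flags in bits 1:0, e.g. (-EINTR << 2) | RESUME_HOST,
	 * so the arithmetic shift recovers the value for vcpu_run() to
	 * return to its caller.
	 */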
	/* Load context saved on the host stack */
	for (i = 16; i < 31; ++i) {
		/* Skip t8, t9, k0, k1 (regs 24-27); ra is loaded below */
		if (i == 24)
			i = 28;
		UASM_i_LW(&p, i, offsetof(struct pt_regs, regs[i]), K1);
	}

	UASM_i_LW(&p, K0, offsetof(struct pt_regs, hi), K1);
	uasm_i_mthi(&p, K0);

	UASM_i_LW(&p, K0, offsetof(struct pt_regs, lo), K1);
	uasm_i_mtlo(&p, K0);

	/* Restore RDHWR access */
	UASM_i_LA_mostly(&p, K0, (long)&hwrena);
	uasm_i_lw(&p, K0, uasm_rel_lo((long)&hwrena), K0);
	uasm_i_mtc0(&p, K0, C0_HWRENA);

	/* Restore RA, which is the address we will return to */
	UASM_i_LW(&p, RA, offsetof(struct pt_regs, regs[RA]), K1);
	uasm_i_jr(&p, RA);
	uasm_i_nop(&p);

	return p;
}