powerpc: Don't corrupt user registers on 32-bit
author Paul Mackerras <paulus@samba.org>
Wed, 23 Oct 2013 08:40:02 +0000 (09:40 +0100)
committer Benjamin Herrenschmidt <benh@kernel.crashing.org>
Wed, 23 Oct 2013 11:34:19 +0000 (22:34 +1100)
Commit de79f7b9f6 ("powerpc: Put FP/VSX and VR state into structures")
modified load_up_fpu() and load_up_altivec() in such a way that they
now use r7 and r8.  Unfortunately, the callers of these functions on
32-bit machines then return to userspace via fast_exception_return,
which doesn't restore all of the volatile GPRs, but only r1, r3 - r6
and r9 - r12.  This was causing userspace segfaults and other
userspace misbehaviour on 32-bit machines.

This fixes the problem by changing the register usage of load_up_fpu()
and load_up_altivec() to avoid using r7 and r8 and instead use r6 and
r10.  This also adds comments to those functions saying which registers
may be used.
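
To make the constraint concrete, here is a minimal user-space C sketch
(not kernel code); the register sets are taken from the text above, and
restored_by_fast_exception_return() is a hypothetical helper used purely
for illustration:

#include <stdbool.h>
#include <stdio.h>

/*
 * GPRs that fast_exception_return restores on 32-bit, per the commit
 * text: r1, r3 - r6 and r9 - r12.  Any other volatile GPR scratched by
 * load_up_fpu()/load_up_altivec() reaches userspace with a stale value.
 */
static bool restored_by_fast_exception_return(int gpr)
{
	return gpr == 1 || (gpr >= 3 && gpr <= 6) || (gpr >= 9 && gpr <= 12);
}

static void report(const char *label, const int *gprs, int n)
{
	for (int i = 0; i < n; i++)
		printf("%s r%d: %s\n", label, gprs[i],
		       restored_by_fast_exception_return(gprs[i])
		       ? "restored on exit, safe to scratch"
		       : "NOT restored, user value corrupted");
}

int main(void)
{
	const int before_fix[] = { 7, 8 };	/* scratch GPRs after commit de79f7b9f6 */
	const int after_fix[]  = { 6, 10 };	/* scratch GPRs with this fix */

	report("before fix,", before_fix, 2);
	report("after fix, ", after_fix, 2);
	return 0;
}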

Signed-off-by: Paul Mackerras <paulus@samba.org>
Tested-by: Scott Wood <scottwood@freescale.com> (on e500mc, so no altivec)
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
arch/powerpc/kernel/fpu.S
arch/powerpc/kernel/vector.S

index 4dca05e91e953e85b495722ca1a44633ca917988..f7f5b8bed68f59500964f75ff9f7d9b2aa211d0c 100644 (file)
@@ -106,6 +106,8 @@ _GLOBAL(store_fp_state)
  * and save its floating-point registers in its thread_struct.
  * Load up this task's FP registers from its thread_struct,
  * enable the FPU for the current task and return to the task.
+ * Note that on 32-bit this can only use registers that will be
+ * restored by fast_exception_return, i.e. r3 - r6, r10 and r11.
  */
 _GLOBAL(load_up_fpu)
        mfmsr   r5
@@ -131,10 +133,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
        beq     1f
        toreal(r4)
        addi    r4,r4,THREAD            /* want last_task_used_math->thread */
-       addi    r8,r4,THREAD_FPSTATE
-       SAVE_32FPVSRS(0, R5, R8)
+       addi    r10,r4,THREAD_FPSTATE
+       SAVE_32FPVSRS(0, R5, R10)
        mffs    fr0
-       stfd    fr0,FPSTATE_FPSCR(r8)
+       stfd    fr0,FPSTATE_FPSCR(r10)
        PPC_LL  r5,PT_REGS(r4)
        toreal(r5)
        PPC_LL  r4,_MSR-STACK_FRAME_OVERHEAD(r5)
@@ -157,10 +159,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
        or      r12,r12,r4
        std     r12,_MSR(r1)
 #endif
-       addi    r7,r5,THREAD_FPSTATE
-       lfd     fr0,FPSTATE_FPSCR(r7)
+       addi    r10,r5,THREAD_FPSTATE
+       lfd     fr0,FPSTATE_FPSCR(r10)
        MTFSF_L(fr0)
-       REST_32FPVSRS(0, R4, R7)
+       REST_32FPVSRS(0, R4, R10)
 #ifndef CONFIG_SMP
        subi    r4,r5,THREAD
        fromreal(r4)
index eacda4eea2d70af507771df52dfa37bd20c73f55..0458a9aaba9d135867d6a4263404a8264148a7cd 100644 (file)
@@ -64,6 +64,9 @@ _GLOBAL(store_vr_state)
  * Enables the VMX for use in the kernel on return.
  * On SMP we know the VMX is free, since we give it up every
  * switch (ie, no lazy save of the vector registers).
+ *
+ * Note that on 32-bit this can only use registers that will be
+ * restored by fast_exception_return, i.e. r3 - r6, r10 and r11.
  */
 _GLOBAL(load_up_altivec)
        mfmsr   r5                      /* grab the current MSR */
@@ -89,11 +92,11 @@ _GLOBAL(load_up_altivec)
        /* Save VMX state to last_task_used_altivec's THREAD struct */
        toreal(r4)
        addi    r4,r4,THREAD
-       addi    r7,r4,THREAD_VRSTATE
-       SAVE_32VRS(0,r5,r7)
+       addi    r6,r4,THREAD_VRSTATE
+       SAVE_32VRS(0,r5,r6)
        mfvscr  vr0
        li      r10,VRSTATE_VSCR
-       stvx    vr0,r10,r7
+       stvx    vr0,r10,r6
        /* Disable VMX for last_task_used_altivec */
        PPC_LL  r5,PT_REGS(r4)
        toreal(r5)
@@ -125,13 +128,13 @@ _GLOBAL(load_up_altivec)
        oris    r12,r12,MSR_VEC@h
        std     r12,_MSR(r1)
 #endif
-       addi    r7,r5,THREAD_VRSTATE
+       addi    r6,r5,THREAD_VRSTATE
        li      r4,1
        li      r10,VRSTATE_VSCR
        stw     r4,THREAD_USED_VR(r5)
-       lvx     vr0,r10,r7
+       lvx     vr0,r10,r6
        mtvscr  vr0
-       REST_32VRS(0,r4,r7)
+       REST_32VRS(0,r4,r6)
 #ifndef CONFIG_SMP
        /* Update last_task_used_altivec to 'current' */
        subi    r4,r5,THREAD            /* Back to 'current' */