git.kernelconcepts.de Git - karo-tx-linux.git/commitdiff
x86/fpu: Remove 'struct task_struct' usage from drop_fpu()
author: Ingo Molnar <mingo@kernel.org>
Thu, 23 Apr 2015 10:33:50 +0000 (12:33 +0200)
committer: Ingo Molnar <mingo@kernel.org>
Tue, 19 May 2015 13:47:25 +0000 (15:47 +0200)
Migrate this function to pure 'struct fpu' usage.

Reviewed-by: Borislav Petkov <bp@alien8.de>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
arch/x86/include/asm/fpu-internal.h
arch/x86/kernel/fpu/core.c
arch/x86/kernel/fpu/xsave.c
arch/x86/kernel/process.c

index 9311126571ab8c84fc420a7c1195520591dbe7f9..e8f7134f0ffbd4adb73d98f337bd68e48308572a 100644 (file)
@@ -358,14 +358,13 @@ static inline void __thread_fpu_begin(struct fpu *fpu)
        __thread_set_has_fpu(fpu);
 }
 
-static inline void drop_fpu(struct task_struct *tsk)
+static inline void drop_fpu(struct fpu *fpu)
 {
-       struct fpu *fpu = &tsk->thread.fpu;
        /*
         * Forget coprocessor state..
         */
        preempt_disable();
-       tsk->thread.fpu.counter = 0;
+       fpu->counter = 0;
 
        if (fpu->has_fpu) {
                /* Ignore delayed exceptions from user space */
@@ -394,8 +393,10 @@ static inline void restore_init_xstate(void)
  */
 static inline void fpu_reset_state(struct task_struct *tsk)
 {
+       struct fpu *fpu = &tsk->thread.fpu;
+
        if (!use_eager_fpu())
-               drop_fpu(tsk);
+               drop_fpu(fpu);
        else
                restore_init_xstate();
 }
index 9e7f9e7b2cca58539b503fedf8e15d974a35e08b..ba539fc018d749f3f9127ca911461643e8a182ce 100644 (file)
@@ -389,7 +389,7 @@ void fpu__flush_thread(struct task_struct *tsk)
 
        if (!use_eager_fpu()) {
                /* FPU state will be reallocated lazily at the first use. */
-               drop_fpu(tsk);
+               drop_fpu(fpu);
                fpstate_free(&tsk->thread.fpu);
        } else {
                if (!fpu->fpstate_active) {
index dc346e19c0df2543b8c8885928ac59e995e561d6..049dc619481de42c46c3c5205acbd12f225b4d73 100644 (file)
@@ -392,7 +392,7 @@ int __restore_xstate_sig(void __user *buf, void __user *buf_fx, int size)
                 * We will be ready to restore/save the state only after
                 * fpu->fpstate_active is again set.
                 */
-               drop_fpu(tsk);
+               drop_fpu(fpu);
 
                if (__copy_from_user(&fpu->state->xsave, buf_fx, state_size) ||
                    __copy_from_user(&env, buf, sizeof(env))) {
index bb7d4abcdad6eb5b9dd922d1ad0c06445590287b..50d503a2d8c39e3b5afb2bd57ec00a3153107502 100644 (file)
@@ -104,6 +104,7 @@ void exit_thread(void)
        struct task_struct *me = current;
        struct thread_struct *t = &me->thread;
        unsigned long *bp = t->io_bitmap_ptr;
+       struct fpu *fpu = &t->fpu;
 
        if (bp) {
                struct tss_struct *tss = &per_cpu(cpu_tss, get_cpu());
@@ -119,7 +120,7 @@ void exit_thread(void)
                kfree(bp);
        }
 
-       drop_fpu(me);
+       drop_fpu(fpu);
 }
 
 void flush_thread(void)