git.kernelconcepts.de Git - karo-tx-linux.git/commitdiff
Merge remote-tracking branch 'tile/master'
authorStephen Rothwell <sfr@canb.auug.org.au>
Wed, 4 Nov 2015 23:50:19 +0000 (10:50 +1100)
committerStephen Rothwell <sfr@canb.auug.org.au>
Wed, 4 Nov 2015 23:50:19 +0000 (10:50 +1100)
15 files changed:
arch/tile/Kconfig
arch/tile/include/asm/insn.h [new file with mode: 0644]
arch/tile/include/asm/jump_label.h [new file with mode: 0644]
arch/tile/include/asm/page.h
arch/tile/include/asm/processor.h
arch/tile/include/asm/thread_info.h
arch/tile/kernel/Makefile
arch/tile/kernel/ftrace.c
arch/tile/kernel/intvec_32.S
arch/tile/kernel/intvec_64.S
arch/tile/kernel/jump_label.c [new file with mode: 0644]
arch/tile/kernel/kgdb.c
arch/tile/kernel/kprobes.c
arch/tile/kernel/process.c
arch/tile/kernel/time.c

index 106c21bd7f449d947094db5fdefce8a9a6e1b142..dff39e25f61d2cc9f1ed697c30ca7438985f572e 100644 (file)
@@ -33,6 +33,7 @@ config TILE
        select GENERIC_STRNCPY_FROM_USER
        select GENERIC_STRNLEN_USER
        select HAVE_ARCH_SECCOMP_FILTER
+       select HAVE_ARCH_JUMP_LABEL
 
 # FIXME: investigate whether we need/want these options.
 #      select HAVE_IOREMAP_PROT
diff --git a/arch/tile/include/asm/insn.h b/arch/tile/include/asm/insn.h
new file mode 100644 (file)
index 0000000..f78ba5c
--- /dev/null
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2015 Tilera Corporation. All Rights Reserved.
+ *
+ *   This program is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License
+ *   as published by the Free Software Foundation, version 2.
+ *
+ *   This program is distributed in the hope that it will be useful, but
+ *   WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ *   NON INFRINGEMENT.  See the GNU General Public License for
+ *   more details.
+ */
+#ifndef __ASM_TILE_INSN_H
+#define __ASM_TILE_INSN_H
+
+#include <arch/opcode.h>
+
+static inline tilegx_bundle_bits NOP(void)
+{
+       return create_UnaryOpcodeExtension_X0(FNOP_UNARY_OPCODE_X0) |
+               create_RRROpcodeExtension_X0(UNARY_RRR_0_OPCODE_X0) |
+               create_Opcode_X0(RRR_0_OPCODE_X0) |
+               create_UnaryOpcodeExtension_X1(NOP_UNARY_OPCODE_X1) |
+               create_RRROpcodeExtension_X1(UNARY_RRR_0_OPCODE_X1) |
+               create_Opcode_X1(RRR_0_OPCODE_X1);
+}
+
+static inline tilegx_bundle_bits tilegx_gen_branch(unsigned long pc,
+                                           unsigned long addr,
+                                           bool link)
+{
+       tilegx_bundle_bits opcode_x0, opcode_x1;
+       long pcrel_by_instr = (addr - pc) >> TILEGX_LOG2_BUNDLE_SIZE_IN_BYTES;
+
+       if (link) {
+               /* opcode: jal addr */
+               opcode_x1 =
+                       create_Opcode_X1(JUMP_OPCODE_X1) |
+                       create_JumpOpcodeExtension_X1(JAL_JUMP_OPCODE_X1) |
+                       create_JumpOff_X1(pcrel_by_instr);
+       } else {
+               /* opcode: j addr */
+               opcode_x1 =
+                       create_Opcode_X1(JUMP_OPCODE_X1) |
+                       create_JumpOpcodeExtension_X1(J_JUMP_OPCODE_X1) |
+                       create_JumpOff_X1(pcrel_by_instr);
+       }
+
+       /* opcode: fnop */
+       opcode_x0 =
+               create_UnaryOpcodeExtension_X0(FNOP_UNARY_OPCODE_X0) |
+               create_RRROpcodeExtension_X0(UNARY_RRR_0_OPCODE_X0) |
+               create_Opcode_X0(RRR_0_OPCODE_X0);
+
+       return opcode_x1 | opcode_x0;
+}
+
+#endif /* __ASM_TILE_INSN_H */
diff --git a/arch/tile/include/asm/jump_label.h b/arch/tile/include/asm/jump_label.h
new file mode 100644 (file)
index 0000000..cde7573
--- /dev/null
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2015 Tilera Corporation. All Rights Reserved.
+ *
+ *   This program is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License
+ *   as published by the Free Software Foundation, version 2.
+ *
+ *   This program is distributed in the hope that it will be useful, but
+ *   WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ *   NON INFRINGEMENT.  See the GNU General Public License for
+ *   more details.
+ */
+
+#ifndef _ASM_TILE_JUMP_LABEL_H
+#define _ASM_TILE_JUMP_LABEL_H
+
+#include <arch/opcode.h>
+
+#define JUMP_LABEL_NOP_SIZE    TILE_BUNDLE_SIZE_IN_BYTES
+
+static __always_inline bool arch_static_branch(struct static_key *key,
+                                              bool branch)
+{
+       asm_volatile_goto("1:\n\t"
+               "nop" "\n\t"
+               ".pushsection __jump_table,  \"aw\"\n\t"
+               ".quad 1b, %l[l_yes], %0 + %1 \n\t"
+               ".popsection\n\t"
+               : :  "i" (key), "i" (branch) : : l_yes);
+       return false;
+l_yes:
+       return true;
+}
+
+static __always_inline bool arch_static_branch_jump(struct static_key *key,
+                                                   bool branch)
+{
+       asm_volatile_goto("1:\n\t"
+               "j %l[l_yes]" "\n\t"
+               ".pushsection __jump_table,  \"aw\"\n\t"
+               ".quad 1b, %l[l_yes], %0 + %1 \n\t"
+               ".popsection\n\t"
+               : :  "i" (key), "i" (branch) : : l_yes);
+       return false;
+l_yes:
+       return true;
+}
+
+typedef u64 jump_label_t;
+
+struct jump_entry {
+       jump_label_t code;
+       jump_label_t target;
+       jump_label_t key;
+};
+
+#endif /* _ASM_TILE_JUMP_LABEL_H */
index a213a8d84a95ac48a149de807558290c21dbe2cb..5cee2cbff2b1b0fd5e094a0cd5a87ee520cc6895 100644 (file)
@@ -319,6 +319,16 @@ static inline int pfn_valid(unsigned long pfn)
 #define virt_to_page(kaddr) pfn_to_page(kaddr_to_pfn((void *)(kaddr)))
 #define page_to_virt(page) pfn_to_kaddr(page_to_pfn(page))
 
+/*
+ * The kernel text is mapped at MEM_SV_START as read-only.  To allow
+ * modifying kernel text, it is also mapped at PAGE_OFFSET as read-write.
+ * This macro converts a kernel address to its writable kernel text mapping,
+ * which is used to modify the text code on a running kernel by kgdb,
+ * ftrace, kprobe, jump label, etc.
+ */
+#define ktext_writable_addr(kaddr) \
+       ((unsigned long)(kaddr) - MEM_SV_START + PAGE_OFFSET)
+
 struct mm_struct;
 extern pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr);
 extern pte_t *virt_to_kpte(unsigned long kaddr);
index 139dfdee013404dfd9e15263d01d431f40ab2332..0684e88aacd8ebb8a58a52a82f360bd4e1c77f59 100644 (file)
@@ -212,7 +212,7 @@ static inline void release_thread(struct task_struct *dead_task)
        /* Nothing for now */
 }
 
-extern int do_work_pending(struct pt_regs *regs, u32 flags);
+extern void prepare_exit_to_usermode(struct pt_regs *regs, u32 flags);
 
 
 /*
index dc1fb28d9636271962aa61250439a2b966e25379..4b7cef9e94e05917476335cb8248de28a6f25696 100644 (file)
@@ -140,10 +140,14 @@ extern void _cpu_idle(void);
 #define _TIF_POLLING_NRFLAG    (1<<TIF_POLLING_NRFLAG)
 #define _TIF_NOHZ              (1<<TIF_NOHZ)
 
+/* Work to do as we loop to exit to user space. */
+#define _TIF_WORK_MASK \
+       (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
+        _TIF_ASYNC_TLB | _TIF_NOTIFY_RESUME)
+
 /* Work to do on any return to user space. */
 #define _TIF_ALLWORK_MASK \
-       (_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_SINGLESTEP | \
-        _TIF_ASYNC_TLB | _TIF_NOTIFY_RESUME | _TIF_NOHZ)
+       (_TIF_WORK_MASK | _TIF_SINGLESTEP | _TIF_NOHZ)
 
 /* Work to do at syscall entry. */
 #define _TIF_SYSCALL_ENTRY_WORK \
index 21f77bf68c690a438c8ca9540e0f9e7574872c17..09936d0bcb42a811a1da6b2d72b970e8a40277a7 100644 (file)
@@ -32,5 +32,6 @@ obj-$(CONFIG_TILE_HVGLUE_TRACE)       += hvglue_trace.o
 obj-$(CONFIG_FUNCTION_TRACER)  += ftrace.o mcount_64.o
 obj-$(CONFIG_KPROBES)          += kprobes.o
 obj-$(CONFIG_KGDB)             += kgdb.o
+obj-$(CONFIG_JUMP_LABEL)       += jump_label.o
 
 obj-y                          += vdso/
index 0c0996175b1ed613cf6fe4d3e89cec85ea5c6a6f..4a572088b270bec26a0b10cce0453ab2d15e6c20 100644 (file)
 #include <asm/cacheflush.h>
 #include <asm/ftrace.h>
 #include <asm/sections.h>
+#include <asm/insn.h>
 
 #include <arch/opcode.h>
 
 #ifdef CONFIG_DYNAMIC_FTRACE
 
-static inline tilegx_bundle_bits NOP(void)
-{
-       return create_UnaryOpcodeExtension_X0(FNOP_UNARY_OPCODE_X0) |
-               create_RRROpcodeExtension_X0(UNARY_RRR_0_OPCODE_X0) |
-               create_Opcode_X0(RRR_0_OPCODE_X0) |
-               create_UnaryOpcodeExtension_X1(NOP_UNARY_OPCODE_X1) |
-               create_RRROpcodeExtension_X1(UNARY_RRR_0_OPCODE_X1) |
-               create_Opcode_X1(RRR_0_OPCODE_X1);
-}
-
 static int machine_stopped __read_mostly;
 
 int ftrace_arch_code_modify_prepare(void)
@@ -117,7 +108,7 @@ static int ftrace_modify_code(unsigned long pc, unsigned long old,
                return -EINVAL;
 
        /* Operate on writable kernel text mapping. */
-       pc_wr = pc - MEM_SV_START + PAGE_OFFSET;
+       pc_wr = ktext_writable_addr(pc);
 
        if (probe_kernel_write((void *)pc_wr, &new, MCOUNT_INSN_SIZE))
                return -EPERM;
index fbbe2ea882ea72281e42e39fe64176a927778322..33d48812872a6541f0418e63b0b378379908ccc5 100644 (file)
@@ -845,18 +845,6 @@ STD_ENTRY(interrupt_return)
 .Lresume_userspace:
        FEEDBACK_REENTER(interrupt_return)
 
-       /*
-        * Use r33 to hold whether we have already loaded the callee-saves
-        * into ptregs.  We don't want to do it twice in this loop, since
-        * then we'd clobber whatever changes are made by ptrace, etc.
-        * Get base of stack in r32.
-        */
-       {
-        GET_THREAD_INFO(r32)
-        movei  r33, 0
-       }
-
-.Lretry_work_pending:
        /*
         * Disable interrupts so as to make sure we don't
         * miss an interrupt that sets any of the thread flags (like
@@ -867,33 +855,27 @@ STD_ENTRY(interrupt_return)
        IRQ_DISABLE(r20, r21)
        TRACE_IRQS_OFF  /* Note: clobbers registers r0-r29 */
 
-
-       /* Check to see if there is any work to do before returning to user. */
+       /*
+        * See if there are any work items (including single-shot items)
+        * to do.  If so, save the callee-save registers to pt_regs
+        * and then dispatch to C code.
+        */
+       GET_THREAD_INFO(r21)
        {
-        addi   r29, r32, THREAD_INFO_FLAGS_OFFSET
-        moveli r1, lo16(_TIF_ALLWORK_MASK)
+        addi   r22, r21, THREAD_INFO_FLAGS_OFFSET
+        moveli r20, lo16(_TIF_ALLWORK_MASK)
        }
        {
-        lw     r29, r29
-        auli   r1, r1, ha16(_TIF_ALLWORK_MASK)
+        lw     r22, r22
+        auli   r20, r20, ha16(_TIF_ALLWORK_MASK)
        }
-       and     r1, r29, r1
-       bzt     r1, .Lrestore_all
-
-       /*
-        * Make sure we have all the registers saved for signal
-        * handling, notify-resume, or single-step.  Call out to C
-        * code to figure out exactly what we need to do for each flag bit,
-        * then if necessary, reload the flags and recheck.
-        */
+       and     r1, r22, r20
        {
         PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
-        bnz    r33, 1f
+        bzt    r1, .Lrestore_all
        }
        push_extra_callee_saves r0
-       movei   r33, 1
-1:     jal     do_work_pending
-       bnz     r0, .Lretry_work_pending
+       jal     prepare_exit_to_usermode
 
        /*
         * In the NMI case we
@@ -1327,7 +1309,7 @@ STD_ENTRY(ret_from_kernel_thread)
        FEEDBACK_REENTER(ret_from_kernel_thread)
        {
         movei  r30, 0               /* not an NMI */
-        j      .Lresume_userspace   /* jump into middle of interrupt_return */
+        j      interrupt_return
        }
        STD_ENDPROC(ret_from_kernel_thread)
 
index 58964d209d4db541a1dd651c9d2e9db907526fd2..a41c994ce237ac5848b08814dd839af18eb55043 100644 (file)
@@ -878,20 +878,6 @@ STD_ENTRY(interrupt_return)
 .Lresume_userspace:
        FEEDBACK_REENTER(interrupt_return)
 
-       /*
-        * Use r33 to hold whether we have already loaded the callee-saves
-        * into ptregs.  We don't want to do it twice in this loop, since
-        * then we'd clobber whatever changes are made by ptrace, etc.
-        */
-       {
-        movei  r33, 0
-        move   r32, sp
-       }
-
-       /* Get base of stack in r32. */
-       EXTRACT_THREAD_INFO(r32)
-
-.Lretry_work_pending:
        /*
         * Disable interrupts so as to make sure we don't
         * miss an interrupt that sets any of the thread flags (like
@@ -902,33 +888,28 @@ STD_ENTRY(interrupt_return)
        IRQ_DISABLE(r20, r21)
        TRACE_IRQS_OFF  /* Note: clobbers registers r0-r29 */
 
-
-       /* Check to see if there is any work to do before returning to user. */
+       /*
+        * See if there are any work items (including single-shot items)
+        * to do.  If so, save the callee-save registers to pt_regs
+        * and then dispatch to C code.
+        */
+       move    r21, sp
+       EXTRACT_THREAD_INFO(r21)
        {
-        addi   r29, r32, THREAD_INFO_FLAGS_OFFSET
-        moveli r1, hw1_last(_TIF_ALLWORK_MASK)
+        addi   r22, r21, THREAD_INFO_FLAGS_OFFSET
+        moveli r20, hw1_last(_TIF_ALLWORK_MASK)
        }
        {
-        ld     r29, r29
-        shl16insli r1, r1, hw0(_TIF_ALLWORK_MASK)
+        ld     r22, r22
+        shl16insli r20, r20, hw0(_TIF_ALLWORK_MASK)
        }
-       and     r1, r29, r1
-       beqzt   r1, .Lrestore_all
-
-       /*
-        * Make sure we have all the registers saved for signal
-        * handling or notify-resume.  Call out to C code to figure out
-        * exactly what we need to do for each flag bit, then if
-        * necessary, reload the flags and recheck.
-        */
+       and     r1, r22, r20
        {
         PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
-        bnez   r33, 1f
+        beqzt  r1, .Lrestore_all
        }
        push_extra_callee_saves r0
-       movei   r33, 1
-1:     jal     do_work_pending
-       bnez    r0, .Lretry_work_pending
+       jal     prepare_exit_to_usermode
 
        /*
         * In the NMI case we
@@ -1411,7 +1392,7 @@ STD_ENTRY(ret_from_kernel_thread)
        FEEDBACK_REENTER(ret_from_kernel_thread)
        {
         movei  r30, 0               /* not an NMI */
-        j      .Lresume_userspace   /* jump into middle of interrupt_return */
+        j      interrupt_return
        }
        STD_ENDPROC(ret_from_kernel_thread)
 
diff --git a/arch/tile/kernel/jump_label.c b/arch/tile/kernel/jump_label.c
new file mode 100644 (file)
index 0000000..07802d5
--- /dev/null
@@ -0,0 +1,64 @@
+/*
+ * Copyright 2015 Tilera Corporation. All Rights Reserved.
+ *
+ *   This program is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License
+ *   as published by the Free Software Foundation, version 2.
+ *
+ *   This program is distributed in the hope that it will be useful, but
+ *   WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ *   NON INFRINGEMENT.  See the GNU General Public License for
+ *   more details.
+ *
+ * jump label TILE-Gx support
+ */
+
+#include <linux/jump_label.h>
+#include <linux/memory.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/cpu.h>
+
+#include <asm/cacheflush.h>
+#include <asm/insn.h>
+
+#ifdef HAVE_JUMP_LABEL
+
+static void __jump_label_transform(struct jump_entry *e,
+                                  enum jump_label_type type)
+{
+       tilegx_bundle_bits opcode;
+       /* Operate on writable kernel text mapping. */
+       unsigned long pc_wr = ktext_writable_addr(e->code);
+
+       if (type == JUMP_LABEL_JMP)
+               opcode = tilegx_gen_branch(e->code, e->target, false);
+       else
+               opcode = NOP();
+
+       *(tilegx_bundle_bits *)pc_wr = opcode;
+       /* Make sure that above mem writes were issued towards the memory. */
+       smp_wmb();
+}
+
+void arch_jump_label_transform(struct jump_entry *e,
+                               enum jump_label_type type)
+{
+       get_online_cpus();
+       mutex_lock(&text_mutex);
+
+       __jump_label_transform(e, type);
+       flush_icache_range(e->code, e->code + sizeof(tilegx_bundle_bits));
+
+       mutex_unlock(&text_mutex);
+       put_online_cpus();
+}
+
+__init_or_module void arch_jump_label_transform_static(struct jump_entry *e,
+                                               enum jump_label_type type)
+{
+       __jump_label_transform(e, type);
+}
+
+#endif /* HAVE_JUMP_LABEL */
index ff5335ae050d475dcff52fb0a53c484abaee6a56..a506c2c28943715770ab43fa451797a8cb1566e4 100644 (file)
@@ -164,7 +164,7 @@ static unsigned long writable_address(unsigned long addr)
        unsigned long ret = 0;
 
        if (core_kernel_text(addr))
-               ret = addr - MEM_SV_START + PAGE_OFFSET;
+               ret = ktext_writable_addr(addr);
        else if (is_module_text_address(addr))
                ret = addr;
        else
index f8a45c51e9e48c057897d1c28de11ad3e7b0a3c7..c68694bb1ad2b3fb81121fd8733b233049a19290 100644 (file)
@@ -116,7 +116,7 @@ void __kprobes arch_arm_kprobe(struct kprobe *p)
        unsigned long addr_wr;
 
        /* Operate on writable kernel text mapping. */
-       addr_wr = (unsigned long)p->addr - MEM_SV_START + PAGE_OFFSET;
+       addr_wr = ktext_writable_addr(p->addr);
 
        if (probe_kernel_write((void *)addr_wr, &breakpoint_insn,
                sizeof(breakpoint_insn)))
@@ -131,7 +131,7 @@ void __kprobes arch_disarm_kprobe(struct kprobe *kp)
        unsigned long addr_wr;
 
        /* Operate on writable kernel text mapping. */
-       addr_wr = (unsigned long)kp->addr - MEM_SV_START + PAGE_OFFSET;
+       addr_wr = ktext_writable_addr(kp->addr);
 
        if (probe_kernel_write((void *)addr_wr, &kp->opcode,
                sizeof(kp->opcode)))
index 7d5769310bef88daaa2202f1b8d805ef747121a4..b5f30d376ce1401e2520992112508e144ce978ae 100644 (file)
@@ -462,54 +462,57 @@ struct task_struct *__sched _switch_to(struct task_struct *prev,
 
 /*
  * This routine is called on return from interrupt if any of the
- * TIF_WORK_MASK flags are set in thread_info->flags.  It is
- * entered with interrupts disabled so we don't miss an event
- * that modified the thread_info flags.  If any flag is set, we
- * handle it and return, and the calling assembly code will
- * re-disable interrupts, reload the thread flags, and call back
- * if more flags need to be handled.
- *
- * We return whether we need to check the thread_info flags again
- * or not.  Note that we don't clear TIF_SINGLESTEP here, so it's
- * important that it be tested last, and then claim that we don't
- * need to recheck the flags.
+ * TIF_ALLWORK_MASK flags are set in thread_info->flags.  It is
+ * entered with interrupts disabled so we don't miss an event that
+ * modified the thread_info flags.  We loop until all the tested flags
+ * are clear.  Note that the function is called on certain conditions
+ * that are not listed in the loop condition here (e.g. SINGLESTEP)
+ * which guarantees we will do those things once, and redo them if any
+ * of the other work items is re-done, but won't continue looping if
+ * all the other work is done.
  */
-int do_work_pending(struct pt_regs *regs, u32 thread_info_flags)
+void prepare_exit_to_usermode(struct pt_regs *regs, u32 thread_info_flags)
 {
-       /* If we enter in kernel mode, do nothing and exit the caller loop. */
-       if (!user_mode(regs))
-               return 0;
+       if (WARN_ON(!user_mode(regs)))
+               return;
 
-       user_exit();
+       do {
+               local_irq_enable();
 
-       /* Enable interrupts; they are disabled again on return to caller. */
-       local_irq_enable();
+               if (thread_info_flags & _TIF_NEED_RESCHED)
+                       schedule();
 
-       if (thread_info_flags & _TIF_NEED_RESCHED) {
-               schedule();
-               return 1;
-       }
 #if CHIP_HAS_TILE_DMA()
-       if (thread_info_flags & _TIF_ASYNC_TLB) {
-               do_async_page_fault(regs);
-               return 1;
-       }
+               if (thread_info_flags & _TIF_ASYNC_TLB)
+                       do_async_page_fault(regs);
 #endif
-       if (thread_info_flags & _TIF_SIGPENDING) {
-               do_signal(regs);
-               return 1;
-       }
-       if (thread_info_flags & _TIF_NOTIFY_RESUME) {
-               clear_thread_flag(TIF_NOTIFY_RESUME);
-               tracehook_notify_resume(regs);
-               return 1;
-       }
-       if (thread_info_flags & _TIF_SINGLESTEP)
+
+               if (thread_info_flags & _TIF_SIGPENDING)
+                       do_signal(regs);
+
+               if (thread_info_flags & _TIF_NOTIFY_RESUME) {
+                       clear_thread_flag(TIF_NOTIFY_RESUME);
+                       tracehook_notify_resume(regs);
+               }
+
+               local_irq_disable();
+               thread_info_flags = READ_ONCE(current_thread_info()->flags);
+
+       } while (thread_info_flags & _TIF_WORK_MASK);
+
+       if (thread_info_flags & _TIF_SINGLESTEP) {
                single_step_once(regs);
+#ifndef __tilegx__
+               /*
+                * FIXME: on tilepro, since we enable interrupts in
+                * this routine, it's possible that we miss a signal
+                * or other asynchronous event.
+                */
+               local_irq_disable();
+#endif
+       }
 
        user_enter();
-
-       return 0;
 }
 
 unsigned long get_wchan(struct task_struct *p)
index 178989e6d3e3ae1403ae9dc1f108126fedb6e2c8..fbedf380d9d4f96555b479ea70323773326f703a 100644 (file)
@@ -159,6 +159,7 @@ static DEFINE_PER_CPU(struct clock_event_device, tile_timer) = {
        .set_next_event = tile_timer_set_next_event,
        .set_state_shutdown = tile_timer_shutdown,
        .set_state_oneshot = tile_timer_shutdown,
+       .set_state_oneshot_stopped = tile_timer_shutdown,
        .tick_resume = tile_timer_shutdown,
 };