* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs-2.6: (29 commits)
braino in internal.h
convert simple cases of nfs-related ->get_sb() to ->mount()
convert btrfs
convert ceph
convert gfs2
convert afs
convert ecryptfs
convert sysfs
convert cgroup and cpuset
switch get_sb_ns() users
switch procfs to ->mount()
setting ->proc_mnt doesn't belong in proc_get_sb()
convert cifs
convert nilfs
switch logfs to ->mount()
logfs: fix a leak in get_sb
logfs get_sb, part 3
logfs get_sb, part 2
logfs get_sb massage, part 1
convert v9fs
...
endif
# Additional ARCH settings for sparc
+ifeq ($(ARCH),sparc32)
+ SRCARCH := sparc
+endif
ifeq ($(ARCH),sparc64)
SRCARCH := sparc
endif
select RTC_CLASS
select RTC_DRV_M48T59
select HAVE_IRQ_WORK
- select HAVE_PERF_EVENTS
- select PERF_USE_VMALLOC
select HAVE_DMA_ATTRS
select HAVE_DMA_API_DEBUG
select HAVE_ARCH_JUMP_LABEL
select RTC_DRV_BQ4802
select RTC_DRV_SUN4V
select RTC_DRV_STARFIRE
- select HAVE_IRQ_WORK
select HAVE_PERF_EVENTS
select PERF_USE_VMALLOC
#ifdef __KERNEL__
#include <linux/types.h>
-#include <asm/system.h>
#define JUMP_LABEL_NOP_SIZE 4
unsigned long flags;
unsigned int cpu_irq;
int ret;
-#ifdef CONFIG_SMP
+#if defined CONFIG_SMP && !defined CONFIG_SPARC_LEON
struct tt_entry *trap_table;
extern struct tt_entry trapbase_cpu1, trapbase_cpu2, trapbase_cpu3;
#endif
table[SP_TRAP_IRQ1+(cpu_irq-1)].inst_four = SPARC_NOP;
INSTANTIATE(sparc_ttable)
-#ifdef CONFIG_SMP
+#if defined CONFIG_SMP && !defined CONFIG_SPARC_LEON
trap_table = &trapbase_cpu1; INSTANTIATE(trap_table)
trap_table = &trapbase_cpu2; INSTANTIATE(trap_table)
trap_table = &trapbase_cpu3; INSTANTIATE(trap_table)
static inline unsigned long do_swap(volatile unsigned long *ptr,
unsigned long val)
{
- __asm__ __volatile__("swapa [%1] %2, %0\n\t" : "=&r"(val)
- : "r"(ptr), "i"(ASI_LEON_DCACHE_MISS)
+ __asm__ __volatile__("swapa [%2] %3, %0\n\t" : "=&r"(val)
+ : "0"(val), "r"(ptr), "i"(ASI_LEON_DCACHE_MISS)
: "memory");
return val;
}
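A note on the constraint fix above: in the old asm, val was named only as an early-clobber output ("=&r"), so the register handed to swapa held an undefined value rather than the value to be stored; the added "0"(val) input ties val to operand 0 on entry. As a minimal illustration, the semantics do_swap() provides are an atomic exchange; sketched in plain C (swap_semantics is a hypothetical name, and this lacks the atomicity the hardware swapa instruction guarantees):

/* Illustration only: what do_swap() means, minus the atomicity that
 * the real 'swapa' instruction provides on sparc32/LEON.
 */
static inline unsigned long swap_semantics(volatile unsigned long *ptr,
					   unsigned long val)
{
	unsigned long old = *ptr;	/* fetch the current contents */
	*ptr = val;			/* store the caller's value */
	return old;			/* return what was displaced */
}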
call do_notify_resume
add %sp, STACKFRAME_SZ, %o0 ! pt_regs ptr
- /* Fall through. */
- ld [%sp + STACKFRAME_SZ + PT_PSR], %t_psr
- clr %l6
+ b signal_p
+ ld [%curptr + TI_FLAGS], %g2
+
ret_trap_continue:
sethi %hi(PSR_SYSCALL), %g1
andn %t_psr, %g1, %t_psr
__handle_user_windows:
call fault_in_user_windows
wrpr %g0, RTRAP_PSTATE, %pstate
- wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate
- /* Redo sched+sig checks */
- ldx [%g6 + TI_FLAGS], %l0
- andcc %l0, _TIF_NEED_RESCHED, %g0
-
- be,pt %xcc, 1f
- nop
- call schedule
- wrpr %g0, RTRAP_PSTATE, %pstate
- wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate
- ldx [%g6 + TI_FLAGS], %l0
-
-1: andcc %l0, _TIF_DO_NOTIFY_RESUME_MASK, %g0
- be,pt %xcc, __handle_user_windows_continue
- nop
- mov %l5, %o1
- add %sp, PTREGS_OFF, %o0
- mov %l0, %o2
-
- call do_notify_resume
- wrpr %g0, RTRAP_PSTATE, %pstate
- wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate
- /* Signal delivery can modify pt_regs tstate, so we must
- * reload it.
- */
- ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
- sethi %hi(0xf << 20), %l4
- and %l1, %l4, %l4
- ba,pt %xcc, __handle_user_windows_continue
+ ba,pt %xcc, __handle_preemption_continue
+ wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate
- andn %l1, %l4, %l1
__handle_userfpu:
rd %fprs, %l5
andcc %l5, FPRS_FEF, %g0
ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
sethi %hi(0xf << 20), %l4
and %l1, %l4, %l4
- ba,pt %xcc, __handle_signal_continue
+ ba,pt %xcc, __handle_preemption_continue
andn %l1, %l4, %l1
/* When returning from a NMI (%pil==15) interrupt we want to
bne,pn %xcc, __handle_preemption
andcc %l0, _TIF_DO_NOTIFY_RESUME_MASK, %g0
bne,pn %xcc, __handle_signal
-__handle_signal_continue:
ldub [%g6 + TI_WSAVED], %o2
brnz,pn %o2, __handle_user_windows
nop
-__handle_user_windows_continue:
sethi %hi(TSTATE_PEF), %o0
andcc %l1, %o0, %g0
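The rtrap_64.S hunks above delete the hand-duplicated reschedule and notify-resume checks that used to sit at the tail of __handle_user_windows and __handle_signal, and instead branch back to __handle_preemption_continue, so the TI_FLAGS re-check lives in exactly one place. Rendered as a hypothetical C sketch (the real code is assembly; names and signatures are approximate):

/* Sketch of the resulting control flow: after any piece of work that can
 * raise new thread flags, loop and re-sample them rather than duplicating
 * the checks at each handler's tail.
 */
for (;;) {
	unsigned long flags = current_thread_info()->flags;

	if (flags & _TIF_NEED_RESCHED) {
		schedule();			/* may set more flags */
		continue;
	}
	if (flags & _TIF_DO_NOTIFY_RESUME_MASK) {
		do_notify_resume(regs, orig_i0, flags);
		continue;			/* signal delivery can too */
	}
	break;					/* nothing pending: return */
}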
__do_fault_siginfo(BUS_ADRERR, SIGBUS, tsk->thread.kregs, address);
}
+static void check_stack_aligned(unsigned long sp)
+{
+ if (sp & 0x7UL)
+ force_sig(SIGILL, current);
+}
+
void window_overflow_fault(void)
{
unsigned long sp;
if(((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
force_user_fault(sp + 0x38, 1);
force_user_fault(sp, 1);
+
+ check_stack_aligned(sp);
}
void window_underflow_fault(unsigned long sp)
if(((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
force_user_fault(sp + 0x38, 0);
force_user_fault(sp, 0);
+
+ check_stack_aligned(sp);
}
void window_ret_fault(struct pt_regs *regs)
if(((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
force_user_fault(sp + 0x38, 0);
force_user_fault(sp, 0);
+
+ check_stack_aligned(sp);
}
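The new check_stack_aligned() helper, called at the end of all three window fault paths, kills the task with SIGILL when the user %sp is not doubleword aligned; the SPARC ABI requires 8-byte stack alignment because register windows are spilled to the save area at %sp with doubleword stores. A self-contained illustration of the predicate, assuming only that 8-byte rule (stack_is_aligned is a hypothetical name):

#include <stdio.h>

/* sp & 0x7 is non-zero exactly when sp is not a multiple of 8 */
static int stack_is_aligned(unsigned long sp)
{
	return (sp & 0x7UL) == 0;
}

int main(void)
{
	printf("%d\n", stack_is_aligned(0xeffffd60UL));	/* 1: 8-byte aligned */
	printf("%d\n", stack_is_aligned(0xeffffd64UL));	/* 0: word-aligned only */
	return 0;
}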
static void hpt3xxn_rw_disk(ide_drive_t *drive, struct request *rq)
{
- hpt3xxn_set_clock(drive->hwif, rq_data_dir(rq) ? 0x23 : 0x21);
+ hpt3xxn_set_clock(drive->hwif, rq_data_dir(rq) ? 0x21 : 0x23);
}
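The hpt3xxn_rw_disk() one-liner un-swaps the two clock selects: rq_data_dir() is non-zero for WRITE requests, so the old ordering fed writes the read clock and reads the write clock. Spelled out as a standalone sketch (hpt3xxn_clock_for is a hypothetical name; the constants are the ones in the hunk):

/* READ/WRITE encoding as in the block layer: read == 0, write == 1 */
enum req_dir { RQ_READ = 0, RQ_WRITE = 1 };

static unsigned char hpt3xxn_clock_for(enum req_dir dir)
{
	/* after the fix: writes select 0x21, reads select 0x23 */
	return dir == RQ_WRITE ? 0x21 : 0x23;
}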
/**
u16 mcr;
pci_read_config_word(dev, mcr_addr, &mcr);
- pci_write_config_word(dev, mcr_addr, (mcr | 0x8000));
- /* now read cable id register */
+ pci_write_config_word(dev, mcr_addr, mcr | 0x8000);
+ /* Debounce, then read cable ID register */
+ udelay(10);
pci_read_config_byte(dev, 0x5a, &scr1);
pci_write_config_word(dev, mcr_addr, mcr);
} else if (chip_type >= HPT370) {
u8 scr2 = 0;
pci_read_config_byte(dev, 0x5b, &scr2);
- pci_write_config_byte(dev, 0x5b, (scr2 & ~1));
- /* now read cable id register */
+ pci_write_config_byte(dev, 0x5b, scr2 & ~1);
+ /* Debounce, then read cable ID register */
+ udelay(10);
pci_read_config_byte(dev, 0x5a, &scr1);
pci_write_config_byte(dev, 0x5b, scr2);
} else
pci_read_config_byte(dev, 0x5a, &scr1);
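Both cable-detect branches gain the same shape: flip the chip into detect mode, wait 10 microseconds for the lines to settle, sample register 0x5a, then restore the saved mode. A hedged sketch of that sequence; probe_cable_id(), cfg_read8() and cfg_write8() are hypothetical stand-ins for the pci_*_config_* calls above:

/* Hypothetical helper showing the toggle/settle/sample/restore pattern */
static unsigned char probe_cable_id(void *dev, int mode_reg,
				    unsigned char mode_bit)
{
	unsigned char saved = cfg_read8(dev, mode_reg);
	unsigned char id;

	cfg_write8(dev, mode_reg, saved | mode_bit);	/* enter detect mode */
	udelay(10);					/* debounce the lines */
	id = cfg_read8(dev, 0x5a);			/* sample cable ID */
	cfg_write8(dev, mode_reg, saved);		/* restore prior mode */
	return id;
}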
ide_hwif_t *hwif = drive->hwif;
const struct ide_dma_ops *dma_ops = hwif->dma_ops;
struct ide_cmd *cmd = &hwif->cmd;
- struct request *rq;
ide_startstop_t ret = ide_stopped;
/*
ide_dma_off_quietly(drive);
/*
- * un-busy drive etc and make sure request is sane
+ * make sure request is sane
*/
- rq = hwif->rq;
- if (rq) {
- hwif->rq = NULL;
- rq->errors = 0;
- ide_requeue_and_plug(drive, rq);
- }
+ if (hwif->rq)
+ hwif->rq->errors = 0;
return ret;
}
struct task_struct *task);
#ifdef CONFIG_FAIR_GROUP_SCHED
- void (*moved_group) (struct task_struct *p, int on_rq);
+ void (*task_move_group) (struct task_struct *p, int on_rq);
#endif
};
if (unlikely(running))
tsk->sched_class->put_prev_task(rq, tsk);
- set_task_rq(tsk, task_cpu(tsk));
-
#ifdef CONFIG_FAIR_GROUP_SCHED
- if (tsk->sched_class->moved_group)
- tsk->sched_class->moved_group(tsk, on_rq);
+ if (tsk->sched_class->task_move_group)
+ tsk->sched_class->task_move_group(tsk, on_rq);
+ else
#endif
+ set_task_rq(tsk, task_cpu(tsk));
if (unlikely(running))
tsk->sched_class->set_curr_task(rq);
}
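The sched_move_task() hunk moves set_task_rq() below the class hook and makes it the else arm: a class that defines task_move_group (fair, below) takes over the runqueue switch so it can fix up its own state around it, while every other class falls through to the generic call. The preprocessor trick in isolation, with hypothetical names throughout:

/* With CONFIG_SOME_FEATURE off, only the fallback call survives
 * preprocessing; with it on, the fallback becomes the 'else' arm
 * of the hook check.
 */
static void move_task(struct my_ops *ops, struct task *task, int queued)
{
#ifdef CONFIG_SOME_FEATURE
	if (ops->optional_hook)
		ops->optional_hook(task, queued);
	else
#endif
		generic_fallback(task, queued);
}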
#ifdef CONFIG_FAIR_GROUP_SCHED
-static void moved_group_fair(struct task_struct *p, int on_rq)
+static void task_move_group_fair(struct task_struct *p, int on_rq)
{
- struct cfs_rq *cfs_rq = task_cfs_rq(p);
-
- update_curr(cfs_rq);
+ /*
+ * If the task was not on the rq at the time of this cgroup movement
+ * it must have been asleep, sleeping tasks keep their ->vruntime
+ * absolute on their old rq until wakeup (needed for the fair sleeper
+ * bonus in place_entity()).
+ *
+ * If it was on the rq, we've just 'preempted' it, which does convert
+ * ->vruntime to a relative base.
+ *
+ * Make sure both cases convert their relative position when migrating
+ * to another cgroup's rq. This does somewhat interfere with the
+ * fair sleeper stuff for the first placement, but who cares.
+ */
+ if (!on_rq)
+ p->se.vruntime -= cfs_rq_of(&p->se)->min_vruntime;
+ set_task_rq(p, task_cpu(p));
if (!on_rq)
- place_entity(cfs_rq, &p->se, 1);
+ p->se.vruntime += cfs_rq_of(&p->se)->min_vruntime;
}
#endif
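A worked example of the renormalization in task_move_group_fair(), with made-up numbers: what must survive the move is the task's offset from min_vruntime, not its absolute vruntime.

/* Worked numbers for the !on_rq (sleeping) case; values are illustrative */
static unsigned long long moved_vruntime(void)
{
	unsigned long long vruntime = 1040;	/* absolute on the old cfs_rq */
	unsigned long long old_min  = 1000;	/* old cfs_rq->min_vruntime   */
	unsigned long long new_min  =  500;	/* new cfs_rq->min_vruntime   */

	vruntime -= old_min;	/* 40: lag relative to the old queue */
	/* set_task_rq() would retarget p->se at the new cfs_rq here */
	vruntime += new_min;	/* 540: same 40-unit lag on the new base */
	return vruntime;
}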
.get_rr_interval = get_rr_interval_fair,
#ifdef CONFIG_FAIR_GROUP_SCHED
- .moved_group = moved_group_fair,
+ .task_move_group = task_move_group_fair,
#endif
};
}
/*
- * Called when a process is dequeued from the active array and given
- * the cpu. We should note that with the exception of interactive
- * tasks, the expired queue will become the active queue after the active
- * queue is empty, without explicitly dequeuing and requeuing tasks in the
- * expired queue. (Interactive tasks may be requeued directly to the
- * active queue, thus delaying tasks in the expired queue from running;
- * see scheduler_tick()).
- *
- * Though we are interested in knowing how long it was from the *first* time a
+ * We are interested in knowing how long it was from the *first* time a
* task was queued to the time that it finally hit a cpu; we call this routine
* from dequeue_task() to account for possible rq->clock skew across cpus. The
* delta taken on each cpu would annul the skew.
}
/*
- * Called when a process is queued into either the active or expired
- * array. The time is noted and later used to determine how long we
- * had to wait for us to reach the cpu. Since the expired queue will
- * become the active queue after active queue is empty, without dequeuing
- * and requeuing any tasks, we are interested in queuing to either. It
- * is unusual but not impossible for tasks to be dequeued and immediately
- * requeued in the same or another array: this can happen in sched_yield(),
- * set_user_nice(), and even load_balance() as it moves tasks from runqueue
- * to runqueue.
- *
* This function is only called from enqueue_task(), and it only updates
* the timestamp if it is not already set. It's assumed that
* sched_info_dequeued() will clear that stamp when appropriate.
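Taken together, the two surviving comments describe a set-once/clear-on-dequeue handshake: enqueue stamps last_queued only when no stamp is outstanding, and dequeue accounts the elapsed delta and clears the stamp. A simplified sketch of that protocol; the struct and helper names are illustrative, not the kernel's actual sched_info code:

struct wait_stats {
	unsigned long long last_queued;	/* 0 == no stamp outstanding       */
	unsigned long long run_delay;	/* total runnable-but-waiting time */
};

static void stats_enqueued(struct wait_stats *ws, unsigned long long now)
{
	if (!ws->last_queued)		/* only the *first* queueing stamps */
		ws->last_queued = now;
}

static void stats_dequeued(struct wait_stats *ws, unsigned long long now)
{
	if (ws->last_queued) {
		/* delta accounted at dequeue; per the comment above, deltas
		 * taken on each cpu annul rq->clock skew across cpus
		 */
		ws->run_delay += now - ws->last_queued;
		ws->last_queued = 0;	/* next enqueue re-stamps */
	}
}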