git.kernelconcepts.de Git - karo-tx-linux.git/blobdiff - kernel/sched/sched.h
Merge branch 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel...
[karo-tx-linux.git] / kernel / sched / sched.h
index 767aab3505a81d14789382686f9c4ccba095cfaa..7808ab05059991a2dad05bf073f1fee539f0fbbd 100644 (file)
@@ -1331,15 +1331,17 @@ extern const u32 sched_prio_to_wmult[40];
 #define DEQUEUE_SLEEP          0x01
 #define DEQUEUE_SAVE           0x02 /* matches ENQUEUE_RESTORE */
 #define DEQUEUE_MOVE           0x04 /* matches ENQUEUE_MOVE */
+#define DEQUEUE_NOCLOCK                0x08 /* matches ENQUEUE_NOCLOCK */
 
 #define ENQUEUE_WAKEUP         0x01
 #define ENQUEUE_RESTORE                0x02
 #define ENQUEUE_MOVE           0x04
+#define ENQUEUE_NOCLOCK                0x08
 
-#define ENQUEUE_HEAD           0x08
-#define ENQUEUE_REPLENISH      0x10
+#define ENQUEUE_HEAD           0x10
+#define ENQUEUE_REPLENISH      0x20
 #ifdef CONFIG_SMP
-#define ENQUEUE_MIGRATED       0x20
+#define ENQUEUE_MIGRATED       0x40
 #else
 #define ENQUEUE_MIGRATED       0x00
 #endif
@@ -1624,6 +1626,7 @@ static inline void sched_avg_update(struct rq *rq) { }
 
 struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
        __acquires(rq->lock);
+
 struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
        __acquires(p->pi_lock)
        __acquires(rq->lock);
@@ -1645,6 +1648,62 @@ task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
        raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
 }
 
+/*
+ * Acquire rq->lock with IRQs disabled, saving the previous IRQ state
+ * in rf->flags, then pin the lock via rq_pin_lock(). Paired with
+ * rq_unlock_irqrestore(), which restores rf->flags.
+ */
+static inline void
+rq_lock_irqsave(struct rq *rq, struct rq_flags *rf)
+       __acquires(rq->lock)
+{
+       raw_spin_lock_irqsave(&rq->lock, rf->flags);
+       rq_pin_lock(rq, rf);
+}
+
+/*
+ * Acquire rq->lock with IRQs disabled (no flags saved — the paired
+ * rq_unlock_irq() unconditionally re-enables IRQs), then pin the lock.
+ */
+static inline void
+rq_lock_irq(struct rq *rq, struct rq_flags *rf)
+       __acquires(rq->lock)
+{
+       raw_spin_lock_irq(&rq->lock);
+       rq_pin_lock(rq, rf);
+}
+
+/*
+ * Acquire rq->lock without touching the IRQ state, then pin the lock.
+ * Caller is responsible for IRQs already being disabled if required.
+ */
+static inline void
+rq_lock(struct rq *rq, struct rq_flags *rf)
+       __acquires(rq->lock)
+{
+       raw_spin_lock(&rq->lock);
+       rq_pin_lock(rq, rf);
+}
+
+/*
+ * Re-acquire rq->lock and re-pin it. rq_repin_lock() presumably
+ * restores pin state captured by an earlier rq_unpin_lock() —
+ * NOTE(review): confirm against rq_repin_lock()'s definition.
+ */
+static inline void
+rq_relock(struct rq *rq, struct rq_flags *rf)
+       __acquires(rq->lock)
+{
+       raw_spin_lock(&rq->lock);
+       rq_repin_lock(rq, rf);
+}
+
+/*
+ * Unpin then release rq->lock, restoring the IRQ state saved in
+ * rf->flags by rq_lock_irqsave(). Unpin happens while the lock is
+ * still held, mirroring the acquire-then-pin order on the lock side.
+ */
+static inline void
+rq_unlock_irqrestore(struct rq *rq, struct rq_flags *rf)
+       __releases(rq->lock)
+{
+       rq_unpin_lock(rq, rf);
+       raw_spin_unlock_irqrestore(&rq->lock, rf->flags);
+}
+
+/*
+ * Unpin then release rq->lock, unconditionally re-enabling IRQs.
+ * Counterpart of rq_lock_irq().
+ */
+static inline void
+rq_unlock_irq(struct rq *rq, struct rq_flags *rf)
+       __releases(rq->lock)
+{
+       rq_unpin_lock(rq, rf);
+       raw_spin_unlock_irq(&rq->lock);
+}
+
+/*
+ * Unpin then release rq->lock, leaving the IRQ state untouched.
+ * Counterpart of rq_lock().
+ */
+static inline void
+rq_unlock(struct rq *rq, struct rq_flags *rf)
+       __releases(rq->lock)
+{
+       rq_unpin_lock(rq, rf);
+       raw_spin_unlock(&rq->lock);
+}
+
 #ifdef CONFIG_SMP
 #ifdef CONFIG_PREEMPT