irq_poll: fold irq_poll_sched_prep into irq_poll_sched
author    Christoph Hellwig <hch@lst.de>
          Mon, 7 Dec 2015 14:41:11 +0000 (06:41 -0800)
committer Christoph Hellwig <hch@lst.de>
          Fri, 11 Dec 2015 19:52:26 +0000 (11:52 -0800)
There is no good reason to keep the two functions apart, and folding them
together makes using the API a bit simpler.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Bart Van Assche <bart.vanassche@sandisk.com>
drivers/scsi/be2iscsi/be_main.c
drivers/scsi/ipr.c
include/linux/irq_poll.h
lib/irq_poll.c
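
With the fold, an interrupt handler hands off to polling with a single
unconditional call; the disable check and the already-scheduled test move
inside irq_poll_sched() itself (see the lib/irq_poll.c hunk below). A minimal
caller sketch, using hypothetical driver names (my_dev, my_dev_mask_irq,
my_isr) that are not part of this patch:

	static irqreturn_t my_isr(int irq, void *dev_id)
	{
		struct my_dev *dev = dev_id;	/* hypothetical per-device data */

		/*
		 * Quiesce the device (illustrative helper), then defer the
		 * real completion work to the irq_poll softirq. After this
		 * patch the call is safe even if polling is already
		 * scheduled or currently disabled.
		 */
		my_dev_mask_irq(dev);
		irq_poll_sched(&dev->iopoll);

		return IRQ_HANDLED;
	}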

diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index 471e2b9424350996437aabbec8c9080029f608e3..cb9072a841be19cbfde9ff655fec50c523edf8b3 100644
@@ -910,8 +910,7 @@ static irqreturn_t be_isr_msix(int irq, void *dev_id)
        num_eq_processed = 0;
        while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
                                & EQE_VALID_MASK) {
-               if (!irq_poll_sched_prep(&pbe_eq->iopoll))
-                       irq_poll_sched(&pbe_eq->iopoll);
+               irq_poll_sched(&pbe_eq->iopoll);
 
                AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
                queue_tail_inc(eq);
@@ -972,8 +971,7 @@ static irqreturn_t be_isr(int irq, void *dev_id)
                        spin_unlock_irqrestore(&phba->isr_lock, flags);
                        num_mcceq_processed++;
                } else {
-                       if (!irq_poll_sched_prep(&pbe_eq->iopoll))
-                               irq_poll_sched(&pbe_eq->iopoll);
+                       irq_poll_sched(&pbe_eq->iopoll);
                        num_ioeq_processed++;
                }
                AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 402e4ca32d70aa81a378ae4cc6e2b66c06a5002f..82031e00b2e903fd521d67cffcb223d47e0632bd 100644
@@ -5692,8 +5692,7 @@ static irqreturn_t ipr_isr_mhrrq(int irq, void *devp)
        if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
                if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
                       hrrq->toggle_bit) {
-                       if (!irq_poll_sched_prep(&hrrq->iopoll))
-                               irq_poll_sched(&hrrq->iopoll);
+                       irq_poll_sched(&hrrq->iopoll);
                        spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
                        return IRQ_HANDLED;
                }
diff --git a/include/linux/irq_poll.h b/include/linux/irq_poll.h
index 50c39dcd2cba117ac611b8a8011e4cdfebcde498..57efae66140028391eba5032c5e188ba3ffb4bef 100644
@@ -18,19 +18,6 @@ enum {
        IRQ_POLL_F_DISABLE      = 1,
 };
 
-/*
- * Returns 0 if we successfully set the IRQ_POLL_F_SCHED bit, indicating
- * that we were the first to acquire this iop for scheduling. If this iop
- * is currently disabled, return "failure".
- */
-static inline int irq_poll_sched_prep(struct irq_poll *iop)
-{
-       if (!test_bit(IRQ_POLL_F_DISABLE, &iop->state))
-               return test_and_set_bit(IRQ_POLL_F_SCHED, &iop->state);
-
-       return 1;
-}
-
 static inline int irq_poll_disable_pending(struct irq_poll *iop)
 {
        return test_bit(IRQ_POLL_F_DISABLE, &iop->state);
diff --git a/lib/irq_poll.c b/lib/irq_poll.c
index 88af87971e8ca51fe5c2a26aaf0f42fd1d8d9129..43a3370a09fd69769f0817dda7829e535587d859 100644
@@ -21,13 +21,17 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_iopoll);
  *
  * Description:
  *     Add this irq_poll structure to the pending poll list and trigger the
- *     raise of the blk iopoll softirq. The driver must already have gotten a
- *     successful return from irq_poll_sched_prep() before calling this.
+ *     raise of the blk iopoll softirq.
  **/
 void irq_poll_sched(struct irq_poll *iop)
 {
        unsigned long flags;
 
+       if (test_bit(IRQ_POLL_F_DISABLE, &iop->state))
+               return;
+       if (test_and_set_bit(IRQ_POLL_F_SCHED, &iop->state))
+               return;
+
        local_irq_save(flags);
        list_add_tail(&iop->list, this_cpu_ptr(&blk_cpu_iopoll));
        __raise_softirq_irqoff(IRQ_POLL_SOFTIRQ);
@@ -58,7 +62,7 @@ EXPORT_SYMBOL(__irq_poll_complete);
  * Description:
  *     If a driver consumes less than the assigned budget in its run of the
  *     iopoll handler, it'll end the polled mode by calling this function. The
- *     iopoll handler will not be invoked again before irq_poll_sched_prep()
+ *     iopoll handler will not be invoked again before irq_poll_sched()
  *     is called.
  **/
 void irq_poll_complete(struct irq_poll *iop)
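
For the completion side, a matching sketch under the same hypothetical driver
names: a poll handler that consumes less than its budget ends polled mode via
irq_poll_complete(), and is not invoked again until the next irq_poll_sched()
call.

	static int my_poll(struct irq_poll *iop, int budget)
	{
		struct my_dev *dev = container_of(iop, struct my_dev, iopoll);
		int done;

		/* Illustrative helper: reap up to 'budget' completions. */
		done = my_dev_process_completions(dev, budget);
		if (done < budget) {
			/*
			 * Out of work: leave polled mode, then let the
			 * device interrupt us again.
			 */
			irq_poll_complete(iop);
			my_dev_unmask_irq(dev);
		}
		return done;
	}

The handler is registered once at setup time, e.g.
irq_poll_init(&dev->iopoll, 64, my_poll), where 64 is an arbitrary example
weight.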