scsi: bnx2fc: Plug CPU hotplug race
author     Thomas Gleixner <tglx@linutronix.de>
           Mon, 24 Jul 2017 10:52:56 +0000 (12:52 +0200)
committer  Martin K. Petersen <martin.petersen@oracle.com>
           Thu, 27 Jul 2017 01:51:24 +0000 (21:51 -0400)
bnx2fc_process_new_cqes() has protection against CPU hotplug, which relies
on the per cpu thread pointer. This protection is racy because only part
of it runs with the per cpu fp_work_lock held.

If the CPU is unplugged after the lock is dropped, the wakeup code can
dereference a NULL pointer or access freed and potentially reused memory.

Restructure the code so the thread check and the wakeup happen with the
fp_work_lock held.
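
The same check-then-use pattern can be illustrated in plain user space.
The sketch below is a minimal, self-contained analogue using POSIX
threads rather than the driver's per-CPU data and kthreads; all names
are illustrative and none come from bnx2fc. It runs single-threaded for
brevity, with the racy window marked in comments; a real reproduction
would run the "unplug" step from a second thread.

  #include <pthread.h>
  #include <stdio.h>
  #include <stdlib.h>

  struct worker {
          int id;
  };

  struct percpu_ctx {
          pthread_mutex_t lock;           /* stands in for fp_work_lock */
          struct worker *iothread;        /* NULL once the "CPU" is unplugged */
  };

  /* Racy pattern (before the fix): the pointer is used after unlock. */
  static void queue_work_racy(struct percpu_ctx *ctx)
  {
          pthread_mutex_lock(&ctx->lock);
          int have_thread = ctx->iothread != NULL;
          pthread_mutex_unlock(&ctx->lock);

          /* A concurrent unplug can free ctx->iothread right here ... */
          if (have_thread && ctx->iothread)
                  printf("wake worker %d\n", ctx->iothread->id); /* may be stale */
  }

  /* Fixed pattern: check and wakeup happen under the same lock. */
  static void queue_work_safe(struct percpu_ctx *ctx)
  {
          pthread_mutex_lock(&ctx->lock);
          if (ctx->iothread)
                  printf("wake worker %d\n", ctx->iothread->id); /* pointer stable */
          pthread_mutex_unlock(&ctx->lock);
  }

  int main(void)
  {
          struct percpu_ctx ctx = {
                  .lock = PTHREAD_MUTEX_INITIALIZER,
                  .iothread = malloc(sizeof(struct worker)),
          };
          ctx.iothread->id = 0;

          queue_work_racy(&ctx);  /* harmless here, unsafe under concurrent unplug */
          queue_work_safe(&ctx);

          /* Simulate CPU unplug: tear down the thread under the lock. */
          pthread_mutex_lock(&ctx.lock);
          free(ctx.iothread);
          ctx.iothread = NULL;
          pthread_mutex_unlock(&ctx.lock);

          queue_work_safe(&ctx);  /* no thread: caller would process inline */
          return 0;
  }

The design point mirrors the patch: once the thread pointer can be
cleared and freed concurrently, both the NULL check and every use of
the pointer must sit inside the same critical section, which is why
bnx2fc_pending_work() below only drops fp_work_lock after the wakeup.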

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Chad Dupuis <chad.dupuis@cavium.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
drivers/scsi/bnx2fc/bnx2fc_hwi.c

index 913c750205ce2a3f1a90f5d255d98d3b39f90145..26de61d65a4d259fa41e7e070648f775031a9662 100644 (file)
@@ -1008,6 +1008,28 @@ static struct bnx2fc_work *bnx2fc_alloc_work(struct bnx2fc_rport *tgt, u16 wqe)
        return work;
 }
 
+/* Pending work request completion */
+static void bnx2fc_pending_work(struct bnx2fc_rport *tgt, unsigned int wqe)
+{
+       unsigned int cpu = wqe % num_possible_cpus();
+       struct bnx2fc_percpu_s *fps;
+       struct bnx2fc_work *work;
+
+       fps = &per_cpu(bnx2fc_percpu, cpu);
+       spin_lock_bh(&fps->fp_work_lock);
+       if (fps->iothread) {
+               work = bnx2fc_alloc_work(tgt, wqe);
+               if (work) {
+                       list_add_tail(&work->list, &fps->work_list);
+                       wake_up_process(fps->iothread);
+                       spin_unlock_bh(&fps->fp_work_lock);
+                       return;
+               }
+       }
+       spin_unlock_bh(&fps->fp_work_lock);
+       bnx2fc_process_cq_compl(tgt, wqe);
+}
+
 int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt)
 {
        struct fcoe_cqe *cq;
@@ -1042,28 +1064,7 @@ int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt)
                        /* Unsolicited event notification */
                        bnx2fc_process_unsol_compl(tgt, wqe);
                } else {
-                       /* Pending work request completion */
-                       struct bnx2fc_work *work = NULL;
-                       struct bnx2fc_percpu_s *fps = NULL;
-                       unsigned int cpu = wqe % num_possible_cpus();
-
-                       fps = &per_cpu(bnx2fc_percpu, cpu);
-                       spin_lock_bh(&fps->fp_work_lock);
-                       if (unlikely(!fps->iothread))
-                               goto unlock;
-
-                       work = bnx2fc_alloc_work(tgt, wqe);
-                       if (work)
-                               list_add_tail(&work->list,
-                                             &fps->work_list);
-unlock:
-                       spin_unlock_bh(&fps->fp_work_lock);
-
-                       /* Pending work request completion */
-                       if (fps->iothread && work)
-                               wake_up_process(fps->iothread);
-                       else
-                               bnx2fc_process_cq_compl(tgt, wqe);
+                       bnx2fc_pending_work(tgt, wqe);
                        num_free_sqes++;
                }
                cqe++;