Merge branch 'for-3.6/drivers' of git://git.kernel.dk/linux-block
author Linus Torvalds <torvalds@linux-foundation.org>
Wed, 1 Aug 2012 16:06:47 +0000 (09:06 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Wed, 1 Aug 2012 16:06:47 +0000 (09:06 -0700)
Pull block driver changes from Jens Axboe:

 - Making the plugging support for drivers a bit more sane, from Neil.
   This supersedes the plugging change from Shaohua as well.  A short
   sketch of the new callback pattern follows this list.

 - The usual round of drbd updates.

 - Using a tail add instead of a head add in the request completion for
   nbd, making us find the most completed request more quickly.  A
   rough sketch of the reply-path lookup follows the nbd.c diff below.

 - A few floppy changes, getting rid of a duplicated flag and also
   running the floppy init async (since it takes forever in boot terms)
   from Andi.  A sketch of the async init pattern follows below.
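
A minimal sketch of the driver-side pattern the plugging rework moves
to, assuming the blk_check_plugged() helper added by the "blk:
centralize non-request unplug handling" commit in this pull (in the
merged tree md's equivalent helper lives in md.h).  Instead of keeping
a per-device plug_cnt, a driver registers a callback on the caller's
plug and is invoked at unplug time; md_unplug() in the md.c diff below
is that callback:

    /* Returns non-zero if md_unplug() is (now) registered on the
     * current task's plug, zero if there is no plug or the allocation
     * failed.  blk_check_plugged() allocates the blk_plug_cb, stores
     * mddev in cb->data and avoids registering a duplicate callback. */
    static inline int mddev_check_plugged(struct mddev *mddev)
    {
            return !!blk_check_plugged(md_unplug, mddev,
                                       sizeof(struct blk_plug_cb));
    }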

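The floppy init change uses the kernel's async infrastructure so the
slow probe no longer serializes boot.  A rough sketch of the pattern,
assuming the async_schedule() API from <linux/async.h>; the function
names here follow the floppy driver but are illustrative, not the exact
commit:

    #include <linux/async.h>
    #include <linux/init.h>

    /* Runs on an async worker thread instead of blocking module_init(). */
    static void floppy_async_init(void *data, async_cookie_t cookie)
    {
            floppy_init();          /* the existing, slow probe path */
    }

    static int __init floppy_module_init(void)
    {
            async_schedule(floppy_async_init, NULL);
            return 0;
    }
    module_init(floppy_module_init);
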
* 'for-3.6/drivers' of git://git.kernel.dk/linux-block:
  floppy: remove duplicated flag FD_RAW_NEED_DISK
  blk: pass from_schedule to non-request unplug functions.
  block: stack unplug
  blk: centralize non-request unplug handling.
  md: remove plug_cnt feature of plugging.
  block/nbd: micro-optimization in nbd request completion
  drbd: announce FLUSH/FUA capability to upper layers
  drbd: fix max_bio_size to be unsigned
  drbd: flush drbd work queue before invalidate/invalidate remote
  drbd: fix potential access after free
  drbd: call local-io-error handler early
  drbd: do not reset rs_pending_cnt too early
  drbd: reset congestion information before reporting it in /proc/drbd
  drbd: report congestion if we are waiting for some userland callback
  drbd: differentiate between normal and forced detach
  drbd: cleanup, remove two unused global flags
  floppy: Run floppy initialization asynchronous

drivers/block/nbd.c
drivers/md/md.c
drivers/md/raid1.c
drivers/md/raid10.c
drivers/md/raid5.c

diff --combined drivers/block/nbd.c
index 76bc96fd01c85316813b5baeece41eeadb4b22ff,8957b9f0cfaddee18e9b8a47ac50d22db4bfb7c2..d07c9f7fded600d76192330ef37c35c2135c0fb5
@@@ -154,7 -154,6 +154,7 @@@ static int sock_xmit(struct nbd_device 
        struct msghdr msg;
        struct kvec iov;
        sigset_t blocked, oldset;
 +      unsigned long pflags = current->flags;
  
        if (unlikely(!sock)) {
                dev_err(disk_to_dev(nbd->disk),
        siginitsetinv(&blocked, sigmask(SIGKILL));
        sigprocmask(SIG_SETMASK, &blocked, &oldset);
  
 +      current->flags |= PF_MEMALLOC;
        do {
 -              sock->sk->sk_allocation = GFP_NOIO;
 +              sock->sk->sk_allocation = GFP_NOIO | __GFP_MEMALLOC;
                iov.iov_base = buf;
                iov.iov_len = size;
                msg.msg_name = NULL;
        } while (size > 0);
  
        sigprocmask(SIG_SETMASK, &oldset, NULL);
 +      tsk_restore_flags(current, pflags, PF_MEMALLOC);
  
        return result;
  }
@@@ -408,7 -405,6 +408,7 @@@ static int nbd_do_it(struct nbd_device 
  
        BUG_ON(nbd->magic != NBD_MAGIC);
  
 +      sk_set_memalloc(nbd->sock->sk);
        nbd->pid = task_pid_nr(current);
        ret = device_create_file(disk_to_dev(nbd->disk), &pid_attr);
        if (ret) {
@@@ -485,7 -481,7 +485,7 @@@ static void nbd_handle_req(struct nbd_d
                nbd_end_request(req);
        } else {
                spin_lock(&nbd->queue_lock);
-               list_add(&req->queuelist, &nbd->queue_head);
+               list_add_tail(&req->queuelist, &nbd->queue_head);
                spin_unlock(&nbd->queue_lock);
        }
  
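The list_add_tail() change above keeps nbd->queue_head in submission
order.  Since the server tends to complete requests roughly in the
order they were sent, the reply handler's walk of the queue now usually
finds its match near the front.  A rough sketch of that lookup (not the
exact driver code; the real function matches the request pointer echoed
back in the reply's handle):

    /* Find and unlink the queued request that a reply refers to. */
    static struct request *find_pending_request(struct nbd_device *nbd,
                                                struct request *xreq)
    {
            struct request *req, *tmp;

            spin_lock(&nbd->queue_lock);
            list_for_each_entry_safe(req, tmp, &nbd->queue_head, queuelist) {
                    if (req != xreq)
                            continue;
                    list_del_init(&req->queuelist);
                    spin_unlock(&nbd->queue_lock);
                    return req;
            }
            spin_unlock(&nbd->queue_lock);
            return NULL;
    }
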
diff --combined drivers/md/md.c
index f6c46109b071310fdc0a28b89a916cd67cf69b9d,db02d2efb76f943185ec0d4101c4913fe0b3a41e..fcd098794d37d16c7b48b12c54b5ba6f5e6ba60e
@@@ -498,61 -498,13 +498,13 @@@ void md_flush_request(struct mddev *mdd
  }
  EXPORT_SYMBOL(md_flush_request);
  
- /* Support for plugging.
-  * This mirrors the plugging support in request_queue, but does not
-  * require having a whole queue or request structures.
-  * We allocate an md_plug_cb for each md device and each thread it gets
-  * plugged on.  This links tot the private plug_handle structure in the
-  * personality data where we keep a count of the number of outstanding
-  * plugs so other code can see if a plug is active.
-  */
- struct md_plug_cb {
-       struct blk_plug_cb cb;
-       struct mddev *mddev;
- };
- static void plugger_unplug(struct blk_plug_cb *cb)
- {
-       struct md_plug_cb *mdcb = container_of(cb, struct md_plug_cb, cb);
-       if (atomic_dec_and_test(&mdcb->mddev->plug_cnt))
-               md_wakeup_thread(mdcb->mddev->thread);
-       kfree(mdcb);
- }
- /* Check that an unplug wakeup will come shortly.
-  * If not, wakeup the md thread immediately
-  */
- int mddev_check_plugged(struct mddev *mddev)
+ void md_unplug(struct blk_plug_cb *cb, bool from_schedule)
  {
-       struct blk_plug *plug = current->plug;
-       struct md_plug_cb *mdcb;
-       if (!plug)
-               return 0;
-       list_for_each_entry(mdcb, &plug->cb_list, cb.list) {
-               if (mdcb->cb.callback == plugger_unplug &&
-                   mdcb->mddev == mddev) {
-                       /* Already on the list, move to top */
-                       if (mdcb != list_first_entry(&plug->cb_list,
-                                                   struct md_plug_cb,
-                                                   cb.list))
-                               list_move(&mdcb->cb.list, &plug->cb_list);
-                       return 1;
-               }
-       }
-       /* Not currently on the callback list */
-       mdcb = kmalloc(sizeof(*mdcb), GFP_ATOMIC);
-       if (!mdcb)
-               return 0;
-       mdcb->mddev = mddev;
-       mdcb->cb.callback = plugger_unplug;
-       atomic_inc(&mddev->plug_cnt);
-       list_add(&mdcb->cb.list, &plug->cb_list);
-       return 1;
+       struct mddev *mddev = cb->data;
+       md_wakeup_thread(mddev->thread);
+       kfree(cb);
  }
- EXPORT_SYMBOL_GPL(mddev_check_plugged);
+ EXPORT_SYMBOL(md_unplug);
  
  static inline struct mddev *mddev_get(struct mddev *mddev)
  {
@@@ -602,7 -554,6 +554,6 @@@ void mddev_init(struct mddev *mddev
        atomic_set(&mddev->active, 1);
        atomic_set(&mddev->openers, 0);
        atomic_set(&mddev->active_io, 0);
-       atomic_set(&mddev->plug_cnt, 0);
        spin_lock_init(&mddev->write_lock);
        atomic_set(&mddev->flush_pending, 0);
        init_waitqueue_head(&mddev->sb_wait);
@@@ -3942,13 -3893,17 +3893,13 @@@ array_state_store(struct mddev *mddev, 
                break;
        case clear:
                /* stopping an active array */
 -              if (atomic_read(&mddev->openers) > 0)
 -                      return -EBUSY;
                err = do_md_stop(mddev, 0, NULL);
                break;
        case inactive:
                /* stopping an active array */
 -              if (mddev->pers) {
 -                      if (atomic_read(&mddev->openers) > 0)
 -                              return -EBUSY;
 +              if (mddev->pers)
                        err = do_md_stop(mddev, 2, NULL);
 -              else
 +              else
                        err = 0; /* already inactive */
                break;
        case suspended:
diff --combined drivers/md/raid1.c
index 197f62681db562bd38d643a115be92940c46b9a6,36a8fc059ac36efc068603c51348132324d5d399..9f7f8bee84423f1a7dd35cc33bda9874407af06e
   */
  #define       NR_RAID1_BIOS 256
  
 +/* when we get a read error on a read-only array, we redirect to another
 + * device without failing the first device, or trying to over-write to
 + * correct the read error.  To keep track of bad blocks on a per-bio
 + * level, we store IO_BLOCKED in the appropriate 'bios' pointer
 + */
 +#define IO_BLOCKED ((struct bio *)1)
 +/* When we successfully write to a known bad-block, we need to remove the
 + * bad-block marking which must be done from process context.  So we record
 + * the success by setting devs[n].bio to IO_MADE_GOOD
 + */
 +#define IO_MADE_GOOD ((struct bio *)2)
 +
 +#define BIO_SPECIAL(bio) ((unsigned long)bio <= 2)
 +
  /* When there are this many requests queued to be written by
   * the raid1 thread, we become 'congested' to provide back-pressure
   * for writeback.
@@@ -497,14 -483,12 +497,14 @@@ static int read_balance(struct r1conf *
        const sector_t this_sector = r1_bio->sector;
        int sectors;
        int best_good_sectors;
 -      int start_disk;
 -      int best_disk;
 -      int i;
 +      int best_disk, best_dist_disk, best_pending_disk;
 +      int has_nonrot_disk;
 +      int disk;
        sector_t best_dist;
 +      unsigned int min_pending;
        struct md_rdev *rdev;
        int choose_first;
 +      int choose_next_idle;
  
        rcu_read_lock();
        /*
   retry:
        sectors = r1_bio->sectors;
        best_disk = -1;
 +      best_dist_disk = -1;
        best_dist = MaxSector;
 +      best_pending_disk = -1;
 +      min_pending = UINT_MAX;
        best_good_sectors = 0;
 +      has_nonrot_disk = 0;
 +      choose_next_idle = 0;
  
        if (conf->mddev->recovery_cp < MaxSector &&
 -          (this_sector + sectors >= conf->next_resync)) {
 +          (this_sector + sectors >= conf->next_resync))
                choose_first = 1;
 -              start_disk = 0;
 -      } else {
 +      else
                choose_first = 0;
 -              start_disk = conf->last_used;
 -      }
  
 -      for (i = 0 ; i < conf->raid_disks * 2 ; i++) {
 +      for (disk = 0 ; disk < conf->raid_disks * 2 ; disk++) {
                sector_t dist;
                sector_t first_bad;
                int bad_sectors;
 -
 -              int disk = start_disk + i;
 -              if (disk >= conf->raid_disks * 2)
 -                      disk -= conf->raid_disks * 2;
 +              unsigned int pending;
 +              bool nonrot;
  
                rdev = rcu_dereference(conf->mirrors[disk].rdev);
                if (r1_bio->bios[disk] == IO_BLOCKED
                } else
                        best_good_sectors = sectors;
  
 +              nonrot = blk_queue_nonrot(bdev_get_queue(rdev->bdev));
 +              has_nonrot_disk |= nonrot;
 +              pending = atomic_read(&rdev->nr_pending);
                dist = abs(this_sector - conf->mirrors[disk].head_position);
 -              if (choose_first
 -                  /* Don't change to another disk for sequential reads */
 -                  || conf->next_seq_sect == this_sector
 -                  || dist == 0
 -                  /* If device is idle, use it */
 -                  || atomic_read(&rdev->nr_pending) == 0) {
 +              if (choose_first) {
 +                      best_disk = disk;
 +                      break;
 +              }
 +              /* Don't change to another disk for sequential reads */
 +              if (conf->mirrors[disk].next_seq_sect == this_sector
 +                  || dist == 0) {
 +                      int opt_iosize = bdev_io_opt(rdev->bdev) >> 9;
 +                      struct raid1_info *mirror = &conf->mirrors[disk];
 +
 +                      best_disk = disk;
 +                      /*
 +                       * If the buffered sequential IO size exceeds the
 +                       * optimal iosize, check whether there is an idle disk
 +                       * and, if so, choose it. read_balance could already
 +                       * have chosen an idle disk before noticing that this
 +                       * disk is handling sequential IO. That doesn't matter:
 +                       * this disk will go idle and be used again once the
 +                       * first disk's IO size exceeds the optimal iosize. In
 +                       * this way the first disk's iosize is at least the
 +                       * optimal iosize. The second disk's iosize might be
 +                       * small, but that is not a big deal since by the time
 +                       * the second disk starts IO, the first disk is likely
 +                       * still busy.
 +                       */
 +                      if (nonrot && opt_iosize > 0 &&
 +                          mirror->seq_start != MaxSector &&
 +                          mirror->next_seq_sect > opt_iosize &&
 +                          mirror->next_seq_sect - opt_iosize >=
 +                          mirror->seq_start) {
 +                              choose_next_idle = 1;
 +                              continue;
 +                      }
 +                      break;
 +              }
 +              /* If device is idle, use it */
 +              if (pending == 0) {
                        best_disk = disk;
                        break;
                }
 +
 +              if (choose_next_idle)
 +                      continue;
 +
 +              if (min_pending > pending) {
 +                      min_pending = pending;
 +                      best_pending_disk = disk;
 +              }
 +
                if (dist < best_dist) {
                        best_dist = dist;
 -                      best_disk = disk;
 +                      best_dist_disk = disk;
                }
        }
  
 +      /*
 +       * If all disks are rotational, choose the closest disk. If any disk is
 +       * non-rotational, choose the disk with the fewest pending requests,
 +       * even if that disk is rotational; this may or may not be optimal for
 +       * raids with mixed rotational/non-rotational disks, depending on the
 +       * workload.
 +       */
 +      if (best_disk == -1) {
 +              if (has_nonrot_disk)
 +                      best_disk = best_pending_disk;
 +              else
 +                      best_disk = best_dist_disk;
 +      }
 +
        if (best_disk >= 0) {
                rdev = rcu_dereference(conf->mirrors[best_disk].rdev);
                if (!rdev)
                        goto retry;
                }
                sectors = best_good_sectors;
 -              conf->next_seq_sect = this_sector + sectors;
 -              conf->last_used = best_disk;
 +
 +              if (conf->mirrors[best_disk].next_seq_sect != this_sector)
 +                      conf->mirrors[best_disk].seq_start = this_sector;
 +
 +              conf->mirrors[best_disk].next_seq_sect = this_sector + sectors;
        }
        rcu_read_unlock();
        *max_sectors = sectors;
@@@ -947,7 -873,7 +947,7 @@@ do_sync_io
  static void make_request(struct mddev *mddev, struct bio * bio)
  {
        struct r1conf *conf = mddev->private;
 -      struct mirror_info *mirror;
 +      struct raid1_info *mirror;
        struct r1bio *r1_bio;
        struct bio *read_bio;
        int i, disks;
@@@ -1438,7 -1364,7 +1438,7 @@@ static int raid1_add_disk(struct mddev 
        struct r1conf *conf = mddev->private;
        int err = -EEXIST;
        int mirror = 0;
 -      struct mirror_info *p;
 +      struct raid1_info *p;
        int first = 0;
        int last = conf->raid_disks - 1;
        struct request_queue *q = bdev_get_queue(rdev->bdev);
@@@ -1507,7 -1433,7 +1507,7 @@@ static int raid1_remove_disk(struct mdd
        struct r1conf *conf = mddev->private;
        int err = 0;
        int number = rdev->raid_disk;
 -      struct mirror_info *p = conf->mirrors+ number;
 +      struct raid1_info *p = conf->mirrors + number;
  
        if (rdev != p->rdev)
                p = conf->mirrors + conf->raid_disks + number;
@@@ -2247,8 -2173,7 +2247,7 @@@ static void raid1d(struct mddev *mddev
        blk_start_plug(&plug);
        for (;;) {
  
-               if (atomic_read(&mddev->plug_cnt) == 0)
-                       flush_pending_writes(conf);
+               flush_pending_writes(conf);
  
                spin_lock_irqsave(&conf->device_lock, flags);
                if (list_empty(head)) {
@@@ -2445,18 -2370,6 +2444,18 @@@ static sector_t sync_request(struct mdd
                                bio->bi_rw = READ;
                                bio->bi_end_io = end_sync_read;
                                read_targets++;
 +                      } else if (!test_bit(WriteErrorSeen, &rdev->flags) &&
 +                              test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
 +                              !test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) {
 +                              /*
 +                               * The device is suitable for reading (InSync),
 +                               * but has bad block(s) here. Let's try to correct them,
 +                               * if we are doing resync or repair. Otherwise, leave
 +                               * this device alone for this sync request.
 +                               */
 +                              bio->bi_rw = WRITE;
 +                              bio->bi_end_io = end_sync_write;
 +                              write_targets++;
                        }
                }
                if (bio->bi_end_io) {
                /* There is nowhere to write, so all non-sync
                 * drives must be failed - so we are finished
                 */
 -              sector_t rv = max_sector - sector_nr;
 +              sector_t rv;
 +              if (min_bad > 0)
 +                      max_sector = sector_nr + min_bad;
 +              rv = max_sector - sector_nr;
                *skipped = 1;
                put_buf(r1_bio);
                return rv;
@@@ -2610,7 -2520,7 +2609,7 @@@ static struct r1conf *setup_conf(struc
  {
        struct r1conf *conf;
        int i;
 -      struct mirror_info *disk;
 +      struct raid1_info *disk;
        struct md_rdev *rdev;
        int err = -ENOMEM;
  
        if (!conf)
                goto abort;
  
 -      conf->mirrors = kzalloc(sizeof(struct mirror_info)
 +      conf->mirrors = kzalloc(sizeof(struct raid1_info)
                                * mddev->raid_disks * 2,
                                 GFP_KERNEL);
        if (!conf->mirrors)
                        mddev->merge_check_needed = 1;
  
                disk->head_position = 0;
 +              disk->seq_start = MaxSector;
        }
        conf->raid_disks = mddev->raid_disks;
        conf->mddev = mddev;
        conf->recovery_disabled = mddev->recovery_disabled - 1;
  
        err = -EIO;
 -      conf->last_used = -1;
        for (i = 0; i < conf->raid_disks * 2; i++) {
  
                disk = conf->mirrors + i;
                        if (disk->rdev &&
                            (disk->rdev->saved_raid_disk < 0))
                                conf->fullsync = 1;
 -              } else if (conf->last_used < 0)
 -                      /*
 -                       * The first working device is used as a
 -                       * starting point to read balancing.
 -                       */
 -                      conf->last_used = i;
 +              }
        }
  
 -      if (conf->last_used < 0) {
 -              printk(KERN_ERR "md/raid1:%s: no operational mirrors\n",
 -                     mdname(mddev));
 -              goto abort;
 -      }
        err = -ENOMEM;
        conf->thread = md_register_thread(raid1d, mddev, "raid1");
        if (!conf->thread) {
@@@ -2877,7 -2797,7 +2876,7 @@@ static int raid1_reshape(struct mddev *
         */
        mempool_t *newpool, *oldpool;
        struct pool_info *newpoolinfo;
 -      struct mirror_info *newmirrors;
 +      struct raid1_info *newmirrors;
        struct r1conf *conf = mddev->private;
        int cnt, raid_disks;
        unsigned long flags;
                kfree(newpoolinfo);
                return -ENOMEM;
        }
 -      newmirrors = kzalloc(sizeof(struct mirror_info) * raid_disks * 2,
 +      newmirrors = kzalloc(sizeof(struct raid1_info) * raid_disks * 2,
                             GFP_KERNEL);
        if (!newmirrors) {
                kfree(newpoolinfo);
        conf->raid_disks = mddev->raid_disks = raid_disks;
        mddev->delta_disks = 0;
  
 -      conf->last_used = 0; /* just make sure it is in-range */
        lower_barrier(conf);
  
        set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
diff --combined drivers/md/raid10.c
index e2549deab7c3a87c9f35c44499fa21e2f9f57b63,5d33603a497d6b7a0ee53c54d51501b99c4ee2ac..de5ed6fd8806c3c3db39bbd894fabb04734b4523
   */
  #define       NR_RAID10_BIOS 256
  
 -/* When there are this many requests queue to be written by
 +/* when we get a read error on a read-only array, we redirect to another
 + * device without failing the first device, or trying to over-write to
 + * correct the read error.  To keep track of bad blocks on a per-bio
 + * level, we store IO_BLOCKED in the appropriate 'bios' pointer
 + */
 +#define IO_BLOCKED ((struct bio *)1)
 +/* When we successfully write to a known bad-block, we need to remove the
 + * bad-block marking which must be done from process context.  So we record
 + * the success by setting devs[n].bio to IO_MADE_GOOD
 + */
 +#define IO_MADE_GOOD ((struct bio *)2)
 +
 +#define BIO_SPECIAL(bio) ((unsigned long)bio <= 2)
 +
 +/* When there are this many requests queued to be written by
   * the raid10 thread, we become 'congested' to provide back-pressure
   * for writeback.
   */
@@@ -731,7 -717,7 +731,7 @@@ static struct md_rdev *read_balance(str
        int sectors = r10_bio->sectors;
        int best_good_sectors;
        sector_t new_distance, best_dist;
 -      struct md_rdev *rdev, *best_rdev;
 +      struct md_rdev *best_rdev, *rdev = NULL;
        int do_balance;
        int best_slot;
        struct geom *geo = &conf->geo;
@@@ -853,8 -839,9 +853,8 @@@ retry
        return rdev;
  }
  
 -static int raid10_congested(void *data, int bits)
 +int md_raid10_congested(struct mddev *mddev, int bits)
  {
 -      struct mddev *mddev = data;
        struct r10conf *conf = mddev->private;
        int i, ret = 0;
  
            conf->pending_count >= max_queued_requests)
                return 1;
  
 -      if (mddev_congested(mddev, bits))
 -              return 1;
        rcu_read_lock();
        for (i = 0;
             (i < conf->geo.raid_disks || i < conf->prev.raid_disks)
        rcu_read_unlock();
        return ret;
  }
 +EXPORT_SYMBOL_GPL(md_raid10_congested);
 +
 +static int raid10_congested(void *data, int bits)
 +{
 +      struct mddev *mddev = data;
 +
 +      return mddev_congested(mddev, bits) ||
 +              md_raid10_congested(mddev, bits);
 +}
  
  static void flush_pending_writes(struct r10conf *conf)
  {
@@@ -1566,7 -1546,7 +1566,7 @@@ static void error(struct mddev *mddev, 
  static void print_conf(struct r10conf *conf)
  {
        int i;
 -      struct mirror_info *tmp;
 +      struct raid10_info *tmp;
  
        printk(KERN_DEBUG "RAID10 conf printout:\n");
        if (!conf) {
@@@ -1600,7 -1580,7 +1600,7 @@@ static int raid10_spare_active(struct m
  {
        int i;
        struct r10conf *conf = mddev->private;
 -      struct mirror_info *tmp;
 +      struct raid10_info *tmp;
        int count = 0;
        unsigned long flags;
  
@@@ -1675,7 -1655,7 +1675,7 @@@ static int raid10_add_disk(struct mdde
        else
                mirror = first;
        for ( ; mirror <= last ; mirror++) {
 -              struct mirror_info *p = &conf->mirrors[mirror];
 +              struct raid10_info *p = &conf->mirrors[mirror];
                if (p->recovery_disabled == mddev->recovery_disabled)
                        continue;
                if (p->rdev) {
@@@ -1729,7 -1709,7 +1729,7 @@@ static int raid10_remove_disk(struct md
        int err = 0;
        int number = rdev->raid_disk;
        struct md_rdev **rdevp;
 -      struct mirror_info *p = conf->mirrors + number;
 +      struct raid10_info *p = conf->mirrors + number;
  
        print_conf(conf);
        if (rdev == p->rdev)
@@@ -2680,8 -2660,7 +2680,7 @@@ static void raid10d(struct mddev *mddev
        blk_start_plug(&plug);
        for (;;) {
  
-               if (atomic_read(&mddev->plug_cnt) == 0)
-                       flush_pending_writes(conf);
+               flush_pending_writes(conf);
  
                spin_lock_irqsave(&conf->device_lock, flags);
                if (list_empty(head)) {
@@@ -2896,7 -2875,7 +2895,7 @@@ static sector_t sync_request(struct mdd
                        sector_t sect;
                        int must_sync;
                        int any_working;
 -                      struct mirror_info *mirror = &conf->mirrors[i];
 +                      struct raid10_info *mirror = &conf->mirrors[i];
  
                        if ((mirror->rdev == NULL ||
                             test_bit(In_sync, &mirror->rdev->flags))
@@@ -3408,7 -3387,7 +3407,7 @@@ static struct r10conf *setup_conf(struc
                goto out;
  
        /* FIXME calc properly */
 -      conf->mirrors = kzalloc(sizeof(struct mirror_info)*(mddev->raid_disks +
 +      conf->mirrors = kzalloc(sizeof(struct raid10_info)*(mddev->raid_disks +
                                                            max(0,mddev->delta_disks)),
                                GFP_KERNEL);
        if (!conf->mirrors)
@@@ -3472,7 -3451,7 +3471,7 @@@ static int run(struct mddev *mddev
  {
        struct r10conf *conf;
        int i, disk_idx, chunk_size;
 -      struct mirror_info *disk;
 +      struct raid10_info *disk;
        struct md_rdev *rdev;
        sector_t size;
        sector_t min_offset_diff = 0;
        conf->thread = NULL;
  
        chunk_size = mddev->chunk_sectors << 9;
 -      blk_queue_io_min(mddev->queue, chunk_size);
 -      if (conf->geo.raid_disks % conf->geo.near_copies)
 -              blk_queue_io_opt(mddev->queue, chunk_size * conf->geo.raid_disks);
 -      else
 -              blk_queue_io_opt(mddev->queue, chunk_size *
 -                               (conf->geo.raid_disks / conf->geo.near_copies));
 +      if (mddev->queue) {
 +              blk_queue_io_min(mddev->queue, chunk_size);
 +              if (conf->geo.raid_disks % conf->geo.near_copies)
 +                      blk_queue_io_opt(mddev->queue, chunk_size * conf->geo.raid_disks);
 +              else
 +                      blk_queue_io_opt(mddev->queue, chunk_size *
 +                                       (conf->geo.raid_disks / conf->geo.near_copies));
 +      }
  
        rdev_for_each(rdev, mddev) {
                long long diff;
                if (first || diff < min_offset_diff)
                        min_offset_diff = diff;
  
 -              disk_stack_limits(mddev->gendisk, rdev->bdev,
 -                                rdev->data_offset << 9);
 +              if (mddev->gendisk)
 +                      disk_stack_limits(mddev->gendisk, rdev->bdev,
 +                                        rdev->data_offset << 9);
  
                disk->head_position = 0;
        }
        md_set_array_sectors(mddev, size);
        mddev->resync_max_sectors = size;
  
 -      mddev->queue->backing_dev_info.congested_fn = raid10_congested;
 -      mddev->queue->backing_dev_info.congested_data = mddev;
 -
 -      /* Calculate max read-ahead size.
 -       * We need to readahead at least twice a whole stripe....
 -       * maybe...
 -       */
 -      {
 +      if (mddev->queue) {
                int stripe = conf->geo.raid_disks *
                        ((mddev->chunk_sectors << 9) / PAGE_SIZE);
 +              mddev->queue->backing_dev_info.congested_fn = raid10_congested;
 +              mddev->queue->backing_dev_info.congested_data = mddev;
 +
 +              /* Calculate max read-ahead size.
 +               * We need to readahead at least twice a whole stripe....
 +               * maybe...
 +               */
                stripe /= conf->geo.near_copies;
                if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
                        mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
 +              blk_queue_merge_bvec(mddev->queue, raid10_mergeable_bvec);
        }
  
 -      blk_queue_merge_bvec(mddev->queue, raid10_mergeable_bvec);
  
        if (md_integrity_register(mddev))
                goto out_free_conf;
@@@ -3664,10 -3640,7 +3663,10 @@@ static int stop(struct mddev *mddev
        lower_barrier(conf);
  
        md_unregister_thread(&mddev->thread);
 -      blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
 +      if (mddev->queue)
 +              /* the unplug fn references 'conf'*/
 +              blk_sync_queue(mddev->queue);
 +
        if (conf->r10bio_pool)
                mempool_destroy(conf->r10bio_pool);
        kfree(conf->mirrors);
@@@ -3831,7 -3804,7 +3830,7 @@@ static int raid10_check_reshape(struct 
        if (mddev->delta_disks > 0) {
                /* allocate new 'mirrors' list */
                conf->mirrors_new = kzalloc(
 -                      sizeof(struct mirror_info)
 +                      sizeof(struct raid10_info)
                        *(mddev->raid_disks +
                          mddev->delta_disks),
                        GFP_KERNEL);
@@@ -3956,7 -3929,7 +3955,7 @@@ static int raid10_start_reshape(struct 
        spin_lock_irq(&conf->device_lock);
        if (conf->mirrors_new) {
                memcpy(conf->mirrors_new, conf->mirrors,
 -                     sizeof(struct mirror_info)*conf->prev.raid_disks);
 +                     sizeof(struct raid10_info)*conf->prev.raid_disks);
                smp_mb();
                kfree(conf->mirrors_old); /* FIXME and elsewhere */
                conf->mirrors_old = conf->mirrors;
diff --combined drivers/md/raid5.c
index 259f519814ca0f083696d875dc16fdc4bd141659,bde9da2baa392d76a1ea885a136790487ccf962f..87a2d0bdedd1187a695a4d7f25a6d2e5bc2164fe
@@@ -99,40 -99,34 +99,40 @@@ static inline struct bio *r5_next_bio(s
   * We maintain a biased count of active stripes in the bottom 16 bits of
   * bi_phys_segments, and a count of processed stripes in the upper 16 bits
   */
 -static inline int raid5_bi_phys_segments(struct bio *bio)
 +static inline int raid5_bi_processed_stripes(struct bio *bio)
  {
 -      return bio->bi_phys_segments & 0xffff;
 +      atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
 +      return (atomic_read(segments) >> 16) & 0xffff;
  }
  
 -static inline int raid5_bi_hw_segments(struct bio *bio)
 +static inline int raid5_dec_bi_active_stripes(struct bio *bio)
  {
 -      return (bio->bi_phys_segments >> 16) & 0xffff;
 +      atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
 +      return atomic_sub_return(1, segments) & 0xffff;
  }
  
 -static inline int raid5_dec_bi_phys_segments(struct bio *bio)
 +static inline void raid5_inc_bi_active_stripes(struct bio *bio)
  {
 -      --bio->bi_phys_segments;
 -      return raid5_bi_phys_segments(bio);
 +      atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
 +      atomic_inc(segments);
  }
  
 -static inline int raid5_dec_bi_hw_segments(struct bio *bio)
 +static inline void raid5_set_bi_processed_stripes(struct bio *bio,
 +      unsigned int cnt)
  {
 -      unsigned short val = raid5_bi_hw_segments(bio);
 +      atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
 +      int old, new;
  
 -      --val;
 -      bio->bi_phys_segments = (val << 16) | raid5_bi_phys_segments(bio);
 -      return val;
 +      do {
 +              old = atomic_read(segments);
 +              new = (old & 0xffff) | (cnt << 16);
 +      } while (atomic_cmpxchg(segments, old, new) != old);
  }
  
 -static inline void raid5_set_bi_hw_segments(struct bio *bio, unsigned int cnt)
 +static inline void raid5_set_bi_stripes(struct bio *bio, unsigned int cnt)
  {
 -      bio->bi_phys_segments = raid5_bi_phys_segments(bio) | (cnt << 16);
 +      atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
 +      atomic_set(segments, cnt);
  }
  
  /* Find first data disk in a raid6 stripe */
@@@ -196,56 -190,49 +196,56 @@@ static int stripe_operations_active(str
               test_bit(STRIPE_COMPUTE_RUN, &sh->state);
  }
  
 -static void __release_stripe(struct r5conf *conf, struct stripe_head *sh)
 +static void do_release_stripe(struct r5conf *conf, struct stripe_head *sh)
  {
 -      if (atomic_dec_and_test(&sh->count)) {
 -              BUG_ON(!list_empty(&sh->lru));
 -              BUG_ON(atomic_read(&conf->active_stripes)==0);
 -              if (test_bit(STRIPE_HANDLE, &sh->state)) {
 -                      if (test_bit(STRIPE_DELAYED, &sh->state) &&
 -                          !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
 -                              list_add_tail(&sh->lru, &conf->delayed_list);
 -                      else if (test_bit(STRIPE_BIT_DELAY, &sh->state) &&
 -                                 sh->bm_seq - conf->seq_write > 0)
 -                              list_add_tail(&sh->lru, &conf->bitmap_list);
 -                      else {
 -                              clear_bit(STRIPE_DELAYED, &sh->state);
 -                              clear_bit(STRIPE_BIT_DELAY, &sh->state);
 -                              list_add_tail(&sh->lru, &conf->handle_list);
 -                      }
 -                      md_wakeup_thread(conf->mddev->thread);
 -              } else {
 -                      BUG_ON(stripe_operations_active(sh));
 -                      if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
 -                              if (atomic_dec_return(&conf->preread_active_stripes)
 -                                  < IO_THRESHOLD)
 -                                      md_wakeup_thread(conf->mddev->thread);
 -                      atomic_dec(&conf->active_stripes);
 -                      if (!test_bit(STRIPE_EXPANDING, &sh->state)) {
 -                              list_add_tail(&sh->lru, &conf->inactive_list);
 -                              wake_up(&conf->wait_for_stripe);
 -                              if (conf->retry_read_aligned)
 -                                      md_wakeup_thread(conf->mddev->thread);
 -                      }
 +      BUG_ON(!list_empty(&sh->lru));
 +      BUG_ON(atomic_read(&conf->active_stripes)==0);
 +      if (test_bit(STRIPE_HANDLE, &sh->state)) {
 +              if (test_bit(STRIPE_DELAYED, &sh->state) &&
 +                  !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
 +                      list_add_tail(&sh->lru, &conf->delayed_list);
 +              else if (test_bit(STRIPE_BIT_DELAY, &sh->state) &&
 +                         sh->bm_seq - conf->seq_write > 0)
 +                      list_add_tail(&sh->lru, &conf->bitmap_list);
 +              else {
 +                      clear_bit(STRIPE_DELAYED, &sh->state);
 +                      clear_bit(STRIPE_BIT_DELAY, &sh->state);
 +                      list_add_tail(&sh->lru, &conf->handle_list);
 +              }
 +              md_wakeup_thread(conf->mddev->thread);
 +      } else {
 +              BUG_ON(stripe_operations_active(sh));
 +              if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
 +                      if (atomic_dec_return(&conf->preread_active_stripes)
 +                          < IO_THRESHOLD)
 +                              md_wakeup_thread(conf->mddev->thread);
 +              atomic_dec(&conf->active_stripes);
 +              if (!test_bit(STRIPE_EXPANDING, &sh->state)) {
 +                      list_add_tail(&sh->lru, &conf->inactive_list);
 +                      wake_up(&conf->wait_for_stripe);
 +                      if (conf->retry_read_aligned)
 +                              md_wakeup_thread(conf->mddev->thread);
                }
        }
  }
  
 +static void __release_stripe(struct r5conf *conf, struct stripe_head *sh)
 +{
 +      if (atomic_dec_and_test(&sh->count))
 +              do_release_stripe(conf, sh);
 +}
 +
  static void release_stripe(struct stripe_head *sh)
  {
        struct r5conf *conf = sh->raid_conf;
        unsigned long flags;
  
 -      spin_lock_irqsave(&conf->device_lock, flags);
 -      __release_stripe(conf, sh);
 -      spin_unlock_irqrestore(&conf->device_lock, flags);
 +      local_irq_save(flags);
 +      if (atomic_dec_and_lock(&sh->count, &conf->device_lock)) {
 +              do_release_stripe(conf, sh);
 +              spin_unlock(&conf->device_lock);
 +      }
 +      local_irq_restore(flags);
  }
  
  static inline void remove_hash(struct stripe_head *sh)
@@@ -653,9 -640,6 +653,9 @@@ static void ops_run_io(struct stripe_he
                        else
                                bi->bi_sector = (sh->sector
                                                 + rdev->data_offset);
 +                      if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
 +                              bi->bi_rw |= REQ_FLUSH;
 +
                        bi->bi_flags = 1 << BIO_UPTODATE;
                        bi->bi_idx = 0;
                        bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
@@@ -765,12 -749,14 +765,12 @@@ static void ops_complete_biofill(void *
  {
        struct stripe_head *sh = stripe_head_ref;
        struct bio *return_bi = NULL;
 -      struct r5conf *conf = sh->raid_conf;
        int i;
  
        pr_debug("%s: stripe %llu\n", __func__,
                (unsigned long long)sh->sector);
  
        /* clear completed biofills */
 -      spin_lock_irq(&conf->device_lock);
        for (i = sh->disks; i--; ) {
                struct r5dev *dev = &sh->dev[i];
  
                        while (rbi && rbi->bi_sector <
                                dev->sector + STRIPE_SECTORS) {
                                rbi2 = r5_next_bio(rbi, dev->sector);
 -                              if (!raid5_dec_bi_phys_segments(rbi)) {
 +                              if (!raid5_dec_bi_active_stripes(rbi)) {
                                        rbi->bi_next = return_bi;
                                        return_bi = rbi;
                                }
                        }
                }
        }
 -      spin_unlock_irq(&conf->device_lock);
        clear_bit(STRIPE_BIOFILL_RUN, &sh->state);
  
        return_io(return_bi);
  static void ops_run_biofill(struct stripe_head *sh)
  {
        struct dma_async_tx_descriptor *tx = NULL;
 -      struct r5conf *conf = sh->raid_conf;
        struct async_submit_ctl submit;
        int i;
  
                struct r5dev *dev = &sh->dev[i];
                if (test_bit(R5_Wantfill, &dev->flags)) {
                        struct bio *rbi;
 -                      spin_lock_irq(&conf->device_lock);
 +                      spin_lock_irq(&sh->stripe_lock);
                        dev->read = rbi = dev->toread;
                        dev->toread = NULL;
 -                      spin_unlock_irq(&conf->device_lock);
 +                      spin_unlock_irq(&sh->stripe_lock);
                        while (rbi && rbi->bi_sector <
                                dev->sector + STRIPE_SECTORS) {
                                tx = async_copy_data(0, rbi, dev->page,
@@@ -1156,12 -1144,12 +1156,12 @@@ ops_run_biodrain(struct stripe_head *sh
                if (test_and_clear_bit(R5_Wantdrain, &dev->flags)) {
                        struct bio *wbi;
  
 -                      spin_lock_irq(&sh->raid_conf->device_lock);
 +                      spin_lock_irq(&sh->stripe_lock);
                        chosen = dev->towrite;
                        dev->towrite = NULL;
                        BUG_ON(dev->written);
                        wbi = dev->written = chosen;
 -                      spin_unlock_irq(&sh->raid_conf->device_lock);
 +                      spin_unlock_irq(&sh->stripe_lock);
  
                        while (wbi && wbi->bi_sector <
                                dev->sector + STRIPE_SECTORS) {
@@@ -1466,8 -1454,6 +1466,8 @@@ static int grow_one_stripe(struct r5con
        init_waitqueue_head(&sh->ops.wait_for_ops);
        #endif
  
 +      spin_lock_init(&sh->stripe_lock);
 +
        if (grow_buffers(sh)) {
                shrink_buffers(sh);
                kmem_cache_free(conf->slab_cache, sh);
@@@ -1753,9 -1739,7 +1753,9 @@@ static void raid5_end_read_request(stru
                        atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
                        clear_bit(R5_ReadError, &sh->dev[i].flags);
                        clear_bit(R5_ReWrite, &sh->dev[i].flags);
 -              }
 +              } else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
 +                      clear_bit(R5_ReadNoMerge, &sh->dev[i].flags);
 +
                if (atomic_read(&rdev->read_errors))
                        atomic_set(&rdev->read_errors, 0);
        } else {
                else
                        retry = 1;
                if (retry)
 -                      set_bit(R5_ReadError, &sh->dev[i].flags);
 +                      if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) {
 +                              set_bit(R5_ReadError, &sh->dev[i].flags);
 +                              clear_bit(R5_ReadNoMerge, &sh->dev[i].flags);
 +                      } else
 +                              set_bit(R5_ReadNoMerge, &sh->dev[i].flags);
                else {
                        clear_bit(R5_ReadError, &sh->dev[i].flags);
                        clear_bit(R5_ReWrite, &sh->dev[i].flags);
@@@ -2360,18 -2340,11 +2360,18 @@@ static int add_stripe_bio(struct stripe
                (unsigned long long)bi->bi_sector,
                (unsigned long long)sh->sector);
  
 -
 -      spin_lock_irq(&conf->device_lock);
 +      /*
 +       * If several bio share a stripe. The bio bi_phys_segments acts as a
 +       * reference count to avoid race. The reference count should already be
 +       * increased before this function is called (for example, in
 +       * make_request()), so other bio sharing this stripe will not free the
 +       * stripe. If a stripe is owned by one stripe, the stripe lock will
 +       * protect it.
 +       */
 +      spin_lock_irq(&sh->stripe_lock);
        if (forwrite) {
                bip = &sh->dev[dd_idx].towrite;
 -              if (*bip == NULL && sh->dev[dd_idx].written == NULL)
 +              if (*bip == NULL)
                        firstwrite = 1;
        } else
                bip = &sh->dev[dd_idx].toread;
        if (*bip)
                bi->bi_next = *bip;
        *bip = bi;
 -      bi->bi_phys_segments++;
 +      raid5_inc_bi_active_stripes(bi);
  
        if (forwrite) {
                /* check if page is covered */
                if (sector >= sh->dev[dd_idx].sector + STRIPE_SECTORS)
                        set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags);
        }
 -      spin_unlock_irq(&conf->device_lock);
 +      spin_unlock_irq(&sh->stripe_lock);
  
        pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n",
                (unsigned long long)(*bip)->bi_sector,
  
   overlap:
        set_bit(R5_Overlap, &sh->dev[dd_idx].flags);
 -      spin_unlock_irq(&conf->device_lock);
 +      spin_unlock_irq(&sh->stripe_lock);
        return 0;
  }
  
@@@ -2468,11 -2441,10 +2468,11 @@@ handle_failed_stripe(struct r5conf *con
                                rdev_dec_pending(rdev, conf->mddev);
                        }
                }
 -              spin_lock_irq(&conf->device_lock);
 +              spin_lock_irq(&sh->stripe_lock);
                /* fail all writes first */
                bi = sh->dev[i].towrite;
                sh->dev[i].towrite = NULL;
 +              spin_unlock_irq(&sh->stripe_lock);
                if (bi) {
                        s->to_write--;
                        bitmap_end = 1;
                        sh->dev[i].sector + STRIPE_SECTORS) {
                        struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
                        clear_bit(BIO_UPTODATE, &bi->bi_flags);
 -                      if (!raid5_dec_bi_phys_segments(bi)) {
 +                      if (!raid5_dec_bi_active_stripes(bi)) {
                                md_write_end(conf->mddev);
                                bi->bi_next = *return_bi;
                                *return_bi = bi;
                        }
                        bi = nextbi;
                }
 +              if (bitmap_end)
 +                      bitmap_endwrite(conf->mddev->bitmap, sh->sector,
 +                              STRIPE_SECTORS, 0, 0);
 +              bitmap_end = 0;
                /* and fail all 'written' */
                bi = sh->dev[i].written;
                sh->dev[i].written = NULL;
                       sh->dev[i].sector + STRIPE_SECTORS) {
                        struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector);
                        clear_bit(BIO_UPTODATE, &bi->bi_flags);
 -                      if (!raid5_dec_bi_phys_segments(bi)) {
 +                      if (!raid5_dec_bi_active_stripes(bi)) {
                                md_write_end(conf->mddev);
                                bi->bi_next = *return_bi;
                                *return_bi = bi;
                                struct bio *nextbi =
                                        r5_next_bio(bi, sh->dev[i].sector);
                                clear_bit(BIO_UPTODATE, &bi->bi_flags);
 -                              if (!raid5_dec_bi_phys_segments(bi)) {
 +                              if (!raid5_dec_bi_active_stripes(bi)) {
                                        bi->bi_next = *return_bi;
                                        *return_bi = bi;
                                }
                                bi = nextbi;
                        }
                }
 -              spin_unlock_irq(&conf->device_lock);
                if (bitmap_end)
                        bitmap_endwrite(conf->mddev->bitmap, sh->sector,
                                        STRIPE_SECTORS, 0, 0);
@@@ -2738,23 -2707,30 +2738,23 @@@ static void handle_stripe_clean_event(s
                                test_bit(R5_UPTODATE, &dev->flags)) {
                                /* We can return any write requests */
                                struct bio *wbi, *wbi2;
 -                              int bitmap_end = 0;
                                pr_debug("Return write for disc %d\n", i);
 -                              spin_lock_irq(&conf->device_lock);
                                wbi = dev->written;
                                dev->written = NULL;
                                while (wbi && wbi->bi_sector <
                                        dev->sector + STRIPE_SECTORS) {
                                        wbi2 = r5_next_bio(wbi, dev->sector);
 -                                      if (!raid5_dec_bi_phys_segments(wbi)) {
 +                                      if (!raid5_dec_bi_active_stripes(wbi)) {
                                                md_write_end(conf->mddev);
                                                wbi->bi_next = *return_bi;
                                                *return_bi = wbi;
                                        }
                                        wbi = wbi2;
                                }
 -                              if (dev->towrite == NULL)
 -                                      bitmap_end = 1;
 -                              spin_unlock_irq(&conf->device_lock);
 -                              if (bitmap_end)
 -                                      bitmap_endwrite(conf->mddev->bitmap,
 -                                                      sh->sector,
 -                                                      STRIPE_SECTORS,
 +                              bitmap_endwrite(conf->mddev->bitmap, sh->sector,
 +                                              STRIPE_SECTORS,
                                         !test_bit(STRIPE_DEGRADED, &sh->state),
 -                                                      0);
 +                                              0);
                        }
                }
  
@@@ -3206,6 -3182,7 +3206,6 @@@ static void analyse_stripe(struct strip
  
        /* Now to look around and see what can be done */
        rcu_read_lock();
 -      spin_lock_irq(&conf->device_lock);
        for (i=disks; i--; ) {
                struct md_rdev *rdev;
                sector_t first_bad;
                                do_recovery = 1;
                }
        }
 -      spin_unlock_irq(&conf->device_lock);
        if (test_bit(STRIPE_SYNCING, &sh->state)) {
                /* If there is a failed device being replaced,
                 *     we must be recovering.
@@@ -3813,7 -3791,7 +3813,7 @@@ static struct bio *remove_bio_from_retr
                 * this sets the active strip count to 1 and the processed
                 * strip count to zero (upper 8 bits)
                 */
 -              bi->bi_phys_segments = 1; /* biased count of active stripes */
 +              raid5_set_bi_stripes(bi, 1); /* biased count of active stripes */
        }
  
        return bi;
@@@ -4135,7 -4113,7 +4135,7 @@@ static void make_request(struct mddev *
                        finish_wait(&conf->wait_for_overlap, &w);
                        set_bit(STRIPE_HANDLE, &sh->state);
                        clear_bit(STRIPE_DELAYED, &sh->state);
 -                      if ((bi->bi_rw & REQ_SYNC) &&
 +                      if ((bi->bi_rw & REQ_NOIDLE) &&
                            !test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
                                atomic_inc(&conf->preread_active_stripes);
                        mddev_check_plugged(mddev);
                }
        }
  
 -      spin_lock_irq(&conf->device_lock);
 -      remaining = raid5_dec_bi_phys_segments(bi);
 -      spin_unlock_irq(&conf->device_lock);
 +      remaining = raid5_dec_bi_active_stripes(bi);
        if (remaining == 0) {
  
                if ( rw == WRITE )
@@@ -4504,7 -4484,7 +4504,7 @@@ static int  retry_aligned_read(struct r
                     sector += STRIPE_SECTORS,
                     scnt++) {
  
 -              if (scnt < raid5_bi_hw_segments(raid_bio))
 +              if (scnt < raid5_bi_processed_stripes(raid_bio))
                        /* already done this stripe */
                        continue;
  
  
                if (!sh) {
                        /* failed to get a stripe - must wait */
 -                      raid5_set_bi_hw_segments(raid_bio, scnt);
 +                      raid5_set_bi_processed_stripes(raid_bio, scnt);
                        conf->retry_read_aligned = raid_bio;
                        return handled;
                }
  
                if (!add_stripe_bio(sh, raid_bio, dd_idx, 0)) {
                        release_stripe(sh);
 -                      raid5_set_bi_hw_segments(raid_bio, scnt);
 +                      raid5_set_bi_processed_stripes(raid_bio, scnt);
                        conf->retry_read_aligned = raid_bio;
                        return handled;
                }
  
 +              set_bit(R5_ReadNoMerge, &sh->dev[dd_idx].flags);
                handle_stripe(sh);
                release_stripe(sh);
                handled++;
        }
 -      spin_lock_irq(&conf->device_lock);
 -      remaining = raid5_dec_bi_phys_segments(raid_bio);
 -      spin_unlock_irq(&conf->device_lock);
 +      remaining = raid5_dec_bi_active_stripes(raid_bio);
        if (remaining == 0)
                bio_endio(raid_bio, 0);
        if (atomic_dec_and_test(&conf->active_aligned_reads))
@@@ -4562,7 -4543,7 +4562,7 @@@ static void raid5d(struct mddev *mddev
        while (1) {
                struct bio *bio;
  
-               if (atomic_read(&mddev->plug_cnt) == 0 &&
+               if (
                    !list_empty(&conf->bitmap_list)) {
                        /* Now is a good time to flush some bitmap updates */
                        conf->seq_flush++;
                        conf->seq_write = conf->seq_flush;
                        activate_bit_delay(conf);
                }
-               if (atomic_read(&mddev->plug_cnt) == 0)
-                       raid5_activate_delayed(conf);
+               raid5_activate_delayed(conf);
  
                while ((bio = remove_bio_from_retry(conf))) {
                        int ok;