Merge branch 'for-linus' of git://git.kernel.dk/linux-2.6-block
author    Linus Torvalds <torvalds@linux-foundation.org>
          Mon, 20 Dec 2010 17:19:46 +0000 (09:19 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
          Mon, 20 Dec 2010 17:19:46 +0000 (09:19 -0800)
* 'for-linus' of git://git.kernel.dk/linux-2.6-block:
  cciss: fix cciss_revalidate panic
  block: max hardware sectors limit wrapper
  block: Deprecate QUEUE_FLAG_CLUSTER and use queue_limits instead
  blk-throttle: Correct the placement of smp_rmb()
  blk-throttle: Trim/adjust slice_end once a bio has been dispatched
  block: check for proper length of iov entries earlier in blk_rq_map_user_iov()
  drbd: fix for spin_lock_irqsave in endio callback
  drbd: don't recvmsg with zero length

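The scsi_lib.c hunk below (and the matching md.c hunk) illustrates the "Deprecate QUEUE_FLAG_CLUSTER" entry from the shortlog: the clustering setting moves out of queue_flags and into struct queue_limits, so drivers assign q->limits.cluster instead of toggling the flag with the unlocked helpers. A minimal before/after sketch of the driver-side pattern, condensed from those hunks; both helper names are made up for illustration, and blk_queue_cluster() is assumed to be the accessor added alongside this change:

#include <linux/blkdev.h>

/* Old style: clustering was a queue flag, cleared with the unlocked
 * helper while the freshly allocated queue was still private. */
static void disable_clustering_old(struct request_queue *q)
{
	queue_flag_clear_unlocked(QUEUE_FLAG_CLUSTER, q);
}

/* New style: clustering is an ordinary queue limit, assigned directly
 * and read back by the block layer (via blk_queue_cluster(q)). */
static void disable_clustering_new(struct request_queue *q)
{
	q->limits.cluster = 0;
}

Because clustering now lives in queue_limits, it is presumably carried along when limits are stacked, which would explain why the md.c hunk can simply drop its explicit queue_flag_set_unlocked() call for the new queue.
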
drivers/md/md.c
drivers/scsi/scsi_lib.c

diff --combined drivers/md/md.c
index e71c5fa527f59ec3e6ea5fd6ce7d09746bfc437e,52694d29663d04f352017c6b315a9e1bd037f617..175c424f201f33caa0c50525a41b7f8fc3569387
@@@ -371,15 -371,10 +371,15 @@@ static void md_end_flush(struct bio *bi
        bio_put(bio);
  }
  
 -static void submit_flushes(mddev_t *mddev)
 +static void md_submit_flush_data(struct work_struct *ws);
 +
 +static void submit_flushes(struct work_struct *ws)
  {
 +      mddev_t *mddev = container_of(ws, mddev_t, flush_work);
        mdk_rdev_t *rdev;
  
 +      INIT_WORK(&mddev->flush_work, md_submit_flush_data);
 +      atomic_set(&mddev->flush_pending, 1);
        rcu_read_lock();
        list_for_each_entry_rcu(rdev, &mddev->disks, same_set)
                if (rdev->raid_disk >= 0 &&
                        rdev_dec_pending(rdev, mddev);
                }
        rcu_read_unlock();
 +      if (atomic_dec_and_test(&mddev->flush_pending))
 +              queue_work(md_wq, &mddev->flush_work);
  }
  
  static void md_submit_flush_data(struct work_struct *ws)
        mddev_t *mddev = container_of(ws, mddev_t, flush_work);
        struct bio *bio = mddev->flush_bio;
  
 -      atomic_set(&mddev->flush_pending, 1);
 -
        if (bio->bi_size == 0)
                /* an empty barrier - all done */
                bio_endio(bio, 0);
                if (mddev->pers->make_request(mddev, bio))
                        generic_make_request(bio);
        }
 -      if (atomic_dec_and_test(&mddev->flush_pending)) {
 -              mddev->flush_bio = NULL;
 -              wake_up(&mddev->sb_wait);
 -      }
 +
 +      mddev->flush_bio = NULL;
 +      wake_up(&mddev->sb_wait);
  }
  
  void md_flush_request(mddev_t *mddev, struct bio *bio)
        mddev->flush_bio = bio;
        spin_unlock_irq(&mddev->write_lock);
  
 -      atomic_set(&mddev->flush_pending, 1);
 -      INIT_WORK(&mddev->flush_work, md_submit_flush_data);
 -
 -      submit_flushes(mddev);
 -
 -      if (atomic_dec_and_test(&mddev->flush_pending))
 -              queue_work(md_wq, &mddev->flush_work);
 +      INIT_WORK(&mddev->flush_work, submit_flushes);
 +      queue_work(md_wq, &mddev->flush_work);
  }
  EXPORT_SYMBOL(md_flush_request);
  
@@@ -4295,9 -4296,6 +4295,6 @@@ static int md_alloc(dev_t dev, char *na
                goto abort;
        mddev->queue->queuedata = mddev;
  
-       /* Can be unlocked because the queue is new: no concurrency */
-       queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, mddev->queue);
        blk_queue_make_request(mddev->queue, md_make_request);
  
        disk = alloc_disk(1 << shift);
@@@ -5159,7 -5157,7 +5156,7 @@@ static int add_new_disk(mddev_t * mddev
                                PTR_ERR(rdev));
                        return PTR_ERR(rdev);
                }
 -              /* set save_raid_disk if appropriate */
 +              /* set saved_raid_disk if appropriate */
                if (!mddev->persistent) {
                        if (info->state & (1<<MD_DISK_SYNC)  &&
                            info->raid_disk < mddev->raid_disks)
                } else
                        super_types[mddev->major_version].
                                validate_super(mddev, rdev);
 -              rdev->saved_raid_disk = rdev->raid_disk;
 +              if (test_bit(In_sync, &rdev->flags))
 +                      rdev->saved_raid_disk = rdev->raid_disk;
 +              else
 +                      rdev->saved_raid_disk = -1;
  
                clear_bit(In_sync, &rdev->flags); /* just to be sure */
                if (info->state & (1<<MD_DISK_WRITEMOSTLY))
@@@ -6044,8 -6039,9 +6041,8 @@@ static int md_thread(void * arg
                         || kthread_should_stop(),
                         thread->timeout);
  
 -              clear_bit(THREAD_WAKEUP, &thread->flags);
 -
 -              thread->run(thread->mddev);
 +              if (test_and_clear_bit(THREAD_WAKEUP, &thread->flags))
 +                      thread->run(thread->mddev);
        }
  
        return 0;
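
Taken together, the md.c hunks above rework md's flush handling so that nothing is submitted from the caller's context: md_flush_request() now only stashes the bio and queues submit_flushes(), which issues the per-device flush bios from the md_wq workqueue and, once its flush_pending count drops back to zero, re-initializes the same work item to run md_submit_flush_data() for the original request. A condensed sketch of the resulting flow, with names taken from the hunks and the per-rdev bio submission, locking and flag handling omitted:

/* Step 1: the entry point defers all work to md_wq. */
void md_flush_request(mddev_t *mddev, struct bio *bio)
{
	mddev->flush_bio = bio;                 /* remember the original bio */
	INIT_WORK(&mddev->flush_work, submit_flushes);
	queue_work(md_wq, &mddev->flush_work);
}

/* Step 2: in workqueue context, send a flush to every active rdev.
 * Each submitted flush bumps flush_pending and md_end_flush() drops
 * it again, so the last completion queues the final step. */
static void submit_flushes(struct work_struct *ws)
{
	mddev_t *mddev = container_of(ws, mddev_t, flush_work);

	INIT_WORK(&mddev->flush_work, md_submit_flush_data);
	atomic_set(&mddev->flush_pending, 1);
	/* ... submit one flush bio per active rdev (elided in the hunk) ... */
	if (atomic_dec_and_test(&mddev->flush_pending))
		queue_work(md_wq, &mddev->flush_work);
}

/* Step 3: once every device flush has completed, finish the original
 * request: empty barriers are ended here, otherwise the payload is
 * handed to the personality's make_request. */
static void md_submit_flush_data(struct work_struct *ws)
{
	mddev_t *mddev = container_of(ws, mddev_t, flush_work);
	struct bio *bio = mddev->flush_bio;

	if (bio->bi_size == 0)
		bio_endio(bio, 0);              /* empty barrier - all done */
	else if (mddev->pers->make_request(mddev, bio))
		generic_make_request(bio);

	mddev->flush_bio = NULL;
	wake_up(&mddev->sb_wait);
}
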
diff --combined drivers/scsi/scsi_lib.c
index 5b6bbaea59fec92565d2af4d708cedce034295ae,9d7ba07dc5ef73f2379bbf963cb6311e6624a290..4a3842212c5062811c7973413e4d7aec62679951
@@@ -1402,6 -1402,11 +1402,6 @@@ static void scsi_softirq_done(struct re
        int disposition;
  
        INIT_LIST_HEAD(&cmd->eh_entry);
 -
 -      /*
 -       * Set the serial numbers back to zero
 -       */
 -      cmd->serial_number = 0;
  
        atomic_inc(&cmd->device->iodone_cnt);
        if (cmd->result)
@@@ -1637,9 -1642,8 +1637,8 @@@ struct request_queue *__scsi_alloc_queu
  
        blk_queue_max_segment_size(q, dma_get_max_seg_size(dev));
  
-       /* New queue, no concurrency on queue_flags */
        if (!shost->use_clustering)
-               queue_flag_clear_unlocked(QUEUE_FLAG_CLUSTER, q);
+               q->limits.cluster = 0;
  
        /*
         * set a reasonable default alignment on word boundaries: the