Merge remote-tracking branch 'block/for-next'
author Stephen Rothwell <sfr@canb.auug.org.au>
Tue, 9 Oct 2012 01:07:50 +0000 (12:07 +1100)
committer Stephen Rothwell <sfr@canb.auug.org.au>
Tue, 9 Oct 2012 01:07:50 +0000 (12:07 +1100)
Conflicts:
init/Kconfig

47 files changed:
Documentation/ABI/testing/sysfs-block
Documentation/block/biodoc.txt
Documentation/percpu-rw-semaphore.txt [new file with mode: 0644]
MAINTAINERS
block/blk-core.c
block/blk-lib.c
block/blk-merge.c
block/blk-settings.c
block/blk-sysfs.c
block/blk-tag.c
block/blk.h
block/elevator.c
block/genhd.c
block/ioctl.c
drivers/block/Kconfig
drivers/block/cciss.c
drivers/block/drbd/drbd_main.c
drivers/block/floppy.c
drivers/block/loop.c
drivers/block/mtip32xx/mtip32xx.c
drivers/block/mtip32xx/mtip32xx.h
drivers/block/osdblk.c
drivers/block/pktcdvd.c
drivers/block/xen-blkback/common.h
drivers/block/xen-blkback/xenbus.c
drivers/char/raw.c
drivers/md/dm-crypt.c
drivers/md/dm-io.c
drivers/md/dm.c
drivers/md/md.c
drivers/md/raid0.c
drivers/memstick/core/Kconfig
drivers/memstick/core/Makefile
drivers/memstick/core/ms_block.c [new file with mode: 0644]
drivers/memstick/core/ms_block.h [new file with mode: 0644]
drivers/target/target_core_iblock.c
fs/bio-integrity.c
fs/bio.c
fs/block_dev.c
fs/exofs/ore.c
include/linux/bio.h
include/linux/blk_types.h
include/linux/blkdev.h
include/linux/fs.h
include/linux/percpu-rwsem.h [new file with mode: 0644]
include/linux/scatterlist.h
lib/scatterlist.c

index c1eb41cb9876083d3df79a6a995b692762acd21b..279da08f754192c07ba10c47eb0a4ea8e8c95892 100644 (file)
@@ -206,3 +206,17 @@ Description:
                when a discarded area is read the discard_zeroes_data
                parameter will be set to one. Otherwise it will be 0 and
                the result of reading a discarded area is undefined.
+
+What:          /sys/block/<disk>/queue/write_same_max_bytes
+Date:          January 2012
+Contact:       Martin K. Petersen <martin.petersen@oracle.com>
+Description:
+               Some devices support a write same operation in which a
+               single data block can be written to a range of several
+               contiguous blocks on storage. This can be used to wipe
+               areas on disk or to initialize drives in a RAID
+               configuration. write_same_max_bytes indicates how many
+               bytes can be written in a single write same command. If
+               write_same_max_bytes is 0, write same is not supported
+               by the device.
+
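A minimal userspace sketch of probing this attribute; the disk name
"sda" is an assumption for illustration, not part of the patch:

	/* Read write_same_max_bytes for an assumed disk "sda". */
	#include <stdio.h>

	int main(void)
	{
		unsigned long long max_bytes;
		FILE *f = fopen("/sys/block/sda/queue/write_same_max_bytes", "r");

		if (!f || fscanf(f, "%llu", &max_bytes) != 1)
			return 1;
		fclose(f);

		/* Per the description above, 0 means WRITE SAME is unsupported. */
		printf("write_same_max_bytes: %llu\n", max_bytes);
		return 0;
	}
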
index e418dc0a7086631488963786f9ddb0f3d897b147..8df5e8e6dceba06846042d0c6155fd4e986addd8 100644 (file)
@@ -465,7 +465,6 @@ struct bio {
        bio_end_io_t    *bi_end_io;  /* bi_end_io (bio) */
        atomic_t                bi_cnt;      /* pin count: free when it hits zero */
        void             *bi_private;
-       bio_destructor_t *bi_destructor; /* bi_destructor (bio) */
 };
 
 With this multipage bio design:
@@ -647,10 +646,6 @@ for a non-clone bio. There are the 6 pools setup for different size biovecs,
 so bio_alloc(gfp_mask, nr_iovecs) will allocate a vec_list of the
 given size from these slabs.
 
-The bi_destructor() routine takes into account the possibility of the bio
-having originated from a different source (see later discussions on
-n/w to block transfers and kvec_cb)
-
 The bio_get() routine may be used to hold an extra reference on a bio prior
 to i/o submission, if the bio fields are likely to be accessed after the
 i/o is issued (since the bio may otherwise get freed in case i/o completion
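As a sketch of the bio_get() pattern just described (the allocation
flags and setup are assumptions, error handling elided):

	struct bio *bio = bio_alloc(GFP_NOIO, 1);

	/* ... set bi_bdev, bi_sector, add pages ... */

	bio_get(bio);		/* extra reference: bio survives i/o completion */
	submit_bio(READ, bio);
	/* the bio may complete at any time, but cannot be freed under us */
	bio_put(bio);		/* drop the extra reference when done */
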
diff --git a/Documentation/percpu-rw-semaphore.txt b/Documentation/percpu-rw-semaphore.txt
new file mode 100644 (file)
index 0000000..7d3c824
--- /dev/null
@@ -0,0 +1,27 @@
+Percpu rw semaphores
+--------------------
+
+The percpu rw semaphore is a new read-write semaphore design that is
+optimized for locking for reading.
+
+The problem with traditional read-write semaphores is that when multiple
+cores take the lock for reading, the cache line containing the semaphore
+bounces between the L1 caches of the cores, causing performance
+degradation.
+
+Locking for reading is very fast: it uses RCU and avoids any atomic
+instruction in the lock and unlock paths. On the other hand, locking for
+writing is very expensive: it calls synchronize_rcu(), which can take
+hundreds of milliseconds.
+
+The lock is declared with the "struct percpu_rw_semaphore" type.
+The lock is initialized with percpu_init_rwsem, which returns 0 on
+success and -ENOMEM on allocation failure.
+The lock must be freed with percpu_free_rwsem to avoid a memory leak.
+
+The lock is taken for reading with percpu_down_read and released with
+percpu_up_read; for writing, use percpu_down_write and percpu_up_write.
+
+The idea of using RCU for an optimized rw-lock was introduced by
+Eric Dumazet <eric.dumazet@gmail.com>.
+The code was written by Mikulas Patocka <mpatocka@redhat.com>.
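A usage sketch of the API documented above; the my_sem name and the
init/exit hooks are illustrative only:

	#include <linux/percpu-rwsem.h>

	static struct percpu_rw_semaphore my_sem;

	static int __init my_init(void)
	{
		return percpu_init_rwsem(&my_sem);	/* 0 or -ENOMEM */
	}

	static void my_read_path(void)
	{
		percpu_down_read(&my_sem);
		/* fast path: RCU-based, no atomic instructions */
		percpu_up_read(&my_sem);
	}

	static void my_write_path(void)
	{
		percpu_down_write(&my_sem);	/* slow: calls synchronize_rcu() */
		percpu_up_write(&my_sem);
	}

	static void __exit my_exit(void)
	{
		percpu_free_rwsem(&my_sem);	/* required to avoid a leak */
	}
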
index 4084b958b61bedf318202fd8af6f9c36987f54ea..862736e3a99f77bf2fc252bddb94b5c8c651ef60 100644 (file)
@@ -5616,7 +5616,7 @@ S:        Maintained
 F:     drivers/pinctrl/spear/
 
 PKTCDVD DRIVER
-M:     Peter Osterlund <petero2@telia.com>
+M:     Jiri Kosina <jkosina@suse.cz>
 S:     Maintained
 F:     drivers/block/pktcdvd.c
 F:     include/linux/pktcdvd.h
@@ -6700,6 +6700,11 @@ W:       http://tifmxx.berlios.de/
 S:     Maintained
 F:     drivers/memstick/host/tifm_ms.c
 
+SONY MEMORYSTICK STANDARD SUPPORT
+M:     Maxim Levitsky <maximlevitsky@gmail.com>
+S:     Maintained
+F:     drivers/memstick/core/ms_block.*
+
 SOUND
 M:     Jaroslav Kysela <perex@perex.cz>
 M:     Takashi Iwai <tiwai@suse.de>
index d2da64170513caae07726bca207f71ba13065b46..a33870b1847bb70c6ef1937f7bef4e93bdf9c980 100644 (file)
@@ -606,8 +606,8 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
        /*
         * A queue starts its life with bypass turned on to avoid
         * unnecessary bypass on/off overhead and nasty surprises during
-        * init.  The initial bypass will be finished at the end of
-        * blk_init_allocated_queue().
+        * init.  The initial bypass will be finished when the queue is
+        * registered by blk_register_queue().
         */
        q->bypass_depth = 1;
        __set_bit(QUEUE_FLAG_BYPASS, &q->queue_flags);
@@ -694,7 +694,7 @@ blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
        q->request_fn           = rfn;
        q->prep_rq_fn           = NULL;
        q->unprep_rq_fn         = NULL;
-       q->queue_flags          = QUEUE_FLAG_DEFAULT;
+       q->queue_flags          |= QUEUE_FLAG_DEFAULT;
 
        /* Override internal queue lock with supplied lock pointer */
        if (lock)
@@ -710,11 +710,6 @@ blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
        /* init elevator */
        if (elevator_init(q, NULL))
                return NULL;
-
-       blk_queue_congestion_threshold(q);
-
-       /* all done, end the initial bypass */
-       blk_queue_bypass_end(q);
        return q;
 }
 EXPORT_SYMBOL(blk_init_allocated_queue);
@@ -1657,8 +1652,8 @@ generic_make_request_checks(struct bio *bio)
                goto end_io;
        }
 
-       if (unlikely(!(bio->bi_rw & REQ_DISCARD) &&
-                    nr_sectors > queue_max_hw_sectors(q))) {
+       if (likely(bio_is_rw(bio) &&
+                  nr_sectors > queue_max_hw_sectors(q))) {
                printk(KERN_ERR "bio too big device %s (%u > %u)\n",
                       bdevname(bio->bi_bdev, b),
                       bio_sectors(bio),
@@ -1699,8 +1694,12 @@ generic_make_request_checks(struct bio *bio)
 
        if ((bio->bi_rw & REQ_DISCARD) &&
            (!blk_queue_discard(q) ||
-            ((bio->bi_rw & REQ_SECURE) &&
-             !blk_queue_secdiscard(q)))) {
+            ((bio->bi_rw & REQ_SECURE) && !blk_queue_secdiscard(q)))) {
+               err = -EOPNOTSUPP;
+               goto end_io;
+       }
+
+       if (bio->bi_rw & REQ_WRITE_SAME && !bdev_write_same(bio->bi_bdev)) {
                err = -EOPNOTSUPP;
                goto end_io;
        }
@@ -1810,15 +1809,20 @@ EXPORT_SYMBOL(generic_make_request);
  */
 void submit_bio(int rw, struct bio *bio)
 {
-       int count = bio_sectors(bio);
-
        bio->bi_rw |= rw;
 
        /*
         * If it's a regular read/write or a barrier with data attached,
         * go through the normal accounting stuff before submission.
         */
-       if (bio_has_data(bio) && !(rw & REQ_DISCARD)) {
+       if (bio_has_data(bio)) {
+               unsigned int count;
+
+               if (unlikely(rw & REQ_WRITE_SAME))
+                       count = bdev_logical_block_size(bio->bi_bdev) >> 9;
+               else
+                       count = bio_sectors(bio);
+
                if (rw & WRITE) {
                        count_vm_events(PGPGOUT, count);
                } else {
@@ -1864,11 +1868,10 @@ EXPORT_SYMBOL(submit_bio);
  */
 int blk_rq_check_limits(struct request_queue *q, struct request *rq)
 {
-       if (rq->cmd_flags & REQ_DISCARD)
+       if (!rq_mergeable(rq))
                return 0;
 
-       if (blk_rq_sectors(rq) > queue_max_sectors(q) ||
-           blk_rq_bytes(rq) > queue_max_hw_sectors(q) << 9) {
+       if (blk_rq_sectors(rq) > blk_queue_get_max_sectors(q, rq->cmd_flags)) {
                printk(KERN_ERR "%s: over max size limit.\n", __func__);
                return -EIO;
        }
@@ -2340,7 +2343,7 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
        req->buffer = bio_data(req->bio);
 
        /* update sector only for requests with clear definition of sector */
-       if (req->cmd_type == REQ_TYPE_FS || (req->cmd_flags & REQ_DISCARD))
+       if (req->cmd_type == REQ_TYPE_FS)
                req->__sector += total_bytes >> 9;
 
        /* mixed attributes always follow the first bio */
@@ -2781,16 +2784,10 @@ int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
        blk_rq_init(NULL, rq);
 
        __rq_for_each_bio(bio_src, rq_src) {
-               bio = bio_alloc_bioset(gfp_mask, bio_src->bi_max_vecs, bs);
+               bio = bio_clone_bioset(bio_src, gfp_mask, bs);
                if (!bio)
                        goto free_and_out;
 
-               __bio_clone(bio, bio_src);
-
-               if (bio_integrity(bio_src) &&
-                   bio_integrity_clone(bio, bio_src, gfp_mask, bs))
-                       goto free_and_out;
-
                if (bio_ctr && bio_ctr(bio, bio_src, data))
                        goto free_and_out;
 
@@ -2807,7 +2804,7 @@ int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
 
 free_and_out:
        if (bio)
-               bio_free(bio, bs);
+               bio_put(bio);
        blk_rq_unprep_clone(rq);
 
        return -ENOMEM;
index 19cc761cacb2a4b71fe9d4579226025324ffddf5..9373b58dfab185878baf9cb196bcd61dd7637894 100644 (file)
@@ -129,6 +129,80 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 }
 EXPORT_SYMBOL(blkdev_issue_discard);
 
+/**
+ * blkdev_issue_write_same - queue a write same operation
+ * @bdev:      target blockdev
+ * @sector:    start sector
+ * @nr_sects:  number of sectors to write
+ * @gfp_mask:  memory allocation flags (for bio_alloc)
+ * @page:      page containing data to write
+ *
+ * Description:
+ *    Issue a write same request for the sectors in question.
+ */
+int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
+                           sector_t nr_sects, gfp_t gfp_mask,
+                           struct page *page)
+{
+       DECLARE_COMPLETION_ONSTACK(wait);
+       struct request_queue *q = bdev_get_queue(bdev);
+       unsigned int max_write_same_sectors;
+       struct bio_batch bb;
+       struct bio *bio;
+       int ret = 0;
+
+       if (!q)
+               return -ENXIO;
+
+       max_write_same_sectors = q->limits.max_write_same_sectors;
+
+       if (max_write_same_sectors == 0)
+               return -EOPNOTSUPP;
+
+       atomic_set(&bb.done, 1);
+       bb.flags = 1 << BIO_UPTODATE;
+       bb.wait = &wait;
+
+       while (nr_sects) {
+               bio = bio_alloc(gfp_mask, 1);
+               if (!bio) {
+                       ret = -ENOMEM;
+                       break;
+               }
+
+               bio->bi_sector = sector;
+               bio->bi_end_io = bio_batch_end_io;
+               bio->bi_bdev = bdev;
+               bio->bi_private = &bb;
+               bio->bi_vcnt = 1;
+               bio->bi_io_vec->bv_page = page;
+               bio->bi_io_vec->bv_offset = 0;
+               bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev);
+
+               if (nr_sects > max_write_same_sectors) {
+                       bio->bi_size = max_write_same_sectors << 9;
+                       nr_sects -= max_write_same_sectors;
+                       sector += max_write_same_sectors;
+               } else {
+                       bio->bi_size = nr_sects << 9;
+                       nr_sects = 0;
+               }
+
+               atomic_inc(&bb.done);
+               submit_bio(REQ_WRITE | REQ_WRITE_SAME, bio);
+       }
+
+       /* Wait for bios in-flight */
+       if (!atomic_dec_and_test(&bb.done))
+               wait_for_completion(&wait);
+
+       if (!test_bit(BIO_UPTODATE, &bb.flags))
+               ret = -ENOTSUPP;
+
+       return ret;
+}
+EXPORT_SYMBOL(blkdev_issue_write_same);
+
 /**
  * blkdev_issue_zeroout - generate a number of zero-filled write bios
  * @bdev:      blockdev to issue
@@ -140,7 +214,7 @@ EXPORT_SYMBOL(blkdev_issue_discard);
  *  Generate and issue a number of bios with zero-filled pages.
  */
 
-int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
+int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
                        sector_t nr_sects, gfp_t gfp_mask)
 {
        int ret;
@@ -190,4 +264,32 @@ int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
 
        return ret;
 }
+
+/**
+ * blkdev_issue_zeroout - zero-fill a block range
+ * @bdev:      blockdev to write
+ * @sector:    start sector
+ * @nr_sects:  number of sectors to write
+ * @gfp_mask:  memory allocation flags (for bio_alloc)
+ *
+ * Description:
+ *  Generate and issue a number of bios with zero-filled pages.
+ */
+
+int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
+                        sector_t nr_sects, gfp_t gfp_mask)
+{
+       if (bdev_write_same(bdev)) {
+               unsigned char bdn[BDEVNAME_SIZE];
+
+               if (!blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask,
+                                            ZERO_PAGE(0)))
+                       return 0;
+
+               bdevname(bdev, bdn);
+               pr_err("%s: WRITE SAME failed. Manually zeroing.\n", bdn);
+       }
+
+       return __blkdev_issue_zeroout(bdev, sector, nr_sects, gfp_mask);
+}
 EXPORT_SYMBOL(blkdev_issue_zeroout);
index e76279e411622519eebb54d4802eb11b796788db..936a110de0b9c63ac8a3ab6d44d80d60d5533088 100644 (file)
@@ -275,14 +275,8 @@ no_merge:
 int ll_back_merge_fn(struct request_queue *q, struct request *req,
                     struct bio *bio)
 {
-       unsigned short max_sectors;
-
-       if (unlikely(req->cmd_type == REQ_TYPE_BLOCK_PC))
-               max_sectors = queue_max_hw_sectors(q);
-       else
-               max_sectors = queue_max_sectors(q);
-
-       if (blk_rq_sectors(req) + bio_sectors(bio) > max_sectors) {
+       if (blk_rq_sectors(req) + bio_sectors(bio) >
+           blk_rq_get_max_sectors(req)) {
                req->cmd_flags |= REQ_NOMERGE;
                if (req == q->last_merge)
                        q->last_merge = NULL;
@@ -299,15 +293,8 @@ int ll_back_merge_fn(struct request_queue *q, struct request *req,
 int ll_front_merge_fn(struct request_queue *q, struct request *req,
                      struct bio *bio)
 {
-       unsigned short max_sectors;
-
-       if (unlikely(req->cmd_type == REQ_TYPE_BLOCK_PC))
-               max_sectors = queue_max_hw_sectors(q);
-       else
-               max_sectors = queue_max_sectors(q);
-
-
-       if (blk_rq_sectors(req) + bio_sectors(bio) > max_sectors) {
+       if (blk_rq_sectors(req) + bio_sectors(bio) >
+           blk_rq_get_max_sectors(req)) {
                req->cmd_flags |= REQ_NOMERGE;
                if (req == q->last_merge)
                        q->last_merge = NULL;
@@ -338,7 +325,8 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
        /*
         * Will it become too large?
         */
-       if ((blk_rq_sectors(req) + blk_rq_sectors(next)) > queue_max_sectors(q))
+       if ((blk_rq_sectors(req) + blk_rq_sectors(next)) >
+           blk_rq_get_max_sectors(req))
                return 0;
 
        total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
@@ -417,16 +405,7 @@ static int attempt_merge(struct request_queue *q, struct request *req,
        if (!rq_mergeable(req) || !rq_mergeable(next))
                return 0;
 
-       /*
-        * Don't merge file system requests and discard requests
-        */
-       if ((req->cmd_flags & REQ_DISCARD) != (next->cmd_flags & REQ_DISCARD))
-               return 0;
-
-       /*
-        * Don't merge discard requests and secure discard requests
-        */
-       if ((req->cmd_flags & REQ_SECURE) != (next->cmd_flags & REQ_SECURE))
+       if (!blk_check_merge_flags(req->cmd_flags, next->cmd_flags))
                return 0;
 
        /*
@@ -440,6 +419,10 @@ static int attempt_merge(struct request_queue *q, struct request *req,
            || next->special)
                return 0;
 
+       if (req->cmd_flags & REQ_WRITE_SAME &&
+           !blk_write_same_mergeable(req->bio, next->bio))
+               return 0;
+
        /*
         * If we are allowed to merge, then append bio list
         * from next to rq and release next. merge_requests_fn
@@ -521,15 +504,10 @@ int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
 
 bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
 {
-       if (!rq_mergeable(rq))
+       if (!rq_mergeable(rq) || !bio_mergeable(bio))
                return false;
 
-       /* don't merge file system requests and discard requests */
-       if ((bio->bi_rw & REQ_DISCARD) != (rq->bio->bi_rw & REQ_DISCARD))
-               return false;
-
-       /* don't merge discard requests and secure discard requests */
-       if ((bio->bi_rw & REQ_SECURE) != (rq->bio->bi_rw & REQ_SECURE))
+       if (!blk_check_merge_flags(rq->cmd_flags, bio->bi_rw))
                return false;
 
        /* different data direction or already started, don't merge */
@@ -544,6 +522,11 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
        if (bio_integrity(bio) != blk_integrity_rq(rq))
                return false;
 
+       /* must be using the same buffer */
+       if (rq->cmd_flags & REQ_WRITE_SAME &&
+           !blk_write_same_mergeable(rq->bio, bio))
+               return false;
+
        return true;
 }
 
index 565a6786032f59e40cee28bf4bde4ffb451cffa8..779bb7646bcd13f871dd4ba20862722db699078c 100644 (file)
@@ -113,6 +113,7 @@ void blk_set_default_limits(struct queue_limits *lim)
        lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
        lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
        lim->max_sectors = lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS;
+       lim->max_write_same_sectors = 0;
        lim->max_discard_sectors = 0;
        lim->discard_granularity = 0;
        lim->discard_alignment = 0;
@@ -144,6 +145,7 @@ void blk_set_stacking_limits(struct queue_limits *lim)
        lim->max_segments = USHRT_MAX;
        lim->max_hw_sectors = UINT_MAX;
        lim->max_sectors = UINT_MAX;
+       lim->max_write_same_sectors = UINT_MAX;
 }
 EXPORT_SYMBOL(blk_set_stacking_limits);
 
@@ -285,6 +287,18 @@ void blk_queue_max_discard_sectors(struct request_queue *q,
 }
 EXPORT_SYMBOL(blk_queue_max_discard_sectors);
 
+/**
+ * blk_queue_max_write_same_sectors - set max sectors for a single write same
+ * @q:  the request queue for the device
+ * @max_write_same_sectors: maximum number of sectors to write per command
+ **/
+void blk_queue_max_write_same_sectors(struct request_queue *q,
+                                     unsigned int max_write_same_sectors)
+{
+       q->limits.max_write_same_sectors = max_write_same_sectors;
+}
+EXPORT_SYMBOL(blk_queue_max_write_same_sectors);
+
 /**
  * blk_queue_max_segments - set max hw segments for a request for this queue
  * @q:  the request queue for the device
@@ -510,6 +524,8 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 
        t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
        t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
+       t->max_write_same_sectors = min(t->max_write_same_sectors,
+                                       b->max_write_same_sectors);
        t->bounce_pfn = min_not_zero(t->bounce_pfn, b->bounce_pfn);
 
        t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
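For reference, a low-level driver that knows its device's limit would
advertise it during queue setup, while a stacking driver starts from the
permissive default and lets blk_stack_limits() clamp it with min() as
above. A sketch; q, t and dev_ws_max_sectors are assumed names, not from
this patch:

	/* Low-level driver: advertise the device-reported limit. */
	blk_queue_max_write_same_sectors(q, dev_ws_max_sectors);

	/* Stacking driver: start at UINT_MAX, then stack component limits. */
	blk_set_stacking_limits(&t->limits);
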
index 9628b291f96057a42cbf6a5492bd7480fe7e93da..ce6204608822462c18bda72c7ca2403fd17ec5eb 100644 (file)
@@ -26,9 +26,15 @@ queue_var_show(unsigned long var, char *page)
 static ssize_t
 queue_var_store(unsigned long *var, const char *page, size_t count)
 {
-       char *p = (char *) page;
+       int err;
+       unsigned long v;
+
+       err = strict_strtoul(page, 10, &v);
+       if (err || v > UINT_MAX)
+               return -EINVAL;
+
+       *var = v;
 
-       *var = simple_strtoul(p, &p, 10);
        return count;
 }
 
@@ -48,6 +54,9 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count)
                return -EINVAL;
 
        ret = queue_var_store(&nr, page, count);
+       if (ret < 0)
+               return ret;
+
        if (nr < BLKDEV_MIN_RQ)
                nr = BLKDEV_MIN_RQ;
 
@@ -102,6 +111,9 @@ queue_ra_store(struct request_queue *q, const char *page, size_t count)
        unsigned long ra_kb;
        ssize_t ret = queue_var_store(&ra_kb, page, count);
 
+       if (ret < 0)
+               return ret;
+
        q->backing_dev_info.ra_pages = ra_kb >> (PAGE_CACHE_SHIFT - 10);
 
        return ret;
@@ -168,6 +180,13 @@ static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *pag
        return queue_var_show(queue_discard_zeroes_data(q), page);
 }
 
+static ssize_t queue_write_same_max_show(struct request_queue *q, char *page)
+{
+       return sprintf(page, "%llu\n",
+               (unsigned long long)q->limits.max_write_same_sectors << 9);
+}
+
+
 static ssize_t
 queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
 {
@@ -176,6 +195,9 @@ queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
                        page_kb = 1 << (PAGE_CACHE_SHIFT - 10);
        ssize_t ret = queue_var_store(&max_sectors_kb, page, count);
 
+       if (ret < 0)
+               return ret;
+
        if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
                return -EINVAL;
 
@@ -236,6 +258,9 @@ static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
        unsigned long nm;
        ssize_t ret = queue_var_store(&nm, page, count);
 
+       if (ret < 0)
+               return ret;
+
        spin_lock_irq(q->queue_lock);
        queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
        queue_flag_clear(QUEUE_FLAG_NOXMERGES, q);
@@ -264,6 +289,9 @@ queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
        unsigned long val;
 
        ret = queue_var_store(&val, page, count);
+       if (ret < 0)
+               return ret;
+
        spin_lock_irq(q->queue_lock);
        if (val == 2) {
                queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
@@ -364,6 +392,11 @@ static struct queue_sysfs_entry queue_discard_zeroes_data_entry = {
        .show = queue_discard_zeroes_data_show,
 };
 
+static struct queue_sysfs_entry queue_write_same_max_entry = {
+       .attr = {.name = "write_same_max_bytes", .mode = S_IRUGO },
+       .show = queue_write_same_max_show,
+};
+
 static struct queue_sysfs_entry queue_nonrot_entry = {
        .attr = {.name = "rotational", .mode = S_IRUGO | S_IWUSR },
        .show = queue_show_nonrot,
@@ -411,6 +444,7 @@ static struct attribute *default_attrs[] = {
        &queue_discard_granularity_entry.attr,
        &queue_discard_max_entry.attr,
        &queue_discard_zeroes_data_entry.attr,
+       &queue_write_same_max_entry.attr,
        &queue_nonrot_entry.attr,
        &queue_nomerges_entry.attr,
        &queue_rq_affinity_entry.attr,
@@ -527,6 +561,12 @@ int blk_register_queue(struct gendisk *disk)
        if (WARN_ON(!q))
                return -ENXIO;
 
+       /*
+        * Initialization must be complete by now.  Finish the initial
+        * bypass from queue allocation.
+        */
+       blk_queue_bypass_end(q);
+
        ret = blk_trace_init_sysfs(dev);
        if (ret)
                return ret;
index 4af6f5cc1167a65494dc52b39cd3f1dfd1271087..cc345e1d8d4ea0088832833ef985d6b1e764fa44 100644 (file)
@@ -186,7 +186,8 @@ int blk_queue_init_tags(struct request_queue *q, int depth,
                tags = __blk_queue_init_tags(q, depth);
 
                if (!tags)
-                       goto fail;
+                       return -ENOMEM;
+
        } else if (q->queue_tags) {
                rc = blk_queue_resize_tags(q, depth);
                if (rc)
@@ -203,9 +204,6 @@ int blk_queue_init_tags(struct request_queue *q, int depth,
        queue_flag_set_unlocked(QUEUE_FLAG_QUEUED, q);
        INIT_LIST_HEAD(&q->tag_busy_list);
        return 0;
-fail:
-       kfree(tags);
-       return -ENOMEM;
 }
 EXPORT_SYMBOL(blk_queue_init_tags);
 
index 2a0ea32d249fdaa9694e0249e435777565535d70..ca51543b248ca16f0eb28e937ec321e52f3130c8 100644 (file)
@@ -171,14 +171,13 @@ static inline int queue_congestion_off_threshold(struct request_queue *q)
  *
  *     a) it's attached to a gendisk, and
  *     b) the queue had IO stats enabled when this request was started, and
- *     c) it's a file system request or a discard request
+ *     c) it's a file system request
  */
 static inline int blk_do_io_stat(struct request *rq)
 {
        return rq->rq_disk &&
               (rq->cmd_flags & REQ_IO_STAT) &&
-              (rq->cmd_type == REQ_TYPE_FS ||
-               (rq->cmd_flags & REQ_DISCARD));
+               (rq->cmd_type == REQ_TYPE_FS);
 }
 
 /*
index 6a55d418896f5ceee0042da69c0177c495219cbc..9b1d42b62f207d5a37f657e21fc65769bc9e85c2 100644 (file)
@@ -562,8 +562,7 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where)
 
        if (rq->cmd_flags & REQ_SOFTBARRIER) {
                /* barriers are scheduling boundary, update end_sector */
-               if (rq->cmd_type == REQ_TYPE_FS ||
-                   (rq->cmd_flags & REQ_DISCARD)) {
+               if (rq->cmd_type == REQ_TYPE_FS) {
                        q->end_sector = rq_end_sector(rq);
                        q->boundary_rq = rq;
                }
@@ -605,8 +604,7 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where)
                if (elv_attempt_insert_merge(q, rq))
                        break;
        case ELEVATOR_INSERT_SORT:
-               BUG_ON(rq->cmd_type != REQ_TYPE_FS &&
-                      !(rq->cmd_flags & REQ_DISCARD));
+               BUG_ON(rq->cmd_type != REQ_TYPE_FS);
                rq->cmd_flags |= REQ_SORTED;
                q->nr_sorted++;
                if (rq_mergeable(rq)) {
index 6cace663a80e4a89492f8b656f580a944b134f9d..8f29a0b9032e37eb83d61cbd4f4c9585591ab2b6 100644 (file)
@@ -587,8 +587,6 @@ void add_disk(struct gendisk *disk)
        WARN_ON(disk->minors && !(disk->major || disk->first_minor));
        WARN_ON(!disk->minors && !(disk->flags & GENHD_FL_EXT_DEVT));
 
-       disk->flags |= GENHD_FL_UP;
-
        retval = blk_alloc_devt(&disk->part0, &devt);
        if (retval) {
                WARN_ON(1);
@@ -596,6 +594,8 @@ void add_disk(struct gendisk *disk)
        }
        disk_to_dev(disk)->devt = devt;
 
+       disk->flags |= GENHD_FL_UP;
+
        /* ->major and ->first_minor aren't supposed to be
         * dereferenced from here on, but set them just in case.
         */
@@ -1105,7 +1105,7 @@ static void disk_release(struct device *dev)
        disk_replace_part_tbl(disk, NULL);
        free_part_stats(&disk->part0);
        free_part_info(&disk->part0);
-       if (disk->queue)
+       if (disk->queue && disk->flags & GENHD_FL_UP)
                blk_put_queue(disk->queue);
        kfree(disk);
 }
index 4a85096f5410adde92d01cd58ecba30373c808f1..a31d91d9bc5a33d70388b0063e1d80c41383c125 100644 (file)
@@ -185,6 +185,22 @@ static int blk_ioctl_discard(struct block_device *bdev, uint64_t start,
        return blkdev_issue_discard(bdev, start, len, GFP_KERNEL, flags);
 }
 
+static int blk_ioctl_zeroout(struct block_device *bdev, uint64_t start,
+                            uint64_t len)
+{
+       if (start & 511)
+               return -EINVAL;
+       if (len & 511)
+               return -EINVAL;
+       start >>= 9;
+       len >>= 9;
+
+       if (start + len > (i_size_read(bdev->bd_inode) >> 9))
+               return -EINVAL;
+
+       return blkdev_issue_zeroout(bdev, start, len, GFP_KERNEL);
+}
+
 static int put_ushort(unsigned long arg, unsigned short val)
 {
        return put_user(val, (unsigned short __user *)arg);
@@ -300,6 +316,17 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
                return blk_ioctl_discard(bdev, range[0], range[1],
                                         cmd == BLKSECDISCARD);
        }
+       case BLKZEROOUT: {
+               uint64_t range[2];
+
+               if (!(mode & FMODE_WRITE))
+                       return -EBADF;
+
+               if (copy_from_user(range, (void __user *)arg, sizeof(range)))
+                       return -EFAULT;
+
+               return blk_ioctl_zeroout(bdev, range[0], range[1]);
+       }
 
        case HDIO_GETGEO: {
                struct hd_geometry geo;
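From userspace, the new BLKZEROOUT ioctl takes a {start, length} pair in
bytes, each 512-byte aligned, as blk_ioctl_zeroout() above enforces. A
minimal sketch; the device path is an assumption:

	#include <fcntl.h>
	#include <stdint.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/fs.h>		/* BLKZEROOUT */

	int main(void)
	{
		uint64_t range[2] = { 0, 1 << 20 };	/* zero the first 1 MiB */
		int fd = open("/dev/sdX", O_WRONLY);	/* device name assumed */

		if (fd < 0)
			return 1;
		/* Fails with EBADF if the fd was not opened for writing. */
		if (ioctl(fd, BLKZEROOUT, range))
			return 1;
		close(fd);
		return 0;
	}
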
index f529407db93ff74dbfa8b9dad13324815467fad2..f7de3228f626a0d849c503f059944f575a05f831 100644 (file)
@@ -131,6 +131,7 @@ config BLK_CPQ_DA
 config BLK_CPQ_CISS_DA
        tristate "Compaq Smart Array 5xxx support"
        depends on PCI
+       select CHECK_SIGNATURE
        help
          This is the driver for Compaq Smart Array 5xxx controllers.
          Everyone using these boards should say Y here.
index b0f553b26d0f8d00f86f768f93aad27ebcb0529e..ca83f96756ad86b2a339971050b7378f9a9752d9 100644 (file)
@@ -5205,7 +5205,6 @@ static void cciss_shutdown(struct pci_dev *pdev)
                return;
        }
        /* write all data in the battery backed cache to disk */
-       memset(flush_buf, 0, 4);
        return_code = sendcmd_withirq(h, CCISS_CACHE_FLUSH, flush_buf,
                4, 0, CTLR_LUNID, TYPE_CMD);
        kfree(flush_buf);
index f93a0320e952dd6b07fb8c73739ee73d3c1af0a0..f55683ad4ffad67063b0825a844c3214c07d0601 100644 (file)
@@ -162,23 +162,12 @@ static const struct block_device_operations drbd_ops = {
        .release = drbd_release,
 };
 
-static void bio_destructor_drbd(struct bio *bio)
-{
-       bio_free(bio, drbd_md_io_bio_set);
-}
-
 struct bio *bio_alloc_drbd(gfp_t gfp_mask)
 {
-       struct bio *bio;
-
        if (!drbd_md_io_bio_set)
                return bio_alloc(gfp_mask, 1);
 
-       bio = bio_alloc_bioset(gfp_mask, 1, drbd_md_io_bio_set);
-       if (!bio)
-               return NULL;
-       bio->bi_destructor = bio_destructor_drbd;
-       return bio;
+       return bio_alloc_bioset(gfp_mask, 1, drbd_md_io_bio_set);
 }
 
 #ifdef __CHECKER__
index 17c675c522954cc39a210e431603aac2a3a2d946..a6eebab17a39da37a945de4de7eb8df94172bd58 100644 (file)
@@ -4109,12 +4109,19 @@ static struct platform_driver floppy_driver = {
 
 static struct platform_device floppy_device[N_DRIVE];
 
+static bool floppy_available(int drive)
+{
+       if (!(allowed_drive_mask & (1 << drive)))
+               return false;
+       if (fdc_state[FDC(drive)].version == FDC_NONE)
+               return false;
+       return true;
+}
+
 static struct kobject *floppy_find(dev_t dev, int *part, void *data)
 {
        int drive = (*part & 3) | ((*part & 0x80) >> 5);
-       if (drive >= N_DRIVE ||
-           !(allowed_drive_mask & (1 << drive)) ||
-           fdc_state[FDC(drive)].version == FDC_NONE)
+       if (drive >= N_DRIVE || !floppy_available(drive))
                return NULL;
        if (((*part >> 2) & 0x1f) >= ARRAY_SIZE(floppy_type))
                return NULL;
@@ -4124,8 +4131,7 @@ static struct kobject *floppy_find(dev_t dev, int *part, void *data)
 
 static int __init do_floppy_init(void)
 {
-       int i, unit, drive;
-       int err, dr;
+       int i, unit, drive, err;
 
        set_debugt();
        interruptjiffies = resultjiffies = jiffies;
@@ -4137,34 +4143,32 @@ static int __init do_floppy_init(void)
 
        raw_cmd = NULL;
 
-       for (dr = 0; dr < N_DRIVE; dr++) {
-               disks[dr] = alloc_disk(1);
-               if (!disks[dr]) {
-                       err = -ENOMEM;
-                       goto out_put_disk;
-               }
+       floppy_wq = alloc_ordered_workqueue("floppy", 0);
+       if (!floppy_wq)
+               return -ENOMEM;
 
-               floppy_wq = alloc_ordered_workqueue("floppy", 0);
-               if (!floppy_wq) {
+       for (drive = 0; drive < N_DRIVE; drive++) {
+               disks[drive] = alloc_disk(1);
+               if (!disks[drive]) {
                        err = -ENOMEM;
                        goto out_put_disk;
                }
 
-               disks[dr]->queue = blk_init_queue(do_fd_request, &floppy_lock);
-               if (!disks[dr]->queue) {
+               disks[drive]->queue = blk_init_queue(do_fd_request, &floppy_lock);
+               if (!disks[drive]->queue) {
                        err = -ENOMEM;
-                       goto out_destroy_workq;
+                       goto out_put_disk;
                }
 
-               blk_queue_max_hw_sectors(disks[dr]->queue, 64);
-               disks[dr]->major = FLOPPY_MAJOR;
-               disks[dr]->first_minor = TOMINOR(dr);
-               disks[dr]->fops = &floppy_fops;
-               sprintf(disks[dr]->disk_name, "fd%d", dr);
+               blk_queue_max_hw_sectors(disks[drive]->queue, 64);
+               disks[drive]->major = FLOPPY_MAJOR;
+               disks[drive]->first_minor = TOMINOR(drive);
+               disks[drive]->fops = &floppy_fops;
+               sprintf(disks[drive]->disk_name, "fd%d", drive);
 
-               init_timer(&motor_off_timer[dr]);
-               motor_off_timer[dr].data = dr;
-               motor_off_timer[dr].function = motor_off_callback;
+               init_timer(&motor_off_timer[drive]);
+               motor_off_timer[drive].data = drive;
+               motor_off_timer[drive].function = motor_off_callback;
        }
 
        err = register_blkdev(FLOPPY_MAJOR, "fd");
@@ -4282,9 +4286,7 @@ static int __init do_floppy_init(void)
        }
 
        for (drive = 0; drive < N_DRIVE; drive++) {
-               if (!(allowed_drive_mask & (1 << drive)))
-                       continue;
-               if (fdc_state[FDC(drive)].version == FDC_NONE)
+               if (!floppy_available(drive))
                        continue;
 
                floppy_device[drive].name = floppy_device_name;
@@ -4293,7 +4295,7 @@ static int __init do_floppy_init(void)
 
                err = platform_device_register(&floppy_device[drive]);
                if (err)
-                       goto out_release_dma;
+                       goto out_remove_drives;
 
                err = device_create_file(&floppy_device[drive].dev,
                                         &dev_attr_cmos);
@@ -4311,29 +4313,33 @@ static int __init do_floppy_init(void)
 
 out_unreg_platform_dev:
        platform_device_unregister(&floppy_device[drive]);
+out_remove_drives:
+       while (drive--) {
+               if (floppy_available(drive)) {
+                       del_gendisk(disks[drive]);
+                       device_remove_file(&floppy_device[drive].dev, &dev_attr_cmos);
+                       platform_device_unregister(&floppy_device[drive]);
+               }
+       }
 out_release_dma:
        if (atomic_read(&usage_count))
                floppy_release_irq_and_dma();
 out_unreg_region:
        blk_unregister_region(MKDEV(FLOPPY_MAJOR, 0), 256);
        platform_driver_unregister(&floppy_driver);
-out_destroy_workq:
-       destroy_workqueue(floppy_wq);
 out_unreg_blkdev:
        unregister_blkdev(FLOPPY_MAJOR, "fd");
 out_put_disk:
-       while (dr--) {
-               del_timer_sync(&motor_off_timer[dr]);
-               if (disks[dr]->queue) {
-                       blk_cleanup_queue(disks[dr]->queue);
-                       /*
-                        * put_disk() is not paired with add_disk() and
-                        * will put queue reference one extra time. fix it.
-                        */
-                       disks[dr]->queue = NULL;
+       for (drive = 0; drive < N_DRIVE; drive++) {
+               if (!disks[drive])
+                       break;
+               if (disks[drive]->queue) {
+                       del_timer_sync(&motor_off_timer[drive]);
+                       blk_cleanup_queue(disks[drive]->queue);
                }
-               put_disk(disks[dr]);
+               put_disk(disks[drive]);
        }
+       destroy_workqueue(floppy_wq);
        return err;
 }
 
@@ -4551,22 +4557,13 @@ static void __exit floppy_module_exit(void)
        for (drive = 0; drive < N_DRIVE; drive++) {
                del_timer_sync(&motor_off_timer[drive]);
 
-               if ((allowed_drive_mask & (1 << drive)) &&
-                   fdc_state[FDC(drive)].version != FDC_NONE) {
+               if (floppy_available(drive)) {
                        del_gendisk(disks[drive]);
                        device_remove_file(&floppy_device[drive].dev, &dev_attr_cmos);
                        platform_device_unregister(&floppy_device[drive]);
                }
                blk_cleanup_queue(disks[drive]->queue);
 
-               /*
-                * These disks have not called add_disk().  Don't put down
-                * queue reference in put_disk().
-                */
-               if (!(allowed_drive_mask & (1 << drive)) ||
-                   fdc_state[FDC(drive)].version == FDC_NONE)
-                       disks[drive]->queue = NULL;
-
                put_disk(disks[drive]);
        }
 
index e9d594fd12cbee408251c4ead03d1b71183ff7ae..54046e51160aef28e3ee733797fa453a0403a02a 100644 (file)
@@ -976,8 +976,21 @@ static int loop_clr_fd(struct loop_device *lo)
        if (lo->lo_state != Lo_bound)
                return -ENXIO;
 
-       if (lo->lo_refcnt > 1)  /* we needed one fd for the ioctl */
-               return -EBUSY;
+       /*
+        * If we've explicitly asked to tear down the loop device,
+        * and it has an elevated reference count, set it for auto-teardown when
+        * the last reference goes away. This stops $!~#$@ udev from
+        * preventing teardown because it decided that it needs to run blkid on
+        * the loopback device whenever they appear. xfstests is notorious for
+        * failing tests because blkid via udev races with a losetup
+        * <dev>/do something like mkfs/losetup -d <dev> causing the losetup -d
+        * command to fail with EBUSY.
+        */
+       if (lo->lo_refcnt > 1) {
+               lo->lo_flags |= LO_FLAGS_AUTOCLEAR;
+               mutex_unlock(&lo->lo_ctl_mutex);
+               return 0;
+       }
 
        if (filp == NULL)
                return -EINVAL;
index f946d31d6917e00aa0637df5d21cef35ebf82845..adc6f36564cf3c9f214ca37c9b3cf8faffbe6cf8 100644 (file)
@@ -2035,8 +2035,9 @@ static unsigned int implicit_sector(unsigned char command,
        }
        return rv;
 }
-
-static void mtip_set_timeout(struct host_to_dev_fis *fis, unsigned int *timeout)
+static void mtip_set_timeout(struct driver_data *dd,
+                                       struct host_to_dev_fis *fis,
+                                       unsigned int *timeout, u8 erasemode)
 {
        switch (fis->command) {
        case ATA_CMD_DOWNLOAD_MICRO:
@@ -2044,7 +2045,10 @@ static void mtip_set_timeout(struct host_to_dev_fis *fis, unsigned int *timeout)
                break;
        case ATA_CMD_SEC_ERASE_UNIT:
        case 0xFC:
-               *timeout = 240000; /* 4 minutes */
+               if (erasemode)
+                       *timeout = ((*(dd->port->identify + 90) * 2) * 60000);
+               else
+                       *timeout = ((*(dd->port->identify + 89) * 2) * 60000);
                break;
        case ATA_CMD_STANDBYNOW1:
                *timeout = 120000;  /* 2 minutes */
@@ -2087,6 +2091,7 @@ static int exec_drive_taskfile(struct driver_data *dd,
        unsigned int transfer_size;
        unsigned long task_file_data;
        int intotal = outtotal + req_task->out_size;
+       int erasemode = 0;
 
        taskout = req_task->out_size;
        taskin = req_task->in_size;
@@ -2212,7 +2217,13 @@ static int exec_drive_taskfile(struct driver_data *dd,
                fis.lba_hi,
                fis.device);
 
-       mtip_set_timeout(&fis, &timeout);
+       /* check for erase mode support during secure erase.*/
+       if ((fis.command == ATA_CMD_SEC_ERASE_UNIT)
+                                       && (outbuf[0] & MTIP_SEC_ERASE_MODE)) {
+               erasemode = 1;
+       }
+
+       mtip_set_timeout(dd, &fis, &timeout, erasemode);
 
        /* Determine the correct transfer size.*/
        if (force_single_sector)
index 18627a1d04c59eff34f7cdd2313555b10a75b283..5f4a917bd8bbcfe88283b3509e365a0faefbb3b5 100644 (file)
@@ -33,6 +33,9 @@
 /* offset of Device Control register in PCIe extended capabilities space */
 #define PCIE_CONFIG_EXT_DEVICE_CONTROL_OFFSET  0x48
 
+/* check for erase mode support during secure erase */
+#define MTIP_SEC_ERASE_MODE     0x3
+
 /* # of times to retry timed out/failed IOs */
 #define MTIP_MAX_RETRIES       2
 
index 87311ebac0db4e5cc3f04fe0e255bf01d62aa730..1bbc681688e4375aa5098bd1b99d85b38baa796e 100644 (file)
@@ -266,11 +266,10 @@ static struct bio *bio_chain_clone(struct bio *old_chain, gfp_t gfpmask)
        struct bio *tmp, *new_chain = NULL, *tail = NULL;
 
        while (old_chain) {
-               tmp = bio_kmalloc(gfpmask, old_chain->bi_max_vecs);
+               tmp = bio_clone_kmalloc(old_chain, gfpmask);
                if (!tmp)
                        goto err_out;
 
-               __bio_clone(tmp, old_chain);
                tmp->bi_bdev = NULL;
                gfpmask &= ~__GFP_WAIT;
                tmp->bi_next = NULL;
index ba66e4445f412a3c6d3321de8050ad937d902deb..2e7de7a59bfca01c4a35edaa1824a7a38f90e52f 100644 (file)
@@ -522,38 +522,6 @@ static void pkt_bio_finished(struct pktcdvd_device *pd)
        }
 }
 
-static void pkt_bio_destructor(struct bio *bio)
-{
-       kfree(bio->bi_io_vec);
-       kfree(bio);
-}
-
-static struct bio *pkt_bio_alloc(int nr_iovecs)
-{
-       struct bio_vec *bvl = NULL;
-       struct bio *bio;
-
-       bio = kmalloc(sizeof(struct bio), GFP_KERNEL);
-       if (!bio)
-               goto no_bio;
-       bio_init(bio);
-
-       bvl = kcalloc(nr_iovecs, sizeof(struct bio_vec), GFP_KERNEL);
-       if (!bvl)
-               goto no_bvl;
-
-       bio->bi_max_vecs = nr_iovecs;
-       bio->bi_io_vec = bvl;
-       bio->bi_destructor = pkt_bio_destructor;
-
-       return bio;
-
- no_bvl:
-       kfree(bio);
- no_bio:
-       return NULL;
-}
-
 /*
  * Allocate a packet_data struct
  */
@@ -567,7 +535,7 @@ static struct packet_data *pkt_alloc_packet_data(int frames)
                goto no_pkt;
 
        pkt->frames = frames;
-       pkt->w_bio = pkt_bio_alloc(frames);
+       pkt->w_bio = bio_kmalloc(GFP_KERNEL, frames);
        if (!pkt->w_bio)
                goto no_bio;
 
@@ -581,9 +549,10 @@ static struct packet_data *pkt_alloc_packet_data(int frames)
        bio_list_init(&pkt->orig_bios);
 
        for (i = 0; i < frames; i++) {
-               struct bio *bio = pkt_bio_alloc(1);
+               struct bio *bio = bio_kmalloc(GFP_KERNEL, 1);
                if (!bio)
                        goto no_rd_bio;
+
                pkt->r_bios[i] = bio;
        }
 
@@ -1111,21 +1080,17 @@ static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
         * Schedule reads for missing parts of the packet.
         */
        for (f = 0; f < pkt->frames; f++) {
-               struct bio_vec *vec;
-
                int p, offset;
+
                if (written[f])
                        continue;
+
                bio = pkt->r_bios[f];
-               vec = bio->bi_io_vec;
-               bio_init(bio);
-               bio->bi_max_vecs = 1;
+               bio_reset(bio);
                bio->bi_sector = pkt->sector + f * (CD_FRAMESIZE >> 9);
                bio->bi_bdev = pd->bdev;
                bio->bi_end_io = pkt_end_io_read;
                bio->bi_private = pkt;
-               bio->bi_io_vec = vec;
-               bio->bi_destructor = pkt_bio_destructor;
 
                p = (f * CD_FRAMESIZE) / PAGE_SIZE;
                offset = (f * CD_FRAMESIZE) % PAGE_SIZE;
@@ -1418,14 +1383,11 @@ static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
        }
 
        /* Start the write request */
-       bio_init(pkt->w_bio);
-       pkt->w_bio->bi_max_vecs = PACKET_MAX_SIZE;
+       bio_reset(pkt->w_bio);
        pkt->w_bio->bi_sector = pkt->sector;
        pkt->w_bio->bi_bdev = pd->bdev;
        pkt->w_bio->bi_end_io = pkt_end_io_packet_write;
        pkt->w_bio->bi_private = pkt;
-       pkt->w_bio->bi_io_vec = bvec;
-       pkt->w_bio->bi_destructor = pkt_bio_destructor;
        for (f = 0; f < pkt->frames; f++)
                if (!bio_add_page(pkt->w_bio, bvec[f].bv_page, CD_FRAMESIZE, bvec[f].bv_offset))
                        BUG();
index 9ad3b5ec1dc1c521085db47a7928cc8cf1179701..9a54623e52d74ecc77953937bd923a9ecdb68d09 100644 (file)
@@ -158,8 +158,8 @@ struct xen_vbd {
        struct block_device     *bdev;
        /* Cached size parameter. */
        sector_t                size;
-       bool                    flush_support;
-       bool                    discard_secure;
+       unsigned int            flush_support:1;
+       unsigned int            discard_secure:1;
 };
 
 struct backend_info;
index 4f66171c668354b490f1284aec48278b8676bd84..f58434c2617cab4185b8c049804f1031f4226a6e 100644 (file)
@@ -105,11 +105,10 @@ static struct xen_blkif *xen_blkif_alloc(domid_t domid)
 {
        struct xen_blkif *blkif;
 
-       blkif = kmem_cache_alloc(xen_blkif_cachep, GFP_KERNEL);
+       blkif = kmem_cache_zalloc(xen_blkif_cachep, GFP_KERNEL);
        if (!blkif)
                return ERR_PTR(-ENOMEM);
 
-       memset(blkif, 0, sizeof(*blkif));
        blkif->domid = domid;
        spin_lock_init(&blkif->blk_ring_lock);
        atomic_set(&blkif->refcnt, 1);
@@ -196,7 +195,7 @@ static void xen_blkif_disconnect(struct xen_blkif *blkif)
        }
 }
 
-void xen_blkif_free(struct xen_blkif *blkif)
+static void xen_blkif_free(struct xen_blkif *blkif)
 {
        if (!atomic_dec_and_test(&blkif->refcnt))
                BUG();
@@ -257,7 +256,7 @@ static struct attribute_group xen_vbdstat_group = {
 VBD_SHOW(physical_device, "%x:%x\n", be->major, be->minor);
 VBD_SHOW(mode, "%s\n", be->mode);
 
-int xenvbd_sysfs_addif(struct xenbus_device *dev)
+static int xenvbd_sysfs_addif(struct xenbus_device *dev)
 {
        int error;
 
@@ -281,7 +280,7 @@ fail1:      device_remove_file(&dev->dev, &dev_attr_physical_device);
        return error;
 }
 
-void xenvbd_sysfs_delif(struct xenbus_device *dev)
+static void xenvbd_sysfs_delif(struct xenbus_device *dev)
 {
        sysfs_remove_group(&dev->dev.kobj, &xen_vbdstat_group);
        device_remove_file(&dev->dev, &dev_attr_mode);
index 54a3a6d09819922486f4de420f4ed0f96a6e7bb0..0bb207eaef2ff65e854866fddfdb11fce27cc066 100644 (file)
@@ -285,7 +285,7 @@ static long raw_ctl_compat_ioctl(struct file *file, unsigned int cmd,
 
 static const struct file_operations raw_fops = {
        .read           = do_sync_read,
-       .aio_read       = generic_file_aio_read,
+       .aio_read       = blkdev_aio_read,
        .write          = do_sync_write,
        .aio_write      = blkdev_aio_write,
        .fsync          = blkdev_fsync,
index 664743d6a6cdb7961ea7555544185ea4cff06357..bbf459bca61d9442e11b9ca544084877b2155268 100644 (file)
@@ -798,14 +798,6 @@ static int crypt_convert(struct crypt_config *cc,
        return 0;
 }
 
-static void dm_crypt_bio_destructor(struct bio *bio)
-{
-       struct dm_crypt_io *io = bio->bi_private;
-       struct crypt_config *cc = io->cc;
-
-       bio_free(bio, cc->bs);
-}
-
 /*
  * Generate a new unfragmented bio with the given size
  * This should never violate the device limitations
@@ -974,7 +966,6 @@ static void clone_init(struct dm_crypt_io *io, struct bio *clone)
        clone->bi_end_io  = crypt_endio;
        clone->bi_bdev    = cc->dev->bdev;
        clone->bi_rw      = io->base_bio->bi_rw;
-       clone->bi_destructor = dm_crypt_bio_destructor;
 }
 
 static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
@@ -988,19 +979,14 @@ static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
         * copy the required bvecs because we need the original
         * one in order to decrypt the whole bio data *afterwards*.
         */
-       clone = bio_alloc_bioset(gfp, bio_segments(base_bio), cc->bs);
+       clone = bio_clone_bioset(base_bio, gfp, cc->bs);
        if (!clone)
                return 1;
 
        crypt_inc_pending(io);
 
        clone_init(io, clone);
-       clone->bi_idx = 0;
-       clone->bi_vcnt = bio_segments(base_bio);
-       clone->bi_size = base_bio->bi_size;
        clone->bi_sector = cc->start + io->sector;
-       memcpy(clone->bi_io_vec, bio_iovec(base_bio),
-              sizeof(struct bio_vec) * clone->bi_vcnt);
 
        generic_make_request(clone);
        return 0;
index ea5dd289fe2a591cf62246eb10b36ab445de201f..1c46f97d6664a850ea286f90b6a36e82d6b73074 100644 (file)
@@ -249,16 +249,6 @@ static void vm_dp_init(struct dpages *dp, void *data)
        dp->context_ptr = data;
 }
 
-static void dm_bio_destructor(struct bio *bio)
-{
-       unsigned region;
-       struct io *io;
-
-       retrieve_io_and_region_from_bio(bio, &io, &region);
-
-       bio_free(bio, io->client->bios);
-}
-
 /*
  * Functions for getting the pages from kernel memory.
  */
@@ -317,7 +307,6 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,
                bio->bi_sector = where->sector + (where->count - remaining);
                bio->bi_bdev = where->bdev;
                bio->bi_end_io = endio;
-               bio->bi_destructor = dm_bio_destructor;
                store_io_and_region_in_bio(bio, io, region);
 
                if (rw & REQ_DISCARD) {
index 67ffa391edcf1e70a0cc94e7085a1e99abd7a151..66ceaff6455c9dec8de3680cb7ececc49bef896f 100644 (file)
@@ -86,12 +86,17 @@ struct dm_rq_target_io {
 };
 
 /*
- * For request-based dm.
- * One of these is allocated per bio.
+ * For request-based dm - the bio clones we allocate are embedded in these
+ * structs.
+ *
+ * We allocate these with bio_alloc_bioset, using the front_pad parameter when
+ * the bioset is created - this means the bio has to come at the end of the
+ * struct.
  */
 struct dm_rq_clone_bio_info {
        struct bio *orig;
        struct dm_rq_target_io *tio;
+       struct bio clone;
 };
 
 union map_info *dm_get_mapinfo(struct bio *bio)
@@ -211,6 +216,11 @@ struct dm_md_mempools {
 static struct kmem_cache *_io_cache;
 static struct kmem_cache *_tio_cache;
 static struct kmem_cache *_rq_tio_cache;
+
+/*
+ * Unused now, and needs to be deleted. But since io_pool is overloaded and it's
+ * still used for _io_cache, I'm leaving this for a later cleanup
+ */
 static struct kmem_cache *_rq_bio_info_cache;
 
 static int __init local_init(void)
@@ -467,16 +477,6 @@ static void free_rq_tio(struct dm_rq_target_io *tio)
        mempool_free(tio, tio->md->tio_pool);
 }
 
-static struct dm_rq_clone_bio_info *alloc_bio_info(struct mapped_device *md)
-{
-       return mempool_alloc(md->io_pool, GFP_ATOMIC);
-}
-
-static void free_bio_info(struct dm_rq_clone_bio_info *info)
-{
-       mempool_free(info, info->tio->md->io_pool);
-}
-
 static int md_in_flight(struct mapped_device *md)
 {
        return atomic_read(&md->pending[READ]) +
@@ -681,11 +681,6 @@ static void clone_endio(struct bio *bio, int error)
                }
        }
 
-       /*
-        * Store md for cleanup instead of tio which is about to get freed.
-        */
-       bio->bi_private = md->bs;
-
        free_tio(md, tio);
        bio_put(bio);
        dec_pending(io, error);
@@ -1036,11 +1031,6 @@ static void __map_bio(struct dm_target *ti, struct bio *clone,
                /* error the io and bail out, or requeue it if needed */
                md = tio->io->md;
                dec_pending(tio->io, r);
-               /*
-                * Store bio_set for cleanup.
-                */
-               clone->bi_end_io = NULL;
-               clone->bi_private = md->bs;
                bio_put(clone);
                free_tio(md, tio);
        } else if (r) {
@@ -1059,13 +1049,6 @@ struct clone_info {
        unsigned short idx;
 };
 
-static void dm_bio_destructor(struct bio *bio)
-{
-       struct bio_set *bs = bio->bi_private;
-
-       bio_free(bio, bs);
-}
-
 /*
  * Creates a little bio that just does part of a bvec.
  */
@@ -1077,7 +1060,6 @@ static struct bio *split_bvec(struct bio *bio, sector_t sector,
        struct bio_vec *bv = bio->bi_io_vec + idx;
 
        clone = bio_alloc_bioset(GFP_NOIO, 1, bs);
-       clone->bi_destructor = dm_bio_destructor;
        *clone->bi_io_vec = *bv;
 
        clone->bi_sector = sector;
@@ -1090,7 +1072,7 @@ static struct bio *split_bvec(struct bio *bio, sector_t sector,
        clone->bi_flags |= 1 << BIO_CLONED;
 
        if (bio_integrity(bio)) {
-               bio_integrity_clone(clone, bio, GFP_NOIO, bs);
+               bio_integrity_clone(clone, bio, GFP_NOIO);
                bio_integrity_trim(clone,
                                   bio_sector_offset(bio, idx, offset), len);
        }
@@ -1109,7 +1091,6 @@ static struct bio *clone_bio(struct bio *bio, sector_t sector,
 
        clone = bio_alloc_bioset(GFP_NOIO, bio->bi_max_vecs, bs);
        __bio_clone(clone, bio);
-       clone->bi_destructor = dm_bio_destructor;
        clone->bi_sector = sector;
        clone->bi_idx = idx;
        clone->bi_vcnt = idx + bv_count;
@@ -1117,7 +1098,7 @@ static struct bio *clone_bio(struct bio *bio, sector_t sector,
        clone->bi_flags &= ~(1 << BIO_SEG_VALID);
 
        if (bio_integrity(bio)) {
-               bio_integrity_clone(clone, bio, GFP_NOIO, bs);
+               bio_integrity_clone(clone, bio, GFP_NOIO);
 
                if (idx != bio->bi_idx || clone->bi_size < bio->bi_size)
                        bio_integrity_trim(clone,
@@ -1152,9 +1133,8 @@ static void __issue_target_request(struct clone_info *ci, struct dm_target *ti,
         * ci->bio->bi_max_vecs is BIO_INLINE_VECS anyway, for both flush
         * and discard, so no need for concern about wasted bvec allocations.
         */
-       clone = bio_alloc_bioset(GFP_NOIO, ci->bio->bi_max_vecs, ci->md->bs);
-       __bio_clone(clone, ci->bio);
-       clone->bi_destructor = dm_bio_destructor;
+       clone = bio_clone_bioset(ci->bio, GFP_NOIO, ci->md->bs);
+
        if (len) {
                clone->bi_sector = ci->sector;
                clone->bi_size = to_bytes(len);
@@ -1484,30 +1464,17 @@ void dm_dispatch_request(struct request *rq)
 }
 EXPORT_SYMBOL_GPL(dm_dispatch_request);
 
-static void dm_rq_bio_destructor(struct bio *bio)
-{
-       struct dm_rq_clone_bio_info *info = bio->bi_private;
-       struct mapped_device *md = info->tio->md;
-
-       free_bio_info(info);
-       bio_free(bio, md->bs);
-}
-
 static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig,
                                 void *data)
 {
        struct dm_rq_target_io *tio = data;
-       struct mapped_device *md = tio->md;
-       struct dm_rq_clone_bio_info *info = alloc_bio_info(md);
-
-       if (!info)
-               return -ENOMEM;
+       struct dm_rq_clone_bio_info *info =
+               container_of(bio, struct dm_rq_clone_bio_info, clone);
 
        info->orig = bio_orig;
        info->tio = tio;
        bio->bi_end_io = end_clone_bio;
        bio->bi_private = info;
-       bio->bi_destructor = dm_rq_bio_destructor;
 
        return 0;
 }
@@ -2771,7 +2738,10 @@ struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity)
        if (!pools->tio_pool)
                goto free_io_pool_and_out;
 
-       pools->bs = bioset_create(pool_size, 0);
+       pools->bs = (type == DM_TYPE_BIO_BASED) ?
+               bioset_create(pool_size, 0) :
+               bioset_create(pool_size,
+                             offsetof(struct dm_rq_clone_bio_info, clone));
        if (!pools->bs)
                goto free_tio_pool_and_out;
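A minimal sketch (not part of the patch) of the front-pad idiom the request-based dm hunks above rely on: the bioset is created with enough padding in front of every bio to embed the per-clone bookkeeping, and the owning structure is recovered with container_of(), so no separate allocation or destructor is needed. The names here (my_info, my_bioset_create) are hypothetical:

    struct my_info {
        void *cookie;               /* hypothetical per-bio bookkeeping */
        struct bio clone;           /* must be the last member */
    };

    /* Create a bioset whose bios are each preceded by a struct my_info. */
    static struct bio_set *my_bioset_create(unsigned pool_size)
    {
        return bioset_create(pool_size, offsetof(struct my_info, clone));
    }

    /* Recover the bookkeeping from a bio allocated out of that bioset. */
    static struct my_info *my_info_from_bio(struct bio *bio)
    {
        return container_of(bio, struct my_info, clone);
    }

This is why dm_rq_bio_destructor and the alloc_bio_info()/free_bio_info() pair can be deleted: the info now lives and dies with the bio itself.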
 
index 308e87b417e05a74e4429bac63e1eb2e7af43885..95c88012a3b9c71fb5581871d89f5e7de86ae396 100644 (file)
@@ -155,32 +155,17 @@ static int start_readonly;
  * like bio_clone, but with a local bio set
  */
 
-static void mddev_bio_destructor(struct bio *bio)
-{
-       struct mddev *mddev, **mddevp;
-
-       mddevp = (void*)bio;
-       mddev = mddevp[-1];
-
-       bio_free(bio, mddev->bio_set);
-}
-
 struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
                            struct mddev *mddev)
 {
        struct bio *b;
-       struct mddev **mddevp;
 
        if (!mddev || !mddev->bio_set)
                return bio_alloc(gfp_mask, nr_iovecs);
 
-       b = bio_alloc_bioset(gfp_mask, nr_iovecs,
-                            mddev->bio_set);
+       b = bio_alloc_bioset(gfp_mask, nr_iovecs, mddev->bio_set);
        if (!b)
                return NULL;
-       mddevp = (void*)b;
-       mddevp[-1] = mddev;
-       b->bi_destructor = mddev_bio_destructor;
        return b;
 }
 EXPORT_SYMBOL_GPL(bio_alloc_mddev);
@@ -188,32 +173,10 @@ EXPORT_SYMBOL_GPL(bio_alloc_mddev);
 struct bio *bio_clone_mddev(struct bio *bio, gfp_t gfp_mask,
                            struct mddev *mddev)
 {
-       struct bio *b;
-       struct mddev **mddevp;
-
        if (!mddev || !mddev->bio_set)
                return bio_clone(bio, gfp_mask);
 
-       b = bio_alloc_bioset(gfp_mask, bio->bi_max_vecs,
-                            mddev->bio_set);
-       if (!b)
-               return NULL;
-       mddevp = (void*)b;
-       mddevp[-1] = mddev;
-       b->bi_destructor = mddev_bio_destructor;
-       __bio_clone(b, bio);
-       if (bio_integrity(bio)) {
-               int ret;
-
-               ret = bio_integrity_clone(b, bio, gfp_mask, mddev->bio_set);
-
-               if (ret < 0) {
-                       bio_put(b);
-                       return NULL;
-               }
-       }
-
-       return b;
+       return bio_clone_bioset(bio, gfp_mask, mddev->bio_set);
 }
 EXPORT_SYMBOL_GPL(bio_clone_mddev);
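The wrappers above now delegate to bio_clone_bioset(), introduced in fs/bio.c by this series; it performs the same bio_alloc_bioset() + __bio_clone() + bio_integrity_clone() sequence, with error unwinding, that the deleted md code open-coded. A hedged usage sketch, assuming an existing bioset bs:

    struct bio *clone = bio_clone_bioset(bio, GFP_NOIO, bs);
    if (!clone)
        return NULL;            /* allocation failed, as before */

Since integrity payloads are cloned internally, callers such as the dm.c hunks above no longer pass a bio_set to bio_integrity_clone().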
 
@@ -5006,8 +4969,7 @@ int md_run(struct mddev *mddev)
        }
 
        if (mddev->bio_set == NULL)
-               mddev->bio_set = bioset_create(BIO_POOL_SIZE,
-                                              sizeof(struct mddev *));
+               mddev->bio_set = bioset_create(BIO_POOL_SIZE, 0);
 
        spin_lock(&pers_lock);
        pers = find_pers(mddev->level, mddev->clevel);
index de63a1fc3737b7ac2af3c0f7cbf60cd99b495a31..a9e4fa95dfaa76ac1d38434ac8153a6ffe1132c6 100644 (file)
@@ -422,6 +422,7 @@ static int raid0_run(struct mddev *mddev)
        if (md_check_no_bitmap(mddev))
                return -EINVAL;
        blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
+       blk_queue_max_write_same_sectors(mddev->queue, mddev->chunk_sectors);
 
        /* if private is not null, we are here after takeover */
        if (mddev->private == NULL) {
index 95f1814b5368c55bd05e7f2eef1c5759b476c41f..f79f2a88484da217c5ae5283668d5c2f469773e5 100644 (file)
@@ -24,3 +24,15 @@ config MSPRO_BLOCK
          support. This provides a block device driver, which you can use
          to mount the filesystem. Almost everyone wishing MemoryStick
          support should say Y or M here.
+
+config MS_BLOCK
+       tristate "MemoryStick Standard device driver"
+       depends on BLOCK && EXPERIMENTAL
+       help
+         Say Y here to enable the MemoryStick Standard device driver
+         support. This provides a block device driver, which you can use
+         to mount the filesystem.
+         This driver works with old (bulky) MemoryStick and MemoryStick Duo
+         cards, but not with MemoryStick PRO. Say Y if you have such a card.
+         The driver is new and not yet well tested; it may damage your card
+         (even permanently).
index ecd0299377386fee7e062a87c9209a5ad36f22f1..0d7f90c0ff25d6a03ef85d0a6c950af465565321 100644 (file)
@@ -3,5 +3,5 @@
 #
 
 obj-$(CONFIG_MEMSTICK)         += memstick.o
-
+obj-$(CONFIG_MS_BLOCK)         += ms_block.o
 obj-$(CONFIG_MSPRO_BLOCK)      += mspro_block.o
diff --git a/drivers/memstick/core/ms_block.c b/drivers/memstick/core/ms_block.c
new file mode 100644 (file)
index 0000000..c815fe5
--- /dev/null
@@ -0,0 +1,2395 @@
+/*
+ *  ms_block.c - Sony MemoryStick (legacy) storage support
+ *
+ *  Copyright (C) 2012 Maxim Levitsky <maximlevitsky@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Minor portions of the driver were copied from mspro_block.c which is
+ * Copyright (C) 2007 Alex Dubov <oakad@yahoo.com>
+ *
+ */
+
+#define pr_fmt(fmt) DRIVER_NAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/blkdev.h>
+#include <linux/memstick.h>
+#include <linux/idr.h>
+#include <linux/hdreg.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/random.h>
+#include <linux/bitmap.h>
+#include <linux/scatterlist.h>
+#include <linux/jiffies.h>
+#include <linux/workqueue.h>
+#include <linux/mutex.h>
+#include "ms_block.h"
+
+static int debug;
+static int cache_flush_timeout = 1000;
+static bool verify_writes;
+
+/*
+ * Copies a section of 'sg_from', starting at offset 'offset' and of length
+ * 'len', to another scatterlist of 'to_nents' entries
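+ * No page data is moved: this only builds sg_to entries that point at the
+ * same pages. Returns the number of bytes covered by the new mapping.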
+ */
+static size_t msb_sg_copy(struct scatterlist *sg_from, struct scatterlist *sg_to,
+                                       int to_nents, size_t offset, size_t len)
+{
+       size_t copied = 0;
+
+       while (offset > 0) {
+               if (offset >= sg_from->length) {
+                       if (sg_is_last(sg_from))
+                               return 0;
+
+                       offset -= sg_from->length;
+                       sg_from = sg_next(sg_from);
+                       continue;
+               }
+
+               copied = min(len, sg_from->length - offset);
+               sg_set_page(sg_to, sg_page(sg_from),
+                       copied, sg_from->offset + offset);
+
+               len -= copied;
+               offset = 0;
+
+               if (sg_is_last(sg_from) || !len)
+                       goto out;
+
+               sg_to = sg_next(sg_to);
+               to_nents--;
+               sg_from = sg_next(sg_from);
+       }
+
+       while (len > sg_from->length && to_nents--) {
+               len -= sg_from->length;
+               copied += sg_from->length;
+
+               sg_set_page(sg_to, sg_page(sg_from),
+                               sg_from->length, sg_from->offset);
+
+               if (sg_is_last(sg_from) || !len)
+                       goto out;
+
+               sg_from = sg_next(sg_from);
+               sg_to = sg_next(sg_to);
+       }
+
+       if (len && to_nents) {
+               sg_set_page(sg_to, sg_page(sg_from), len, sg_from->offset);
+               copied += len;
+       }
+out:
+       sg_mark_end(sg_to);
+       return copied;
+}
+
+/*
+ * Compares a section of 'sg', starting at offset 'offset' and of length
+ * 'len', to a linear buffer of length 'len' at address 'buffer'
+ * Returns 0 if equal and -1 otherwise
+ */
+static int msb_sg_compare_to_buffer(struct scatterlist *sg,
+                                       size_t offset, u8 *buffer, size_t len)
+{
+       int retval = 0, cmplen;
+       struct sg_mapping_iter miter;
+
+       sg_miter_start(&miter, sg, sg_nents(sg),
+                                       SG_MITER_ATOMIC | SG_MITER_FROM_SG);
+
+       while (sg_miter_next(&miter) && len > 0) {
+               if (offset >= miter.length) {
+                       offset -= miter.length;
+                       continue;
+               }
+
+               cmplen = min(miter.length - offset, len);
+               retval = memcmp(miter.addr + offset, buffer, cmplen) ? -1 : 0;
+               if (retval)
+                       break;
+
+               buffer += cmplen;
+               len -= cmplen;
+               offset = 0;
+       }
+
+       if (!retval && len)
+               retval = -1;
+
+       sg_miter_stop(&miter);
+       return retval;
+}
+
+
+/* Get the zone in which a block with logical address 'lba' lives.
+ * Flash is broken into zones.
+ * Each zone consists of 512 eraseblocks; the first zone exposes 494 of
+ * them and every following zone exposes 496.
+ * Therefore zone #0 hosts blocks 0-493, zone #1 blocks 494-989, etc...
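+ * Example: lba 493 -> zone 0, lba 494 -> zone 1, lba 990 -> zone 2.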
+ */
+static int msb_get_zone_from_lba(int lba)
+{
+       if (lba < 494)
+               return 0;
+       return ((lba - 494) / 496) + 1;
+}
+
+/* Get zone of physical block. Trivial */
+static int msb_get_zone_from_pba(int pba)
+{
+       return pba / MS_BLOCKS_IN_ZONE;
+}
+
+/* Debug test to validate free block counts */
+#ifdef DEBUG
+static int msb_validate_used_block_bitmap(struct msb_data *msb)
+{
+       int total_free_blocks = 0;
+       int i;
+
+       for (i = 0 ; i < msb->zone_count ; i++)
+               total_free_blocks += msb->free_block_count[i];
+
+       if (msb->block_count - bitmap_weight(msb->used_blocks_bitmap,
+                                       msb->block_count) == total_free_blocks)
+               return 0;
+
+       pr_err("BUG: free block counts don't match the bitmap");
+       msb->read_only = true;
+       return -EINVAL;
+}
+#endif
+
+/* Mark physical block as used */
+static void msb_mark_block_used(struct msb_data *msb, int pba)
+{
+       int zone = msb_get_zone_from_pba(pba);
+
+       if (test_bit(pba, msb->used_blocks_bitmap)) {
+               pr_err(
+               "BUG: attempt to mark already used pba %d as used", pba);
+               msb->read_only = true;
+               return;
+       }
+
+#ifdef DEBUG
+       if (msb_validate_used_block_bitmap(msb))
+               return;
+#endif
+       /* No races because all IO is single threaded */
+       __set_bit(pba, msb->used_blocks_bitmap);
+       msb->free_block_count[zone]--;
+}
+
+/* Mark physical block as free */
+static void msb_mark_block_unused(struct msb_data *msb, int pba)
+{
+       int zone = msb_get_zone_from_pba(pba);
+
+       if (!test_bit(pba, msb->used_blocks_bitmap)) {
+               pr_err("BUG: attempt to mark "
+                               "already unused pba %d as unused" , pba);
+               msb->read_only = true;
+               return;
+       }
+
+#ifdef DEBUG
+       if (msb_validate_used_block_bitmap(msb))
+               return;
+#endif
+       /* No races because all IO is single threaded */
+       __clear_bit(pba, msb->used_blocks_bitmap);
+       msb->free_block_count[zone]++;
+}
+
+/* Invalidate current register window */
+static void msb_invalidate_reg_window(struct msb_data *msb)
+{
+       msb->reg_addr.w_offset = offsetof(struct ms_register, id);
+       msb->reg_addr.w_length = sizeof(struct ms_id_register);
+       msb->reg_addr.r_offset = offsetof(struct ms_register, id);
+       msb->reg_addr.r_length = sizeof(struct ms_id_register);
+       msb->addr_valid = false;
+}
+
+/* Start a state machine */
+static int msb_run_state_machine(struct msb_data *msb, int (*state_func)
+               (struct memstick_dev *card, struct memstick_request **req))
+{
+       struct memstick_dev *card = msb->card;
+
+       WARN_ON(msb->state != -1);
+       msb->int_polling = false;
+       msb->state = 0;
+       msb->exit_error = 0;
+
+       memset(&card->current_mrq, 0, sizeof(card->current_mrq));
+
+       card->next_request = state_func;
+       memstick_new_req(card->host);
+       wait_for_completion(&card->mrq_complete);
+
+       WARN_ON(msb->state != -1);
+       return msb->exit_error;
+}
+
+/* State machine handlers call this to exit */
+static int msb_exit_state_machine(struct msb_data *msb, int error)
+{
+       WARN_ON(msb->state == -1);
+
+       msb->state = -1;
+       msb->exit_error = error;
+       msb->card->next_request = h_msb_default_bad;
+
+       /* Invalidate reg window on errors */
+       if (error)
+               msb_invalidate_reg_window(msb);
+
+       complete(&msb->card->mrq_complete);
+       return -ENXIO;
+}
+
+/* read INT register */
+static int msb_read_int_reg(struct msb_data *msb, long timeout)
+{
+       struct memstick_request *mrq = &msb->card->current_mrq;
+
+       WARN_ON(msb->state == -1);
+
+       if (!msb->int_polling) {
+               msb->int_timeout = jiffies +
+                       msecs_to_jiffies(timeout == -1 ? 500 : timeout);
+               msb->int_polling = true;
+       } else if (time_after(jiffies, msb->int_timeout)) {
+               mrq->data[0] = MEMSTICK_INT_CMDNAK;
+               return 0;
+       }
+
+       if ((msb->caps & MEMSTICK_CAP_AUTO_GET_INT) &&
+                               mrq->need_card_int && !mrq->error) {
+               mrq->data[0] = mrq->int_reg;
+               mrq->need_card_int = false;
+               return 0;
+       } else {
+               memstick_init_req(mrq, MS_TPC_GET_INT, NULL, 1);
+               return 1;
+       }
+}
+
+/* Read a register */
+static int msb_read_regs(struct msb_data *msb, int offset, int len)
+{
+       struct memstick_request *req = &msb->card->current_mrq;
+
+       if (msb->reg_addr.r_offset != offset ||
+           msb->reg_addr.r_length != len || !msb->addr_valid) {
+
+               msb->reg_addr.r_offset = offset;
+               msb->reg_addr.r_length = len;
+               msb->addr_valid = true;
+
+               memstick_init_req(req, MS_TPC_SET_RW_REG_ADRS,
+                       &msb->reg_addr, sizeof(msb->reg_addr));
+               return 0;
+       }
+
+       memstick_init_req(req, MS_TPC_READ_REG, NULL, len);
+       return 1;
+}
+
+/* Write a card register */
+static int msb_write_regs(struct msb_data *msb, int offset, int len, void *buf)
+{
+       struct memstick_request *req = &msb->card->current_mrq;
+
+       if (msb->reg_addr.w_offset != offset ||
+               msb->reg_addr.w_length != len  || !msb->addr_valid) {
+
+               msb->reg_addr.w_offset = offset;
+               msb->reg_addr.w_length = len;
+               msb->addr_valid = true;
+
+               memstick_init_req(req, MS_TPC_SET_RW_REG_ADRS,
+                       &msb->reg_addr, sizeof(msb->reg_addr));
+               return 0;
+       }
+
+       memstick_init_req(req, MS_TPC_WRITE_REG, buf, len);
+       return 1;
+}
+
+/* Default handler: fails any request; active when no IO is in flight */
+static int h_msb_default_bad(struct memstick_dev *card,
+                                               struct memstick_request **mrq)
+{
+       return -ENXIO;
+}
+
+/*
+ * This function is a handler for reads of one page from the device.
+ * Writes output to msb->current_sg, takes the sector address from
+ * msb->regs.param. Can also be used to read extra data only; set the
+ * params accordingly.
+ */
+static int h_msb_read_page(struct memstick_dev *card,
+                                       struct memstick_request **out_mrq)
+{
+       struct msb_data *msb = memstick_get_drvdata(card);
+       struct memstick_request *mrq = *out_mrq = &card->current_mrq;
+       struct scatterlist sg[2];
+       u8 command, intreg;
+
+       if (mrq->error) {
+               dbg("read_page, unknown error");
+               return msb_exit_state_machine(msb, mrq->error);
+       }
+again:
+       switch (msb->state) {
+       case MSB_RP_SEND_BLOCK_ADDRESS:
+               /* msb_write_regs sometimes "fails" because it first has to
+                       issue a request to update the reg window; in that
+                       case we stay in this state and retry */
+               if (!msb_write_regs(msb,
+                       offsetof(struct ms_register, param),
+                       sizeof(struct ms_param_register),
+                       (unsigned char *)&msb->regs.param))
+                       return 0;
+
+               msb->state = MSB_RP_SEND_READ_COMMAND;
+               return 0;
+
+       case MSB_RP_SEND_READ_COMMAND:
+               command = MS_CMD_BLOCK_READ;
+               memstick_init_req(mrq, MS_TPC_SET_CMD, &command, 1);
+               msb->state = MSB_RP_SEND_INT_REQ;
+               return 0;
+
+       case MSB_RP_SEND_INT_REQ:
+               msb->state = MSB_RP_RECEIVE_INT_REQ_RESULT;
+               /* If we don't actually need to send the INT read request
+                       (only needed in serial mode), just fall through */
+               if (msb_read_int_reg(msb, -1))
+                       return 0;
+               /* fallthrough */
+
+       case MSB_RP_RECEIVE_INT_REQ_RESULT:
+               intreg = mrq->data[0];
+               msb->regs.status.interrupt = intreg;
+
+               if (intreg & MEMSTICK_INT_CMDNAK)
+                       return msb_exit_state_machine(msb, -EIO);
+
+               if (!(intreg & MEMSTICK_INT_CED)) {
+                       msb->state = MSB_RP_SEND_INT_REQ;
+                       goto again;
+               }
+
+               msb->int_polling = false;
+               msb->state = (intreg & MEMSTICK_INT_ERR) ?
+                       MSB_RP_SEND_READ_STATUS_REG : MSB_RP_SEND_OOB_READ;
+               goto again;
+
+       case MSB_RP_SEND_READ_STATUS_REG:
+               /* read the status register to learn the source of the INT_ERR */
+               if (!msb_read_regs(msb,
+                       offsetof(struct ms_register, status),
+                       sizeof(struct ms_status_register)))
+                       return 0;
+
+               msb->state = MSB_RP_RECIVE_STATUS_REG;
+               return 0;
+
+       case MSB_RP_RECIVE_STATUS_REG:
+               msb->regs.status = *(struct ms_status_register *)mrq->data;
+               msb->state = MSB_RP_SEND_OOB_READ;
+               /* fallthrough */
+
+       case MSB_RP_SEND_OOB_READ:
+               if (!msb_read_regs(msb,
+                       offsetof(struct ms_register, extra_data),
+                       sizeof(struct ms_extra_data_register)))
+                       return 0;
+
+               msb->state = MSB_RP_RECEIVE_OOB_READ;
+               return 0;
+
+       case MSB_RP_RECEIVE_OOB_READ:
+               msb->regs.extra_data =
+                       *(struct ms_extra_data_register *) mrq->data;
+               msb->state = MSB_RP_SEND_READ_DATA;
+               /* fallthrough */
+
+       case MSB_RP_SEND_READ_DATA:
+               /* Skip that state if we only read the oob */
+               if (msb->regs.param.cp == MEMSTICK_CP_EXTRA) {
+                       msb->state = MSB_RP_RECEIVE_READ_DATA;
+                       goto again;
+               }
+
+               sg_init_table(sg, ARRAY_SIZE(sg));
+               msb_sg_copy(msb->current_sg, sg, ARRAY_SIZE(sg),
+                       msb->current_sg_offset,
+                       msb->page_size);
+
+               memstick_init_req_sg(mrq, MS_TPC_READ_LONG_DATA, sg);
+               msb->state = MSB_RP_RECEIVE_READ_DATA;
+               return 0;
+
+       case MSB_RP_RECEIVE_READ_DATA:
+               if (!(msb->regs.status.interrupt & MEMSTICK_INT_ERR)) {
+                       msb->current_sg_offset += msb->page_size;
+                       return msb_exit_state_machine(msb, 0);
+               }
+
+               if (msb->regs.status.status1 & MEMSTICK_UNCORR_ERROR) {
+                       dbg("read_page: uncorrectable error");
+                       return msb_exit_state_machine(msb, -EBADMSG);
+               }
+
+               if (msb->regs.status.status1 & MEMSTICK_CORR_ERROR) {
+                       dbg("read_page: correctable error");
+                       msb->current_sg_offset += msb->page_size;
+                       return msb_exit_state_machine(msb, -EUCLEAN);
+               } else {
+                       dbg("read_page: INT error, but no status error bits");
+                       return msb_exit_state_machine(msb, -EIO);
+               }
+       }
+
+       BUG();
+}
+
+/*
+ * Handler for writes of exactly one block.
+ * Takes the address from msb->regs.param.
+ * Writes the same extra data to all pages, taken from
+ * msb->regs.extra_data.
+ * Returns -EBADMSG if the write fails due to an uncorrectable error, or
+ * -EIO if the device refuses the command or another error occurs
+ */
+static int h_msb_write_block(struct memstick_dev *card,
+                                       struct memstick_request **out_mrq)
+{
+       struct msb_data *msb = memstick_get_drvdata(card);
+       struct memstick_request *mrq = *out_mrq = &card->current_mrq;
+       struct scatterlist sg[2];
+       u8 intreg, command;
+
+       if (mrq->error)
+               return msb_exit_state_machine(msb, mrq->error);
+
+again:
+       switch (msb->state) {
+
+       /* HACK: JMicron handling of TPCs between 8 and
+        *      sizeof(memstick_request.data) is broken due to a hardware
+        *      bug in the PIO mode that is used for these TPCs.
+        *      Therefore split the write
+        */
+
+       case MSB_WB_SEND_WRITE_PARAMS:
+               if (!msb_write_regs(msb,
+                       offsetof(struct ms_register, param),
+                       sizeof(struct ms_param_register),
+                       &msb->regs.param))
+                       return 0;
+
+               msb->state = MSB_WB_SEND_WRITE_OOB;
+               return 0;
+
+       case MSB_WB_SEND_WRITE_OOB:
+               if (!msb_write_regs(msb,
+                       offsetof(struct ms_register, extra_data),
+                       sizeof(struct ms_extra_data_register),
+                       &msb->regs.extra_data))
+                       return 0;
+               msb->state = MSB_WB_SEND_WRITE_COMMAND;
+               return 0;
+
+       case MSB_WB_SEND_WRITE_COMMAND:
+               command = MS_CMD_BLOCK_WRITE;
+               memstick_init_req(mrq, MS_TPC_SET_CMD, &command, 1);
+               msb->state = MSB_WB_SEND_INT_REQ;
+               return 0;
+
+       case MSB_WB_SEND_INT_REQ:
+               msb->state = MSB_WB_RECEIVE_INT_REQ;
+               if (msb_read_int_reg(msb, -1))
+                       return 0;
+               /* fallthrough */
+
+       case MSB_WB_RECEIVE_INT_REQ:
+               intreg = mrq->data[0];
+               msb->regs.status.interrupt = intreg;
+
+               /* errors mean out of here, and fast... */
+               if (intreg & (MEMSTICK_INT_CMDNAK))
+                       return msb_exit_state_machine(msb, -EIO);
+
+               if (intreg & MEMSTICK_INT_ERR)
+                       return msb_exit_state_machine(msb, -EBADMSG);
+
+               /* for last page we need to poll CED */
+               if (msb->current_page == msb->pages_in_block) {
+                       if (intreg & MEMSTICK_INT_CED)
+                               return msb_exit_state_machine(msb, 0);
+                       msb->state = MSB_WB_SEND_INT_REQ;
+                       goto again;
+
+               }
+
+               /* for non-last page we need BREQ before writing next chunk */
+               if (!(intreg & MEMSTICK_INT_BREQ)) {
+                       msb->state = MSB_WB_SEND_INT_REQ;
+                       goto again;
+               }
+
+               msb->int_polling = false;
+               msb->state = MSB_WB_SEND_WRITE_DATA;
+               /* fallthrough */
+
+       case MSB_WB_SEND_WRITE_DATA:
+               sg_init_table(sg, ARRAY_SIZE(sg));
+
+               if (msb_sg_copy(msb->current_sg, sg, ARRAY_SIZE(sg),
+                       msb->current_sg_offset,
+                       msb->page_size) < msb->page_size)
+                       return msb_exit_state_machine(msb, -EIO);
+
+               memstick_init_req_sg(mrq, MS_TPC_WRITE_LONG_DATA, sg);
+               mrq->need_card_int = 1;
+               msb->state = MSB_WB_RECEIVE_WRITE_CONFIRMATION;
+               return 0;
+
+       case MSB_WB_RECEIVE_WRITE_CONFIRMATION:
+               msb->current_page++;
+               msb->current_sg_offset += msb->page_size;
+               msb->state = MSB_WB_SEND_INT_REQ;
+               goto again;
+       default:
+               BUG();
+       }
+
+       return 0;
+}
+
+/*
+ * This function is used to send simple IO requests to the device that
+ * consist of a register write + command
+ */
+static int h_msb_send_command(struct memstick_dev *card,
+                                       struct memstick_request **out_mrq)
+{
+       struct msb_data *msb = memstick_get_drvdata(card);
+       struct memstick_request *mrq = *out_mrq = &card->current_mrq;
+       u8 intreg;
+
+       if (mrq->error) {
+               dbg("send_command: unknown error");
+               return msb_exit_state_machine(msb, mrq->error);
+       }
+again:
+       switch (msb->state) {
+
+       /* HACK: see h_msb_write_block */
+       case MSB_SC_SEND_WRITE_PARAMS: /* write param register*/
+               if (!msb_write_regs(msb,
+                       offsetof(struct ms_register, param),
+                       sizeof(struct ms_param_register),
+                       &msb->regs.param))
+                       return 0;
+               msb->state = MSB_SC_SEND_WRITE_OOB;
+               return 0;
+
+       case MSB_SC_SEND_WRITE_OOB:
+               if (!msb->command_need_oob) {
+                       msb->state = MSB_SC_SEND_COMMAND;
+                       goto again;
+               }
+
+               if (!msb_write_regs(msb,
+                       offsetof(struct ms_register, extra_data),
+                       sizeof(struct ms_extra_data_register),
+                       &msb->regs.extra_data))
+                       return 0;
+
+               msb->state = MSB_SC_SEND_COMMAND;
+               return 0;
+
+       case MSB_SC_SEND_COMMAND:
+               memstick_init_req(mrq, MS_TPC_SET_CMD, &msb->command_value, 1);
+               msb->state = MSB_SC_SEND_INT_REQ;
+               return 0;
+
+       case MSB_SC_SEND_INT_REQ:
+               msb->state = MSB_SC_RECEIVE_INT_REQ;
+               if (msb_read_int_reg(msb, -1))
+                       return 0;
+               /* fallthrough */
+
+       case MSB_SC_RECEIVE_INT_REQ:
+               intreg = mrq->data[0];
+
+               if (intreg & MEMSTICK_INT_CMDNAK)
+                       return msb_exit_state_machine(msb, -EIO);
+               if (intreg & MEMSTICK_INT_ERR)
+                       return msb_exit_state_machine(msb, -EBADMSG);
+
+               if (!(intreg & MEMSTICK_INT_CED)) {
+                       msb->state = MSB_SC_SEND_INT_REQ;
+                       goto again;
+               }
+
+               return msb_exit_state_machine(msb, 0);
+       }
+
+       BUG();
+}
+
+/* Small handler for card reset */
+static int h_msb_reset(struct memstick_dev *card,
+                                       struct memstick_request **out_mrq)
+{
+       u8 command = MS_CMD_RESET;
+       struct msb_data *msb = memstick_get_drvdata(card);
+       struct memstick_request *mrq = *out_mrq = &card->current_mrq;
+
+       if (mrq->error)
+               return msb_exit_state_machine(msb, mrq->error);
+
+       switch (msb->state) {
+       case MSB_RS_SEND:
+               memstick_init_req(mrq, MS_TPC_SET_CMD, &command, 1);
+               mrq->need_card_int = 0;
+               msb->state = MSB_RS_CONFIRM;
+               return 0;
+       case MSB_RS_CONFIRM:
+               return msb_exit_state_machine(msb, 0);
+       }
+       BUG();
+}
+
+/* This handler is used to do the serial->parallel switch */
+static int h_msb_parallel_switch(struct memstick_dev *card,
+                                       struct memstick_request **out_mrq)
+{
+       struct msb_data *msb = memstick_get_drvdata(card);
+       struct memstick_request *mrq = *out_mrq = &card->current_mrq;
+       struct memstick_host *host = card->host;
+
+       if (mrq->error) {
+               dbg("parallel_switch: error");
+               msb->regs.param.system &= ~MEMSTICK_SYS_PAM;
+               return msb_exit_state_machine(msb, mrq->error);
+       }
+
+       switch (msb->state) {
+       case MSB_PS_SEND_SWITCH_COMMAND:
+               /* Set the parallel interface on memstick side */
+               msb->regs.param.system |= MEMSTICK_SYS_PAM;
+
+               if (!msb_write_regs(msb,
+                       offsetof(struct ms_register, param),
+                       1,
+                       (unsigned char *)&msb->regs.param))
+                       return 0;
+
+               msb->state = MSB_PS_SWICH_HOST;
+               return 0;
+
+       case MSB_PS_SWICH_HOST:
+                /* Set parallel interface on our side + send a dummy request
+                       to see if card responds */
+               host->set_param(host, MEMSTICK_INTERFACE, MEMSTICK_PAR4);
+               memstick_init_req(mrq, MS_TPC_GET_INT, NULL, 1);
+               msb->state = MSB_PS_CONFIRM;
+               return 0;
+
+       case MSB_PS_CONFIRM:
+               return msb_exit_state_machine(msb, 0);
+       }
+
+       BUG();
+}
+
+static int msb_switch_to_parallel(struct msb_data *msb);
+
+/* Reset the card, to guard against hw errors being treated as bad blocks */
+static int msb_reset(struct msb_data *msb, bool full)
+{
+       bool was_parallel = msb->regs.param.system & MEMSTICK_SYS_PAM;
+       struct memstick_dev *card = msb->card;
+       struct memstick_host *host = card->host;
+       int error;
+
+       /* Reset the card */
+       msb->regs.param.system = MEMSTICK_SYS_BAMD;
+
+       if (full) {
+               error =  host->set_param(host,
+                                       MEMSTICK_POWER, MEMSTICK_POWER_OFF);
+               if (error)
+                       goto out_error;
+
+               msb_invalidate_reg_window(msb);
+
+               error = host->set_param(host,
+                                       MEMSTICK_POWER, MEMSTICK_POWER_ON);
+               if (error)
+                       goto out_error;
+
+               error = host->set_param(host,
+                                       MEMSTICK_INTERFACE, MEMSTICK_SERIAL);
+               if (error) {
+out_error:
+                       dbg("Failed to reset the host controller");
+                       msb->read_only = true;
+                       return -EFAULT;
+               }
+       }
+
+       error = msb_run_state_machine(msb, h_msb_reset);
+       if (error) {
+               dbg("Failed to reset the card");
+               msb->read_only = true;
+               return -ENODEV;
+       }
+
+       /* Set parallel mode */
+       if (was_parallel)
+               msb_switch_to_parallel(msb);
+       return 0;
+}
+
+/* Attempts to switch interface to parallel mode */
+static int msb_switch_to_parallel(struct msb_data *msb)
+{
+       int error;
+
+       error = msb_run_state_machine(msb, h_msb_parallel_switch);
+       if (error) {
+               pr_err("Switch to parallel failed");
+               msb->regs.param.system &= ~MEMSTICK_SYS_PAM;
+               msb_reset(msb, true);
+               return -EFAULT;
+       }
+
+       msb->caps |= MEMSTICK_CAP_AUTO_GET_INT;
+       return 0;
+}
+
+/* Changes overwrite flag on a page */
+static int msb_set_overwrite_flag(struct msb_data *msb,
+                                               u16 pba, u8 page, u8 flag)
+{
+       if (msb->read_only)
+               return -EROFS;
+
+       msb->regs.param.block_address = cpu_to_be16(pba);
+       msb->regs.param.page_address = page;
+       msb->regs.param.cp = MEMSTICK_CP_OVERWRITE;
+       msb->regs.extra_data.overwrite_flag = flag;
+       msb->command_value = MS_CMD_BLOCK_WRITE;
+       msb->command_need_oob = true;
+
+       dbg_verbose("changing overwrite flag to %02x for sector %d, page %d",
+                                                       flag, pba, page);
+       return msb_run_state_machine(msb, h_msb_send_command);
+}
+
+static int msb_mark_bad(struct msb_data *msb, int pba)
+{
+       pr_notice("marking pba %d as bad", pba);
+       msb_reset(msb, true);
+       return msb_set_overwrite_flag(
+                       msb, pba, 0, 0xFF & ~MEMSTICK_OVERWRITE_BKST);
+}
+
+static int msb_mark_page_bad(struct msb_data *msb, int pba, int page)
+{
+       dbg("marking page %d of pba %d as bad", page, pba);
+       msb_reset(msb, true);
+       return msb_set_overwrite_flag(msb,
+               pba, page, ~MEMSTICK_OVERWRITE_PGST0);
+}
+
+/* Erases one physical block */
+static int msb_erase_block(struct msb_data *msb, u16 pba)
+{
+       int error, try;
+       if (msb->read_only)
+               return -EROFS;
+
+       dbg_verbose("erasing pba %d", pba);
+
+       for (try = 1 ; try < 3 ; try++) {
+               msb->regs.param.block_address = cpu_to_be16(pba);
+               msb->regs.param.page_address = 0;
+               msb->regs.param.cp = MEMSTICK_CP_BLOCK;
+               msb->command_value = MS_CMD_BLOCK_ERASE;
+               msb->command_need_oob = false;
+
+               error = msb_run_state_machine(msb, h_msb_send_command);
+               if (!error || msb_reset(msb, true))
+                       break;
+       }
+
+       if (error) {
+               pr_err("erase failed, marking pba %d as bad", pba);
+               msb_mark_bad(msb, pba);
+       }
+
+       dbg_verbose("erase success, marking pba %d as unused", pba);
+       msb_mark_block_unused(msb, pba);
+       __set_bit(pba, msb->erased_blocks_bitmap);
+       return error;
+}
+
+/* Reads one page from device */
+static int msb_read_page(struct msb_data *msb,
+       u16 pba, u8 page, struct ms_extra_data_register *extra,
+                                       struct scatterlist *sg,  int offset)
+{
+       int try, error;
+
+       if (pba == MS_BLOCK_INVALID) {
+               unsigned long flags;
+               struct sg_mapping_iter miter;
+               size_t len = msb->page_size;
+
+               dbg_verbose("read unmapped sector. returning 0xFF");
+
+               local_irq_save(flags);
+               sg_miter_start(&miter, sg, sg_nents(sg),
+                               SG_MITER_ATOMIC | SG_MITER_TO_SG);
+
+               while (sg_miter_next(&miter) && len > 0) {
+
+                       int chunklen;
+
+                       if (offset && offset >= miter.length) {
+                               offset -= miter.length;
+                               continue;
+                       }
+
+                       chunklen = min(miter.length - offset, len);
+                       memset(miter.addr + offset, 0xFF, chunklen);
+                       len -= chunklen;
+                       offset = 0;
+               }
+
+               sg_miter_stop(&miter);
+               local_irq_restore(flags);
+
+               if (offset)
+                       return -EFAULT;
+
+               if (extra)
+                       memset(extra, 0xFF, sizeof(*extra));
+               return 0;
+       }
+
+       if (pba >= msb->block_count) {
+               pr_err("BUG: attempt to read beyond"
+                                       " the end of the card at pba %d", pba);
+               return -EINVAL;
+       }
+
+       for (try = 1 ; try < 3 ; try++) {
+               msb->regs.param.block_address = cpu_to_be16(pba);
+               msb->regs.param.page_address = page;
+               msb->regs.param.cp = MEMSTICK_CP_PAGE;
+
+               msb->current_sg = sg;
+               msb->current_sg_offset = offset;
+               error = msb_run_state_machine(msb, h_msb_read_page);
+
+               if (error == -EUCLEAN) {
+                       pr_notice("correctable error on pba %d, page %d",
+                               pba, page);
+                       error = 0;
+               }
+
+               if (!error && extra)
+                       *extra = msb->regs.extra_data;
+
+               if (!error || msb_reset(msb, true))
+                       break;
+       }
+
+       /* Mark bad pages */
+       if (error == -EBADMSG) {
+               pr_err("uncorrectable error on read of pba %d, page %d",
+                       pba, page);
+
+               if (msb->regs.extra_data.overwrite_flag &
+                                       MEMSTICK_OVERWRITE_PGST0)
+                       msb_mark_page_bad(msb, pba, page);
+               return -EBADMSG;
+       }
+
+       if (error)
+               pr_err("read of pba %d, page %d failed with error %d",
+                       pba, page, error);
+       return error;
+}
+
+/* Reads oob of page only */
+static int msb_read_oob(struct msb_data *msb, u16 pba, u16 page,
+       struct ms_extra_data_register *extra)
+{
+       int error;
+
+       BUG_ON(!extra);
+       msb->regs.param.block_address = cpu_to_be16(pba);
+       msb->regs.param.page_address = page;
+       msb->regs.param.cp = MEMSTICK_CP_EXTRA;
+
+       if (pba >= msb->block_count) {
+               pr_err("BUG: attempt to read beyond"
+                                       " the end of card at pba %d", pba);
+               return -EINVAL;
+       }
+
+       error = msb_run_state_machine(msb, h_msb_read_page);
+       *extra = msb->regs.extra_data;
+
+       if (error == -EUCLEAN) {
+               pr_notice("correctable error on pba %d, page %d",
+                       pba, page);
+               return 0;
+       }
+
+       return error;
+}
+
+/* Reads a block and compares it with data contained in scatterlist orig_sg */
+static int msb_verify_block(struct msb_data *msb, u16 pba,
+                               struct scatterlist *orig_sg,  int offset)
+{
+       struct scatterlist sg;
+       int page = 0, error;
+
+       sg_init_one(&sg, msb->block_buffer, msb->block_size);
+
+       while (page < msb->pages_in_block) {
+
+               error = msb_read_page(msb, pba, page,
+                               NULL, &sg, page * msb->page_size);
+               if (error)
+                       return error;
+               page++;
+       }
+
+       if (msb_sg_compare_to_buffer(orig_sg, offset,
+                               msb->block_buffer, msb->block_size))
+               return -EIO;
+       return 0;
+}
+
+/* Writes exactly one block + oob */
+static int msb_write_block(struct msb_data *msb,
+                       u16 pba, u32 lba, struct scatterlist *sg, int offset)
+{
+       int error, current_try = 1;
+       BUG_ON(sg->length < msb->page_size);
+
+       if (msb->read_only)
+               return -EROFS;
+
+       if (pba == MS_BLOCK_INVALID) {
+               pr_err(
+                       "BUG: write: attempt to write MS_BLOCK_INVALID block");
+               return -EINVAL;
+       }
+
+       if (pba >= msb->block_count || lba >= msb->logical_block_count) {
+               pr_err(
+               "BUG: write: attempt to write beyond the end of device");
+               return -EINVAL;
+       }
+
+       if (msb_get_zone_from_lba(lba) != msb_get_zone_from_pba(pba)) {
+               pr_err("BUG: write: lba zone mismatch");
+               return -EINVAL;
+       }
+
+       if (pba == msb->boot_block_locations[0] ||
+               pba == msb->boot_block_locations[1]) {
+               pr_err("BUG: write: attempt to write to boot blocks!");
+               return -EINVAL;
+       }
+
+       while (1) {
+
+               if (msb->read_only)
+                       return -EROFS;
+
+               msb->regs.param.cp = MEMSTICK_CP_BLOCK;
+               msb->regs.param.page_address = 0;
+               msb->regs.param.block_address = cpu_to_be16(pba);
+
+               msb->regs.extra_data.management_flag = 0xFF;
+               msb->regs.extra_data.overwrite_flag = 0xF8;
+               msb->regs.extra_data.logical_address = cpu_to_be16(lba);
+
+               msb->current_sg = sg;
+               msb->current_sg_offset = offset;
+               msb->current_page = 0;
+
+               error = msb_run_state_machine(msb, h_msb_write_block);
+
+               /* The sector we just wrote to is assumed erased since its
+                       pba was erased. If it wasn't actually erased, the
+                       write will succeed but will merely clear bits that
+                       were already set in the block, so verify that what
+                       we have written matches what we expect.
+                       We do trust the blocks that we erased ourselves */
+               if (!error && (verify_writes ||
+                               !test_bit(pba, msb->erased_blocks_bitmap)))
+                       error = msb_verify_block(msb, pba, sg, offset);
+
+               if (!error)
+                       break;
+
+               if (current_try > 1 || msb_reset(msb, true))
+                       break;
+
+               pr_err("write failed, trying to erase the pba %d", pba);
+               error = msb_erase_block(msb, pba);
+               if (error)
+                       break;
+
+               current_try++;
+       }
+       return error;
+}
+
+/* Finds a free block for write replacement */
+static u16 msb_get_free_block(struct msb_data *msb, int zone)
+{
+       u16 pos;
+       int pba = zone * MS_BLOCKS_IN_ZONE;
+       int i;
+
+       get_random_bytes(&pos, sizeof(pos));
+
+       if (!msb->free_block_count[zone]) {
+               pr_err("NO free blocks in the zone %d, to use for a write, "
+                       "(media is WORN out) switching to RO mode", zone);
+               msb->read_only = true;
+               return MS_BLOCK_INVALID;
+       }
+
+       pos %= msb->free_block_count[zone];
+
+       dbg_verbose("have %d choices for a free block, selected randomally: %d",
+               msb->free_block_count[zone], pos);
+
+       pba = find_next_zero_bit(msb->used_blocks_bitmap,
+                                                       msb->block_count, pba);
+       for (i = 0 ; i < pos ; ++i)
+               pba = find_next_zero_bit(msb->used_blocks_bitmap,
+                                               msb->block_count, pba + 1);
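+       /* pba is now the (pos + 1)-th clear bit at or after the start of
+        * the zone; the checks below confirm it is still inside the zone */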
+
+       dbg_verbose("result of the free blocks scan: pba %d", pba);
+
+       if (pba == msb->block_count || (msb_get_zone_from_pba(pba)) != zone) {
+               pr_err("BUG: cant get a free block");
+               msb->read_only = true;
+               return MS_BLOCK_INVALID;
+       }
+
+       msb_mark_block_used(msb, pba);
+       return pba;
+}
+
+static int msb_update_block(struct msb_data *msb, u16 lba,
+       struct scatterlist *sg, int offset)
+{
+       u16 pba, new_pba;
+       int error, try;
+
+       pba = msb->lba_to_pba_table[lba];
+       dbg_verbose("start of a block update at lba  %d, pba %d", lba, pba);
+
+       if (pba != MS_BLOCK_INVALID) {
+               dbg_verbose("setting the update flag on the block");
+               msb_set_overwrite_flag(msb, pba, 0,
+                               0xFF & ~MEMSTICK_OVERWRITE_UDST);
+       }
+
+       for (try = 0 ; try < 3 ; try++) {
+               new_pba = msb_get_free_block(msb,
+                       msb_get_zone_from_lba(lba));
+
+               if (new_pba == MS_BLOCK_INVALID) {
+                       error = -EIO;
+                       goto out;
+               }
+
+               dbg_verbose("block update: writing updated block to the pba %d",
+                                                               new_pba);
+               error = msb_write_block(msb, new_pba, lba, sg, offset);
+               if (error == -EBADMSG) {
+                       msb_mark_bad(msb, new_pba);
+                       continue;
+               }
+
+               if (error)
+                       goto out;
+
+               dbg_verbose("block update: erasing the old block");
+               msb_erase_block(msb, pba);
+               msb->lba_to_pba_table[lba] = new_pba;
+               return 0;
+       }
+out:
+       if (error) {
+               pr_err("block update error after %d tries, "
+                                               "switching to r/o mode", try);
+               msb->read_only = true;
+       }
+       return error;
+}
+
+/* Converts endianness in the boot block for easy use */
+static void msb_fix_boot_page_endianness(struct ms_boot_page *p)
+{
+       p->header.block_id = be16_to_cpu(p->header.block_id);
+       p->header.format_reserved = be16_to_cpu(p->header.format_reserved);
+       p->entry.disabled_block.start_addr
+               = be32_to_cpu(p->entry.disabled_block.start_addr);
+       p->entry.disabled_block.data_size
+               = be32_to_cpu(p->entry.disabled_block.data_size);
+       p->entry.cis_idi.start_addr
+               = be32_to_cpu(p->entry.cis_idi.start_addr);
+       p->entry.cis_idi.data_size
+               = be32_to_cpu(p->entry.cis_idi.data_size);
+       p->attr.block_size = be16_to_cpu(p->attr.block_size);
+       p->attr.number_of_blocks = be16_to_cpu(p->attr.number_of_blocks);
+       p->attr.number_of_effective_blocks
+               = be16_to_cpu(p->attr.number_of_effective_blocks);
+       p->attr.page_size = be16_to_cpu(p->attr.page_size);
+       p->attr.memory_manufacturer_code
+               = be16_to_cpu(p->attr.memory_manufacturer_code);
+       p->attr.memory_device_code = be16_to_cpu(p->attr.memory_device_code);
+       p->attr.implemented_capacity
+               = be16_to_cpu(p->attr.implemented_capacity);
+       p->attr.controller_number = be16_to_cpu(p->attr.controller_number);
+       p->attr.controller_function = be16_to_cpu(p->attr.controller_function);
+}
+
+static int msb_read_boot_blocks(struct msb_data *msb)
+{
+       int pba = 0;
+       struct scatterlist sg;
+       struct ms_extra_data_register extra;
+       struct ms_boot_page *page;
+
+       msb->boot_block_locations[0] = MS_BLOCK_INVALID;
+       msb->boot_block_locations[1] = MS_BLOCK_INVALID;
+       msb->boot_block_count = 0;
+
+       dbg_verbose("Start of a scan for the boot blocks");
+
+       if (!msb->boot_page) {
+               page = kmalloc(sizeof(struct ms_boot_page)*2, GFP_KERNEL);
+               if (!page)
+                       return -ENOMEM;
+
+               msb->boot_page = page;
+       } else
+               page = msb->boot_page;
+
+       msb->block_count = MS_BLOCK_MAX_BOOT_ADDR;
+
+       for (pba = 0 ; pba < MS_BLOCK_MAX_BOOT_ADDR ; pba++) {
+
+               sg_init_one(&sg, page, sizeof(*page));
+               if (msb_read_page(msb, pba, 0, &extra, &sg, 0)) {
+                       dbg("boot scan: can't read pba %d", pba);
+                       continue;
+               }
+
+               if (extra.management_flag & MEMSTICK_MANAGEMENT_SYSFLG) {
+                       dbg("managment flag doesn't indicate boot block %d",
+                                                                       pba);
+                       continue;
+               }
+
+               if (be16_to_cpu(page->header.block_id) != MS_BLOCK_BOOT_ID) {
+                       dbg("the pba at %d doesn' contain boot block ID", pba);
+                       continue;
+               }
+
+               msb_fix_boot_page_endianness(page);
+               msb->boot_block_locations[msb->boot_block_count] = pba;
+
+               page++;
+               msb->boot_block_count++;
+
+               if (msb->boot_block_count == 2)
+                       break;
+       }
+
+       if (!msb->boot_block_count) {
+               pr_err("media doesn't contain master page, aborting");
+               return -EIO;
+       }
+
+       dbg_verbose("End of scan for boot blocks");
+       return 0;
+}
+
+static int msb_read_bad_block_table(struct msb_data *msb, int block_nr)
+{
+       struct ms_boot_page *boot_block;
+       struct scatterlist sg;
+       u16 *buffer = NULL;
+       int offset = 0;
+       int i, error = 0;
+       int data_size, data_offset, page, page_offset, size_to_read;
+       u16 pba;
+
+       BUG_ON(block_nr > 1);
+       boot_block = &msb->boot_page[block_nr];
+       pba = msb->boot_block_locations[block_nr];
+
+       if (msb->boot_block_locations[block_nr] == MS_BLOCK_INVALID)
+               return -EINVAL;
+
+       data_size = boot_block->entry.disabled_block.data_size;
+       data_offset = sizeof(struct ms_boot_page) +
+                       boot_block->entry.disabled_block.start_addr;
+       if (!data_size)
+               return 0;
+
+       page = data_offset / msb->page_size;
+       page_offset = data_offset % msb->page_size;
+       size_to_read =
+               DIV_ROUND_UP(data_size + page_offset, msb->page_size) *
+                       msb->page_size;
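+       /* Example (hypothetical numbers): with a 512-byte page_size and a
+        * data_offset of 1056, the table starts at page 2, page_offset 32,
+        * and size_to_read rounds data_size + 32 up to whole pages.
+        */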
+
+       dbg("reading bad block of boot block at pba %d, offset %d len %d",
+               pba, data_offset, data_size);
+
+       buffer = kzalloc(size_to_read, GFP_KERNEL);
+       if (!buffer)
+               return -ENOMEM;
+
+       /* Read the buffer */
+       sg_init_one(&sg, buffer, size_to_read);
+
+       while (offset < size_to_read) {
+               error = msb_read_page(msb, pba, page, NULL, &sg, offset);
+               if (error)
+                       goto out;
+
+               page++;
+               offset += msb->page_size;
+
+               if (page == msb->pages_in_block) {
+                       pr_err(
+                       "bad block table extends beyond the boot block");
+                       break;
+               }
+       }
+
+       /* Process the bad block table */
+       for (i = page_offset ; i < data_size / sizeof(u16) ; i++) {
+
+               u16 bad_block = be16_to_cpu(buffer[i]);
+
+               if (bad_block >= msb->block_count) {
+                       dbg("bad block table contains invalid block %d",
+                                                               bad_block);
+                       continue;
+               }
+
+               if (test_bit(bad_block, msb->used_blocks_bitmap))  {
+                       dbg("duplicate bad block %d in the table",
+                               bad_block);
+                       continue;
+               }
+
+               dbg("block %d is marked as factory bad", bad_block);
+               msb_mark_block_used(msb, bad_block);
+       }
+out:
+       kfree(buffer);
+       return error;
+}
+
+static int msb_ftl_initialize(struct msb_data *msb)
+{
+       int i;
+
+       if (msb->ftl_initialized)
+               return 0;
+
+       msb->zone_count = msb->block_count / MS_BLOCKS_IN_ZONE;
+       msb->logical_block_count = msb->zone_count * 496 - 2;
+
+       msb->used_blocks_bitmap = kzalloc(msb->block_count / 8, GFP_KERNEL);
+       msb->erased_blocks_bitmap = kzalloc(msb->block_count / 8, GFP_KERNEL);
+       msb->lba_to_pba_table =
+               kmalloc(msb->logical_block_count * sizeof(u16), GFP_KERNEL);
+
+       if (!msb->used_blocks_bitmap || !msb->lba_to_pba_table ||
+                                               !msb->erased_blocks_bitmap) {
+               kfree(msb->used_blocks_bitmap);
+               kfree(msb->lba_to_pba_table);
+               kfree(msb->erased_blocks_bitmap);
+               return -ENOMEM;
+       }
+
+       for (i = 0 ; i < msb->zone_count ; i++)
+               msb->free_block_count[i] = MS_BLOCKS_IN_ZONE;
+
+       memset(msb->lba_to_pba_table, MS_BLOCK_INVALID,
+                       msb->logical_block_count * sizeof(u16));
+
+       dbg("initial FTL tables created. Zone count = %d, "
+                                       "Logical block count = %d",
+               msb->zone_count, msb->logical_block_count);
+
+       msb->ftl_initialized = true;
+       return 0;
+}
+
+static int msb_ftl_scan(struct msb_data *msb)
+{
+       u16 pba, lba, other_block;
+       u8 overwrite_flag, management_flag, other_overwrite_flag;
+       int error;
+       struct ms_extra_data_register extra;
+       u8 *overwrite_flags = kzalloc(msb->block_count, GFP_KERNEL);
+
+       if (!overwrite_flags)
+               return -ENOMEM;
+
+       dbg("Start of media scanning");
+       for (pba = 0 ; pba < msb->block_count ; pba++) {
+
+               if (pba == msb->boot_block_locations[0] ||
+                       pba == msb->boot_block_locations[1]) {
+                       dbg_verbose("pba %05d -> [boot block]", pba);
+                       msb_mark_block_used(msb, pba);
+                       continue;
+               }
+
+               if (test_bit(pba, msb->used_blocks_bitmap)) {
+                       dbg_verbose("pba %05d -> [factory bad]", pba);
+                       continue;
+               }
+
+               memset(&extra, 0, sizeof(extra));
+               error = msb_read_oob(msb, pba, 0, &extra);
+
+               /* can't trust the page if we can't read the oob */
+               if (error == -EBADMSG) {
+                       pr_notice(
+                       "oob of pba %d damaged, will try to erase it", pba);
+                       msb_mark_block_used(msb, pba);
+                       msb_erase_block(msb, pba);
+                       continue;
+               } else if (error)
+                       return error;
+
+               lba = be16_to_cpu(extra.logical_address);
+               management_flag = extra.management_flag;
+               overwrite_flag = extra.overwrite_flag;
+               overwrite_flags[pba] = overwrite_flag;
+
+               /* Skip bad blocks */
+               if (!(overwrite_flag & MEMSTICK_OVERWRITE_BKST)) {
+                       dbg("pba %05d -> [BAD]", pba);
+                       msb_mark_block_used(msb, pba);
+                       continue;
+               }
+
+               /* Skip system/drm blocks */
+               if ((managment_flag & MEMSTICK_MANAGMENT_FLAG_NORMAL) !=
+                       MEMSTICK_MANAGMENT_FLAG_NORMAL) {
+                       dbg("pba %05d -> [reserved managment flag %02x]",
+                                                       pba, managment_flag);
+                       msb_mark_block_used(msb, pba);
+                       continue;
+               }
+
+               /* Erase temporary tables */
+               if (!(managment_flag & MEMSTICK_MANAGEMENT_ATFLG)) {
+                       dbg("pba %05d -> [temp table] - will erase", pba);
+
+                       msb_mark_block_used(msb, pba);
+                       msb_erase_block(msb, pba);
+                       continue;
+               }
+
+               if (lba == MS_BLOCK_INVALID) {
+                       dbg_verbose("pba %05d -> [free]", pba);
+                       continue;
+               }
+
+               msb_mark_block_used(msb, pba);
+
+               /* Block has an LBA that does not match its zone */
+               if (msb_get_zone_from_lba(lba) != msb_get_zone_from_pba(pba)) {
+                       pr_notice("pba %05d -> [bad lba %05d] - will erase",
+                                                               pba, lba);
+                       msb_erase_block(msb, pba);
+                       continue;
+               }
+
+               /* No collisions - great */
+               if (msb->lba_to_pba_table[lba] == MS_BLOCK_INVALID) {
+                       dbg_verbose("pba %05d -> [lba %05d]", pba, lba);
+                       msb->lba_to_pba_table[lba] = pba;
+                       continue;
+               }
+
+               other_block = msb->lba_to_pba_table[lba];
+               other_overwrite_flag = overwrite_flags[other_block];
+
+               pr_notice("Collision between pba %d and pba %d",
+                       pba, other_block);
+
+               if (!(overwrite_flag & MEMSTICK_OVERWRITE_UDST)) {
+                       pr_notice("pba %d is marked as stable, use it", pba);
+                       msb_erase_block(msb, other_block);
+                       msb->lba_to_pba_table[lba] = pba;
+                       continue;
+               }
+
+               if (!(other_overwrite_flag & MEMSTICK_OVERWRITE_UDST)) {
+                       pr_notice("pba %d is marked as stable, use it",
+                                                               other_block);
+                       msb_erase_block(msb, pba);
+                       continue;
+               }
+
+               pr_notice("collision between blocks %d and %d,"
+               " without stable flag set on both, erasing pba %d",
+                               pba, other_block, other_block);
+
+               msb_erase_block(msb, other_block);
+               msb->lba_to_pba_table[lba] = pba;
+       }
+
+       dbg("End of media scanning");
+       kfree(overwrite_flags);
+       return 0;
+}
+
+static void msb_cache_flush_timer(unsigned long data)
+{
+       struct msb_data *msb = (struct msb_data *)data;
+       msb->need_flush_cache = true;
+       queue_work(msb->io_queue, &msb->io_work);
+}
+
+static void msb_cache_discard(struct msb_data *msb)
+{
+       if (msb->cache_block_lba == MS_BLOCK_INVALID)
+               return;
+
+       del_timer_sync(&msb->cache_flush_timer);
+
+       dbg_verbose("Discarding the write cache");
+       msb->cache_block_lba = MS_BLOCK_INVALID;
+       bitmap_zero(&msb->valid_cache_bitmap, msb->pages_in_block);
+}
+
+static int msb_cache_init(struct msb_data *msb)
+{
+       setup_timer(&msb->cache_flush_timer, msb_cache_flush_timer,
+               (unsigned long)msb);
+
+       if (!msb->cache)
+               msb->cache = kzalloc(msb->block_size, GFP_KERNEL);
+       if (!msb->cache)
+               return -ENOMEM;
+
+       msb_cache_discard(msb);
+       return 0;
+}
+
+static int msb_cache_flush(struct msb_data *msb)
+{
+       struct scatterlist sg;
+       struct ms_extra_data_register extra;
+       int page, offset, error;
+       u16 pba, lba;
+
+       if (msb->read_only)
+               return -EROFS;
+
+       if (msb->cache_block_lba == MS_BLOCK_INVALID)
+               return 0;
+
+       lba = msb->cache_block_lba;
+       pba = msb->lba_to_pba_table[lba];
+
+       dbg_verbose("Flushing the write cache of pba %d (LBA %d)",
+                                               pba, msb->cache_block_lba);
+
+       sg_init_one(&sg, msb->cache, msb->block_size);
+
+       /* Read all missing pages in cache */
+       for (page = 0 ; page < msb->pages_in_block ; page++) {
+
+               if (test_bit(page, &msb->valid_cache_bitmap))
+                       continue;
+
+               offset = page * msb->page_size;
+
+               dbg_verbose("reading non-present sector %d of cache block %d",
+                       page, lba);
+               error = msb_read_page(msb, pba, page, &extra, &sg, offset);
+
+               /* Bad pages are copied with 00 page status */
+               if (error == -EBADMSG) {
+                       pr_err("read error on sector %d, contents probably"
+                               " damaged", page);
+                       continue;
+               }
+
+               if (error)
+                       return error;
+
+               if ((extra.overwrite_flag & MEMSTICK_OV_PG_NORMAL) !=
+                                                       MEMSTICK_OV_PG_NORMAL) {
+                       dbg("page %d is marked as bad", page);
+                       continue;
+               }
+
+               set_bit(page, &msb->valid_cache_bitmap);
+       }
+
+       /* Write the cache now */
+       error = msb_update_block(msb, msb->cache_block_lba, &sg, 0);
+       pba = msb->lba_to_pba_table[msb->cache_block_lba];
+
+       /* Mark invalid pages */
+       if (!error) {
+               for (page = 0 ; page < msb->pages_in_block ; page++) {
+
+                       if (test_bit(page, &msb->valid_cache_bitmap))
+                               continue;
+
+                       dbg("marking page %d as containing damaged data",
+                               page);
+                       msb_set_overwrite_flag(msb,
+                               pba, page, 0xFF & ~MEMSTICK_OV_PG_NORMAL);
+               }
+       }
+
+       msb_cache_discard(msb);
+       return error;
+}
+
+static int msb_cache_write(struct msb_data *msb, int lba,
+       int page, bool add_to_cache_only, struct scatterlist *sg, int offset)
+{
+       int error;
+       struct scatterlist sg_tmp[10];
+
+       if (msb->read_only)
+               return -EROFS;
+
+       if (add_to_cache_only &&
+               (msb->cache_block_lba == MS_BLOCK_INVALID ||
+                                       lba != msb->cache_block_lba))
+               return 0;
+
+       /* If we need to write different block */
+       if (msb->cache_block_lba != MS_BLOCK_INVALID &&
+                                               lba != msb->cache_block_lba) {
+               dbg_verbose("first flush the cache");
+               error = msb_cache_flush(msb);
+               if (error)
+                       return error;
+       }
+
+       if (msb->cache_block_lba == MS_BLOCK_INVALID) {
+               msb->cache_block_lba = lba;
+               mod_timer(&msb->cache_flush_timer,
+                       jiffies + msecs_to_jiffies(cache_flush_timeout));
+       }
+
+       dbg_verbose("Write of LBA %d page %d to cache", lba, page);
+
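+       /* Carve a one-page window at @offset out of the caller's sg list,
+        * then copy that page into the cache buffer */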
+       sg_init_table(sg_tmp, ARRAY_SIZE(sg_tmp));
+       msb_sg_copy(sg, sg_tmp, ARRAY_SIZE(sg_tmp), offset, msb->page_size);
+
+       sg_copy_to_buffer(sg_tmp, sg_nents(sg_tmp),
+               msb->cache + page * msb->page_size, msb->page_size);
+
+       set_bit(page, &msb->valid_cache_bitmap);
+       return 0;
+}
+
+static int msb_cache_read(struct msb_data *msb, int lba,
+                               int page, struct scatterlist *sg, int offset)
+{
+       int pba = msb->lba_to_pba_table[lba];
+       struct scatterlist sg_tmp[10];
+       int error = 0;
+
+       if (lba == msb->cache_block_lba &&
+                       test_bit(page, &msb->valid_cache_bitmap)) {
+
+               dbg_verbose("Read of LBA %d (pba %d) sector %d from cache",
+                                                       lba, pba, page);
+
+               sg_init_table(sg_tmp, ARRAY_SIZE(sg_tmp));
+               msb_sg_copy(sg, sg_tmp, ARRAY_SIZE(sg_tmp), offset, msb->page_size);
+               sg_copy_from_buffer(sg_tmp, sg_nents(sg_tmp),
+                       msb->cache + msb->page_size * page,
+                                                       msb->page_size);
+       } else {
+               dbg_verbose("Read of LBA %d (pba %d) sector %d from device",
+                                                       lba, pba, page);
+
+               error = msb_read_page(msb, pba, page, NULL, sg, offset);
+               if (error)
+                       return error;
+
+               msb_cache_write(msb, lba, page, true, sg, offset);
+       }
+       return error;
+}
+
+/* Emulated geometry table
+ * The exact contents of this table aren't that important;
+ * one could put different values here, provided that they still
+ * cover the whole disk.
+ * The 64 MB entry is what Windows reports for my 64M memstick */
+
+static const struct chs_entry chs_table[] = {
+/*        size sectors cylinders  heads */
+       { 4,    16,    247,       2  },
+       { 8,    16,    495,       2  },
+       { 16,   16,    495,       4  },
+       { 32,   16,    991,       4  },
+       { 64,   16,    991,       8  },
+       {128,   16,    991,       16 },
+       { 0 }
+};
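+
+/* Rough sanity check of the entries above (not from the MS spec): for the
+ * 64 MB card, 991 cylinders * 8 heads * 16 sectors * 512 bytes is about
+ * 62 MiB, a bit below the raw size because the FTL keeps some physical
+ * blocks in reserve. */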
+
+/* Load information about the card */
+static int msb_init_card(struct memstick_dev *card)
+{
+       struct msb_data *msb = memstick_get_drvdata(card);
+       struct memstick_host *host = card->host;
+       struct ms_boot_page *boot_block;
+       int error = 0, i, raw_size_in_megs;
+
+       msb->caps = 0;
+
+       /* ROM, RO and WP class cards are all read-only */
+       if (card->id.class >= MEMSTICK_CLASS_ROM &&
+                               card->id.class <= MEMSTICK_CLASS_WP)
+               msb->read_only = true;
+
+       msb->state = -1;
+       error = msb_reset(msb, false);
+       if (error)
+               return error;
+
+       /* Due to a bug in the JMicron driver written by Alex Dubov,
+        * its serial mode barely works, so we switch to parallel
+        * mode right away */
+       if (host->caps & MEMSTICK_CAP_PAR4)
+               msb_switch_to_parallel(msb);
+
+       msb->page_size = sizeof(struct ms_boot_page);
+
+       /* Read the boot page */
+       error = msb_read_boot_blocks(msb);
+       if (error)
+               return -EIO;
+
+       boot_block = &msb->boot_page[0];
+
+       /* Save interesting attributes from the boot page */
+       msb->block_count = boot_block->attr.number_of_blocks;
+       msb->page_size = boot_block->attr.page_size;
+
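+       /* attr.block_size is presumably given in KiB; with the usual
+        * 512-byte MemoryStick pages that works out to block_size * 2
+        * pages per block */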
+       msb->pages_in_block = boot_block->attr.block_size * 2;
+       msb->block_size = msb->page_size * msb->pages_in_block;
+
+       if (msb->page_size > PAGE_SIZE) {
+               /* This isn't supported by Linux at all anyway */
+               dbg("device page size %d isn't supported", msb->page_size);
+               return -EINVAL;
+       }
+
+       msb->block_buffer = kzalloc(msb->block_size, GFP_KERNEL);
+       if (!msb->block_buffer)
+               return -ENOMEM;
+
+       raw_size_in_megs = (msb->block_size * msb->block_count) >> 20;
+
+       for (i = 0 ; chs_table[i].size ; i++) {
+
+               if (chs_table[i].size != raw_size_in_megs)
+                       continue;
+
+               msb->geometry.cylinders = chs_table[i].cyl;
+               msb->geometry.heads = chs_table[i].head;
+               msb->geometry.sectors = chs_table[i].sec;
+               break;
+       }
+
+       if (boot_block->attr.transfer_supporting == 1)
+               msb->caps |= MEMSTICK_CAP_PAR4;
+
+       if (boot_block->attr.device_type & 0x03)
+               msb->read_only = true;
+
+       dbg("Total block count = %d", msb->block_count);
+       dbg("Each block consists of %d pages", msb->pages_in_block);
+       dbg("Page size = %d bytes", msb->page_size);
+       dbg("Parallel mode supported: %d", !!(msb->caps & MEMSTICK_CAP_PAR4));
+       dbg("Read only: %d", msb->read_only);
+
+#if 0
+       /* Now we can switch the interface */
+       if (host->caps & msb->caps & MEMSTICK_CAP_PAR4)
+               msb_switch_to_parallel(msb);
+#endif
+
+       error = msb_cache_init(msb);
+       if (error)
+               return error;
+
+       error = msb_ftl_initialize(msb);
+       if (error)
+               return error;
+
+       /* Read the bad block table */
+       error = msb_read_bad_block_table(msb, 0);
+
+       if (error && error != -ENOMEM) {
+               dbg("failed to read bad block table from primary boot block,"
+                                                       " trying from backup");
+               error = msb_read_bad_block_table(msb, 1);
+       }
+
+       if (error)
+               return error;
+
+       /* *drum roll* Scan the media */
+       error = msb_ftl_scan(msb);
+       if (error) {
+               pr_err("Scan of media failed");
+               return error;
+       }
+
+       return 0;
+}
+
+static int msb_do_write_request(struct msb_data *msb, int lba,
+       int page, struct scatterlist *sg, size_t len, int *successfully_written)
+{
+       int error = 0;
+       off_t offset = 0;
+       *successfully_written = 0;
+
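+       /* Whole block-aligned chunks bypass the cache and go straight to
+        * msb_update_block(); everything else is staged page by page in
+        * the write cache */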
+       while (offset < len) {
+               if (page == 0 && len - offset >= msb->block_size) {
+
+                       if (msb->cache_block_lba == lba)
+                               msb_cache_discard(msb);
+
+                       dbg_verbose("Writing whole lba %d", lba);
+                       error = msb_update_block(msb, lba, sg, offset);
+                       if (error)
+                               return error;
+
+                       offset += msb->block_size;
+                       *successfully_written += msb->block_size;
+                       lba++;
+                       continue;
+               }
+
+               error = msb_cache_write(msb, lba, page, false, sg, offset);
+               if (error)
+                       return error;
+
+               offset += msb->page_size;
+               *successfully_written += msb->page_size;
+
+               page++;
+               if (page == msb->pages_in_block) {
+                       page = 0;
+                       lba++;
+               }
+       }
+       return 0;
+}
+
+static int msb_do_read_request(struct msb_data *msb, int lba,
+               int page, struct scatterlist *sg, int len, int *successfully_read)
+{
+       int error = 0;
+       int offset = 0;
+       *successfully_read = 0;
+
+       while (offset < len) {
+
+               error = msb_cache_read(msb, lba, page, sg, offset);
+               if (error)
+                       return error;
+
+               offset += msb->page_size;
+               *successfully_read += msb->page_size;
+
+               page++;
+               if (page == msb->pages_in_block) {
+                       page = 0;
+                       lba++;
+               }
+       }
+       return 0;
+}
+
+static void msb_io_work(struct work_struct *work)
+{
+       struct msb_data *msb = container_of(work, struct msb_data, io_work);
+       int page, error, len;
+       sector_t lba;
+       unsigned long flags;
+       struct scatterlist *sg = msb->prealloc_sg;
+
+       dbg_verbose("IO: work started");
+
+       while (1) {
+               spin_lock_irqsave(&msb->q_lock, flags);
+
+               if (msb->need_flush_cache) {
+                       msb->need_flush_cache = false;
+                       spin_unlock_irqrestore(&msb->q_lock, flags);
+                       msb_cache_flush(msb);
+                       continue;
+               }
+
+               if (!msb->req) {
+                       msb->req = blk_fetch_request(msb->queue);
+                       if (!msb->req) {
+                               dbg_verbose("IO: no more requests, exiting");
+                               spin_unlock_irqrestore(&msb->q_lock, flags);
+                               return;
+                       }
+               }
+
+               spin_unlock_irqrestore(&msb->q_lock, flags);
+
+               /* If card was removed meanwhile */
+               if (!msb->req)
+                       return;
+
+               /* process the request */
+               dbg_verbose("IO: processing new request");
+               blk_rq_map_sg(msb->queue, msb->req, sg);
+
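+               /* Convert the request's 512-byte sector position into a
+                * (logical block, page) pair: scale sectors to device
+                * pages, then split off the page index within the block */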
+               lba = blk_rq_pos(msb->req);
+
+               sector_div(lba, msb->page_size / 512);
+               page = do_div(lba, msb->pages_in_block);
+
+               if (rq_data_dir(msb->req) == READ)
+                       error = msb_do_read_request(msb, lba, page, sg,
+                               blk_rq_bytes(msb->req), &len);
+               else
+                       error = msb_do_write_request(msb, lba, page, sg,
+                               blk_rq_bytes(msb->req), &len);
+
+               spin_lock_irqsave(&msb->q_lock, flags);
+
+               if (len && !__blk_end_request(msb->req, 0, len))
+                       msb->req = NULL;
+
+               if (error && msb->req) {
+                       dbg_verbose("IO: ending one sector "
+                                       "of the request with error");
+                       if (!__blk_end_request(msb->req, error, msb->page_size))
+                               msb->req = NULL;
+               }
+
+               if (msb->req)
+                       dbg_verbose("IO: request still pending");
+
+               spin_unlock_irqrestore(&msb->q_lock, flags);
+       }
+}
+
+static DEFINE_IDR(msb_disk_idr); /* set of used disk numbers */
+static DEFINE_MUTEX(msb_disk_lock); /* protects against races in open/release */
+
+static int msb_bd_open(struct block_device *bdev, fmode_t mode)
+{
+       struct gendisk *disk = bdev->bd_disk;
+       struct msb_data *msb = disk->private_data;
+
+       dbg_verbose("block device open");
+
+       mutex_lock(&msb_disk_lock);
+
+       if (msb && msb->card)
+               msb->usage_count++;
+
+       mutex_unlock(&msb_disk_lock);
+       return 0;
+}
+
+static void msb_data_clear(struct msb_data *msb)
+{
+       kfree(msb->boot_page);
+       kfree(msb->used_blocks_bitmap);
+       kfree(msb->lba_to_pba_table);
+       kfree(msb->cache);
+       msb->card = NULL;
+}
+
+static int msb_disk_release(struct gendisk *disk)
+{
+       struct msb_data *msb = disk->private_data;
+
+       dbg_verbose("block device release");
+       mutex_lock(&msb_disk_lock);
+
+       if (msb) {
+               if (msb->usage_count)
+                       msb->usage_count--;
+
+               if (!msb->usage_count) {
+                       kfree(msb);
+                       disk->private_data = NULL;
+                       idr_remove(&msb_disk_idr, msb->disk_id);
+                       put_disk(disk);
+               }
+       }
+       mutex_unlock(&msb_disk_lock);
+       return 0;
+}
+
+static int msb_bd_release(struct gendisk *disk, fmode_t mode)
+{
+       return msb_disk_release(disk);
+}
+
+static int msb_bd_getgeo(struct block_device *bdev,
+                                struct hd_geometry *geo)
+{
+       struct msb_data *msb = bdev->bd_disk->private_data;
+       *geo = msb->geometry;
+       return 0;
+}
+
+static int msb_prepare_req(struct request_queue *q, struct request *req)
+{
+       if (req->cmd_type != REQ_TYPE_FS &&
+                               req->cmd_type != REQ_TYPE_BLOCK_PC) {
+               blk_dump_rq_flags(req, "MS unsupported request");
+               return BLKPREP_KILL;
+       }
+       req->cmd_flags |= REQ_DONTPREP;
+       return BLKPREP_OK;
+}
+
+static void msb_submit_req(struct request_queue *q)
+{
+       struct memstick_dev *card = q->queuedata;
+       struct msb_data *msb = memstick_get_drvdata(card);
+       struct request *req = NULL;
+
+       dbg_verbose("Submit request");
+
+       if (msb->card_dead) {
+               dbg("Refusing requests on removed card");
+
+               WARN_ON(!msb->io_queue_stopped);
+
+               while ((req = blk_fetch_request(q)) != NULL)
+                       __blk_end_request_all(req, -ENODEV);
+               return;
+       }
+
+       if (msb->req)
+               return;
+
+       if (!msb->io_queue_stopped)
+               queue_work(msb->io_queue, &msb->io_work);
+}
+
+static int msb_check_card(struct memstick_dev *card)
+{
+       struct msb_data *msb = memstick_get_drvdata(card);
+       return (msb->card_dead == 0);
+}
+
+static void msb_stop(struct memstick_dev *card)
+{
+       struct msb_data *msb = memstick_get_drvdata(card);
+       unsigned long flags;
+
+       dbg("Stopping all msblock IO");
+
+       spin_lock_irqsave(&msb->q_lock, flags);
+       blk_stop_queue(msb->queue);
+       msb->io_queue_stopped = true;
+       spin_unlock_irqrestore(&msb->q_lock, flags);
+
+       del_timer_sync(&msb->cache_flush_timer);
+       flush_workqueue(msb->io_queue);
+
+       if (msb->req) {
+               spin_lock_irqsave(&msb->q_lock, flags);
+               blk_requeue_request(msb->queue, msb->req);
+               msb->req = NULL;
+               spin_unlock_irqrestore(&msb->q_lock, flags);
+       }
+}
+
+static void msb_start(struct memstick_dev *card)
+{
+       struct msb_data *msb = memstick_get_drvdata(card);
+       unsigned long flags;
+
+       dbg("Resuming IO from msblock");
+
+       msb_invalidate_reg_window(msb);
+
+       spin_lock_irqsave(&msb->q_lock, flags);
+       if (!msb->io_queue_stopped || msb->card_dead) {
+               spin_unlock_irqrestore(&msb->q_lock, flags);
+               return;
+       }
+       spin_unlock_irqrestore(&msb->q_lock, flags);
+
+       /* Kick the cache flush anyway, it's harmless */
+       msb->need_flush_cache = true;
+       msb->io_queue_stopped = false;
+
+       spin_lock_irqsave(&msb->q_lock, flags);
+       blk_start_queue(msb->queue);
+       spin_unlock_irqrestore(&msb->q_lock, flags);
+
+       queue_work(msb->io_queue, &msb->io_work);
+}
+
+static const struct block_device_operations msb_bdops = {
+       .open    = msb_bd_open,
+       .release = msb_bd_release,
+       .getgeo  = msb_bd_getgeo,
+       .owner   = THIS_MODULE
+};
+
+/* Registers the block device */
+static int msb_init_disk(struct memstick_dev *card)
+{
+       struct msb_data *msb = memstick_get_drvdata(card);
+       struct memstick_host *host = card->host;
+       int rc;
+       u64 limit = BLK_BOUNCE_HIGH;
+       unsigned long capacity;
+
+       if (host->dev.dma_mask && *(host->dev.dma_mask))
+               limit = *(host->dev.dma_mask);
+
+       mutex_lock(&msb_disk_lock);
+       if (!idr_pre_get(&msb_disk_idr, GFP_KERNEL)) {
+               mutex_unlock(&msb_disk_lock);
+               return -ENOMEM;
+       }
+       rc = idr_get_new(&msb_disk_idr, card, &msb->disk_id);
+       mutex_unlock(&msb_disk_lock);
+
+       if (rc)
+               return rc;
+
+       msb->disk = alloc_disk(0);
+       if (!msb->disk) {
+               rc = -ENOMEM;
+               goto out_release_id;
+       }
+
+       msb->queue = blk_init_queue(msb_submit_req, &msb->q_lock);
+       if (!msb->queue) {
+               rc = -ENOMEM;
+               goto out_put_disk;
+       }
+
+       msb->queue->queuedata = card;
+       blk_queue_prep_rq(msb->queue, msb_prepare_req);
+
+       blk_queue_bounce_limit(msb->queue, limit);
+       blk_queue_max_hw_sectors(msb->queue, MS_BLOCK_MAX_PAGES);
+       blk_queue_max_segments(msb->queue, MS_BLOCK_MAX_SEGS);
+       blk_queue_max_segment_size(msb->queue,
+                                  MS_BLOCK_MAX_PAGES * msb->page_size);
+       blk_queue_logical_block_size(msb->queue, msb->page_size);
+
+       sprintf(msb->disk->disk_name, "msblk%d", msb->disk_id);
+       msb->disk->fops = &msb_bdops;
+       msb->disk->private_data = msb;
+       msb->disk->queue = msb->queue;
+       msb->disk->driverfs_dev = &card->dev;
+       msb->disk->flags |= GENHD_FL_EXT_DEVT;
+
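+       /* Capacity in 512-byte sectors: logical blocks x pages per block
+        * x sectors per page */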
+       capacity = msb->pages_in_block * msb->logical_block_count;
+       capacity *= (msb->page_size / 512);
+       set_capacity(msb->disk, capacity);
+       dbg("Set total disk size to %lu sectors", capacity);
+
+       msb->usage_count = 1;
+       msb->io_queue = alloc_ordered_workqueue("ms_block", WQ_MEM_RECLAIM);
+       if (!msb->io_queue) {
+               rc = -ENOMEM;
+               goto out_cleanup_queue;
+       }
+       INIT_WORK(&msb->io_work, msb_io_work);
+       sg_init_table(msb->prealloc_sg, MS_BLOCK_MAX_SEGS+1);
+
+       if (msb->read_only)
+               set_disk_ro(msb->disk, 1);
+
+       msb_start(card);
+       add_disk(msb->disk);
+       dbg("Disk added");
+       return 0;
+
+out_cleanup_queue:
+       blk_cleanup_queue(msb->queue);
+out_put_disk:
+       put_disk(msb->disk);
+out_release_id:
+       mutex_lock(&msb_disk_lock);
+       idr_remove(&msb_disk_idr, msb->disk_id);
+       mutex_unlock(&msb_disk_lock);
+       return rc;
+}
+
+static int msb_probe(struct memstick_dev *card)
+{
+       struct msb_data *msb;
+       int rc = 0;
+
+       msb = kzalloc(sizeof(struct msb_data), GFP_KERNEL);
+       if (!msb)
+               return -ENOMEM;
+       memstick_set_drvdata(card, msb);
+       msb->card = card;
+       spin_lock_init(&msb->q_lock);
+
+       rc = msb_init_card(card);
+       if (rc)
+               goto out_free;
+
+       rc = msb_init_disk(card);
+       if (!rc) {
+               card->check = msb_check_card;
+               card->stop = msb_stop;
+               card->start = msb_start;
+               return 0;
+       }
+out_free:
+       memstick_set_drvdata(card, NULL);
+       msb_data_clear(msb);
+       kfree(msb);
+       return rc;
+}
+
+static void msb_remove(struct memstick_dev *card)
+{
+       struct msb_data *msb = memstick_get_drvdata(card);
+       unsigned long flags;
+
+       if (!msb->io_queue_stopped)
+               msb_stop(card);
+
+       dbg("Removing the disk device");
+
+       /* Take care of unhandled + new requests from now on */
+       spin_lock_irqsave(&msb->q_lock, flags);
+       msb->card_dead = true;
+       blk_start_queue(msb->queue);
+       spin_unlock_irqrestore(&msb->q_lock, flags);
+
+       /* Remove the disk */
+       del_gendisk(msb->disk);
+       blk_cleanup_queue(msb->queue);
+       msb->queue = NULL;
+
+       mutex_lock(&msb_disk_lock);
+       msb_data_clear(msb);
+       mutex_unlock(&msb_disk_lock);
+
+       msb_disk_release(msb->disk);
+       memstick_set_drvdata(card, NULL);
+}
+
+#ifdef CONFIG_PM
+
+static int msb_suspend(struct memstick_dev *card, pm_message_t state)
+{
+       msb_stop(card);
+       return 0;
+}
+
+static int msb_resume(struct memstick_dev *card)
+{
+       struct msb_data *msb = memstick_get_drvdata(card);
+       struct msb_data *new_msb = NULL;
+       bool card_dead = true;
+
+#ifndef CONFIG_MEMSTICK_UNSAFE_RESUME
+       msb->card_dead = true;
+       return 0;
+#endif
+       mutex_lock(&card->host->lock);
+
+       new_msb = kzalloc(sizeof(struct msb_data), GFP_KERNEL);
+       if (!new_msb)
+               goto out;
+
+       new_msb->card = card;
+       memstick_set_drvdata(card, new_msb);
+       spin_lock_init(&new_msb->q_lock);
+       sg_init_table(msb->prealloc_sg, MS_BLOCK_MAX_SEGS+1);
+
+       if (msb_init_card(card))
+               goto out;
+
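+       /* Compare the re-read media layout and FTL state against the
+        * pre-suspend snapshot; any mismatch means the card was swapped
+        * while we slept */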
+       if (msb->block_size != new_msb->block_size)
+               goto out;
+
+       if (memcmp(msb->boot_page, new_msb->boot_page,
+                                       sizeof(struct ms_boot_page)))
+               goto out;
+
+       if (msb->logical_block_count != new_msb->logical_block_count ||
+               memcmp(msb->lba_to_pba_table, new_msb->lba_to_pba_table,
+                                               msb->logical_block_count))
+               goto out;
+
+       if (msb->block_count != new_msb->block_count ||
+               memcmp(msb->used_blocks_bitmap, new_msb->used_blocks_bitmap,
+                                                       msb->block_count / 8))
+               goto out;
+
+       card_dead = false;
+out:
+       if (card_dead)
+               dbg("Card was removed/replaced during suspend");
+
+       msb->card_dead = card_dead;
+       memstick_set_drvdata(card, msb);
+
+       if (new_msb) {
+               msb_data_clear(new_msb);
+               kfree(new_msb);
+       }
+
+       msb_start(card);
+       mutex_unlock(&card->host->lock);
+       return 0;
+}
+#else
+
+#define msb_suspend NULL
+#define msb_resume NULL
+
+#endif /* CONFIG_PM */
+
+static struct memstick_device_id msb_id_tbl[] = {
+       {MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_LEGACY, MEMSTICK_CATEGORY_STORAGE,
+        MEMSTICK_CLASS_FLASH},
+
+       {MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_LEGACY, MEMSTICK_CATEGORY_STORAGE,
+        MEMSTICK_CLASS_ROM},
+
+       {MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_LEGACY, MEMSTICK_CATEGORY_STORAGE,
+        MEMSTICK_CLASS_RO},
+
+       {MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_LEGACY, MEMSTICK_CATEGORY_STORAGE,
+        MEMSTICK_CLASS_WP},
+
+       {MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_DUO, MEMSTICK_CATEGORY_STORAGE_DUO,
+        MEMSTICK_CLASS_DUO},
+       {}
+};
+MODULE_DEVICE_TABLE(memstick, msb_id_tbl);
+
+static struct memstick_driver msb_driver = {
+       .driver = {
+               .name  = DRIVER_NAME,
+               .owner = THIS_MODULE
+       },
+       .id_table = msb_id_tbl,
+       .probe    = msb_probe,
+       .remove   = msb_remove,
+       .suspend  = msb_suspend,
+       .resume   = msb_resume
+};
+
+static int major;
+
+static int __init msb_init(void)
+{
+       int rc = register_blkdev(0, DRIVER_NAME);
+
+       if (rc < 0) {
+               pr_err("failed to register major (error %d)\n", rc);
+               return rc;
+       }
+
+       major = rc;
+       rc = memstick_register_driver(&msb_driver);
+       if (rc) {
+               unregister_blkdev(major, DRIVER_NAME);
+               pr_err("failed to register memstick driver (error %d)\n", rc);
+       }
+
+       return rc;
+}
+
+static void __exit msb_exit(void)
+{
+       memstick_unregister_driver(&msb_driver);
+       unregister_blkdev(major, DRIVER_NAME);
+       idr_destroy(&msb_disk_idr);
+}
+
+module_init(msb_init);
+module_exit(msb_exit);
+
+module_param(cache_flush_timeout, int, S_IRUGO);
+MODULE_PARM_DESC(cache_flush_timeout,
+                               "Cache flush timeout in msec (1000 default)");
+module_param(debug, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(debug, "Debug level (0-2)");
+
+module_param(verify_writes, bool, S_IRUGO);
+MODULE_PARM_DESC(verify_writes, "Read back and check all data that is written");
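+/* Example (hypothetical values): modprobe ms_block debug=1 verify_writes=1 */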
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Maxim Levitsky");
+MODULE_DESCRIPTION("Sony MemoryStick block device driver");
diff --git a/drivers/memstick/core/ms_block.h b/drivers/memstick/core/ms_block.h
new file mode 100644 (file)
index 0000000..6b5b83e
--- /dev/null
@@ -0,0 +1,293 @@
+/*
+ *  ms_block.h - Sony MemoryStick (legacy) storage support
+ *
+ *  Copyright (C) 2010 Maxim Levitsky <maximlevitsky@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Minor portions of the driver are copied from mspro_block.c which is
+ * Copyright (C) 2007 Alex Dubov <oakad@yahoo.com>
+ *
+ * Also, the MS structures were copied from an old broken driver by the
+ * same author; these probably come from the MS spec.
+ *
+ */
+
+#ifndef MS_BLOCK_NEW_H
+#define MS_BLOCK_NEW_H
+
+#define MS_BLOCK_MAX_SEGS      32
+#define MS_BLOCK_MAX_PAGES     ((2 << 16) - 1)
+
+#define MS_BLOCK_MAX_BOOT_ADDR 0x000c
+#define MS_BLOCK_BOOT_ID       0x0001
+#define MS_BLOCK_INVALID       0xffff
+#define MS_MAX_ZONES           16
+#define MS_BLOCKS_IN_ZONE      512
+
+#define MS_BLOCK_MAP_LINE_SZ   16
+#define MS_BLOCK_PART_SHIFT    3
+
+#define MEMSTICK_UNCORR_ERROR (MEMSTICK_STATUS1_UCFG | \
+               MEMSTICK_STATUS1_UCEX | MEMSTICK_STATUS1_UCDT)
+
+#define MEMSTICK_CORR_ERROR (MEMSTICK_STATUS1_FGER | MEMSTICK_STATUS1_EXER | \
+       MEMSTICK_STATUS1_DTER)
+
+#define MEMSTICK_INT_ERROR (MEMSTICK_INT_CMDNAK | MEMSTICK_INT_ERR)
+
+#define MEMSTICK_OVERWRITE_FLAG_NORMAL \
+       (MEMSTICK_OVERWRITE_PGST1 | \
+       MEMSTICK_OVERWRITE_PGST0  | \
+       MEMSTICK_OVERWRITE_BKST)
+
+#define MEMSTICK_OV_PG_NORMAL \
+       (MEMSTICK_OVERWRITE_PGST1 | MEMSTICK_OVERWRITE_PGST0)
+
+#define MEMSTICK_MANAGMENT_FLAG_NORMAL \
+       (MEMSTICK_MANAGEMENT_SYSFLG |  \
+       MEMSTICK_MANAGEMENT_SCMS1   |  \
+       MEMSTICK_MANAGEMENT_SCMS0)
+
+struct ms_boot_header {
+       unsigned short block_id;
+       unsigned short format_reserved;
+       unsigned char  reserved0[184];
+       unsigned char  data_entry;
+       unsigned char  reserved1[179];
+} __packed;
+
+struct ms_system_item {
+       unsigned int  start_addr;
+       unsigned int  data_size;
+       unsigned char data_type_id;
+       unsigned char reserved[3];
+} __packed;
+
+struct ms_system_entry {
+       struct ms_system_item disabled_block;
+       struct ms_system_item cis_idi;
+       unsigned char         reserved[24];
+} __packed;
+
+struct ms_boot_attr_info {
+       unsigned char      memorystick_class;
+       unsigned char      format_unique_value1;
+       unsigned short     block_size;
+       unsigned short     number_of_blocks;
+       unsigned short     number_of_effective_blocks;
+       unsigned short     page_size;
+       unsigned char      extra_data_size;
+       unsigned char      format_unique_value2;
+       unsigned char      assembly_time[8];
+       unsigned char      format_unique_value3;
+       unsigned char      serial_number[3];
+       unsigned char      assembly_manufacturer_code;
+       unsigned char      assembly_model_code[3];
+       unsigned short     memory_manufacturer_code;
+       unsigned short     memory_device_code;
+       unsigned short     implemented_capacity;
+       unsigned char      format_unique_value4[2];
+       unsigned char      vcc;
+       unsigned char      vpp;
+       unsigned short     controller_number;
+       unsigned short     controller_function;
+       unsigned char      reserved0[9];
+       unsigned char      transfer_supporting;
+       unsigned short     format_unique_value5;
+       unsigned char      format_type;
+       unsigned char      memorystick_application;
+       unsigned char      device_type;
+       unsigned char      reserved1[22];
+       unsigned char      format_unique_value6[2];
+       unsigned char      reserved2[15];
+} __packed;
+
+struct ms_cis_idi {
+       unsigned short general_config;
+       unsigned short logical_cylinders;
+       unsigned short reserved0;
+       unsigned short logical_heads;
+       unsigned short track_size;
+       unsigned short page_size;
+       unsigned short pages_per_track;
+       unsigned short msw;
+       unsigned short lsw;
+       unsigned short reserved1;
+       unsigned char  serial_number[20];
+       unsigned short buffer_type;
+       unsigned short buffer_size_increments;
+       unsigned short long_command_ecc;
+       unsigned char  firmware_version[28];
+       unsigned char  model_name[18];
+       unsigned short reserved2[5];
+       unsigned short pio_mode_number;
+       unsigned short dma_mode_number;
+       unsigned short field_validity;
+       unsigned short current_logical_cylinders;
+       unsigned short current_logical_heads;
+       unsigned short current_pages_per_track;
+       unsigned int   current_page_capacity;
+       unsigned short multiple_page_setting;
+       unsigned int   addressable_pages;
+       unsigned short single_word_dma;
+       unsigned short multi_word_dma;
+       unsigned char  reserved3[128];
+} __packed;
+
+struct ms_boot_page {
+       struct ms_boot_header    header;
+       struct ms_system_entry   entry;
+       struct ms_boot_attr_info attr;
+} __packed;
+
+struct msb_data {
+       unsigned int                    usage_count;
+       struct memstick_dev             *card;
+       struct gendisk                  *disk;
+       struct request_queue            *queue;
+       spinlock_t                      q_lock;
+       struct hd_geometry              geometry;
+       struct attribute_group          attr_group;
+       struct request                  *req;
+       int                             caps;
+       int                             disk_id;
+
+       /* IO */
+       struct workqueue_struct         *io_queue;
+       bool                            io_queue_stopped;
+       struct work_struct              io_work;
+       struct scatterlist              prealloc_sg[MS_BLOCK_MAX_SEGS+1];
+       bool                            card_dead;
+
+       /* Media properties */
+       struct ms_boot_page             *boot_page;
+       u16                             boot_block_locations[2];
+       int                             boot_block_count;
+
+       bool                            read_only;
+       unsigned short                  page_size;
+       int                             block_size;
+       int                             pages_in_block;
+       int                             zone_count;
+       int                             block_count;
+       int                             logical_block_count;
+
+       /* FTL tables */
+       unsigned long                   *used_blocks_bitmap;
+       unsigned long                   *erased_blocks_bitmap;
+       u16                             *lba_to_pba_table;
+       int                             free_block_count[MS_MAX_ZONES];
+       bool                            ftl_initialized;
+
+       /* Cache */
+       unsigned char                   *cache;
+       unsigned long                   valid_cache_bitmap;
+       int                             cache_block_lba;
+       bool                            need_flush_cache;
+       struct timer_list               cache_flush_timer;
+
+       /* Preallocated buffers */
+       unsigned char                   *block_buffer;
+       struct scatterlist              sg[MS_BLOCK_MAX_SEGS+1];
+
+       /* handler's local data */
+       struct ms_register_addr         reg_addr;
+       bool                            addr_valid;
+
+       u8                              command_value;
+       bool                            command_need_oob;
+       struct scatterlist              *current_sg;
+       int                             current_sg_offset;
+
+       struct ms_register              regs;
+       int                             current_page;
+
+       int                             state;
+       int                             exit_error;
+       bool                            int_polling;
+       unsigned long                   int_timeout;
+};
+
+enum msb_readpage_states {
+       MSB_RP_SEND_BLOCK_ADDRESS = 0,
+       MSB_RP_SEND_READ_COMMAND,
+
+       MSB_RP_SEND_INT_REQ,
+       MSB_RP_RECEIVE_INT_REQ_RESULT,
+
+       MSB_RP_SEND_READ_STATUS_REG,
+       MSB_RP_RECIVE_STATUS_REG,
+
+       MSB_RP_SEND_OOB_READ,
+       MSB_RP_RECEIVE_OOB_READ,
+
+       MSB_RP_SEND_READ_DATA,
+       MSB_RP_RECEIVE_READ_DATA,
+};
+
+enum msb_write_block_states {
+       MSB_WB_SEND_WRITE_PARAMS = 0,
+       MSB_WB_SEND_WRITE_OOB,
+       MSB_WB_SEND_WRITE_COMMAND,
+
+       MSB_WB_SEND_INT_REQ,
+       MSB_WB_RECEIVE_INT_REQ,
+
+       MSB_WB_SEND_WRITE_DATA,
+       MSB_WB_RECEIVE_WRITE_CONFIRMATION,
+};
+
+enum msb_send_command_states {
+       MSB_SC_SEND_WRITE_PARAMS,
+       MSB_SC_SEND_WRITE_OOB,
+       MSB_SC_SEND_COMMAND,
+
+       MSB_SC_SEND_INT_REQ,
+       MSB_SC_RECEIVE_INT_REQ,
+};
+
+enum msb_reset_states {
+       MSB_RS_SEND,
+       MSB_RS_CONFIRM,
+};
+
+enum msb_par_switch_states {
+       MSB_PS_SEND_SWITCH_COMMAND,
+       MSB_PS_SWICH_HOST,
+       MSB_PS_CONFIRM,
+};
+
+struct chs_entry {
+       unsigned long size;
+       unsigned char sec;
+       unsigned short cyl;
+       unsigned char head;
+};
+
+static int msb_reset(struct msb_data *msb, bool full);
+
+static int h_msb_default_bad(struct memstick_dev *card,
+                                               struct memstick_request **mrq);
+
+#define DRIVER_NAME "ms_block"
+
+#define __dbg(level, format, ...) \
+       do { \
+               if (debug >= level) \
+                       pr_err(format "\n", ## __VA_ARGS__); \
+       } while (0)
+
+#define dbg(format, ...)               __dbg(1, format, ## __VA_ARGS__)
+#define dbg_verbose(format, ...)       __dbg(2, format, ## __VA_ARGS__)
+
+#endif
index 29408d46a6d9227130fab6aeb180b49ca05baf16..57d7674c50133c67430019efb98966b6013fa9e3 100644 (file)
@@ -553,14 +553,6 @@ static void iblock_complete_cmd(struct se_cmd *cmd)
        kfree(ibr);
 }
 
-static void iblock_bio_destructor(struct bio *bio)
-{
-       struct se_cmd *cmd = bio->bi_private;
-       struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr;
-
-       bio_free(bio, ib_dev->ibd_bio_set);
-}
-
 static struct bio *
 iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num)
 {
@@ -582,7 +574,6 @@ iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num)
 
        bio->bi_bdev = ib_dev->ibd_bd;
        bio->bi_private = cmd;
-       bio->bi_destructor = iblock_bio_destructor;
        bio->bi_end_io = &iblock_bio_done;
        bio->bi_sector = lba;
        return bio;
index e85c04b9f61c59705d297c5eb137a2b046865e88..a3f28f331b2bba7e6653da30dce92adba5d97140 100644 (file)
@@ -70,23 +70,25 @@ static inline int use_bip_pool(unsigned int idx)
 }
 
 /**
- * bio_integrity_alloc_bioset - Allocate integrity payload and attach it to bio
+ * bio_integrity_alloc - Allocate integrity payload and attach it to bio
  * @bio:       bio to attach integrity metadata to
  * @gfp_mask:  Memory allocation mask
  * @nr_vecs:   Number of integrity metadata scatter-gather elements
- * @bs:                bio_set to allocate from
  *
  * Description: This function prepares a bio for attaching integrity
  * metadata.  nr_vecs specifies the maximum number of pages containing
  * integrity metadata that can be attached.
  */
-struct bio_integrity_payload *bio_integrity_alloc_bioset(struct bio *bio,
-                                                        gfp_t gfp_mask,
-                                                        unsigned int nr_vecs,
-                                                        struct bio_set *bs)
+struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio,
+                                                 gfp_t gfp_mask,
+                                                 unsigned int nr_vecs)
 {
        struct bio_integrity_payload *bip;
        unsigned int idx = vecs_to_idx(nr_vecs);
+       struct bio_set *bs = bio->bi_pool;
+
+       if (!bs)
+               bs = fs_bio_set;
 
        BUG_ON(bio == NULL);
        bip = NULL;
@@ -114,37 +116,22 @@ struct bio_integrity_payload *bio_integrity_alloc_bioset(struct bio *bio,
 
        return bip;
 }
-EXPORT_SYMBOL(bio_integrity_alloc_bioset);
-
-/**
- * bio_integrity_alloc - Allocate integrity payload and attach it to bio
- * @bio:       bio to attach integrity metadata to
- * @gfp_mask:  Memory allocation mask
- * @nr_vecs:   Number of integrity metadata scatter-gather elements
- *
- * Description: This function prepares a bio for attaching integrity
- * metadata.  nr_vecs specifies the maximum number of pages containing
- * integrity metadata that can be attached.
- */
-struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio,
-                                                 gfp_t gfp_mask,
-                                                 unsigned int nr_vecs)
-{
-       return bio_integrity_alloc_bioset(bio, gfp_mask, nr_vecs, fs_bio_set);
-}
 EXPORT_SYMBOL(bio_integrity_alloc);
 
 /**
  * bio_integrity_free - Free bio integrity payload
  * @bio:       bio containing bip to be freed
- * @bs:                bio_set this bio was allocated from
  *
  * Description: Used to free the integrity portion of a bio. Usually
  * called from bio_free().
  */
-void bio_integrity_free(struct bio *bio, struct bio_set *bs)
+void bio_integrity_free(struct bio *bio)
 {
        struct bio_integrity_payload *bip = bio->bi_integrity;
+       struct bio_set *bs = bio->bi_pool;
+
+       if (!bs)
+               bs = fs_bio_set;
 
        BUG_ON(bip == NULL);
 
@@ -730,19 +717,18 @@ EXPORT_SYMBOL(bio_integrity_split);
  * @bio:       New bio
  * @bio_src:   Original bio
  * @gfp_mask:  Memory allocation mask
- * @bs:                bio_set to allocate bip from
  *
  * Description:        Called to allocate a bip when cloning a bio
  */
 int bio_integrity_clone(struct bio *bio, struct bio *bio_src,
-                       gfp_t gfp_mask, struct bio_set *bs)
+                       gfp_t gfp_mask)
 {
        struct bio_integrity_payload *bip_src = bio_src->bi_integrity;
        struct bio_integrity_payload *bip;
 
        BUG_ON(bip_src == NULL);
 
-       bip = bio_integrity_alloc_bioset(bio, gfp_mask, bip_src->bip_vcnt, bs);
+       bip = bio_integrity_alloc(bio, gfp_mask, bip_src->bip_vcnt);
 
        if (bip == NULL)
                return -EIO;
index 71072ab99128aadf1090e2ceab32bae67827dc9c..9298c65ad9c74bb1c4adde9ef9ffa07b397ca63f 100644 (file)
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -55,6 +55,7 @@ static struct biovec_slab bvec_slabs[BIOVEC_NR_POOLS] __read_mostly = {
  * IO code that does not need private memory pools.
  */
 struct bio_set *fs_bio_set;
+EXPORT_SYMBOL(fs_bio_set);
 
 /*
  * Our slab pool management
@@ -233,26 +234,37 @@ fallback:
        return bvl;
 }
 
-void bio_free(struct bio *bio, struct bio_set *bs)
+static void __bio_free(struct bio *bio)
 {
+       bio_disassociate_task(bio);
+
+       if (bio_integrity(bio))
+               bio_integrity_free(bio);
+}
+
+static void bio_free(struct bio *bio)
+{
+       struct bio_set *bs = bio->bi_pool;
        void *p;
 
-       if (bio_has_allocated_vec(bio))
-               bvec_free_bs(bs, bio->bi_io_vec, BIO_POOL_IDX(bio));
+       __bio_free(bio);
 
-       if (bio_integrity(bio))
-               bio_integrity_free(bio, bs);
+       if (bs) {
+               if (bio_has_allocated_vec(bio))
+                       bvec_free_bs(bs, bio->bi_io_vec, BIO_POOL_IDX(bio));
 
-       /*
-        * If we have front padding, adjust the bio pointer before freeing
-        */
-       p = bio;
-       if (bs->front_pad)
+               /*
+                * If we have front padding, adjust the bio pointer before freeing
+                */
+               p = bio;
                p -= bs->front_pad;
 
-       mempool_free(p, bs->bio_pool);
+               mempool_free(p, bs->bio_pool);
+       } else {
+               /* Bio was allocated by bio_kmalloc() */
+               kfree(bio);
+       }
 }
-EXPORT_SYMBOL(bio_free);
 
 void bio_init(struct bio *bio)
 {
@@ -262,6 +274,27 @@ void bio_init(struct bio *bio)
 }
 EXPORT_SYMBOL(bio_init);
 
+/**
+ * bio_reset - reinitialize a bio
+ * @bio:       bio to reset
+ *
+ * Description:
+ *   After calling bio_reset(), @bio will be in the same state as a freshly
+ *   allocated bio returned by bio_alloc_bioset() - the only fields that are
+ *   preserved are the ones that are initialized by bio_alloc_bioset(). See
+ *   comment in struct bio.
+ */
+void bio_reset(struct bio *bio)
+{
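+       /* Keep only the flags above BIO_RESET_BITS; the rest of the bio,
+        * up to BIO_RESET_BYTES, is cleared below */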
+       unsigned long flags = bio->bi_flags & (~0UL << BIO_RESET_BITS);
+
+       __bio_free(bio);
+
+       memset(bio, 0, BIO_RESET_BYTES);
+       bio->bi_flags = flags|(1 << BIO_UPTODATE);
+}
+EXPORT_SYMBOL(bio_reset);
+
 /**
  * bio_alloc_bioset - allocate a bio for I/O
  * @gfp_mask:   the GFP_ mask given to the slab allocator
@@ -269,42 +302,58 @@ EXPORT_SYMBOL(bio_init);
  * @bs:                the bio_set to allocate from.
  *
  * Description:
- *   bio_alloc_bioset will try its own mempool to satisfy the allocation.
- *   If %__GFP_WAIT is set then we will block on the internal pool waiting
- *   for a &struct bio to become free.
+ *   If @bs is NULL, uses kmalloc() to allocate the bio; else the allocation is
+ *   backed by the @bs's mempool.
  *
- *   Note that the caller must set ->bi_destructor on successful return
- *   of a bio, to do the appropriate freeing of the bio once the reference
- *   count drops to zero.
- **/
+ *   When @bs is not NULL, if %__GFP_WAIT is set then bio_alloc will always be
+ *   able to allocate a bio. This is due to the mempool guarantees. To make this
+ *   work, callers must never allocate more than 1 bio at a time from this pool.
+ *   Callers that need to allocate more than 1 bio must always submit the
+ *   previously allocated bio for IO before attempting to allocate a new one.
+ *   Failure to do so can cause deadlocks under memory pressure.
+ *
+ *   RETURNS:
+ *   Pointer to new bio on success, NULL on failure.
+ */
 struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
 {
+       unsigned front_pad;
+       unsigned inline_vecs;
        unsigned long idx = BIO_POOL_NONE;
        struct bio_vec *bvl = NULL;
        struct bio *bio;
        void *p;
 
-       p = mempool_alloc(bs->bio_pool, gfp_mask);
+       if (!bs) {
+               if (nr_iovecs > UIO_MAXIOV)
+                       return NULL;
+
+               p = kmalloc(sizeof(struct bio) +
+                           nr_iovecs * sizeof(struct bio_vec),
+                           gfp_mask);
+               front_pad = 0;
+               inline_vecs = nr_iovecs;
+       } else {
+               p = mempool_alloc(bs->bio_pool, gfp_mask);
+               front_pad = bs->front_pad;
+               inline_vecs = BIO_INLINE_VECS;
+       }
+
        if (unlikely(!p))
                return NULL;
-       bio = p + bs->front_pad;
 
+       bio = p + front_pad;
        bio_init(bio);
 
-       if (unlikely(!nr_iovecs))
-               goto out_set;
-
-       if (nr_iovecs <= BIO_INLINE_VECS) {
-               bvl = bio->bi_inline_vecs;
-               nr_iovecs = BIO_INLINE_VECS;
-       } else {
+       if (nr_iovecs > inline_vecs) {
                bvl = bvec_alloc_bs(gfp_mask, nr_iovecs, &idx, bs);
                if (unlikely(!bvl))
                        goto err_free;
-
-               nr_iovecs = bvec_nr_vecs(idx);
+       } else if (nr_iovecs) {
+               bvl = bio->bi_inline_vecs;
        }
-out_set:
+
+       bio->bi_pool = bs;
        bio->bi_flags |= idx << BIO_POOL_OFFSET;
        bio->bi_max_vecs = nr_iovecs;
        bio->bi_io_vec = bvl;
@@ -316,80 +365,6 @@ err_free:
 }
 EXPORT_SYMBOL(bio_alloc_bioset);
 
-static void bio_fs_destructor(struct bio *bio)
-{
-       bio_free(bio, fs_bio_set);
-}
-
-/**
- *     bio_alloc - allocate a new bio, memory pool backed
- *     @gfp_mask: allocation mask to use
- *     @nr_iovecs: number of iovecs
- *
- *     bio_alloc will allocate a bio and associated bio_vec array that can hold
- *     at least @nr_iovecs entries. Allocations will be done from the
- *     fs_bio_set. Also see @bio_alloc_bioset and @bio_kmalloc.
- *
- *     If %__GFP_WAIT is set, then bio_alloc will always be able to allocate
- *     a bio. This is due to the mempool guarantees. To make this work, callers
- *     must never allocate more than 1 bio at a time from this pool. Callers
- *     that need to allocate more than 1 bio must always submit the previously
- *     allocated bio for IO before attempting to allocate a new one. Failure to
- *     do so can cause livelocks under memory pressure.
- *
- *     RETURNS:
- *     Pointer to new bio on success, NULL on failure.
- */
-struct bio *bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs)
-{
-       struct bio *bio = bio_alloc_bioset(gfp_mask, nr_iovecs, fs_bio_set);
-
-       if (bio)
-               bio->bi_destructor = bio_fs_destructor;
-
-       return bio;
-}
-EXPORT_SYMBOL(bio_alloc);
-
-static void bio_kmalloc_destructor(struct bio *bio)
-{
-       if (bio_integrity(bio))
-               bio_integrity_free(bio, fs_bio_set);
-       kfree(bio);
-}
-
-/**
- * bio_kmalloc - allocate a bio for I/O using kmalloc()
- * @gfp_mask:   the GFP_ mask given to the slab allocator
- * @nr_iovecs: number of iovecs to pre-allocate
- *
- * Description:
- *   Allocate a new bio with @nr_iovecs bvecs.  If @gfp_mask contains
- *   %__GFP_WAIT, the allocation is guaranteed to succeed.
- *
- **/
-struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned int nr_iovecs)
-{
-       struct bio *bio;
-
-       if (nr_iovecs > UIO_MAXIOV)
-               return NULL;
-
-       bio = kmalloc(sizeof(struct bio) + nr_iovecs * sizeof(struct bio_vec),
-                     gfp_mask);
-       if (unlikely(!bio))
-               return NULL;
-
-       bio_init(bio);
-       bio->bi_flags |= BIO_POOL_NONE << BIO_POOL_OFFSET;
-       bio->bi_max_vecs = nr_iovecs;
-       bio->bi_io_vec = bio->bi_inline_vecs;
-       bio->bi_destructor = bio_kmalloc_destructor;
-
-       return bio;
-}
-EXPORT_SYMBOL(bio_kmalloc);
-
 void zero_fill_bio(struct bio *bio)
 {
        unsigned long flags;
@@ -420,11 +395,8 @@ void bio_put(struct bio *bio)
        /*
         * last put frees it
         */
-       if (atomic_dec_and_test(&bio->bi_cnt)) {
-               bio_disassociate_task(bio);
-               bio->bi_next = NULL;
-               bio->bi_destructor(bio);
-       }
+       if (atomic_dec_and_test(&bio->bi_cnt))
+               bio_free(bio);
 }
 EXPORT_SYMBOL(bio_put);
 
@@ -466,26 +438,28 @@ void __bio_clone(struct bio *bio, struct bio *bio_src)
 EXPORT_SYMBOL(__bio_clone);
 
 /**
- *     bio_clone       -       clone a bio
+ *     bio_clone_bioset -      clone a bio
  *     @bio: bio to clone
  *     @gfp_mask: allocation priority
+ *     @bs: bio_set to allocate from
  *
  *     Like __bio_clone, only also allocates the returned bio
  */
-struct bio *bio_clone(struct bio *bio, gfp_t gfp_mask)
+struct bio *bio_clone_bioset(struct bio *bio, gfp_t gfp_mask,
+                            struct bio_set *bs)
 {
-       struct bio *b = bio_alloc_bioset(gfp_mask, bio->bi_max_vecs, fs_bio_set);
+       struct bio *b;
 
+       b = bio_alloc_bioset(gfp_mask, bio->bi_max_vecs, bs);
        if (!b)
                return NULL;
 
-       b->bi_destructor = bio_fs_destructor;
        __bio_clone(b, bio);
 
        if (bio_integrity(bio)) {
                int ret;
 
-               ret = bio_integrity_clone(b, bio, gfp_mask, fs_bio_set);
+               ret = bio_integrity_clone(b, bio, gfp_mask);
 
                if (ret < 0) {
                        bio_put(b);
@@ -495,7 +469,7 @@ struct bio *bio_clone(struct bio *bio, gfp_t gfp_mask)
 
        return b;
 }
-EXPORT_SYMBOL(bio_clone);
+EXPORT_SYMBOL(bio_clone_bioset);
 
 /**
  *     bio_get_nr_vecs         - return approx number of vecs
@@ -1501,7 +1475,7 @@ struct bio_pair *bio_split(struct bio *bi, int first_sectors)
        trace_block_split(bdev_get_queue(bi->bi_bdev), bi,
                                bi->bi_sector + first_sectors);
 
-       BUG_ON(bi->bi_vcnt != 1);
+       BUG_ON(bi->bi_vcnt != 1 && bi->bi_vcnt != 0);
        BUG_ON(bi->bi_idx != 0);
        atomic_set(&bp->cnt, 3);
        bp->error = 0;
@@ -1511,17 +1485,22 @@ struct bio_pair *bio_split(struct bio *bi, int first_sectors)
        bp->bio2.bi_size -= first_sectors << 9;
        bp->bio1.bi_size = first_sectors << 9;
 
-       bp->bv1 = bi->bi_io_vec[0];
-       bp->bv2 = bi->bi_io_vec[0];
-       bp->bv2.bv_offset += first_sectors << 9;
-       bp->bv2.bv_len -= first_sectors << 9;
-       bp->bv1.bv_len = first_sectors << 9;
+       if (bi->bi_vcnt != 0) {
+               bp->bv1 = bi->bi_io_vec[0];
+               bp->bv2 = bi->bi_io_vec[0];
+
+               if (bio_is_rw(bi)) {
+                       bp->bv2.bv_offset += first_sectors << 9;
+                       bp->bv2.bv_len -= first_sectors << 9;
+                       bp->bv1.bv_len = first_sectors << 9;
+               }
 
-       bp->bio1.bi_io_vec = &bp->bv1;
-       bp->bio2.bi_io_vec = &bp->bv2;
+               bp->bio1.bi_io_vec = &bp->bv1;
+               bp->bio2.bi_io_vec = &bp->bv2;
 
-       bp->bio1.bi_max_vecs = 1;
-       bp->bio2.bi_max_vecs = 1;
+               bp->bio1.bi_max_vecs = 1;
+               bp->bio2.bi_max_vecs = 1;
+       }
 
        bp->bio1.bi_end_io = bio_pair_end_1;
        bp->bio2.bi_end_io = bio_pair_end_2;
index 38e721b35d45388cb0febed8021a02277a4a2f1b..37967bcea05c10be626bc9a3c55d882ebb119adf 100644 (file)
@@ -116,6 +116,8 @@ EXPORT_SYMBOL(invalidate_bdev);
 
 int set_blocksize(struct block_device *bdev, int size)
 {
+       struct address_space *mapping;
+
        /* Size must be a power of two, and between 512 and PAGE_SIZE */
        if (size > PAGE_SIZE || size < 512 || !is_power_of_2(size))
                return -EINVAL;
@@ -124,6 +126,20 @@ int set_blocksize(struct block_device *bdev, int size)
        if (size < bdev_logical_block_size(bdev))
                return -EINVAL;
 
+       /* Prevent starting I/O or mapping the device */
+       percpu_down_write(&bdev->bd_block_size_semaphore);
+
+       /* Check that the block device is not memory mapped */
+       mapping = bdev->bd_inode->i_mapping;
+       mutex_lock(&mapping->i_mmap_mutex);
+       if (!prio_tree_empty(&mapping->i_mmap) ||
+           !list_empty(&mapping->i_mmap_nonlinear)) {
+               mutex_unlock(&mapping->i_mmap_mutex);
+               percpu_up_write(&bdev->bd_block_size_semaphore);
+               return -EBUSY;
+       }
+       mutex_unlock(&mapping->i_mmap_mutex);
+
        /* Don't change the size if it is same as current */
        if (bdev->bd_block_size != size) {
                sync_blockdev(bdev);
@@ -131,6 +147,9 @@ int set_blocksize(struct block_device *bdev, int size)
                bdev->bd_inode->i_blkbits = blksize_bits(size);
                kill_bdev(bdev);
        }
+
+       percpu_up_write(&bdev->bd_block_size_semaphore);
+
        return 0;
 }
 
@@ -441,6 +460,12 @@ static struct inode *bdev_alloc_inode(struct super_block *sb)
        struct bdev_inode *ei = kmem_cache_alloc(bdev_cachep, GFP_KERNEL);
        if (!ei)
                return NULL;
+
+       if (unlikely(percpu_init_rwsem(&ei->bdev.bd_block_size_semaphore))) {
+               kmem_cache_free(bdev_cachep, ei);
+               return NULL;
+       }
+
        return &ei->vfs_inode;
 }
 
@@ -449,6 +474,8 @@ static void bdev_i_callback(struct rcu_head *head)
        struct inode *inode = container_of(head, struct inode, i_rcu);
        struct bdev_inode *bdi = BDEV_I(inode);
 
+       percpu_free_rwsem(&bdi->bdev.bd_block_size_semaphore);
+
        kmem_cache_free(bdev_cachep, bdi);
 }
 
@@ -1567,6 +1594,22 @@ static long block_ioctl(struct file *file, unsigned cmd, unsigned long arg)
        return blkdev_ioctl(bdev, mode, cmd, arg);
 }
 
+ssize_t blkdev_aio_read(struct kiocb *iocb, const struct iovec *iov,
+                       unsigned long nr_segs, loff_t pos)
+{
+       ssize_t ret;
+       struct block_device *bdev = I_BDEV(iocb->ki_filp->f_mapping->host);
+
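+       /* Paired with percpu_down_write() in set_blocksize(): keeps the
+        * block size stable while the read is in flight */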
+       percpu_down_read(&bdev->bd_block_size_semaphore);
+
+       ret = generic_file_aio_read(iocb, iov, nr_segs, pos);
+
+       percpu_up_read(&bdev->bd_block_size_semaphore);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(blkdev_aio_read);
+
 /*
  * Write data to the block device.  Only intended for the block device itself
  * and the raw driver which basically is a fake block device.
@@ -1578,12 +1621,16 @@ ssize_t blkdev_aio_write(struct kiocb *iocb, const struct iovec *iov,
                         unsigned long nr_segs, loff_t pos)
 {
        struct file *file = iocb->ki_filp;
+       struct block_device *bdev = I_BDEV(file->f_mapping->host);
        struct blk_plug plug;
        ssize_t ret;
 
        BUG_ON(iocb->ki_pos != pos);
 
        blk_start_plug(&plug);
+
+       percpu_down_read(&bdev->bd_block_size_semaphore);
+
        ret = __generic_file_aio_write(iocb, iov, nr_segs, &iocb->ki_pos);
        if (ret > 0 || ret == -EIOCBQUEUED) {
                ssize_t err;
@@ -1592,11 +1639,29 @@ ssize_t blkdev_aio_write(struct kiocb *iocb, const struct iovec *iov,
                if (err < 0 && ret > 0)
                        ret = err;
        }
+
+       percpu_up_read(&bdev->bd_block_size_semaphore);
+
        blk_finish_plug(&plug);
+
        return ret;
 }
 EXPORT_SYMBOL_GPL(blkdev_aio_write);
 
+static int blkdev_mmap(struct file *file, struct vm_area_struct *vma)
+{
+       int ret;
+       struct block_device *bdev = I_BDEV(file->f_mapping->host);
+
+       percpu_down_read(&bdev->bd_block_size_semaphore);
+
+       ret = generic_file_mmap(file, vma);
+
+       percpu_up_read(&bdev->bd_block_size_semaphore);
+
+       return ret;
+}
+
 /*
  * Try to release a page associated with block device when the system
  * is under memory pressure.
@@ -1627,9 +1692,9 @@ const struct file_operations def_blk_fops = {
        .llseek         = block_llseek,
        .read           = do_sync_read,
        .write          = do_sync_write,
-       .aio_read       = generic_file_aio_read,
+       .aio_read       = blkdev_aio_read,
        .aio_write      = blkdev_aio_write,
-       .mmap           = generic_file_mmap,
+       .mmap           = blkdev_mmap,
        .fsync          = blkdev_fsync,
        .unlocked_ioctl = block_ioctl,
 #ifdef CONFIG_COMPAT
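
The exported blkdev_aio_read()/blkdev_aio_write() pair exists so that other
users of the block device (the raw driver elsewhere in this series) inherit
the same block-size protection. A minimal sketch of a file_operations table
delegating to them (the table name is hypothetical):

	static const struct file_operations example_blk_fops = {
		.read		= do_sync_read,
		.write		= do_sync_write,
		.aio_read	= blkdev_aio_read,	/* takes bd_block_size_semaphore */
		.aio_write	= blkdev_aio_write,	/* likewise, under a blk plug */
	};
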
index 1585db1aa3651a3eb2fbe586156fd5bd270f5b82..f936cb50dc0d524250dae6d6ac5382db5c1f25b7 100644 (file)
@@ -814,8 +814,8 @@ static int _write_mirror(struct ore_io_state *ios, int cur_comp)
                        struct bio *bio;
 
                        if (per_dev != master_dev) {
-                               bio = bio_kmalloc(GFP_KERNEL,
-                                                 master_dev->bio->bi_max_vecs);
+                               bio = bio_clone_kmalloc(master_dev->bio,
+                                                       GFP_KERNEL);
                                if (unlikely(!bio)) {
                                        ORE_DBGMSG(
                                              "Failed to allocate BIO size=%u\n",
@@ -824,7 +824,6 @@ static int _write_mirror(struct ore_io_state *ios, int cur_comp)
                                        goto out;
                                }
 
-                               __bio_clone(bio, master_dev->bio);
                                bio->bi_bdev = NULL;
                                bio->bi_next = NULL;
                                per_dev->offset = master_dev->offset;
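
The exofs hunk above shows the conversion pattern used throughout this
series: an open-coded bio_kmalloc() followed by __bio_clone() collapses into
a single bio_clone_kmalloc() call. Schematically, with src standing in for
the bio being copied:

	/* before */
	bio = bio_kmalloc(GFP_KERNEL, src->bi_max_vecs);
	if (bio)
		__bio_clone(bio, src);

	/* after */
	bio = bio_clone_kmalloc(src, GFP_KERNEL);
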
index 26435890dc87a6c3b9d37f14571833fb1afe0164..820e7aaad4fdbbf432b188b083662b5015bd3905 100644 (file)
@@ -212,20 +212,41 @@ extern void bio_pair_release(struct bio_pair *dbio);
 extern struct bio_set *bioset_create(unsigned int, unsigned int);
 extern void bioset_free(struct bio_set *);
 
-extern struct bio *bio_alloc(gfp_t, unsigned int);
-extern struct bio *bio_kmalloc(gfp_t, unsigned int);
 extern struct bio *bio_alloc_bioset(gfp_t, int, struct bio_set *);
 extern void bio_put(struct bio *);
-extern void bio_free(struct bio *, struct bio_set *);
+
+extern void __bio_clone(struct bio *, struct bio *);
+extern struct bio *bio_clone_bioset(struct bio *, gfp_t, struct bio_set *bs);
+
+extern struct bio_set *fs_bio_set;
+
+static inline struct bio *bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs)
+{
+       return bio_alloc_bioset(gfp_mask, nr_iovecs, fs_bio_set);
+}
+
+static inline struct bio *bio_clone(struct bio *bio, gfp_t gfp_mask)
+{
+       return bio_clone_bioset(bio, gfp_mask, fs_bio_set);
+}
+
+static inline struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned int nr_iovecs)
+{
+       return bio_alloc_bioset(gfp_mask, nr_iovecs, NULL);
+}
+
+static inline struct bio *bio_clone_kmalloc(struct bio *bio, gfp_t gfp_mask)
+{
+       return bio_clone_bioset(bio, gfp_mask, NULL);
+}
 
 extern void bio_endio(struct bio *, int);
 struct request_queue;
 extern int bio_phys_segments(struct request_queue *, struct bio *);
 
-extern void __bio_clone(struct bio *, struct bio *);
-extern struct bio *bio_clone(struct bio *, gfp_t);
-
 extern void bio_init(struct bio *);
+extern void bio_reset(struct bio *);
 
 extern int bio_add_page(struct bio *, struct page *, unsigned int,unsigned int);
 extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *,
@@ -304,8 +325,6 @@ struct biovec_slab {
        struct kmem_cache *slab;
 };
 
-extern struct bio_set *fs_bio_set;
-
 /*
  * a small number of entries is fine, not going to be performance critical.
  * basically we just need to survive
@@ -367,9 +386,31 @@ static inline char *__bio_kmap_irq(struct bio *bio, unsigned short idx,
 /*
  * Check whether this bio carries any data or not. A NULL bio is allowed.
  */
-static inline int bio_has_data(struct bio *bio)
+static inline bool bio_has_data(struct bio *bio)
 {
-       return bio && bio->bi_io_vec != NULL;
+       if (bio && bio->bi_vcnt)
+               return true;
+
+       return false;
+}
+
+static inline bool bio_is_rw(struct bio *bio)
+{
+       if (!bio_has_data(bio))
+               return false;
+
+       if (bio->bi_rw & REQ_WRITE_SAME)
+               return false;
+
+       return true;
+}
+
+static inline bool bio_mergeable(struct bio *bio)
+{
+       if (bio->bi_rw & REQ_NOMERGE_FLAGS)
+               return false;
+
+       return true;
 }
 
 /*
@@ -505,9 +546,8 @@ static inline struct bio *bio_list_get(struct bio_list *bl)
 
 #define bio_integrity(bio) (bio->bi_integrity != NULL)
 
-extern struct bio_integrity_payload *bio_integrity_alloc_bioset(struct bio *, gfp_t, unsigned int, struct bio_set *);
 extern struct bio_integrity_payload *bio_integrity_alloc(struct bio *, gfp_t, unsigned int);
-extern void bio_integrity_free(struct bio *, struct bio_set *);
+extern void bio_integrity_free(struct bio *);
 extern int bio_integrity_add_page(struct bio *, struct page *, unsigned int, unsigned int);
 extern int bio_integrity_enabled(struct bio *bio);
 extern int bio_integrity_set_tag(struct bio *, void *, unsigned int);
@@ -517,7 +557,7 @@ extern void bio_integrity_endio(struct bio *, int);
 extern void bio_integrity_advance(struct bio *, unsigned int);
 extern void bio_integrity_trim(struct bio *, unsigned int, unsigned int);
 extern void bio_integrity_split(struct bio *, struct bio_pair *, int);
-extern int bio_integrity_clone(struct bio *, struct bio *, gfp_t, struct bio_set *);
+extern int bio_integrity_clone(struct bio *, struct bio *, gfp_t);
 extern int bioset_integrity_create(struct bio_set *, int);
 extern void bioset_integrity_free(struct bio_set *);
 extern void bio_integrity_init(void);
@@ -549,13 +589,13 @@ static inline int bio_integrity_prep(struct bio *bio)
        return 0;
 }
 
-static inline void bio_integrity_free(struct bio *bio, struct bio_set *bs)
+static inline void bio_integrity_free(struct bio *bio)
 {
        return;
 }
 
 static inline int bio_integrity_clone(struct bio *bio, struct bio *bio_src,
-                                     gfp_t gfp_mask, struct bio_set *bs)
+                                     gfp_t gfp_mask)
 {
        return 0;
 }
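
With these changes every bio allocation funnels through bio_alloc_bioset():
bio_alloc() and bio_clone() draw from fs_bio_set, while passing a NULL
bio_set selects plain kmalloc. A driver that wants its own mempool, purely
for illustration:

	struct bio_set *bs = bioset_create(64, 0);	/* 64 bios, no front pad */
	struct bio *bio;

	if (bs) {
		bio = bio_alloc_bioset(GFP_NOIO, 4, bs);	/* up to 4 biovecs */
		/* ... submit and complete I/O ... */
		bioset_free(bs);
	}
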
index 7b7ac9ccec7af5900b469166e7bdda9aa578b4d2..cdf11191e6450fa680b7a8b71352c3642633b9fb 100644 (file)
@@ -59,12 +59,6 @@ struct bio {
        unsigned int            bi_seg_front_size;
        unsigned int            bi_seg_back_size;
 
-       unsigned int            bi_max_vecs;    /* max bvl_vecs we can hold */
-
-       atomic_t                bi_cnt;         /* pin count */
-
-       struct bio_vec          *bi_io_vec;     /* the actual vec list */
-
        bio_end_io_t            *bi_end_io;
 
        void                    *bi_private;
@@ -80,7 +74,17 @@ struct bio {
        struct bio_integrity_payload *bi_integrity;  /* data integrity */
 #endif
 
-       bio_destructor_t        *bi_destructor; /* destructor */
+       /*
+        * Everything starting with bi_max_vecs will be preserved by bio_reset()
+        */
+
+       unsigned int            bi_max_vecs;    /* max bvl_vecs we can hold */
+
+       atomic_t                bi_cnt;         /* pin count */
+
+       struct bio_vec          *bi_io_vec;     /* the actual vec list */
+
+       struct bio_set          *bi_pool;
 
        /*
         * We can inline a number of vecs at the end of the bio, to avoid
@@ -90,6 +94,8 @@ struct bio {
        struct bio_vec          bi_inline_vecs[0];
 };
 
+#define BIO_RESET_BYTES                offsetof(struct bio, bi_max_vecs)
+
 /*
  * bio flags
  */
@@ -105,6 +111,13 @@ struct bio {
 #define BIO_FS_INTEGRITY 9     /* fs owns integrity data, not block layer */
 #define BIO_QUIET      10      /* Make BIO Quiet */
 #define BIO_MAPPED_INTEGRITY 11/* integrity metadata has been remapped */
+
+/*
+ * Flags starting here get preserved by bio_reset() - this includes
+ * BIO_POOL_IDX()
+ */
+#define BIO_RESET_BITS 12
+
 #define bio_flagged(bio, flag) ((bio)->bi_flags & (1 << (flag)))
 
 /*
@@ -134,6 +147,7 @@ enum rq_flag_bits {
        __REQ_PRIO,             /* boost priority in cfq */
        __REQ_DISCARD,          /* request to discard sectors */
        __REQ_SECURE,           /* secure discard (used with __REQ_DISCARD) */
+       __REQ_WRITE_SAME,       /* write same block many times */
 
        __REQ_NOIDLE,           /* don't anticipate more IO after this one */
        __REQ_FUA,              /* forced unit access */
@@ -172,15 +186,21 @@ enum rq_flag_bits {
 #define REQ_META               (1 << __REQ_META)
 #define REQ_PRIO               (1 << __REQ_PRIO)
 #define REQ_DISCARD            (1 << __REQ_DISCARD)
+#define REQ_WRITE_SAME         (1 << __REQ_WRITE_SAME)
 #define REQ_NOIDLE             (1 << __REQ_NOIDLE)
 
 #define REQ_FAILFAST_MASK \
        (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)
 #define REQ_COMMON_MASK \
        (REQ_WRITE | REQ_FAILFAST_MASK | REQ_SYNC | REQ_META | REQ_PRIO | \
-        REQ_DISCARD | REQ_NOIDLE | REQ_FLUSH | REQ_FUA | REQ_SECURE)
+        REQ_DISCARD | REQ_WRITE_SAME | REQ_NOIDLE | REQ_FLUSH | REQ_FUA | \
+        REQ_SECURE)
 #define REQ_CLONE_MASK         REQ_COMMON_MASK
 
+/* This mask is used for both bio and request merge checking */
+#define REQ_NOMERGE_FLAGS \
+       (REQ_NOMERGE | REQ_STARTED | REQ_SOFTBARRIER | REQ_FLUSH | REQ_FUA)
+
 #define REQ_RAHEAD             (1 << __REQ_RAHEAD)
 #define REQ_THROTTLED          (1 << __REQ_THROTTLED)
 
index 4a2ab7c85393df48fd8d93e085f3b7ead5de7be2..1756001210d23a7f32037a3bfb21579202c8c380 100644 (file)
@@ -270,6 +270,7 @@ struct queue_limits {
        unsigned int            io_min;
        unsigned int            io_opt;
        unsigned int            max_discard_sectors;
+       unsigned int            max_write_same_sectors;
        unsigned int            discard_granularity;
        unsigned int            discard_alignment;
 
@@ -540,8 +541,7 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
 
 #define blk_account_rq(rq) \
        (((rq)->cmd_flags & REQ_STARTED) && \
-        ((rq)->cmd_type == REQ_TYPE_FS || \
-         ((rq)->cmd_flags & REQ_DISCARD)))
+        ((rq)->cmd_type == REQ_TYPE_FS))
 
 #define blk_pm_request(rq)     \
        ((rq)->cmd_type == REQ_TYPE_PM_SUSPEND || \
@@ -595,17 +595,39 @@ static inline void blk_clear_rl_full(struct request_list *rl, bool sync)
        rl->flags &= ~flag;
 }
 
+static inline bool rq_mergeable(struct request *rq)
+{
+       if (rq->cmd_type != REQ_TYPE_FS)
+               return false;
 
-/*
- * mergeable request must not have _NOMERGE or _BARRIER bit set, nor may
- * it already be started by driver.
- */
-#define RQ_NOMERGE_FLAGS       \
-       (REQ_NOMERGE | REQ_STARTED | REQ_SOFTBARRIER | REQ_FLUSH | REQ_FUA | REQ_DISCARD)
-#define rq_mergeable(rq)       \
-       (!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && \
-        (((rq)->cmd_flags & REQ_DISCARD) || \
-         (rq)->cmd_type == REQ_TYPE_FS))
+       if (rq->cmd_flags & REQ_NOMERGE_FLAGS)
+               return false;
+
+       return true;
+}
+
+static inline bool blk_check_merge_flags(unsigned int flags1,
+                                        unsigned int flags2)
+{
+       if ((flags1 & REQ_DISCARD) != (flags2 & REQ_DISCARD))
+               return false;
+
+       if ((flags1 & REQ_SECURE) != (flags2 & REQ_SECURE))
+               return false;
+
+       if ((flags1 & REQ_WRITE_SAME) != (flags2 & REQ_WRITE_SAME))
+               return false;
+
+       return true;
+}
+
+static inline bool blk_write_same_mergeable(struct bio *a, struct bio *b)
+{
+       if (bio_data(a) == bio_data(b))
+               return true;
+
+       return false;
+}
 
 /*
  * q->prep_rq_fn return values
@@ -802,6 +824,28 @@ static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
        return blk_rq_cur_bytes(rq) >> 9;
 }
 
+static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
+                                                    unsigned int cmd_flags)
+{
+       if (unlikely(cmd_flags & REQ_DISCARD))
+               return q->limits.max_discard_sectors;
+
+       if (unlikely(cmd_flags & REQ_WRITE_SAME))
+               return q->limits.max_write_same_sectors;
+
+       return q->limits.max_sectors;
+}
+
+static inline unsigned int blk_rq_get_max_sectors(struct request *rq)
+{
+       struct request_queue *q = rq->q;
+
+       if (unlikely(rq->cmd_type == REQ_TYPE_BLOCK_PC))
+               return q->limits.max_hw_sectors;
+
+       return blk_queue_get_max_sectors(q, rq->cmd_flags);
+}
+
 /*
  * Request issue related functions.
  */
@@ -857,6 +901,8 @@ extern void blk_queue_max_segments(struct request_queue *, unsigned short);
 extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
 extern void blk_queue_max_discard_sectors(struct request_queue *q,
                unsigned int max_discard_sectors);
+extern void blk_queue_max_write_same_sectors(struct request_queue *q,
+               unsigned int max_write_same_sectors);
 extern void blk_queue_logical_block_size(struct request_queue *, unsigned short);
 extern void blk_queue_physical_block_size(struct request_queue *, unsigned int);
 extern void blk_queue_alignment_offset(struct request_queue *q,
@@ -987,6 +1033,8 @@ static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
 extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *);
 extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
                sector_t nr_sects, gfp_t gfp_mask, unsigned long flags);
+extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
+               sector_t nr_sects, gfp_t gfp_mask, struct page *page);
 extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
                        sector_t nr_sects, gfp_t gfp_mask);
 static inline int sb_issue_discard(struct super_block *sb, sector_t block,
@@ -1164,6 +1212,16 @@ static inline unsigned int bdev_discard_zeroes_data(struct block_device *bdev)
        return queue_discard_zeroes_data(bdev_get_queue(bdev));
 }
 
+static inline unsigned int bdev_write_same(struct block_device *bdev)
+{
+       struct request_queue *q = bdev_get_queue(bdev);
+
+       if (q)
+               return q->limits.max_write_same_sectors;
+
+       return 0;
+}
+
 static inline int queue_dma_alignment(struct request_queue *q)
 {
        return q ? q->dma_alignment : 511;
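
Combining the new queue limit with the helper, a caller can prefer WRITE
SAME for zeroing a range and fall back to writing zero pages when the device
lacks support. A hedged sketch of that pattern:

	int err = -EOPNOTSUPP;

	if (bdev_write_same(bdev))
		err = blkdev_issue_write_same(bdev, sector, nr_sects,
					      GFP_KERNEL, ZERO_PAGE(0));
	if (err)	/* unsupported or failed: write zero pages instead */
		err = blkdev_issue_zeroout(bdev, sector, nr_sects, GFP_KERNEL);
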
index ca6d8c806f470ab7d26f23337648978891f37ffd..1819456157df46a4aea1a52fc68abc38cd862649 100644 (file)
@@ -335,6 +335,7 @@ struct inodes_stat_t {
 #define BLKDISCARDZEROES _IO(0x12,124)
 #define BLKSECDISCARD _IO(0x12,125)
 #define BLKROTATIONAL _IO(0x12,126)
+#define BLKZEROOUT _IO(0x12,127)
 
 #define BMAP_IOCTL 1           /* obsolete - kept for compatibility */
 #define FIBMAP    _IO(0x00,1)  /* bmap access */
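
The new BLKZEROOUT ioctl exposes range zeroing to userspace. A hypothetical
caller, assuming the argument is a {start, length} pair in bytes as
implemented in block/ioctl.c:

	uint64_t range[2] = { 0, 1024 * 1024 };	/* offset and length in bytes */

	if (ioctl(fd, BLKZEROOUT, range) < 0)
		perror("BLKZEROOUT");
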
@@ -415,6 +416,7 @@ struct inodes_stat_t {
 #include <linux/migrate_mode.h>
 #include <linux/uidgid.h>
 #include <linux/lockdep.h>
+#include <linux/percpu-rwsem.h>
 
 #include <asm/byteorder.h>
 
@@ -724,6 +726,8 @@ struct block_device {
        int                     bd_fsfreeze_count;
        /* Mutex for freeze */
        struct mutex            bd_fsfreeze_mutex;
+       /* A semaphore that prevents I/O while the block size is being changed */
+       struct percpu_rw_semaphore      bd_block_size_semaphore;
 };
 
 /*
@@ -2568,6 +2572,8 @@ extern int generic_segment_checks(const struct iovec *iov,
                unsigned long *nr_segs, size_t *count, int access_flags);
 
 /* fs/block_dev.c */
+extern ssize_t blkdev_aio_read(struct kiocb *iocb, const struct iovec *iov,
+                              unsigned long nr_segs, loff_t pos);
 extern ssize_t blkdev_aio_write(struct kiocb *iocb, const struct iovec *iov,
                                unsigned long nr_segs, loff_t pos);
 extern int blkdev_fsync(struct file *filp, loff_t start, loff_t end,
diff --git a/include/linux/percpu-rwsem.h b/include/linux/percpu-rwsem.h
new file mode 100644 (file)
index 0000000..cf80f7e
--- /dev/null
@@ -0,0 +1,89 @@
+#ifndef _LINUX_PERCPU_RWSEM_H
+#define _LINUX_PERCPU_RWSEM_H
+
+#include <linux/mutex.h>
+#include <linux/percpu.h>
+#include <linux/rcupdate.h>
+#include <linux/delay.h>
+
+struct percpu_rw_semaphore {
+       unsigned __percpu *counters;
+       bool locked;
+       struct mutex mtx;
+};
+
+static inline void percpu_down_read(struct percpu_rw_semaphore *p)
+{
+       rcu_read_lock();
+       if (unlikely(p->locked)) {
+               /* slow path: a writer is active, serialize on the mutex */
+               rcu_read_unlock();
+               mutex_lock(&p->mtx);
+               this_cpu_inc(*p->counters);
+               mutex_unlock(&p->mtx);
+               return;
+       }
+       this_cpu_inc(*p->counters);     /* fast path: per-cpu increment only */
+       rcu_read_unlock();
+}
+
+static inline void percpu_up_read(struct percpu_rw_semaphore *p)
+{
+       /*
+        * On x86, the write in this_cpu_dec() serves as a memory unlock
+        * barrier (i.e. later accesses may be reordered before the write,
+        * but no earlier access is reordered past it).
+        * On other architectures this is not guaranteed, so an explicit
+        * smp_mb() is needed there.
+        */
+#if defined(CONFIG_X86) && (!defined(CONFIG_X86_PPRO_FENCE) && !defined(CONFIG_X86_OOSTORE))
+       barrier();
+#else
+       smp_mb();
+#endif
+       this_cpu_dec(*p->counters);
+}
+
+static inline unsigned __percpu_count(unsigned __percpu *counters)
+{
+       unsigned total = 0;
+       int cpu;
+
+       for_each_possible_cpu(cpu)
+               total += ACCESS_ONCE(*per_cpu_ptr(counters, cpu));
+
+       return total;
+}
+
+static inline void percpu_down_write(struct percpu_rw_semaphore *p)
+{
+       mutex_lock(&p->mtx);
+       p->locked = true;
+       synchronize_rcu();      /* wait out readers in their RCU fast path */
+       while (__percpu_count(p->counters))
+               msleep(1);      /* wait until all reader counts drop to zero */
+       smp_rmb(); /* paired with smp_mb() in percpu_up_read() */
+}
+
+static inline void percpu_up_write(struct percpu_rw_semaphore *p)
+{
+       p->locked = false;
+       mutex_unlock(&p->mtx);
+}
+
+static inline int percpu_init_rwsem(struct percpu_rw_semaphore *p)
+{
+       p->counters = alloc_percpu(unsigned);
+       if (unlikely(!p->counters))
+               return -ENOMEM;
+       p->locked = false;
+       mutex_init(&p->mtx);
+       return 0;
+}
+
+static inline void percpu_free_rwsem(struct percpu_rw_semaphore *p)
+{
+       free_percpu(p->counters);
+       p->counters = NULL; /* catch use after free bugs */
+}
+
+#endif
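
The API surface is deliberately small: one init, one free, and the four
lock/unlock entry points. A minimal usage sketch with hypothetical function
names:

	static struct percpu_rw_semaphore sem;

	int setup(void)
	{
		return percpu_init_rwsem(&sem);	/* allocates per-cpu counters */
	}

	void reader(void)
	{
		percpu_down_read(&sem);		/* fast path: per-cpu inc under RCU */
		/* ... read-side critical section ... */
		percpu_up_read(&sem);
	}

	void writer(void)
	{
		percpu_down_write(&sem);	/* blocks until all readers drain */
		/* ... exclusive critical section ... */
		percpu_up_write(&sem);
	}
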
index 7b600da9a635027393368715a43e0a2a44827db0..4bd6c06eb28edb13cc8a47698fd2b1df94d72f65 100644 (file)
@@ -201,6 +201,7 @@ static inline void *sg_virt(struct scatterlist *sg)
        return page_address(sg_page(sg)) + sg->offset;
 }
 
+int sg_nents(struct scatterlist *sg);
 struct scatterlist *sg_next(struct scatterlist *);
 struct scatterlist *sg_last(struct scatterlist *s, unsigned int);
 void sg_init_table(struct scatterlist *, unsigned int);
index e76d85cf31755c6de5b9ca5898756254e0525845..3675452b23ca175b612489e362aa7274b75d96a5 100644 (file)
@@ -38,6 +38,25 @@ struct scatterlist *sg_next(struct scatterlist *sg)
 }
 EXPORT_SYMBOL(sg_next);
 
+/**
+ * sg_nents - return total count of entries in scatterlist
+ * @sg:                The scatterlist
+ *
+ * Description:
+ * Returns the total number of entries in @sg, taking chained
+ * scatterlists into account.
+ *
+ **/
+int sg_nents(struct scatterlist *sg)
+{
+       int nents;
+
+       for (nents = 0; sg; sg = sg_next(sg))
+               nents++;
+       return nents;
+}
+EXPORT_SYMBOL(sg_nents);
+
 /**
  * sg_last - return the last scatterlist entry in a list
  * @sgl:       First entry in the scatterlist
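
Because sg_nents() follows chains, it is the natural way to size per-entry
bookkeeping for a scatterlist built with sg_chain(). A hypothetical caller
(my_desc is illustrative only):

	int nents = sg_nents(sgl);	/* counts entries across chained tables */
	struct my_desc *descs = kcalloc(nents, sizeof(*descs), GFP_KERNEL);
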