Merge branch 'for-4.8/core' of git://git.kernel.dk/linux-block
index 4809c1501d7eda82081d922fa1944d8cd7c76c84..4a80ee752597f02adfc8096e5d14ce9a27e03f0e 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -501,7 +501,7 @@ static int xen_vbd_translate(struct phys_req *req, struct xen_blkif *blkif,
        struct xen_vbd *vbd = &blkif->vbd;
        int rc = -EACCES;
 
-       if ((operation != READ) && vbd->readonly)
+       if ((operation != REQ_OP_READ) && vbd->readonly)
                goto out;
 
        if (likely(req->nr_sects)) {
@@ -1014,7 +1014,7 @@ static int dispatch_discard_io(struct xen_blkif_ring *ring,
        preq.sector_number = req->u.discard.sector_number;
        preq.nr_sects      = req->u.discard.nr_sectors;
 
-       err = xen_vbd_translate(&preq, blkif, WRITE);
+       err = xen_vbd_translate(&preq, blkif, REQ_OP_WRITE);
        if (err) {
                pr_warn("access denied: DISCARD [%llu->%llu] on dev=%04x\n",
                        preq.sector_number,
@@ -1229,6 +1229,7 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
        struct bio **biolist = pending_req->biolist;
        int i, nbio = 0;
        int operation;
+       int operation_flags = 0;
        struct blk_plug plug;
        bool drain = false;
        struct grant_page **pages = pending_req->segments;
@@ -1247,17 +1248,19 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
        switch (req_operation) {
        case BLKIF_OP_READ:
                ring->st_rd_req++;
-               operation = READ;
+               operation = REQ_OP_READ;
                break;
        case BLKIF_OP_WRITE:
                ring->st_wr_req++;
-               operation = WRITE_ODIRECT;
+               operation = REQ_OP_WRITE;
+               operation_flags = WRITE_ODIRECT;
                break;
        case BLKIF_OP_WRITE_BARRIER:
                drain = true;
        case BLKIF_OP_FLUSH_DISKCACHE:
                ring->st_f_req++;
-               operation = WRITE_FLUSH;
+               operation = REQ_OP_WRITE;
+               operation_flags = WRITE_FLUSH;
                break;
        default:
                operation = 0; /* make gcc happy */
@@ -1269,7 +1272,7 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
        nseg = req->operation == BLKIF_OP_INDIRECT ?
               req->u.indirect.nr_segments : req->u.rw.nr_segments;
 
-       if (unlikely(nseg == 0 && operation != WRITE_FLUSH) ||
+       if (unlikely(nseg == 0 && operation_flags != WRITE_FLUSH) ||
            unlikely((req->operation != BLKIF_OP_INDIRECT) &&
                     (nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) ||
            unlikely((req->operation == BLKIF_OP_INDIRECT) &&
@@ -1310,7 +1313,7 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
 
        if (xen_vbd_translate(&preq, ring->blkif, operation) != 0) {
                pr_debug("access denied: %s of [%llu,%llu] on dev=%04x\n",
-                        operation == READ ? "read" : "write",
+                        operation == REQ_OP_READ ? "read" : "write",
                         preq.sector_number,
                         preq.sector_number + preq.nr_sects,
                         ring->blkif->vbd.pdevice);
@@ -1369,6 +1372,7 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
                        bio->bi_private = pending_req;
                        bio->bi_end_io  = end_block_io_op;
                        bio->bi_iter.bi_sector  = preq.sector_number;
+                       bio_set_op_attrs(bio, operation, operation_flags);
                }
 
                preq.sector_number += seg[i].nsec;
@@ -1376,7 +1380,7 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
 
        /* This will be hit if the operation was a flush or discard. */
        if (!bio) {
-               BUG_ON(operation != WRITE_FLUSH);
+               BUG_ON(operation_flags != WRITE_FLUSH);
 
                bio = bio_alloc(GFP_KERNEL, 0);
                if (unlikely(bio == NULL))
@@ -1386,20 +1390,21 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
                bio->bi_bdev    = preq.bdev;
                bio->bi_private = pending_req;
                bio->bi_end_io  = end_block_io_op;
+               bio_set_op_attrs(bio, operation, operation_flags);
        }
 
        atomic_set(&pending_req->pendcnt, nbio);
        blk_start_plug(&plug);
 
        for (i = 0; i < nbio; i++)
-               submit_bio(operation, biolist[i]);
+               submit_bio(biolist[i]);
 
        /* Let the I/Os go.. */
        blk_finish_plug(&plug);
 
-       if (operation == READ)
+       if (operation == REQ_OP_READ)
                ring->st_rd_sect += preq.nr_sects;
-       else if (operation & WRITE)
+       else if (operation == REQ_OP_WRITE)
                ring->st_wr_sect += preq.nr_sects;
 
        return 0;
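
For context, the pattern this patch applies (replacing the combined READ/WRITE_* value with a separate REQ_OP_* operation plus a flag word, attaching both to the bio via bio_set_op_attrs(), and calling submit_bio() without an rw argument) can be illustrated with a small userspace sketch. Everything prefixed fake_/FAKE_ below is a made-up stand-in for illustration only; the real REQ_OP_* values and the kernel's bi_opf bit layout in this version may differ.

#include <stdio.h>

/* Illustrative stand-ins for the kernel's REQ_OP_* values and write flags;
 * the names and bit layout here are invented for the sketch. */
enum fake_req_op { FAKE_REQ_OP_READ = 0, FAKE_REQ_OP_WRITE = 1, FAKE_REQ_OP_FLUSH = 2 };
#define FAKE_WRITE_SYNC   (1u << 8)
#define FAKE_WRITE_FLUSH  (1u << 9)

/* A toy bio: a single word carries the operation in the low bits and the
 * flags in the high bits, mirroring the idea behind the op/flags split. */
struct fake_bio {
        unsigned int opf;
};

/* Analogue of bio_set_op_attrs(): fold the op and its flags into the bio. */
static void fake_set_op_attrs(struct fake_bio *bio, enum fake_req_op op,
                              unsigned int flags)
{
        bio->opf = (unsigned int)op | flags;
}

/* Analogue of the argument-less submit_bio(): the operation is recovered
 * from the bio itself rather than passed alongside it. */
static void fake_submit_bio(struct fake_bio *bio)
{
        unsigned int op    = bio->opf & 0xffu;  /* low bits: operation */
        unsigned int flags = bio->opf & ~0xffu; /* high bits: flags    */

        printf("submitting op=%u flags=0x%x\n", op, flags);
}

int main(void)
{
        struct fake_bio bio = { 0 };

        /* Same shape as dispatch_rw_block_io() above: choose the op and its
         * flags separately, attach both to the bio, then submit without an
         * explicit rw argument. */
        fake_set_op_attrs(&bio, FAKE_REQ_OP_WRITE, FAKE_WRITE_SYNC);
        fake_submit_bio(&bio);
        return 0;
}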