git.kernelconcepts.de Git - karo-tx-linux.git/commitdiff
Merge branch 'for-2.6.39/drivers' of git://git.kernel.dk/linux-2.6-block
author Linus Torvalds <torvalds@linux-foundation.org>
Mon, 28 Mar 2011 03:02:07 +0000 (20:02 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Mon, 28 Mar 2011 03:02:07 +0000 (20:02 -0700)
* 'for-2.6.39/drivers' of git://git.kernel.dk/linux-2.6-block: (122 commits)
  cciss: fix lost command issue
  drbd: need include for bitops functions declarations
  Revert "cciss: Add missing allocation in scsi_cmd_stack_setup and  corresponding deallocation"
  cciss: fix missed command status value CMD_UNABORTABLE
  cciss: remove unnecessary casts
  cciss: Mask off error bits of c->busaddr in cmd_special_free when calling pci_free_consistent
  cciss: Inform controller we are using 32-bit tags.
  cciss: hoist tag masking out of loop
  cciss: Add missing allocation in scsi_cmd_stack_setup and corresponding deallocation
  cciss: export resettable host attribute
  drbd: drop code present under #ifdef which is relevant to 2.6.28 and below
  drbd: Fixed handling of read errors on a 'VerifyS' node
  drbd: Fixed handling of read errors on a 'VerifyT' node
  drbd: Implemented real timeout checking for request processing time
  drbd: Remove unused function atodb_endio()
  drbd: improve log message if received sector offset exceeds local capacity
  drbd: kill dead code
  drbd: don't BUG_ON, if bio_add_page of a single page to an empty bio fails
  drbd: Removed left over, now wrong comments
  drbd: serialize admin requests for new verify run with pending bitmap io
  ...

21 files changed:
Documentation/ABI/testing/sysfs-bus-pci-devices-cciss
drivers/block/cciss.c
drivers/block/cciss.h
drivers/block/cciss_cmd.h
drivers/block/cciss_scsi.c
drivers/block/drbd/drbd_actlog.c
drivers/block/drbd/drbd_bitmap.c
drivers/block/drbd/drbd_int.h
drivers/block/drbd/drbd_main.c
drivers/block/drbd/drbd_nl.c
drivers/block/drbd/drbd_proc.c
drivers/block/drbd/drbd_receiver.c
drivers/block/drbd/drbd_req.c
drivers/block/drbd/drbd_req.h
drivers/block/drbd/drbd_strings.c
drivers/block/drbd/drbd_worker.c
drivers/block/drbd/drbd_wrappers.h
include/linux/drbd.h
include/linux/drbd_limits.h
include/linux/drbd_nl.h
include/linux/drbd_tag_magic.h

index 4f29e5f1ebfa5b19cd7851e6148b80219911e9c2..f5bb0a3bb8c0d5d18bdcfdd2bf8ce70765ed4dc5 100644 (file)
@@ -59,3 +59,15 @@ Kernel Version: 2.6.31
 Contact:       iss_storagedev@hp.com
 Description:   Displays the usage count (number of opens) of logical drive Y
                of controller X.
+
+Where:         /sys/bus/pci/devices/<dev>/ccissX/resettable
+Date:          February 2011
+Kernel Version:        2.6.38
+Contact:       iss_storagedev@hp.com
+Description:   Value of 1 indicates the controller can honor the reset_devices
+               kernel parameter.  Value of 0 indicates reset_devices cannot be
+               honored.  This is to allow, for example, kexec tools to be able
+               to warn the user if they designate an unresettable device as
+               a dump device, as kdump requires resetting the device in order
+               to work reliably.
+
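
The new attribute reads back a plain 0 or 1. As a hedged illustration (not part of this patch), a kexec/kdump helper could consult it before selecting a dump device; the helper name and the PCI address below are placeholders:

    #include <stdio.h>

    /* Hypothetical helper, for illustration only. */
    static int cciss_is_resettable(const char *attr_path)
    {
            FILE *f = fopen(attr_path, "r");
            int val = -1;

            if (!f)
                    return -1;      /* attribute absent: kernel predates it */
            if (fscanf(f, "%d", &val) != 1)
                    val = -1;
            fclose(f);
            return val;             /* 1: reset_devices honored, 0: not */
    }

    /* e.g. cciss_is_resettable("/sys/bus/pci/devices/0000:06:00.0/cciss0/resettable") */
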
index 35658f445fca4a494071438ad0f4788bab936231..9bf13988f1a2f813ff20d6d65d5c9dd22426579e 100644 (file)
@@ -193,7 +193,7 @@ static int __devinit cciss_find_cfg_addrs(struct pci_dev *pdev,
        u64 *cfg_offset);
 static int __devinit cciss_pci_find_memory_BAR(struct pci_dev *pdev,
        unsigned long *memory_bar);
-
+static inline u32 cciss_tag_discard_error_bits(ctlr_info_t *h, u32 tag);
 
 /* performant mode helper functions */
 static void  calc_bucket_map(int *bucket, int num_buckets, int nsgs,
@@ -231,7 +231,7 @@ static const struct block_device_operations cciss_fops = {
  */
 static void set_performant_mode(ctlr_info_t *h, CommandList_struct *c)
 {
-       if (likely(h->transMethod == CFGTBL_Trans_Performant))
+       if (likely(h->transMethod & CFGTBL_Trans_Performant))
                c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
 }
 
@@ -556,6 +556,44 @@ static void __devinit cciss_procinit(ctlr_info_t *h)
 #define to_hba(n) container_of(n, struct ctlr_info, dev)
 #define to_drv(n) container_of(n, drive_info_struct, dev)
 
+/* List of controllers which cannot be reset on kexec with reset_devices */
+static u32 unresettable_controller[] = {
+       0x324a103C, /* Smart Array P712m */
+       0x324b103C, /* Smart Array P711m */
+       0x3223103C, /* Smart Array P800 */
+       0x3234103C, /* Smart Array P400 */
+       0x3235103C, /* Smart Array P400i */
+       0x3211103C, /* Smart Array E200i */
+       0x3212103C, /* Smart Array E200 */
+       0x3213103C, /* Smart Array E200i */
+       0x3214103C, /* Smart Array E200i */
+       0x3215103C, /* Smart Array E200i */
+       0x3237103C, /* Smart Array E500 */
+       0x323D103C, /* Smart Array P700m */
+       0x409C0E11, /* Smart Array 6400 */
+       0x409D0E11, /* Smart Array 6400 EM */
+};
+
+static int ctlr_is_resettable(struct ctlr_info *h)
+{
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(unresettable_controller); i++)
+               if (unresettable_controller[i] == h->board_id)
+                       return 0;
+       return 1;
+}
+
+static ssize_t host_show_resettable(struct device *dev,
+                                   struct device_attribute *attr,
+                                   char *buf)
+{
+       struct ctlr_info *h = to_hba(dev);
+
+       return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h));
+}
+static DEVICE_ATTR(resettable, S_IRUGO, host_show_resettable, NULL);
+
 static ssize_t host_store_rescan(struct device *dev,
                                 struct device_attribute *attr,
                                 const char *buf, size_t count)
@@ -741,6 +779,7 @@ static DEVICE_ATTR(usage_count, S_IRUGO, cciss_show_usage_count, NULL);
 
 static struct attribute *cciss_host_attrs[] = {
        &dev_attr_rescan.attr,
+       &dev_attr_resettable.attr,
        NULL
 };
 
@@ -973,8 +1012,8 @@ static void cmd_special_free(ctlr_info_t *h, CommandList_struct *c)
        temp64.val32.upper = c->ErrDesc.Addr.upper;
        pci_free_consistent(h->pdev, sizeof(ErrorInfo_struct),
                            c->err_info, (dma_addr_t) temp64.val);
-       pci_free_consistent(h->pdev, sizeof(CommandList_struct),
-                           c, (dma_addr_t) c->busaddr);
+       pci_free_consistent(h->pdev, sizeof(CommandList_struct), c,
+               (dma_addr_t) cciss_tag_discard_error_bits(h, (u32) c->busaddr));
 }
 
 static inline ctlr_info_t *get_host(struct gendisk *disk)
@@ -1490,8 +1529,7 @@ static int cciss_bigpassthru(ctlr_info_t *h, void __user *argp)
                return -EINVAL;
        if (!capable(CAP_SYS_RAWIO))
                return -EPERM;
-       ioc = (BIG_IOCTL_Command_struct *)
-           kmalloc(sizeof(*ioc), GFP_KERNEL);
+       ioc = kmalloc(sizeof(*ioc), GFP_KERNEL);
        if (!ioc) {
                status = -ENOMEM;
                goto cleanup1;
@@ -2653,6 +2691,10 @@ static int process_sendcmd_error(ctlr_info_t *h, CommandList_struct *c)
                        c->Request.CDB[0]);
                return_status = IO_NEEDS_RETRY;
                break;
+       case CMD_UNABORTABLE:
+               dev_warn(&h->pdev->dev, "cmd unabortable\n");
+               return_status = IO_ERROR;
+               break;
        default:
                dev_warn(&h->pdev->dev, "cmd 0x%02x returned "
                       "unknown status %x\n", c->Request.CDB[0],
@@ -3103,6 +3145,13 @@ static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd,
                        (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
                                DID_PASSTHROUGH : DID_ERROR);
                break;
+       case CMD_UNABORTABLE:
+               dev_warn(&h->pdev->dev, "cmd %p unabortable\n", cmd);
+               rq->errors = make_status_bytes(SAM_STAT_GOOD,
+                       cmd->err_info->CommandStatus, DRIVER_OK,
+                       cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC ?
+                               DID_PASSTHROUGH : DID_ERROR);
+               break;
        default:
                dev_warn(&h->pdev->dev, "cmd %p returned "
                       "unknown status %x\n", cmd,
@@ -3136,10 +3185,13 @@ static inline u32 cciss_tag_to_index(u32 tag)
        return tag >> DIRECT_LOOKUP_SHIFT;
 }
 
-static inline u32 cciss_tag_discard_error_bits(u32 tag)
+static inline u32 cciss_tag_discard_error_bits(ctlr_info_t *h, u32 tag)
 {
-#define CCISS_ERROR_BITS 0x03
-       return tag & ~CCISS_ERROR_BITS;
+#define CCISS_PERF_ERROR_BITS ((1 << DIRECT_LOOKUP_SHIFT) - 1)
+#define CCISS_SIMPLE_ERROR_BITS 0x03
+       if (likely(h->transMethod & CFGTBL_Trans_Performant))
+               return tag & ~CCISS_PERF_ERROR_BITS;
+       return tag & ~CCISS_SIMPLE_ERROR_BITS;
 }
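
A standalone sketch of the two masks; the concrete DIRECT_LOOKUP_SHIFT value below is an assumption made only for this demonstration (the real value is defined elsewhere in the driver). It shows why the performant-mode and simple-mode masks can recover different tags from the same raw completion word:

    #include <stdio.h>
    #include <stdint.h>

    #define DIRECT_LOOKUP_SHIFT 4   /* assumed value, for demonstration only */
    #define PERF_ERROR_BITS   ((1u << DIRECT_LOOKUP_SHIFT) - 1)    /* 0x0f */
    #define SIMPLE_ERROR_BITS 0x03u

    int main(void)
    {
            uint32_t raw_tag = 0x1234567bu;  /* low bits carry status flags */

            printf("performant: 0x%08x\n", raw_tag & ~PERF_ERROR_BITS);   /* 0x12345670 */
            printf("simple:     0x%08x\n", raw_tag & ~SIMPLE_ERROR_BITS); /* 0x12345678 */
            return 0;
    }
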
 
 static inline void cciss_mark_tag_indexed(u32 *tag)
@@ -3359,7 +3411,7 @@ static inline u32 next_command(ctlr_info_t *h)
 {
        u32 a;
 
-       if (unlikely(h->transMethod != CFGTBL_Trans_Performant))
+       if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
                return h->access.command_completed(h);
 
        if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
@@ -3394,14 +3446,12 @@ static inline u32 process_indexed_cmd(ctlr_info_t *h, u32 raw_tag)
 /* process completion of a non-indexed command */
 static inline u32 process_nonindexed_cmd(ctlr_info_t *h, u32 raw_tag)
 {
-       u32 tag;
        CommandList_struct *c = NULL;
        __u32 busaddr_masked, tag_masked;
 
-       tag = cciss_tag_discard_error_bits(raw_tag);
+       tag_masked = cciss_tag_discard_error_bits(h, raw_tag);
        list_for_each_entry(c, &h->cmpQ, list) {
-               busaddr_masked = cciss_tag_discard_error_bits(c->busaddr);
-               tag_masked = cciss_tag_discard_error_bits(tag);
+               busaddr_masked = cciss_tag_discard_error_bits(h, c->busaddr);
                if (busaddr_masked == tag_masked) {
                        finish_cmd(h, c, raw_tag);
                        return next_command(h);
@@ -3753,7 +3803,8 @@ static void __devinit cciss_wait_for_mode_change_ack(ctlr_info_t *h)
        }
 }
 
-static __devinit void cciss_enter_performant_mode(ctlr_info_t *h)
+static __devinit void cciss_enter_performant_mode(ctlr_info_t *h,
+       u32 use_short_tags)
 {
        /* This is a bit complicated.  There are 8 registers on
         * the controller which we write to to tell it 8 different
@@ -3808,7 +3859,7 @@ static __devinit void cciss_enter_performant_mode(ctlr_info_t *h)
        writel(0, &h->transtable->RepQCtrAddrHigh32);
        writel(h->reply_pool_dhandle, &h->transtable->RepQAddr0Low32);
        writel(0, &h->transtable->RepQAddr0High32);
-       writel(CFGTBL_Trans_Performant,
+       writel(CFGTBL_Trans_Performant | use_short_tags,
                        &(h->cfgtable->HostWrite.TransportRequest));
 
        writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
@@ -3855,7 +3906,8 @@ static void __devinit cciss_put_controller_into_performant_mode(ctlr_info_t *h)
        if ((h->reply_pool == NULL) || (h->blockFetchTable == NULL))
                goto clean_up;
 
-       cciss_enter_performant_mode(h);
+       cciss_enter_performant_mode(h,
+               trans_support & CFGTBL_Trans_use_short_tags);
 
        /* Change the access methods to the performant access methods */
        h->access = SA5_performant_access;
index 579f7491849304cfc311ed9a9e46580013e116b7..554bbd907d144500817a615702ab6155cb7f7243 100644 (file)
@@ -222,6 +222,7 @@ static void SA5_submit_command( ctlr_info_t *h, CommandList_struct *c)
                        h->ctlr, c->busaddr);
 #endif /* CCISS_DEBUG */
          writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
+       readl(h->vaddr + SA5_REQUEST_PORT_OFFSET);
         h->commands_outstanding++;
         if ( h->commands_outstanding > h->max_outstanding)
                h->max_outstanding = h->commands_outstanding;
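
The readl added after the writel appears to be the usual PCI posted-write flush: reading any register on the same device forces preceding posted MMIO writes to complete before the CPU proceeds. A generic sketch of the pattern (names illustrative, not from this patch):

    /* Generic posted-write flush pattern (sketch; names are illustrative). */
    static inline void mmio_write_flushed(void __iomem *reg, u32 val)
    {
            writel(val, reg);       /* may be buffered in a PCI bridge */
            (void)readl(reg);       /* read forces the posted write to complete */
    }
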
index 35463d2f0ee7ce71d80943105f1594eb556141d4..cd441bef031f1a4806ba29df8067c47b050bfae7 100644 (file)
@@ -56,6 +56,7 @@
 
 #define CFGTBL_Trans_Simple     0x00000002l
 #define CFGTBL_Trans_Performant 0x00000004l
+#define CFGTBL_Trans_use_short_tags 0x20000000l
 
 #define CFGTBL_BusType_Ultra2   0x00000001l
 #define CFGTBL_BusType_Ultra3   0x00000002l
index 727d0225b7d049e9f529655c081c513267d248bc..df793803f5ae5c70615df404a442d525442184a0 100644 (file)
@@ -824,13 +824,18 @@ static void complete_scsi_command(CommandList_struct *c, int timeout,
                        break;
                        case CMD_UNSOLICITED_ABORT:
                                cmd->result = DID_ABORT << 16;
-                               dev_warn(&h->pdev->dev, "%p aborted do to an "
+                               dev_warn(&h->pdev->dev, "%p aborted due to an "
                                        "unsolicited abort\n", c);
                        break;
                        case CMD_TIMEOUT:
                                cmd->result = DID_TIME_OUT << 16;
                                dev_warn(&h->pdev->dev, "%p timedout\n", c);
                        break;
+                       case CMD_UNABORTABLE:
+                               cmd->result = DID_ERROR << 16;
+                               dev_warn(&h->pdev->dev, "c %p command "
+                                       "unabortable\n", c);
+                       break;
                        default:
                                cmd->result = DID_ERROR << 16;
                                dev_warn(&h->pdev->dev,
@@ -1007,11 +1012,15 @@ cciss_scsi_interpret_error(ctlr_info_t *h, CommandList_struct *c)
                break;
                case CMD_UNSOLICITED_ABORT:
                        dev_warn(&h->pdev->dev,
-                               "%p aborted do to an unsolicited abort\n", c);
+                               "%p aborted due to an unsolicited abort\n", c);
                break;
                case CMD_TIMEOUT:
                        dev_warn(&h->pdev->dev, "%p timedout\n", c);
                break;
+               case CMD_UNABORTABLE:
+                       dev_warn(&h->pdev->dev,
+                               "%p unabortable\n", c);
+               break;
                default:
                        dev_warn(&h->pdev->dev,
                                "%p returned unknown status %x\n",
index aca302492ff20a25e65ac645a601dbf51ada2f33..2a1642bc451d6cdaffebc8e9c6bf9ba9fa895077 100644 (file)
@@ -92,7 +92,7 @@ static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
        bio->bi_end_io = drbd_md_io_complete;
        bio->bi_rw = rw;
 
-       if (FAULT_ACTIVE(mdev, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD))
+       if (drbd_insert_fault(mdev, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD))
                bio_endio(bio, -EIO);
        else
                submit_bio(rw, bio);
@@ -176,13 +176,17 @@ static struct lc_element *_al_get(struct drbd_conf *mdev, unsigned int enr)
        struct lc_element *al_ext;
        struct lc_element *tmp;
        unsigned long     al_flags = 0;
+       int wake;
 
        spin_lock_irq(&mdev->al_lock);
        tmp = lc_find(mdev->resync, enr/AL_EXT_PER_BM_SECT);
        if (unlikely(tmp != NULL)) {
                struct bm_extent  *bm_ext = lc_entry(tmp, struct bm_extent, lce);
                if (test_bit(BME_NO_WRITES, &bm_ext->flags)) {
+                       wake = !test_and_set_bit(BME_PRIORITY, &bm_ext->flags);
                        spin_unlock_irq(&mdev->al_lock);
+                       if (wake)
+                               wake_up(&mdev->al_wait);
                        return NULL;
                }
        }
@@ -258,6 +262,33 @@ void drbd_al_complete_io(struct drbd_conf *mdev, sector_t sector)
        spin_unlock_irqrestore(&mdev->al_lock, flags);
 }
 
+#if (PAGE_SHIFT + 3) < (AL_EXTENT_SHIFT - BM_BLOCK_SHIFT)
+/* Currently BM_BLOCK_SHIFT, BM_EXT_SHIFT and AL_EXTENT_SHIFT
+ * are still coupled, or assume too much about their relation.
+ * Code below will not work if this is violated.
+ * Will be cleaned up with some followup patch.
+ */
+# error FIXME
+#endif
+
+static unsigned int al_extent_to_bm_page(unsigned int al_enr)
+{
+       return al_enr >>
+               /* bit to page */
+               ((PAGE_SHIFT + 3) -
+               /* al extent number to bit */
+                (AL_EXTENT_SHIFT - BM_BLOCK_SHIFT));
+}
+
+static unsigned int rs_extent_to_bm_page(unsigned int rs_enr)
+{
+       return rs_enr >>
+               /* bit to page */
+               ((PAGE_SHIFT + 3) -
+               /* resync extent number to bit */
+                (BM_EXT_SHIFT - BM_BLOCK_SHIFT));
+}
+
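
For a worked example of these shifts, assume the customary drbd constants (stated here as assumptions, not taken from this diff): PAGE_SHIFT = 12, BM_BLOCK_SHIFT = 12, AL_EXTENT_SHIFT = 22, BM_EXT_SHIFT = 24. Then:

    /* al_extent_to_bm_page: shift = (12 + 3) - (22 - 12) = 5
     *     -> 32 AL extents (4MiB each) share one 4KiB bitmap page
     * rs_extent_to_bm_page: shift = (12 + 3) - (24 - 12) = 3
     *     -> 8 resync extents (16MiB each) share one bitmap page
     */
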
 int
 w_al_write_transaction(struct drbd_conf *mdev, struct drbd_work *w, int unused)
 {
@@ -285,7 +316,7 @@ w_al_write_transaction(struct drbd_conf *mdev, struct drbd_work *w, int unused)
         * For now, we must not write the transaction,
         * if we cannot write out the bitmap of the evicted extent. */
        if (mdev->state.conn < C_CONNECTED && evicted != LC_FREE)
-               drbd_bm_write_sect(mdev, evicted/AL_EXT_PER_BM_SECT);
+               drbd_bm_write_page(mdev, al_extent_to_bm_page(evicted));
 
        /* The bitmap write may have failed, causing a state change. */
        if (mdev->state.disk < D_INCONSISTENT) {
@@ -334,7 +365,7 @@ w_al_write_transaction(struct drbd_conf *mdev, struct drbd_work *w, int unused)
                + mdev->ldev->md.al_offset + mdev->al_tr_pos;
 
        if (!drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE))
-               drbd_chk_io_error(mdev, 1, TRUE);
+               drbd_chk_io_error(mdev, 1, true);
 
        if (++mdev->al_tr_pos >
            div_ceil(mdev->act_log->nr_elements, AL_EXTENTS_PT))
@@ -511,225 +542,6 @@ cancel:
        return 1;
 }
 
-static void atodb_endio(struct bio *bio, int error)
-{
-       struct drbd_atodb_wait *wc = bio->bi_private;
-       struct drbd_conf *mdev = wc->mdev;
-       struct page *page;
-       int uptodate = bio_flagged(bio, BIO_UPTODATE);
-
-       /* strange behavior of some lower level drivers...
-        * fail the request by clearing the uptodate flag,
-        * but do not return any error?! */
-       if (!error && !uptodate)
-               error = -EIO;
-
-       drbd_chk_io_error(mdev, error, TRUE);
-       if (error && wc->error == 0)
-               wc->error = error;
-
-       if (atomic_dec_and_test(&wc->count))
-               complete(&wc->io_done);
-
-       page = bio->bi_io_vec[0].bv_page;
-       put_page(page);
-       bio_put(bio);
-       mdev->bm_writ_cnt++;
-       put_ldev(mdev);
-}
-
-/* sector to word */
-#define S2W(s) ((s)<<(BM_EXT_SHIFT-BM_BLOCK_SHIFT-LN2_BPL))
-
-/* activity log to on disk bitmap -- prepare bio unless that sector
- * is already covered by previously prepared bios */
-static int atodb_prepare_unless_covered(struct drbd_conf *mdev,
-                                       struct bio **bios,
-                                       unsigned int enr,
-                                       struct drbd_atodb_wait *wc) __must_hold(local)
-{
-       struct bio *bio;
-       struct page *page;
-       sector_t on_disk_sector;
-       unsigned int page_offset = PAGE_SIZE;
-       int offset;
-       int i = 0;
-       int err = -ENOMEM;
-
-       /* We always write aligned, full 4k blocks,
-        * so we can ignore the logical_block_size (for now) */
-       enr &= ~7U;
-       on_disk_sector = enr + mdev->ldev->md.md_offset
-                            + mdev->ldev->md.bm_offset;
-
-       D_ASSERT(!(on_disk_sector & 7U));
-
-       /* Check if that enr is already covered by an already created bio.
-        * Caution, bios[] is not NULL terminated,
-        * but only initialized to all NULL.
-        * For completely scattered activity log,
-        * the last invocation iterates over all bios,
-        * and finds the last NULL entry.
-        */
-       while ((bio = bios[i])) {
-               if (bio->bi_sector == on_disk_sector)
-                       return 0;
-               i++;
-       }
-       /* bios[i] == NULL, the next not yet used slot */
-
-       /* GFP_KERNEL, we are not in the write-out path */
-       bio = bio_alloc(GFP_KERNEL, 1);
-       if (bio == NULL)
-               return -ENOMEM;
-
-       if (i > 0) {
-               const struct bio_vec *prev_bv = bios[i-1]->bi_io_vec;
-               page_offset = prev_bv->bv_offset + prev_bv->bv_len;
-               page = prev_bv->bv_page;
-       }
-       if (page_offset == PAGE_SIZE) {
-               page = alloc_page(__GFP_HIGHMEM);
-               if (page == NULL)
-                       goto out_bio_put;
-               page_offset = 0;
-       } else {
-               get_page(page);
-       }
-
-       offset = S2W(enr);
-       drbd_bm_get_lel(mdev, offset,
-                       min_t(size_t, S2W(8), drbd_bm_words(mdev) - offset),
-                       kmap(page) + page_offset);
-       kunmap(page);
-
-       bio->bi_private = wc;
-       bio->bi_end_io = atodb_endio;
-       bio->bi_bdev = mdev->ldev->md_bdev;
-       bio->bi_sector = on_disk_sector;
-
-       if (bio_add_page(bio, page, 4096, page_offset) != 4096)
-               goto out_put_page;
-
-       atomic_inc(&wc->count);
-       /* we already know that we may do this...
-        * get_ldev_if_state(mdev,D_ATTACHING);
-        * just get the extra reference, so that the local_cnt reflects
-        * the number of pending IO requests DRBD at its backing device.
-        */
-       atomic_inc(&mdev->local_cnt);
-
-       bios[i] = bio;
-
-       return 0;
-
-out_put_page:
-       err = -EINVAL;
-       put_page(page);
-out_bio_put:
-       bio_put(bio);
-       return err;
-}
-
-/**
- * drbd_al_to_on_disk_bm() -  * Writes bitmap parts covered by active AL extents
- * @mdev:      DRBD device.
- *
- * Called when we detach (unconfigure) local storage,
- * or when we go from R_PRIMARY to R_SECONDARY role.
- */
-void drbd_al_to_on_disk_bm(struct drbd_conf *mdev)
-{
-       int i, nr_elements;
-       unsigned int enr;
-       struct bio **bios;
-       struct drbd_atodb_wait wc;
-
-       ERR_IF (!get_ldev_if_state(mdev, D_ATTACHING))
-               return; /* sorry, I don't have any act_log etc... */
-
-       wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));
-
-       nr_elements = mdev->act_log->nr_elements;
-
-       /* GFP_KERNEL, we are not in anyone's write-out path */
-       bios = kzalloc(sizeof(struct bio *) * nr_elements, GFP_KERNEL);
-       if (!bios)
-               goto submit_one_by_one;
-
-       atomic_set(&wc.count, 0);
-       init_completion(&wc.io_done);
-       wc.mdev = mdev;
-       wc.error = 0;
-
-       for (i = 0; i < nr_elements; i++) {
-               enr = lc_element_by_index(mdev->act_log, i)->lc_number;
-               if (enr == LC_FREE)
-                       continue;
-               /* next statement also does atomic_inc wc.count and local_cnt */
-               if (atodb_prepare_unless_covered(mdev, bios,
-                                               enr/AL_EXT_PER_BM_SECT,
-                                               &wc))
-                       goto free_bios_submit_one_by_one;
-       }
-
-       /* unnecessary optimization? */
-       lc_unlock(mdev->act_log);
-       wake_up(&mdev->al_wait);
-
-       /* all prepared, submit them */
-       for (i = 0; i < nr_elements; i++) {
-               if (bios[i] == NULL)
-                       break;
-               if (FAULT_ACTIVE(mdev, DRBD_FAULT_MD_WR)) {
-                       bios[i]->bi_rw = WRITE;
-                       bio_endio(bios[i], -EIO);
-               } else {
-                       submit_bio(WRITE, bios[i]);
-               }
-       }
-
-       /* always (try to) flush bitmap to stable storage */
-       drbd_md_flush(mdev);
-
-       /* In case we did not submit a single IO do not wait for
-        * them to complete. ( Because we would wait forever here. )
-        *
-        * In case we had IOs and they are already complete, there
-        * is not point in waiting anyways.
-        * Therefore this if () ... */
-       if (atomic_read(&wc.count))
-               wait_for_completion(&wc.io_done);
-
-       put_ldev(mdev);
-
-       kfree(bios);
-       return;
-
- free_bios_submit_one_by_one:
-       /* free everything by calling the endio callback directly. */
-       for (i = 0; i < nr_elements && bios[i]; i++)
-               bio_endio(bios[i], 0);
-
-       kfree(bios);
-
- submit_one_by_one:
-       dev_warn(DEV, "Using the slow drbd_al_to_on_disk_bm()\n");
-
-       for (i = 0; i < mdev->act_log->nr_elements; i++) {
-               enr = lc_element_by_index(mdev->act_log, i)->lc_number;
-               if (enr == LC_FREE)
-                       continue;
-               /* Really slow: if we have al-extents 16..19 active,
-                * sector 4 will be written four times! Synchronous! */
-               drbd_bm_write_sect(mdev, enr/AL_EXT_PER_BM_SECT);
-       }
-
-       lc_unlock(mdev->act_log);
-       wake_up(&mdev->al_wait);
-       put_ldev(mdev);
-}
-
 /**
  * drbd_al_apply_to_bm() - Sets the bitmap to dirty(1) where covered by active AL extents
  * @mdev:      DRBD device.
@@ -809,7 +621,7 @@ static int w_update_odbm(struct drbd_conf *mdev, struct drbd_work *w, int unused
                return 1;
        }
 
-       drbd_bm_write_sect(mdev, udw->enr);
+       drbd_bm_write_page(mdev, rs_extent_to_bm_page(udw->enr));
        put_ldev(mdev);
 
        kfree(udw);
@@ -889,7 +701,6 @@ static void drbd_try_clear_on_disk_bm(struct drbd_conf *mdev, sector_t sector,
                                dev_warn(DEV, "Kicking resync_lru element enr=%u "
                                     "out with rs_failed=%d\n",
                                     ext->lce.lc_number, ext->rs_failed);
-                               set_bit(WRITE_BM_AFTER_RESYNC, &mdev->flags);
                        }
                        ext->rs_left = rs_left;
                        ext->rs_failed = success ? 0 : count;
@@ -908,7 +719,6 @@ static void drbd_try_clear_on_disk_bm(struct drbd_conf *mdev, sector_t sector,
                                drbd_queue_work_front(&mdev->data.work, &udw->w);
                        } else {
                                dev_warn(DEV, "Could not kmalloc an udw\n");
-                               set_bit(WRITE_BM_AFTER_RESYNC, &mdev->flags);
                        }
                }
        } else {
@@ -919,6 +729,22 @@ static void drbd_try_clear_on_disk_bm(struct drbd_conf *mdev, sector_t sector,
        }
 }
 
+void drbd_advance_rs_marks(struct drbd_conf *mdev, unsigned long still_to_go)
+{
+       unsigned long now = jiffies;
+       unsigned long last = mdev->rs_mark_time[mdev->rs_last_mark];
+       int next = (mdev->rs_last_mark + 1) % DRBD_SYNC_MARKS;
+       if (time_after_eq(now, last + DRBD_SYNC_MARK_STEP)) {
+               if (mdev->rs_mark_left[mdev->rs_last_mark] != still_to_go &&
+                   mdev->state.conn != C_PAUSED_SYNC_T &&
+                   mdev->state.conn != C_PAUSED_SYNC_S) {
+                       mdev->rs_mark_time[next] = now;
+                       mdev->rs_mark_left[next] = still_to_go;
+                       mdev->rs_last_mark = next;
+               }
+       }
+}
+
 /* clear the bit corresponding to the piece of storage in question:
  * size bytes of data starting from sector.  Only clear the bits of the affected
  * one or more _aligned_ BM_BLOCK_SIZE blocks.
@@ -936,7 +762,7 @@ void __drbd_set_in_sync(struct drbd_conf *mdev, sector_t sector, int size,
        int wake_up = 0;
        unsigned long flags;
 
-       if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_SEGMENT_SIZE) {
+       if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_BIO_SIZE) {
                dev_err(DEV, "drbd_set_in_sync: sector=%llus size=%d nonsense!\n",
                                (unsigned long long)sector, size);
                return;
@@ -969,21 +795,9 @@ void __drbd_set_in_sync(struct drbd_conf *mdev, sector_t sector, int size,
         */
        count = drbd_bm_clear_bits(mdev, sbnr, ebnr);
        if (count && get_ldev(mdev)) {
-               unsigned long now = jiffies;
-               unsigned long last = mdev->rs_mark_time[mdev->rs_last_mark];
-               int next = (mdev->rs_last_mark + 1) % DRBD_SYNC_MARKS;
-               if (time_after_eq(now, last + DRBD_SYNC_MARK_STEP)) {
-                       unsigned long tw = drbd_bm_total_weight(mdev);
-                       if (mdev->rs_mark_left[mdev->rs_last_mark] != tw &&
-                           mdev->state.conn != C_PAUSED_SYNC_T &&
-                           mdev->state.conn != C_PAUSED_SYNC_S) {
-                               mdev->rs_mark_time[next] = now;
-                               mdev->rs_mark_left[next] = tw;
-                               mdev->rs_last_mark = next;
-                       }
-               }
+               drbd_advance_rs_marks(mdev, drbd_bm_total_weight(mdev));
                spin_lock_irqsave(&mdev->al_lock, flags);
-               drbd_try_clear_on_disk_bm(mdev, sector, count, TRUE);
+               drbd_try_clear_on_disk_bm(mdev, sector, count, true);
                spin_unlock_irqrestore(&mdev->al_lock, flags);
 
                /* just wake_up unconditional now, various lc_chaged(),
@@ -998,27 +812,27 @@ void __drbd_set_in_sync(struct drbd_conf *mdev, sector_t sector, int size,
 /*
  * this is intended to set one request worth of data out of sync.
  * affects at least 1 bit,
- * and at most 1+DRBD_MAX_SEGMENT_SIZE/BM_BLOCK_SIZE bits.
+ * and at most 1+DRBD_MAX_BIO_SIZE/BM_BLOCK_SIZE bits.
  *
  * called by tl_clear and drbd_send_dblock (==drbd_make_request).
  * so this can be _any_ process.
  */
-void __drbd_set_out_of_sync(struct drbd_conf *mdev, sector_t sector, int size,
+int __drbd_set_out_of_sync(struct drbd_conf *mdev, sector_t sector, int size,
                            const char *file, const unsigned int line)
 {
        unsigned long sbnr, ebnr, lbnr, flags;
        sector_t esector, nr_sectors;
-       unsigned int enr, count;
+       unsigned int enr, count = 0;
        struct lc_element *e;
 
-       if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_SEGMENT_SIZE) {
+       if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_BIO_SIZE) {
                dev_err(DEV, "sector: %llus, size: %d\n",
                        (unsigned long long)sector, size);
-               return;
+               return 0;
        }
 
        if (!get_ldev(mdev))
-               return; /* no disk, no metadata, no bitmap to set bits in */
+               return 0; /* no disk, no metadata, no bitmap to set bits in */
 
        nr_sectors = drbd_get_capacity(mdev->this_bdev);
        esector = sector + (size >> 9) - 1;
@@ -1048,6 +862,8 @@ void __drbd_set_out_of_sync(struct drbd_conf *mdev, sector_t sector, int size,
 
 out:
        put_ldev(mdev);
+
+       return count;
 }
 
 static
@@ -1128,7 +944,10 @@ int drbd_rs_begin_io(struct drbd_conf *mdev, sector_t sector)
        unsigned int enr = BM_SECT_TO_EXT(sector);
        struct bm_extent *bm_ext;
        int i, sig;
+       int sa = 200; /* Step aside 200 times, then grab the extent and let app-IO wait.
+                        200 times -> 20 seconds. */
 
+retry:
        sig = wait_event_interruptible(mdev->al_wait,
                        (bm_ext = _bme_get(mdev, enr)));
        if (sig)
@@ -1139,16 +958,25 @@ int drbd_rs_begin_io(struct drbd_conf *mdev, sector_t sector)
 
        for (i = 0; i < AL_EXT_PER_BM_SECT; i++) {
                sig = wait_event_interruptible(mdev->al_wait,
-                               !_is_in_al(mdev, enr * AL_EXT_PER_BM_SECT + i));
-               if (sig) {
+                                              !_is_in_al(mdev, enr * AL_EXT_PER_BM_SECT + i) ||
+                                              test_bit(BME_PRIORITY, &bm_ext->flags));
+
+               if (sig || (test_bit(BME_PRIORITY, &bm_ext->flags) && sa)) {
                        spin_lock_irq(&mdev->al_lock);
                        if (lc_put(mdev->resync, &bm_ext->lce) == 0) {
-                               clear_bit(BME_NO_WRITES, &bm_ext->flags);
+                               bm_ext->flags = 0; /* clears BME_NO_WRITES and eventually BME_PRIORITY */
                                mdev->resync_locked--;
                                wake_up(&mdev->al_wait);
                        }
                        spin_unlock_irq(&mdev->al_lock);
-                       return -EINTR;
+                       if (sig)
+                               return -EINTR;
+                       if (schedule_timeout_interruptible(HZ/10))
+                               return -EINTR;
+                       if (sa && --sa == 0)
+                       if (sa && --sa == 0)
+                               dev_warn(DEV, "drbd_rs_begin_io() stepped aside for 20sec. "
+                                        "Resync stalled?\n");
+                       goto retry;
                }
        }
        set_bit(BME_LOCKED, &bm_ext->flags);
@@ -1291,8 +1119,7 @@ void drbd_rs_complete_io(struct drbd_conf *mdev, sector_t sector)
        }
 
        if (lc_put(mdev->resync, &bm_ext->lce) == 0) {
-               clear_bit(BME_LOCKED, &bm_ext->flags);
-               clear_bit(BME_NO_WRITES, &bm_ext->flags);
+               bm_ext->flags = 0; /* clear BME_LOCKED, BME_NO_WRITES and BME_PRIORITY */
                mdev->resync_locked--;
                wake_up(&mdev->al_wait);
        }
@@ -1383,7 +1210,7 @@ void drbd_rs_failed_io(struct drbd_conf *mdev, sector_t sector, int size)
        sector_t esector, nr_sectors;
        int wake_up = 0;
 
-       if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_SEGMENT_SIZE) {
+       if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_BIO_SIZE) {
                dev_err(DEV, "drbd_rs_failed_io: sector=%llus size=%d nonsense!\n",
                                (unsigned long long)sector, size);
                return;
@@ -1420,7 +1247,7 @@ void drbd_rs_failed_io(struct drbd_conf *mdev, sector_t sector, int size)
                mdev->rs_failed += count;
 
                if (get_ldev(mdev)) {
-                       drbd_try_clear_on_disk_bm(mdev, sector, count, FALSE);
+                       drbd_try_clear_on_disk_bm(mdev, sector, count, false);
                        put_ldev(mdev);
                }
 
index 0645ca829a94163c57f4ee5ac43e96aaa344870d..f0ae63d2df65531447d23a50c8cf4185f724e1bf 100644 (file)
 #include <linux/drbd.h>
 #include <linux/slab.h>
 #include <asm/kmap_types.h>
+
+#include <asm-generic/bitops/le.h>
+
 #include "drbd_int.h"
 
+
 /* OPAQUE outside this file!
  * interface defined in drbd_int.h
 
  * convention:
  * function name drbd_bm_... => used elsewhere, "public".
  * function name      bm_... => internal to implementation, "private".
+ */
+
+
+/*
+ * LIMITATIONS:
+ * We want to support >= peta byte of backend storage, while for now still using
+ * a granularity of one bit per 4KiB of storage.
+ * 1 << 50             bytes backend storage (1 PiB)
+ * 1 << (50 - 12)      bits needed
+ *     38 --> we need u64 to index and count bits
+ * 1 << (38 - 3)       bitmap bytes needed
+ *     35 --> we still need u64 to index and count bytes
+ *                     (that's 32 GiB of bitmap for 1 PiB storage)
+ * 1 << (35 - 2)       32bit longs needed
+ *     33 --> we'd even need u64 to index and count 32bit long words.
+ * 1 << (35 - 3)       64bit longs needed
+ *     32 --> we could get away with a 32bit unsigned int to index and count
+ *     64bit long words, but I'd rather stay with unsigned long for now.
+ *     We probably should neither count nor point to bytes or long words
+ *     directly, but either by bitnumber, or by page index and offset.
+ * 1 << (35 - 12)
+ *     23 --> we need that many 4KiB pages of bitmap.
+ *     1 << (23 + 3) --> on a 64bit arch,
+ *     we need 64 MiB to store the array of page pointers.
+ *
+ * Because I'm lazy, and because the resulting patch was too large, too ugly
+ * and still incomplete, on 32bit we still "only" support 16 TiB (minus some),
+ * (1 << 32) bits * 4k storage.
+ *
 
- * Note that since find_first_bit returns int, at the current granularity of
- * the bitmap (4KB per byte), this implementation "only" supports up to
- * 1<<(32+12) == 16 TB...
+ * bitmap storage and IO:
+ *     Bitmap is stored little endian on disk, and is kept little endian in
+ *     core memory. Currently we still hold the full bitmap in core as long
+ *     as we are "attached" to a local disk, which at 32 GiB for 1PiB storage
+ *     seems excessive.
+ *
+ *     We plan to reduce the number of in-core bitmap pages by paging them in
+ *     and out against their on-disk location as necessary, but need to make
+ *     sure we don't cause too much meta data IO, and must not deadlock in
+ *     tight memory situations. This needs some more work.
  */
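
The sizing arithmetic above, condensed into a checkable sketch (assumes 4KiB pages and the one-bit-per-4KiB granularity stated in the comment):

    #include <stdint.h>

    static inline uint64_t bm_bits_for(uint64_t backend_bytes)
    {
            return backend_bytes >> 12;             /* one bit per 4KiB block */
    }

    static inline uint64_t bm_pages_for(uint64_t backend_bytes)
    {
            /* bits -> bytes (>> 3) -> 4KiB pages (>> 12) */
            return bm_bits_for(backend_bytes) >> (3 + 12);
    }

    /* For 1 PiB (1ULL << 50): 1 << 38 bits, 1 << 23 bitmap pages,
     * i.e. 32 GiB of bitmap and a 64 MiB array of page pointers on 64bit. */
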
 
 /*
 struct drbd_bitmap {
        struct page **bm_pages;
        spinlock_t bm_lock;
-       /* WARNING unsigned long bm_*:
-        * 32bit number of bit offset is just enough for 512 MB bitmap.
-        * it will blow up if we make the bitmap bigger...
-        * not that it makes much sense to have a bitmap that large,
-        * rather change the granularity to 16k or 64k or something.
-        * (that implies other problems, however...)
-        */
+
+       /* see LIMITATIONS: above */
+
        unsigned long bm_set;       /* nr of set bits; THINK maybe atomic_t? */
        unsigned long bm_bits;
        size_t   bm_words;
@@ -69,29 +105,18 @@ struct drbd_bitmap {
        sector_t bm_dev_capacity;
        struct mutex bm_change; /* serializes resize operations */
 
-       atomic_t bm_async_io;
-       wait_queue_head_t bm_io_wait;
+       wait_queue_head_t bm_io_wait; /* used to serialize IO of single pages */
 
-       unsigned long  bm_flags;
+       enum bm_flag bm_flags;
 
        /* debugging aid, in case we are still racy somewhere */
        char          *bm_why;
        struct task_struct *bm_task;
 };
 
-/* definition of bits in bm_flags */
-#define BM_LOCKED       0
-#define BM_MD_IO_ERROR  1
-#define BM_P_VMALLOCED  2
-
 static int __bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
                               unsigned long e, int val, const enum km_type km);
 
-static int bm_is_locked(struct drbd_bitmap *b)
-{
-       return test_bit(BM_LOCKED, &b->bm_flags);
-}
-
 #define bm_print_lock_info(m) __bm_print_lock_info(m, __func__)
 static void __bm_print_lock_info(struct drbd_conf *mdev, const char *func)
 {
@@ -108,7 +133,7 @@ static void __bm_print_lock_info(struct drbd_conf *mdev, const char *func)
            b->bm_task == mdev->worker.task   ? "worker"   : "?");
 }
 
-void drbd_bm_lock(struct drbd_conf *mdev, char *why)
+void drbd_bm_lock(struct drbd_conf *mdev, char *why, enum bm_flag flags)
 {
        struct drbd_bitmap *b = mdev->bitmap;
        int trylock_failed;
@@ -131,8 +156,9 @@ void drbd_bm_lock(struct drbd_conf *mdev, char *why)
                    b->bm_task == mdev->worker.task   ? "worker"   : "?");
                mutex_lock(&b->bm_change);
        }
-       if (__test_and_set_bit(BM_LOCKED, &b->bm_flags))
+       if (BM_LOCKED_MASK & b->bm_flags)
                dev_err(DEV, "FIXME bitmap already locked in bm_lock\n");
+       b->bm_flags |= flags & BM_LOCKED_MASK;
 
        b->bm_why  = why;
        b->bm_task = current;
@@ -146,31 +172,137 @@ void drbd_bm_unlock(struct drbd_conf *mdev)
                return;
        }
 
-       if (!__test_and_clear_bit(BM_LOCKED, &mdev->bitmap->bm_flags))
+       if (!(BM_LOCKED_MASK & mdev->bitmap->bm_flags))
                dev_err(DEV, "FIXME bitmap not locked in bm_unlock\n");
 
+       b->bm_flags &= ~BM_LOCKED_MASK;
        b->bm_why  = NULL;
        b->bm_task = NULL;
        mutex_unlock(&b->bm_change);
 }
 
-/* word offset to long pointer */
-static unsigned long *__bm_map_paddr(struct drbd_bitmap *b, unsigned long offset, const enum km_type km)
+/* we store some "meta" info about our pages in page->private */
+/* at a granularity of 4k storage per bitmap bit:
+ * one peta byte storage: 1<<50 byte, 1<<38 * 4k storage blocks
+ *  1<<38 bits,
+ *  1<<23 4k bitmap pages.
+ * Use 24 bits as page index, covers 2 peta byte storage
+ * at a granularity of 4k per bit.
+ * Used to report the failed page idx on io error from the endio handlers.
+ */
+#define BM_PAGE_IDX_MASK       ((1UL<<24)-1)
+/* this page is currently read in, or written back */
+#define BM_PAGE_IO_LOCK                31
+/* if there has been an IO error for this page */
+#define BM_PAGE_IO_ERROR       30
+/* this is to be able to intelligently skip disk IO,
+ * set if bits have been set since last IO. */
+#define BM_PAGE_NEED_WRITEOUT  29
+/* to mark for lazy writeout once syncer cleared all clearable bits,
+ * set if bits have been cleared since last IO. */
+#define BM_PAGE_LAZY_WRITEOUT  28
+
+/* store_page_idx uses non-atomic assignment. It is only used directly after
+ * allocating the page.  All other bm_set_page_* and bm_clear_page_* need to
+ * use atomic bit manipulation, as set_out_of_sync (and therefore bitmap
+ * changes) may happen from various contexts, and wait_on_bit/wake_up_bit
+ * requires it all to be atomic as well. */
+static void bm_store_page_idx(struct page *page, unsigned long idx)
 {
-       struct page *page;
-       unsigned long page_nr;
+       BUG_ON(0 != (idx & ~BM_PAGE_IDX_MASK));
+       page_private(page) |= idx;
+}
+
+static unsigned long bm_page_to_idx(struct page *page)
+{
+       return page_private(page) & BM_PAGE_IDX_MASK;
+}
+
+/* As it is very unlikely that the same page is under IO from more than one
+ * context, we can get away with a bit per page and one wait queue per bitmap.
+ */
+static void bm_page_lock_io(struct drbd_conf *mdev, int page_nr)
+{
+       struct drbd_bitmap *b = mdev->bitmap;
+       void *addr = &page_private(b->bm_pages[page_nr]);
+       wait_event(b->bm_io_wait, !test_and_set_bit(BM_PAGE_IO_LOCK, addr));
+}
+
+static void bm_page_unlock_io(struct drbd_conf *mdev, int page_nr)
+{
+       struct drbd_bitmap *b = mdev->bitmap;
+       void *addr = &page_private(b->bm_pages[page_nr]);
+       clear_bit(BM_PAGE_IO_LOCK, addr);
+       smp_mb__after_clear_bit();
+       wake_up(&mdev->bitmap->bm_io_wait);
+}
+
+/* set _before_ submit_io, so it may be reset due to being changed
+ * while this page is in flight... will get submitted later again */
+static void bm_set_page_unchanged(struct page *page)
+{
+       /* use cmpxchg? */
+       clear_bit(BM_PAGE_NEED_WRITEOUT, &page_private(page));
+       clear_bit(BM_PAGE_LAZY_WRITEOUT, &page_private(page));
+}
 
+static void bm_set_page_need_writeout(struct page *page)
+{
+       set_bit(BM_PAGE_NEED_WRITEOUT, &page_private(page));
+}
+
+static int bm_test_page_unchanged(struct page *page)
+{
+       volatile const unsigned long *addr = &page_private(page);
+       return (*addr & ((1UL<<BM_PAGE_NEED_WRITEOUT)|(1UL<<BM_PAGE_LAZY_WRITEOUT))) == 0;
+}
+
+static void bm_set_page_io_err(struct page *page)
+{
+       set_bit(BM_PAGE_IO_ERROR, &page_private(page));
+}
+
+static void bm_clear_page_io_err(struct page *page)
+{
+       clear_bit(BM_PAGE_IO_ERROR, &page_private(page));
+}
+
+static void bm_set_page_lazy_writeout(struct page *page)
+{
+       set_bit(BM_PAGE_LAZY_WRITEOUT, &page_private(page));
+}
+
+static int bm_test_page_lazy_writeout(struct page *page)
+{
+       return test_bit(BM_PAGE_LAZY_WRITEOUT, &page_private(page));
+}
+
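
A standalone illustration of the page->private layout defined above: a 24-bit page index in the low bits, state flags in bits 28..31 (the two defines are copied from the patch; the demo itself is not part of it):

    #include <stdio.h>

    #define BM_PAGE_IDX_MASK        ((1UL << 24) - 1)
    #define BM_PAGE_NEED_WRITEOUT   29

    int main(void)
    {
            unsigned long priv = 0;

            priv |= 0x123456UL;                     /* bm_store_page_idx() */
            priv |= 1UL << BM_PAGE_NEED_WRITEOUT;   /* bm_set_page_need_writeout() */

            printf("idx = 0x%lx\n", priv & BM_PAGE_IDX_MASK);       /* 0x123456 */
            printf("need_writeout = %lu\n",
                   (priv >> BM_PAGE_NEED_WRITEOUT) & 1UL);          /* 1 */
            return 0;
    }
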
+/* on a 32bit box, this would allow for exactly (2<<38) bits. */
+static unsigned int bm_word_to_page_idx(struct drbd_bitmap *b, unsigned long long_nr)
+{
        /* page_nr = (word*sizeof(long)) >> PAGE_SHIFT; */
-       page_nr = offset >> (PAGE_SHIFT - LN2_BPL + 3);
+       unsigned int page_nr = long_nr >> (PAGE_SHIFT - LN2_BPL + 3);
        BUG_ON(page_nr >= b->bm_number_of_pages);
-       page = b->bm_pages[page_nr];
+       return page_nr;
+}
 
+static unsigned int bm_bit_to_page_idx(struct drbd_bitmap *b, u64 bitnr)
+{
+       /* page_nr = (bitnr/8) >> PAGE_SHIFT; */
+       unsigned int page_nr = bitnr >> (PAGE_SHIFT + 3);
+       BUG_ON(page_nr >= b->bm_number_of_pages);
+       return page_nr;
+}
+
+static unsigned long *__bm_map_pidx(struct drbd_bitmap *b, unsigned int idx, const enum km_type km)
+{
+       struct page *page = b->bm_pages[idx];
        return (unsigned long *) kmap_atomic(page, km);
 }
 
-static unsigned long * bm_map_paddr(struct drbd_bitmap *b, unsigned long offset)
+static unsigned long *bm_map_pidx(struct drbd_bitmap *b, unsigned int idx)
 {
-       return __bm_map_paddr(b, offset, KM_IRQ1);
+       return __bm_map_pidx(b, idx, KM_IRQ1);
 }
 
 static void __bm_unmap(unsigned long *p_addr, const enum km_type km)
@@ -202,6 +334,7 @@ static void bm_unmap(unsigned long *p_addr)
  * to be able to report device specific.
  */
 
+
 static void bm_free_pages(struct page **pages, unsigned long number)
 {
        unsigned long i;
@@ -269,6 +402,9 @@ static struct page **bm_realloc_pages(struct drbd_bitmap *b, unsigned long want)
                                bm_vk_free(new_pages, vmalloced);
                                return NULL;
                        }
+                       /* we want to know which page it is
+                        * from the endio handlers */
+                       bm_store_page_idx(page, i);
                        new_pages[i] = page;
                }
        } else {
@@ -280,9 +416,9 @@ static struct page **bm_realloc_pages(struct drbd_bitmap *b, unsigned long want)
        }
 
        if (vmalloced)
-               set_bit(BM_P_VMALLOCED, &b->bm_flags);
+               b->bm_flags |= BM_P_VMALLOCED;
        else
-               clear_bit(BM_P_VMALLOCED, &b->bm_flags);
+               b->bm_flags &= ~BM_P_VMALLOCED;
 
        return new_pages;
 }
@@ -319,7 +455,7 @@ void drbd_bm_cleanup(struct drbd_conf *mdev)
 {
        ERR_IF (!mdev->bitmap) return;
        bm_free_pages(mdev->bitmap->bm_pages, mdev->bitmap->bm_number_of_pages);
-       bm_vk_free(mdev->bitmap->bm_pages, test_bit(BM_P_VMALLOCED, &mdev->bitmap->bm_flags));
+       bm_vk_free(mdev->bitmap->bm_pages, (BM_P_VMALLOCED & mdev->bitmap->bm_flags));
        kfree(mdev->bitmap);
        mdev->bitmap = NULL;
 }
@@ -329,22 +465,39 @@ void drbd_bm_cleanup(struct drbd_conf *mdev)
  * this masks out the remaining bits.
  * Returns the number of bits cleared.
  */
+#define BITS_PER_PAGE          (1UL << (PAGE_SHIFT + 3))
+#define BITS_PER_PAGE_MASK     (BITS_PER_PAGE - 1)
+#define BITS_PER_LONG_MASK     (BITS_PER_LONG - 1)
 static int bm_clear_surplus(struct drbd_bitmap *b)
 {
-       const unsigned long mask = (1UL << (b->bm_bits & (BITS_PER_LONG-1))) - 1;
-       size_t w = b->bm_bits >> LN2_BPL;
-       int cleared = 0;
+       unsigned long mask;
        unsigned long *p_addr, *bm;
+       int tmp;
+       int cleared = 0;
 
-       p_addr = bm_map_paddr(b, w);
-       bm = p_addr + MLPP(w);
-       if (w < b->bm_words) {
+       /* number of bits modulo bits per page */
+       tmp = (b->bm_bits & BITS_PER_PAGE_MASK);
+       /* mask the used bits of the word containing the last bit */
+       mask = (1UL << (tmp & BITS_PER_LONG_MASK)) -1;
+       /* bitmap is always stored little endian,
+        * on disk and in core memory alike */
+       mask = cpu_to_lel(mask);
+
+       p_addr = bm_map_pidx(b, b->bm_number_of_pages - 1);
+       bm = p_addr + (tmp/BITS_PER_LONG);
+       if (mask) {
+               /* If mask != 0, we are not exactly aligned, so bm now points
+                * to the long containing the last bit.
+                * If mask == 0, bm already points to the word immediately
+                * after the last (long word aligned) bit. */
                cleared = hweight_long(*bm & ~mask);
                *bm &= mask;
-               w++; bm++;
+               bm++;
        }
 
-       if (w < b->bm_words) {
+       if (BITS_PER_LONG == 32 && ((bm - p_addr) & 1) == 1) {
+               /* on a 32bit arch, we may need to zero out
+                * a padding long to align with a 64bit remote */
                cleared += hweight_long(*bm);
                *bm = 0;
        }
@@ -354,66 +507,75 @@ static int bm_clear_surplus(struct drbd_bitmap *b)
 
 static void bm_set_surplus(struct drbd_bitmap *b)
 {
-       const unsigned long mask = (1UL << (b->bm_bits & (BITS_PER_LONG-1))) - 1;
-       size_t w = b->bm_bits >> LN2_BPL;
+       unsigned long mask;
        unsigned long *p_addr, *bm;
-
-       p_addr = bm_map_paddr(b, w);
-       bm = p_addr + MLPP(w);
-       if (w < b->bm_words) {
+       int tmp;
+
+       /* number of bits modulo bits per page */
+       tmp = (b->bm_bits & BITS_PER_PAGE_MASK);
+       /* mask the used bits of the word containing the last bit */
+       mask = (1UL << (tmp & BITS_PER_LONG_MASK)) -1;
+       /* bitmap is always stored little endian,
+        * on disk and in core memory alike */
+       mask = cpu_to_lel(mask);
+
+       p_addr = bm_map_pidx(b, b->bm_number_of_pages - 1);
+       bm = p_addr + (tmp/BITS_PER_LONG);
+       if (mask) {
+               /* If mask != 0, we are not exactly aligned, so bm now points
+                * to the long containing the last bit.
+                * If mask == 0, bm already points to the word immediately
+                * after the last (long word aligned) bit. */
                *bm |= ~mask;
-               bm++; w++;
+               bm++;
        }
 
-       if (w < b->bm_words) {
-               *bm = ~(0UL);
+       if (BITS_PER_LONG == 32 && ((bm - p_addr) & 1) == 1) {
+               /* on a 32bit arch, we may need to set
+                * a padding long to align with a 64bit remote */
+               *bm = ~0UL;
        }
        bm_unmap(p_addr);
 }
 
-static unsigned long __bm_count_bits(struct drbd_bitmap *b, const int swap_endian)
+/* you better not modify the bitmap while this is running,
+ * or its results will be stale */
+static unsigned long bm_count_bits(struct drbd_bitmap *b)
 {
-       unsigned long *p_addr, *bm, offset = 0;
+       unsigned long *p_addr;
        unsigned long bits = 0;
-       unsigned long i, do_now;
-
-       while (offset < b->bm_words) {
-               i = do_now = min_t(size_t, b->bm_words-offset, LWPP);
-               p_addr = __bm_map_paddr(b, offset, KM_USER0);
-               bm = p_addr + MLPP(offset);
-               while (i--) {
-#ifndef __LITTLE_ENDIAN
-                       if (swap_endian)
-                               *bm = lel_to_cpu(*bm);
-#endif
-                       bits += hweight_long(*bm++);
-               }
+       unsigned long mask = (1UL << (b->bm_bits & BITS_PER_LONG_MASK)) -1;
+       int idx, i, last_word;
+
+       /* all but last page */
+       for (idx = 0; idx < b->bm_number_of_pages - 1; idx++) {
+               p_addr = __bm_map_pidx(b, idx, KM_USER0);
+               for (i = 0; i < LWPP; i++)
+                       bits += hweight_long(p_addr[i]);
                __bm_unmap(p_addr, KM_USER0);
-               offset += do_now;
                cond_resched();
        }
-
+       /* last (or only) page */
+       last_word = ((b->bm_bits - 1) & BITS_PER_PAGE_MASK) >> LN2_BPL;
+       p_addr = __bm_map_pidx(b, idx, KM_USER0);
+       for (i = 0; i < last_word; i++)
+               bits += hweight_long(p_addr[i]);
+       p_addr[last_word] &= cpu_to_lel(mask);
+       bits += hweight_long(p_addr[last_word]);
+       /* 32bit arch, may have an unused padding long */
+       if (BITS_PER_LONG == 32 && (last_word & 1) == 0)
+               p_addr[last_word+1] = 0;
+       __bm_unmap(p_addr, KM_USER0);
        return bits;
 }
 
-static unsigned long bm_count_bits(struct drbd_bitmap *b)
-{
-       return __bm_count_bits(b, 0);
-}
-
-static unsigned long bm_count_bits_swap_endian(struct drbd_bitmap *b)
-{
-       return __bm_count_bits(b, 1);
-}
-
 /* offset and len in long words.*/
 static void bm_memset(struct drbd_bitmap *b, size_t offset, int c, size_t len)
 {
        unsigned long *p_addr, *bm;
+       unsigned int idx;
        size_t do_now, end;
 
-#define BM_SECTORS_PER_BIT (BM_BLOCK_SIZE/512)
-
        end = offset + len;
 
        if (end > b->bm_words) {
@@ -423,15 +585,16 @@ static void bm_memset(struct drbd_bitmap *b, size_t offset, int c, size_t len)
 
        while (offset < end) {
                do_now = min_t(size_t, ALIGN(offset + 1, LWPP), end) - offset;
-               p_addr = bm_map_paddr(b, offset);
+               idx = bm_word_to_page_idx(b, offset);
+               p_addr = bm_map_pidx(b, idx);
                bm = p_addr + MLPP(offset);
                if (bm+do_now > p_addr + LWPP) {
                        printk(KERN_ALERT "drbd: BUG BUG BUG! p_addr:%p bm:%p do_now:%d\n",
                               p_addr, bm, (int)do_now);
-                       break; /* breaks to after catch_oob_access_end() only! */
-               }
-               memset(bm, c, do_now * sizeof(long));
+               } else
+                       memset(bm, c, do_now * sizeof(long));
                bm_unmap(p_addr);
+               bm_set_page_need_writeout(b->bm_pages[idx]);
                offset += do_now;
        }
 }
@@ -447,7 +610,7 @@ static void bm_memset(struct drbd_bitmap *b, size_t offset, int c, size_t len)
 int drbd_bm_resize(struct drbd_conf *mdev, sector_t capacity, int set_new_bits)
 {
        struct drbd_bitmap *b = mdev->bitmap;
-       unsigned long bits, words, owords, obits, *p_addr, *bm;
+       unsigned long bits, words, owords, obits;
        unsigned long want, have, onpages; /* number of pages */
        struct page **npages, **opages = NULL;
        int err = 0, growing;
@@ -455,7 +618,7 @@ int drbd_bm_resize(struct drbd_conf *mdev, sector_t capacity, int set_new_bits)
 
        ERR_IF(!b) return -ENOMEM;
 
-       drbd_bm_lock(mdev, "resize");
+       drbd_bm_lock(mdev, "resize", BM_LOCKED_MASK);
 
        dev_info(DEV, "drbd_bm_resize called with capacity == %llu\n",
                        (unsigned long long)capacity);
@@ -463,7 +626,7 @@ int drbd_bm_resize(struct drbd_conf *mdev, sector_t capacity, int set_new_bits)
        if (capacity == b->bm_dev_capacity)
                goto out;
 
-       opages_vmalloced = test_bit(BM_P_VMALLOCED, &b->bm_flags);
+       opages_vmalloced = (BM_P_VMALLOCED & b->bm_flags);
 
        if (capacity == 0) {
                spin_lock_irq(&b->bm_lock);
@@ -491,18 +654,23 @@ int drbd_bm_resize(struct drbd_conf *mdev, sector_t capacity, int set_new_bits)
        words = ALIGN(bits, 64) >> LN2_BPL;
 
        if (get_ldev(mdev)) {
-               D_ASSERT((u64)bits <= (((u64)mdev->ldev->md.md_size_sect-MD_BM_OFFSET) << 12));
+               u64 bits_on_disk = ((u64)mdev->ldev->md.md_size_sect-MD_BM_OFFSET) << 12;
                put_ldev(mdev);
+               if (bits > bits_on_disk) {
+                       dev_info(DEV, "bits = %lu\n", bits);
+                       dev_info(DEV, "bits_on_disk = %llu\n", bits_on_disk);
+                       err = -ENOSPC;
+                       goto out;
+               }
        }
 
-       /* one extra long to catch off by one errors */
-       want = ALIGN((words+1)*sizeof(long), PAGE_SIZE) >> PAGE_SHIFT;
+       want = ALIGN(words*sizeof(long), PAGE_SIZE) >> PAGE_SHIFT;
        have = b->bm_number_of_pages;
        if (want == have) {
                D_ASSERT(b->bm_pages != NULL);
                npages = b->bm_pages;
        } else {
-               if (FAULT_ACTIVE(mdev, DRBD_FAULT_BM_ALLOC))
+               if (drbd_insert_fault(mdev, DRBD_FAULT_BM_ALLOC))
                        npages = NULL;
                else
                        npages = bm_realloc_pages(b, want);
@@ -542,11 +710,6 @@ int drbd_bm_resize(struct drbd_conf *mdev, sector_t capacity, int set_new_bits)
                bm_free_pages(opages + want, have - want);
        }
 
-       p_addr = bm_map_paddr(b, words);
-       bm = p_addr + MLPP(words);
-       *bm = DRBD_MAGIC;
-       bm_unmap(p_addr);
-
        (void)bm_clear_surplus(b);
 
        spin_unlock_irq(&b->bm_lock);
@@ -554,7 +717,7 @@ int drbd_bm_resize(struct drbd_conf *mdev, sector_t capacity, int set_new_bits)
                bm_vk_free(opages, opages_vmalloced);
        if (!growing)
                b->bm_set = bm_count_bits(b);
-       dev_info(DEV, "resync bitmap: bits=%lu words=%lu\n", bits, words);
+       dev_info(DEV, "resync bitmap: bits=%lu words=%lu pages=%lu\n", bits, words, want);
 
  out:
        drbd_bm_unlock(mdev);
@@ -624,6 +787,7 @@ void drbd_bm_merge_lel(struct drbd_conf *mdev, size_t offset, size_t number,
        struct drbd_bitmap *b = mdev->bitmap;
        unsigned long *p_addr, *bm;
        unsigned long word, bits;
+       unsigned int idx;
        size_t end, do_now;
 
        end = offset + number;
@@ -638,16 +802,18 @@ void drbd_bm_merge_lel(struct drbd_conf *mdev, size_t offset, size_t number,
        spin_lock_irq(&b->bm_lock);
        while (offset < end) {
                do_now = min_t(size_t, ALIGN(offset+1, LWPP), end) - offset;
-               p_addr = bm_map_paddr(b, offset);
+               idx = bm_word_to_page_idx(b, offset);
+               p_addr = bm_map_pidx(b, idx);
                bm = p_addr + MLPP(offset);
                offset += do_now;
                while (do_now--) {
                        bits = hweight_long(*bm);
-                       word = *bm | lel_to_cpu(*buffer++);
+                       word = *bm | *buffer++;
                        *bm++ = word;
                        b->bm_set += hweight_long(word) - bits;
                }
                bm_unmap(p_addr);
+               bm_set_page_need_writeout(b->bm_pages[idx]);
        }
        /* with 32bit <-> 64bit cross-platform connect
         * this is only correct for current usage,
@@ -656,7 +822,6 @@ void drbd_bm_merge_lel(struct drbd_conf *mdev, size_t offset, size_t number,
         */
        if (end == b->bm_words)
                b->bm_set -= bm_clear_surplus(b);
-
        spin_unlock_irq(&b->bm_lock);
 }
 
@@ -686,11 +851,11 @@ void drbd_bm_get_lel(struct drbd_conf *mdev, size_t offset, size_t number,
        else {
                while (offset < end) {
                        do_now = min_t(size_t, ALIGN(offset+1, LWPP), end) - offset;
-                       p_addr = bm_map_paddr(b, offset);
+                       p_addr = bm_map_pidx(b, bm_word_to_page_idx(b, offset));
                        bm = p_addr + MLPP(offset);
                        offset += do_now;
                        while (do_now--)
-                               *buffer++ = cpu_to_lel(*bm++);
+                               *buffer++ = *bm++;
                        bm_unmap(p_addr);
                }
        }
@@ -724,9 +889,22 @@ void drbd_bm_clear_all(struct drbd_conf *mdev)
        spin_unlock_irq(&b->bm_lock);
 }
 
+struct bm_aio_ctx {
+       struct drbd_conf *mdev;
+       atomic_t in_flight;
+       struct completion done;
+       unsigned flags;
+#define BM_AIO_COPY_PAGES      1
+       int error;
+};
+
+/* bv_page may be a copy, or may be the original */
 static void bm_async_io_complete(struct bio *bio, int error)
 {
-       struct drbd_bitmap *b = bio->bi_private;
+       struct bm_aio_ctx *ctx = bio->bi_private;
+       struct drbd_conf *mdev = ctx->mdev;
+       struct drbd_bitmap *b = mdev->bitmap;
+       unsigned int idx = bm_page_to_idx(bio->bi_io_vec[0].bv_page);
        int uptodate = bio_flagged(bio, BIO_UPTODATE);
 
 
@@ -737,38 +915,83 @@ static void bm_async_io_complete(struct bio *bio, int error)
        if (!error && !uptodate)
                error = -EIO;
 
+       if ((ctx->flags & BM_AIO_COPY_PAGES) == 0 &&
+           !bm_test_page_unchanged(b->bm_pages[idx]))
+               dev_warn(DEV, "bitmap page idx %u changed during IO!\n", idx);
+
        if (error) {
-               /* doh. what now?
-                * for now, set all bits, and flag MD_IO_ERROR */
-               __set_bit(BM_MD_IO_ERROR, &b->bm_flags);
+       /* ctx->error will hold the error code of whichever failing bio
+        * completed last, in case error codes differ. */
+               ctx->error = error;
+               bm_set_page_io_err(b->bm_pages[idx]);
+               /* The page is now not identical to its on-disk version.
+                * Is BM_PAGE_IO_ERROR enough to track that? */
+               if (__ratelimit(&drbd_ratelimit_state))
+                       dev_err(DEV, "IO ERROR %d on bitmap page idx %u\n",
+                                       error, idx);
+       } else {
+               bm_clear_page_io_err(b->bm_pages[idx]);
+               dynamic_dev_dbg(DEV, "bitmap page idx %u completed\n", idx);
        }
-       if (atomic_dec_and_test(&b->bm_async_io))
-               wake_up(&b->bm_io_wait);
+
+       bm_page_unlock_io(mdev, idx);
+
+       /* FIXME give back to page pool */
+       if (ctx->flags & BM_AIO_COPY_PAGES)
+               put_page(bio->bi_io_vec[0].bv_page);
 
        bio_put(bio);
+
+       if (atomic_dec_and_test(&ctx->in_flight))
+               complete(&ctx->done);
 }
 
-static void bm_page_io_async(struct drbd_conf *mdev, struct drbd_bitmap *b, int page_nr, int rw) __must_hold(local)
+static void bm_page_io_async(struct bm_aio_ctx *ctx, int page_nr, int rw) __must_hold(local)
 {
        /* we are process context. we always get a bio */
        struct bio *bio = bio_alloc(GFP_KERNEL, 1);
+       struct drbd_conf *mdev = ctx->mdev;
+       struct drbd_bitmap *b = mdev->bitmap;
+       struct page *page;
        unsigned int len;
+
        sector_t on_disk_sector =
                mdev->ldev->md.md_offset + mdev->ldev->md.bm_offset;
        on_disk_sector += ((sector_t)page_nr) << (PAGE_SHIFT-9);
 
        /* this might happen with very small
-        * flexible external meta data device */
+        * flexible external meta data device,
+        * or with PAGE_SIZE > 4k */
        len = min_t(unsigned int, PAGE_SIZE,
                (drbd_md_last_sector(mdev->ldev) - on_disk_sector + 1)<<9);
 
+       /* serialize IO on this page */
+       bm_page_lock_io(mdev, page_nr);
+       /* before memcpy and submit,
+        * so it can be redirtied any time */
+       bm_set_page_unchanged(b->bm_pages[page_nr]);
+
+       if (ctx->flags & BM_AIO_COPY_PAGES) {
+               /* FIXME alloc_page is good enough for now, but actually needs
+                * to use pre-allocated page pool */
+               void *src, *dest;
+               page = alloc_page(__GFP_HIGHMEM|__GFP_WAIT);
+               dest = kmap_atomic(page, KM_USER0);
+               src = kmap_atomic(b->bm_pages[page_nr], KM_USER1);
+               memcpy(dest, src, PAGE_SIZE);
+               kunmap_atomic(src, KM_USER1);
+               kunmap_atomic(dest, KM_USER0);
+               bm_store_page_idx(page, page_nr);
+       } else
+               page = b->bm_pages[page_nr];
+
        bio->bi_bdev = mdev->ldev->md_bdev;
        bio->bi_sector = on_disk_sector;
-       bio_add_page(bio, b->bm_pages[page_nr], len, 0);
-       bio->bi_private = b;
+       bio_add_page(bio, page, len, 0);
+       bio->bi_private = ctx;
        bio->bi_end_io = bm_async_io_complete;
 
-       if (FAULT_ACTIVE(mdev, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD)) {
+       if (drbd_insert_fault(mdev, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD)) {
                bio->bi_rw |= rw;
                bio_endio(bio, -EIO);
        } else {
@@ -776,87 +999,84 @@ static void bm_page_io_async(struct drbd_conf *mdev, struct drbd_bitmap *b, int
        }
 }
 
-# if defined(__LITTLE_ENDIAN)
-       /* nothing to do, on disk == in memory */
-# define bm_cpu_to_lel(x) ((void)0)
-# else
-static void bm_cpu_to_lel(struct drbd_bitmap *b)
-{
-       /* need to cpu_to_lel all the pages ...
-        * this may be optimized by using
-        * cpu_to_lel(-1) == -1 and cpu_to_lel(0) == 0;
-        * the following is still not optimal, but better than nothing */
-       unsigned int i;
-       unsigned long *p_addr, *bm;
-       if (b->bm_set == 0) {
-               /* no page at all; avoid swap if all is 0 */
-               i = b->bm_number_of_pages;
-       } else if (b->bm_set == b->bm_bits) {
-               /* only the last page */
-               i = b->bm_number_of_pages - 1;
-       } else {
-               /* all pages */
-               i = 0;
-       }
-       for (; i < b->bm_number_of_pages; i++) {
-               p_addr = kmap_atomic(b->bm_pages[i], KM_USER0);
-               for (bm = p_addr; bm < p_addr + PAGE_SIZE/sizeof(long); bm++)
-                       *bm = cpu_to_lel(*bm);
-               kunmap_atomic(p_addr, KM_USER0);
-       }
-}
-# endif
-/* lel_to_cpu == cpu_to_lel */
-# define bm_lel_to_cpu(x) bm_cpu_to_lel(x)
-
 /*
  * bm_rw: read/write the whole bitmap from/to its on disk location.
  */
-static int bm_rw(struct drbd_conf *mdev, int rw) __must_hold(local)
+static int bm_rw(struct drbd_conf *mdev, int rw, unsigned lazy_writeout_upper_idx) __must_hold(local)
 {
+       struct bm_aio_ctx ctx = {
+               .mdev = mdev,
+               .in_flight = ATOMIC_INIT(1),
+               .done = COMPLETION_INITIALIZER_ONSTACK(ctx.done),
+               .flags = lazy_writeout_upper_idx ? BM_AIO_COPY_PAGES : 0,
+       };
        struct drbd_bitmap *b = mdev->bitmap;
-       /* sector_t sector; */
-       int bm_words, num_pages, i;
+       int num_pages, i, count = 0;
        unsigned long now;
        char ppb[10];
        int err = 0;
 
-       WARN_ON(!bm_is_locked(b));
-
-       /* no spinlock here, the drbd_bm_lock should be enough! */
-
-       bm_words  = drbd_bm_words(mdev);
-       num_pages = (bm_words*sizeof(long) + PAGE_SIZE-1) >> PAGE_SHIFT;
+       /*
+        * We are protected against bitmap disappearing/resizing by holding an
+        * ldev reference (caller must have called get_ldev()).
+        * For read/write, we are protected against changes to the bitmap by
+        * the bitmap lock (see drbd_bitmap_io).
+        * For lazy writeout, we don't care for ongoing changes to the bitmap,
+        * as we submit copies of pages anyways.
+        */
+       if (!ctx.flags)
+               WARN_ON(!(BM_LOCKED_MASK & b->bm_flags));
 
-       /* on disk bitmap is little endian */
-       if (rw == WRITE)
-               bm_cpu_to_lel(b);
+       num_pages = b->bm_number_of_pages;
 
        now = jiffies;
-       atomic_set(&b->bm_async_io, num_pages);
-       __clear_bit(BM_MD_IO_ERROR, &b->bm_flags);
 
        /* let the layers below us try to merge these bios... */
-       for (i = 0; i < num_pages; i++)
-               bm_page_io_async(mdev, b, i, rw);
+       for (i = 0; i < num_pages; i++) {
+               /* ignore completely unchanged pages */
+               if (lazy_writeout_upper_idx && i == lazy_writeout_upper_idx)
+                       break;
+               if (rw & WRITE) {
+                       if (bm_test_page_unchanged(b->bm_pages[i])) {
+                               dynamic_dev_dbg(DEV, "skipped bm write for idx %u\n", i);
+                               continue;
+                       }
+                       /* during lazy writeout,
+                        * ignore those pages not marked for lazy writeout. */
+                       if (lazy_writeout_upper_idx &&
+                           !bm_test_page_lazy_writeout(b->bm_pages[i])) {
+                               dynamic_dev_dbg(DEV, "skipped bm lazy write for idx %u\n", i);
+                               continue;
+                       }
+               }
+               atomic_inc(&ctx.in_flight);
+               bm_page_io_async(&ctx, i, rw);
+               ++count;
+               cond_resched();
+       }
 
-       wait_event(b->bm_io_wait, atomic_read(&b->bm_async_io) == 0);
+       /*
+        * We initialize ctx.in_flight to one to make sure bm_async_io_complete
+        * will not complete() early, and decrement / test it here.  If there
+        * are still some bios in flight, we need to wait for them here.
+        */
+       if (!atomic_dec_and_test(&ctx.in_flight))
+               wait_for_completion(&ctx.done);
+       dev_info(DEV, "bitmap %s of %u pages took %lu jiffies\n",
+                       rw == WRITE ? "WRITE" : "READ",
+                       count, jiffies - now);
 
-       if (test_bit(BM_MD_IO_ERROR, &b->bm_flags)) {
+       if (ctx.error) {
                dev_alert(DEV, "we had at least one MD IO ERROR during bitmap IO\n");
-               drbd_chk_io_error(mdev, 1, TRUE);
-               err = -EIO;
+               drbd_chk_io_error(mdev, 1, true);
+               err = -EIO; /* ctx.error ? */
        }
 
        now = jiffies;
        if (rw == WRITE) {
-               /* swap back endianness */
-               bm_lel_to_cpu(b);
-               /* flush bitmap to stable storage */
                drbd_md_flush(mdev);
        } else /* rw == READ */ {
-               /* just read, if necessary adjust endianness */
-               b->bm_set = bm_count_bits_swap_endian(b);
+               b->bm_set = bm_count_bits(b);
                dev_info(DEV, "recounting of set bits took additional %lu jiffies\n",
                     jiffies - now);
        }
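
The hunk above replaces the bitmap's global bm_async_io counter with a per-call bm_aio_ctx whose in_flight count starts at 1: that extra reference belongs to the submitter, so no completion can signal ctx.done while bios are still being issued, and bm_rw() drops it with atomic_dec_and_test(), sleeping only if IO is genuinely outstanding. Below is a minimal userspace model of that accounting, with C11 atomics and a pthread condition variable standing in for the kernel's atomic_t and struct completion; every identifier in it is invented for illustration and is not DRBD code.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

/* in_flight starts at 1: the submitter's own reference */
static atomic_int in_flight = 1;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int done;

static void io_complete(void)
{
        /* whoever drops the count to zero signals completion */
        if (atomic_fetch_sub(&in_flight, 1) == 1) {
                pthread_mutex_lock(&lock);
                done = 1;
                pthread_cond_signal(&cond);
                pthread_mutex_unlock(&lock);
        }
}

int main(void)
{
        for (int i = 0; i < 4; i++) {
                atomic_fetch_add(&in_flight, 1);        /* submit one IO */
                io_complete();                          /* ... it completes */
        }
        io_complete();  /* submitter drops its initial reference last */

        pthread_mutex_lock(&lock);
        while (!done)
                pthread_cond_wait(&cond, &lock);
        pthread_mutex_unlock(&lock);
        puts("all bitmap IO complete");
        return 0;
}

Because the completions here run inline, the wait loop returns immediately; with real asynchronous IO this wait is what keeps bm_rw() from returning early.
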
@@ -874,112 +1094,128 @@ static int bm_rw(struct drbd_conf *mdev, int rw) __must_hold(local)
  */
 int drbd_bm_read(struct drbd_conf *mdev) __must_hold(local)
 {
-       return bm_rw(mdev, READ);
+       return bm_rw(mdev, READ, 0);
 }
 
 /**
  * drbd_bm_write() - Write the whole bitmap to its on disk location.
  * @mdev:      DRBD device.
+ *
+ * Will only write pages that have changed since last IO.
  */
 int drbd_bm_write(struct drbd_conf *mdev) __must_hold(local)
 {
-       return bm_rw(mdev, WRITE);
+       return bm_rw(mdev, WRITE, 0);
 }
 
 /**
- * drbd_bm_write_sect: Writes a 512 (MD_SECTOR_SIZE) byte piece of the bitmap
+ * drbd_bm_lazy_write_out() - Write bitmap pages 0 to @upper_idx-1, if they have changed.
  * @mdev:      DRBD device.
- * @enr:       Extent number in the resync lru (happens to be sector offset)
- *
- * The BM_EXT_SIZE is on purpose exactly the amount of the bitmap covered
- * by a single sector write. Therefore enr == sector offset from the
- * start of the bitmap.
+ * @upper_idx: 0 to write all changed pages; positive: page index at which to stop scanning for changed pages
  */
-int drbd_bm_write_sect(struct drbd_conf *mdev, unsigned long enr) __must_hold(local)
+int drbd_bm_write_lazy(struct drbd_conf *mdev, unsigned upper_idx) __must_hold(local)
 {
-       sector_t on_disk_sector = enr + mdev->ldev->md.md_offset
-                                     + mdev->ldev->md.bm_offset;
-       int bm_words, num_words, offset;
-       int err = 0;
+       return bm_rw(mdev, WRITE, upper_idx);
+}
+
 
-       mutex_lock(&mdev->md_io_mutex);
-       bm_words  = drbd_bm_words(mdev);
-       offset    = S2W(enr);   /* word offset into bitmap */
-       num_words = min(S2W(1), bm_words - offset);
-       if (num_words < S2W(1))
-               memset(page_address(mdev->md_io_page), 0, MD_SECTOR_SIZE);
-       drbd_bm_get_lel(mdev, offset, num_words,
-                       page_address(mdev->md_io_page));
-       if (!drbd_md_sync_page_io(mdev, mdev->ldev, on_disk_sector, WRITE)) {
-               int i;
-               err = -EIO;
-               dev_err(DEV, "IO ERROR writing bitmap sector %lu "
-                   "(meta-disk sector %llus)\n",
-                   enr, (unsigned long long)on_disk_sector);
-               drbd_chk_io_error(mdev, 1, TRUE);
-               for (i = 0; i < AL_EXT_PER_BM_SECT; i++)
-                       drbd_bm_ALe_set_all(mdev, enr*AL_EXT_PER_BM_SECT+i);
+/**
+ * drbd_bm_write_page: Writes a PAGE_SIZE aligned piece of bitmap
+ * @mdev:      DRBD device.
+ * @idx:       bitmap page index
+ *
+ * We don't want to special case on logical_block_size of the backend device,
+ * so we submit PAGE_SIZE aligned pieces.
+ * Note that on "most" systems, PAGE_SIZE is 4k.
+ *
+ * In case this becomes an issue on systems with larger PAGE_SIZE,
+ * we may want to change this again to write 4k aligned 4k pieces.
+ */
+int drbd_bm_write_page(struct drbd_conf *mdev, unsigned int idx) __must_hold(local)
+{
+       struct bm_aio_ctx ctx = {
+               .mdev = mdev,
+               .in_flight = ATOMIC_INIT(1),
+               .done = COMPLETION_INITIALIZER_ONSTACK(ctx.done),
+               .flags = BM_AIO_COPY_PAGES,
+       };
+
+       if (bm_test_page_unchanged(mdev->bitmap->bm_pages[idx])) {
+               dynamic_dev_dbg(DEV, "skipped bm page write for idx %u\n", idx);
+               return 0;
        }
+
+       bm_page_io_async(&ctx, idx, WRITE_SYNC);
+       wait_for_completion(&ctx.done);
+
+       if (ctx.error)
+               drbd_chk_io_error(mdev, 1, true);
+               /* that should force detach, so the in memory bitmap will be
+                * gone in a moment as well. */
+
        mdev->bm_writ_cnt++;
-       mutex_unlock(&mdev->md_io_mutex);
-       return err;
+       return ctx.error;
 }
 
 /* NOTE
  * find_first_bit returns int, we return unsigned long.
- * should not make much difference anyways, but ...
+ * For this to work on a 32bit arch with bit numbers > (1<<32),
+ * we'd need to return u64, and get a whole lot of other places
+ * fixed where we still use unsigned long.
  *
  * this returns a bit number, NOT a sector!
  */
-#define BPP_MASK ((1UL << (PAGE_SHIFT+3)) - 1)
 static unsigned long __bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo,
        const int find_zero_bit, const enum km_type km)
 {
        struct drbd_bitmap *b = mdev->bitmap;
-       unsigned long i = -1UL;
        unsigned long *p_addr;
-       unsigned long bit_offset; /* bit offset of the mapped page. */
+       unsigned long bit_offset;
+       unsigned i;
+
 
        if (bm_fo > b->bm_bits) {
                dev_err(DEV, "bm_fo=%lu bm_bits=%lu\n", bm_fo, b->bm_bits);
+               bm_fo = DRBD_END_OF_BITMAP;
        } else {
                while (bm_fo < b->bm_bits) {
-                       unsigned long offset;
-                       bit_offset = bm_fo & ~BPP_MASK; /* bit offset of the page */
-                       offset = bit_offset >> LN2_BPL;    /* word offset of the page */
-                       p_addr = __bm_map_paddr(b, offset, km);
+                       /* bit offset of the first bit in the page */
+                       bit_offset = bm_fo & ~BITS_PER_PAGE_MASK;
+                       p_addr = __bm_map_pidx(b, bm_bit_to_page_idx(b, bm_fo), km);
 
                        if (find_zero_bit)
-                               i = find_next_zero_bit(p_addr, PAGE_SIZE*8, bm_fo & BPP_MASK);
+                               i = generic_find_next_zero_le_bit(p_addr,
+                                               PAGE_SIZE*8, bm_fo & BITS_PER_PAGE_MASK);
                        else
-                               i = find_next_bit(p_addr, PAGE_SIZE*8, bm_fo & BPP_MASK);
+                               i = generic_find_next_le_bit(p_addr,
+                                               PAGE_SIZE*8, bm_fo & BITS_PER_PAGE_MASK);
 
                        __bm_unmap(p_addr, km);
                        if (i < PAGE_SIZE*8) {
-                               i = bit_offset + i;
-                               if (i >= b->bm_bits)
+                               bm_fo = bit_offset + i;
+                               if (bm_fo >= b->bm_bits)
                                        break;
                                goto found;
                        }
                        bm_fo = bit_offset + PAGE_SIZE*8;
                }
-               i = -1UL;
+               bm_fo = DRBD_END_OF_BITMAP;
        }
  found:
-       return i;
+       return bm_fo;
 }
 
 static unsigned long bm_find_next(struct drbd_conf *mdev,
        unsigned long bm_fo, const int find_zero_bit)
 {
        struct drbd_bitmap *b = mdev->bitmap;
-       unsigned long i = -1UL;
+       unsigned long i = DRBD_END_OF_BITMAP;
 
        ERR_IF(!b) return i;
        ERR_IF(!b->bm_pages) return i;
 
        spin_lock_irq(&b->bm_lock);
-       if (bm_is_locked(b))
+       if (BM_DONT_TEST & b->bm_flags)
                bm_print_lock_info(mdev);
 
        i = __bm_find_next(mdev, bm_fo, find_zero_bit, KM_IRQ1);
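
In the rewritten __bm_find_next() above, the search is strictly page-local: translate the current bit offset to a page index, map that page, scan at most PAGE_SIZE*8 bits, and either return the hit or resume at the next page boundary, with DRBD_END_OF_BITMAP as the exhausted-search sentinel. The freestanding sketch below mirrors that loop structure; it uses native bit order rather than the kernel's little-endian bitops, and all names are illustrative rather than DRBD's.

#include <limits.h>
#include <stdio.h>

#define BITS_PER_PAGE (4096u * 8u)      /* one 4k page worth of bits */
#define END_OF_BITMAP (~0UL)
#define BITS_PER_LONG (sizeof(long) * CHAR_BIT)

/* find the next set bit at or after 'from', one page at a time */
static unsigned long find_next_set(unsigned long **pages,
                                   unsigned long nbits, unsigned long from)
{
        while (from < nbits) {
                unsigned long page = from / BITS_PER_PAGE;
                unsigned long off = from % BITS_PER_PAGE;
                const unsigned long *p = pages[page];   /* "map" the page */

                for (; off < BITS_PER_PAGE; off++) {
                        unsigned long bitnr = page * BITS_PER_PAGE + off;
                        if (bitnr >= nbits)
                                return END_OF_BITMAP;
                        if (p[off / BITS_PER_LONG] & (1UL << (off % BITS_PER_LONG)))
                                return bitnr;
                }
                from = (page + 1) * BITS_PER_PAGE;      /* next page boundary */
        }
        return END_OF_BITMAP;
}

int main(void)
{
        static unsigned long page0[BITS_PER_PAGE / BITS_PER_LONG];
        unsigned long *pages[1] = { page0 };

        page0[3] |= 1UL << 5;   /* set one bit somewhere in page 0 */
        printf("next set bit: %lu\n", find_next_set(pages, BITS_PER_PAGE, 0));
        return 0;
}
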
@@ -1005,13 +1241,13 @@ unsigned long drbd_bm_find_next_zero(struct drbd_conf *mdev, unsigned long bm_fo
  * you must take drbd_bm_lock() first */
 unsigned long _drbd_bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo)
 {
-       /* WARN_ON(!bm_is_locked(mdev)); */
+       /* WARN_ON(!(BM_DONT_SET & mdev->b->bm_flags)); */
        return __bm_find_next(mdev, bm_fo, 0, KM_USER1);
 }
 
 unsigned long _drbd_bm_find_next_zero(struct drbd_conf *mdev, unsigned long bm_fo)
 {
-       /* WARN_ON(!bm_is_locked(mdev)); */
+       /* WARN_ON(!(BM_DONT_SET & mdev->b->bm_flags)); */
        return __bm_find_next(mdev, bm_fo, 1, KM_USER1);
 }
 
@@ -1027,8 +1263,9 @@ static int __bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
        struct drbd_bitmap *b = mdev->bitmap;
        unsigned long *p_addr = NULL;
        unsigned long bitnr;
-       unsigned long last_page_nr = -1UL;
+       unsigned int last_page_nr = -1U;
        int c = 0;
+       int changed_total = 0;
 
        if (e >= b->bm_bits) {
                dev_err(DEV, "ASSERT FAILED: bit_s=%lu bit_e=%lu bm_bits=%lu\n",
@@ -1036,23 +1273,33 @@ static int __bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
                e = b->bm_bits ? b->bm_bits -1 : 0;
        }
        for (bitnr = s; bitnr <= e; bitnr++) {
-               unsigned long offset = bitnr>>LN2_BPL;
-               unsigned long page_nr = offset >> (PAGE_SHIFT - LN2_BPL + 3);
+               unsigned int page_nr = bm_bit_to_page_idx(b, bitnr);
                if (page_nr != last_page_nr) {
                        if (p_addr)
                                __bm_unmap(p_addr, km);
-                       p_addr = __bm_map_paddr(b, offset, km);
+                       if (c < 0)
+                               bm_set_page_lazy_writeout(b->bm_pages[last_page_nr]);
+                       else if (c > 0)
+                               bm_set_page_need_writeout(b->bm_pages[last_page_nr]);
+                       changed_total += c;
+                       c = 0;
+                       p_addr = __bm_map_pidx(b, page_nr, km);
                        last_page_nr = page_nr;
                }
                if (val)
-                       c += (0 == __test_and_set_bit(bitnr & BPP_MASK, p_addr));
+                       c += (0 == generic___test_and_set_le_bit(bitnr & BITS_PER_PAGE_MASK, p_addr));
                else
-                       c -= (0 != __test_and_clear_bit(bitnr & BPP_MASK, p_addr));
+                       c -= (0 != generic___test_and_clear_le_bit(bitnr & BITS_PER_PAGE_MASK, p_addr));
        }
        if (p_addr)
                __bm_unmap(p_addr, km);
-       b->bm_set += c;
-       return c;
+       if (c < 0)
+               bm_set_page_lazy_writeout(b->bm_pages[last_page_nr]);
+       else if (c > 0)
+               bm_set_page_need_writeout(b->bm_pages[last_page_nr]);
+       changed_total += c;
+       b->bm_set += changed_total;
+       return changed_total;
 }
 
 /* returns number of bits actually changed.
@@ -1070,7 +1317,7 @@ static int bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
        ERR_IF(!b->bm_pages) return 0;
 
        spin_lock_irqsave(&b->bm_lock, flags);
-       if (bm_is_locked(b))
+       if ((val ? BM_DONT_SET : BM_DONT_CLEAR) & b->bm_flags)
                bm_print_lock_info(mdev);
 
        c = __bm_change_bits_to(mdev, s, e, val, KM_IRQ1);
@@ -1187,12 +1434,11 @@ int drbd_bm_test_bit(struct drbd_conf *mdev, const unsigned long bitnr)
        ERR_IF(!b->bm_pages) return 0;
 
        spin_lock_irqsave(&b->bm_lock, flags);
-       if (bm_is_locked(b))
+       if (BM_DONT_TEST & b->bm_flags)
                bm_print_lock_info(mdev);
        if (bitnr < b->bm_bits) {
-               unsigned long offset = bitnr>>LN2_BPL;
-               p_addr = bm_map_paddr(b, offset);
-               i = test_bit(bitnr & BPP_MASK, p_addr) ? 1 : 0;
+               p_addr = bm_map_pidx(b, bm_bit_to_page_idx(b, bitnr));
+               i = generic_test_le_bit(bitnr & BITS_PER_PAGE_MASK, p_addr) ? 1 : 0;
                bm_unmap(p_addr);
        } else if (bitnr == b->bm_bits) {
                i = -1;
@@ -1210,10 +1456,10 @@ int drbd_bm_count_bits(struct drbd_conf *mdev, const unsigned long s, const unsi
 {
        unsigned long flags;
        struct drbd_bitmap *b = mdev->bitmap;
-       unsigned long *p_addr = NULL, page_nr = -1;
+       unsigned long *p_addr = NULL;
        unsigned long bitnr;
+       unsigned int page_nr = -1U;
        int c = 0;
-       size_t w;
 
        /* If this is called without a bitmap, that is a bug.  But just to be
         * robust in case we screwed up elsewhere, in that case pretend there
@@ -1223,20 +1469,20 @@ int drbd_bm_count_bits(struct drbd_conf *mdev, const unsigned long s, const unsi
        ERR_IF(!b->bm_pages) return 1;
 
        spin_lock_irqsave(&b->bm_lock, flags);
-       if (bm_is_locked(b))
+       if (BM_DONT_TEST & b->bm_flags)
                bm_print_lock_info(mdev);
        for (bitnr = s; bitnr <= e; bitnr++) {
-               w = bitnr >> LN2_BPL;
-               if (page_nr != w >> (PAGE_SHIFT - LN2_BPL + 3)) {
-                       page_nr = w >> (PAGE_SHIFT - LN2_BPL + 3);
+               unsigned int idx = bm_bit_to_page_idx(b, bitnr);
+               if (page_nr != idx) {
+                       page_nr = idx;
                        if (p_addr)
                                bm_unmap(p_addr);
-                       p_addr = bm_map_paddr(b, w);
+                       p_addr = bm_map_pidx(b, idx);
                }
                ERR_IF (bitnr >= b->bm_bits) {
                        dev_err(DEV, "bitnr=%lu bm_bits=%lu\n", bitnr, b->bm_bits);
                } else {
-                       c += (0 != test_bit(bitnr - (page_nr << (PAGE_SHIFT+3)), p_addr));
+                       c += (0 != generic_test_le_bit(bitnr - (page_nr << (PAGE_SHIFT+3)), p_addr));
                }
        }
        if (p_addr)
@@ -1271,7 +1517,7 @@ int drbd_bm_e_weight(struct drbd_conf *mdev, unsigned long enr)
        ERR_IF(!b->bm_pages) return 0;
 
        spin_lock_irqsave(&b->bm_lock, flags);
-       if (bm_is_locked(b))
+       if (BM_DONT_TEST & b->bm_flags)
                bm_print_lock_info(mdev);
 
        s = S2W(enr);
@@ -1279,7 +1525,7 @@ int drbd_bm_e_weight(struct drbd_conf *mdev, unsigned long enr)
        count = 0;
        if (s < b->bm_words) {
                int n = e-s;
-               p_addr = bm_map_paddr(b, s);
+               p_addr = bm_map_pidx(b, bm_word_to_page_idx(b, s));
                bm = p_addr + MLPP(s);
                while (n--)
                        count += hweight_long(*bm++);
@@ -1291,18 +1537,20 @@ int drbd_bm_e_weight(struct drbd_conf *mdev, unsigned long enr)
        return count;
 }
 
-/* set all bits covered by the AL-extent al_enr */
+/* Set all bits covered by the AL-extent al_enr.
+ * Returns number of bits changed. */
 unsigned long drbd_bm_ALe_set_all(struct drbd_conf *mdev, unsigned long al_enr)
 {
        struct drbd_bitmap *b = mdev->bitmap;
        unsigned long *p_addr, *bm;
        unsigned long weight;
-       int count, s, e, i, do_now;
+       unsigned long s, e;
+       int count, i, do_now;
        ERR_IF(!b) return 0;
        ERR_IF(!b->bm_pages) return 0;
 
        spin_lock_irq(&b->bm_lock);
-       if (bm_is_locked(b))
+       if (BM_DONT_SET & b->bm_flags)
                bm_print_lock_info(mdev);
        weight = b->bm_set;
 
@@ -1314,7 +1562,7 @@ unsigned long drbd_bm_ALe_set_all(struct drbd_conf *mdev, unsigned long al_enr)
        count = 0;
        if (s < b->bm_words) {
                i = do_now = e-s;
-               p_addr = bm_map_paddr(b, s);
+               p_addr = bm_map_pidx(b, bm_word_to_page_idx(b, s));
                bm = p_addr + MLPP(s);
                while (i--) {
                        count += hweight_long(*bm);
@@ -1326,7 +1574,7 @@ unsigned long drbd_bm_ALe_set_all(struct drbd_conf *mdev, unsigned long al_enr)
                if (e == b->bm_words)
                        b->bm_set -= bm_clear_surplus(b);
        } else {
-               dev_err(DEV, "start offset (%d) too large in drbd_bm_ALe_set_all\n", s);
+               dev_err(DEV, "start offset (%lu) too large in drbd_bm_ALe_set_all\n", s);
        }
        weight = b->bm_set - weight;
        spin_unlock_irq(&b->bm_lock);
index b0bd27dfc1e8a88deab0c49705b3ed201da7309c..81030d8d654b26115e8d32a425cd9974e30737d1 100644 (file)
@@ -72,13 +72,6 @@ extern int fault_devs;
 extern char usermode_helper[];
 
 
-#ifndef TRUE
-#define TRUE 1
-#endif
-#ifndef FALSE
-#define FALSE 0
-#endif
-
 /* I don't remember why XCPU ...
  * This is used to wake the asender,
  * and to interrupt sending the sending task
@@ -104,6 +97,7 @@ extern char usermode_helper[];
 #define ID_SYNCER (-1ULL)
 #define ID_VACANT 0
 #define is_syncer_block_id(id) ((id) == ID_SYNCER)
+#define UUID_NEW_BM_OFFSET ((u64)0x0001000000000000ULL)
 
 struct drbd_conf;
 
@@ -137,20 +131,19 @@ enum {
        DRBD_FAULT_MAX,
 };
 
-#ifdef CONFIG_DRBD_FAULT_INJECTION
 extern unsigned int
 _drbd_insert_fault(struct drbd_conf *mdev, unsigned int type);
+
 static inline int
 drbd_insert_fault(struct drbd_conf *mdev, unsigned int type) {
+#ifdef CONFIG_DRBD_FAULT_INJECTION
        return fault_rate &&
                (enable_faults & (1<<type)) &&
                _drbd_insert_fault(mdev, type);
-}
-#define FAULT_ACTIVE(_m, _t) (drbd_insert_fault((_m), (_t)))
-
 #else
-#define FAULT_ACTIVE(_m, _t) (0)
+       return 0;
 #endif
+}
 
 /* integer division, round _UP_ to the next integer */
 #define div_ceil(A, B) ((A)/(B) + ((A)%(B) ? 1 : 0))
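
The FAULT_ACTIVE() macro disappears in the hunk above: drbd_insert_fault() is now defined unconditionally, with the CONFIG_DRBD_FAULT_INJECTION #ifdef confined to its body, so callers compile and type-check identically in both configurations and the disabled case folds to a constant 0. A toy standalone version of the same pattern follows; the config macro and all identifiers are invented for illustration.

#include <stdio.h>

static unsigned int fault_rate;         /* nonzero: injection armed */
static unsigned int enable_faults;      /* bitmask of enabled fault types */

/* one always-compiled helper; only its body is configuration dependent */
static inline int insert_fault(unsigned int type)
{
#ifdef CONFIG_FAULT_INJECTION_DEMO
        return fault_rate && (enable_faults & (1u << type));
#else
        (void)type;     /* disabled: folds away to constant 0 */
        return 0;
#endif
}

int main(void)
{
        fault_rate = 1;                 /* arm injection (if compiled in) */
        enable_faults = 1u << 2;        /* ... for fault type 2 only */
        printf("fault for type 2: %d\n", insert_fault(2));
        printf("fault for type 3: %d\n", insert_fault(3));
        return 0;
}
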
@@ -212,8 +205,10 @@ enum drbd_packets {
        /* P_CKPT_FENCE_REQ      = 0x25, * currently reserved for protocol D */
        /* P_CKPT_DISABLE_REQ    = 0x26, * currently reserved for protocol D */
        P_DELAY_PROBE         = 0x27, /* is used on BOTH sockets */
+       P_OUT_OF_SYNC         = 0x28, /* Mark as out of sync (Outrunning), data socket */
+       P_RS_CANCEL           = 0x29, /* meta: Used to cancel RS_DATA_REQUEST packet by SyncSource */
 
-       P_MAX_CMD             = 0x28,
+       P_MAX_CMD             = 0x2A,
        P_MAY_IGNORE          = 0x100, /* Flag to test if (cmd > P_MAY_IGNORE) ... */
        P_MAX_OPT_CMD         = 0x101,
 
@@ -269,6 +264,7 @@ static inline const char *cmdname(enum drbd_packets cmd)
                [P_RS_IS_IN_SYNC]       = "CsumRSIsInSync",
                [P_COMPRESSED_BITMAP]   = "CBitmap",
                [P_DELAY_PROBE]         = "DelayProbe",
+               [P_OUT_OF_SYNC]         = "OutOfSync",
                [P_MAX_CMD]             = NULL,
        };
 
@@ -512,7 +508,7 @@ struct p_sizes {
        u64         d_size;  /* size of disk */
        u64         u_size;  /* user requested size */
        u64         c_size;  /* current exported size */
-       u32         max_segment_size;  /* Maximal size of a BIO */
+       u32         max_bio_size;  /* Maximal size of a BIO */
        u16         queue_order_type;  /* not yet implemented in DRBD*/
        u16         dds_flags; /* use enum dds_flags here. */
 } __packed;
@@ -550,6 +546,13 @@ struct p_discard {
        u32         pad;
 } __packed;
 
+struct p_block_desc {
+       struct p_header80 head;
+       u64 sector;
+       u32 blksize;
+       u32 pad;        /* pad to a multiple of 8 bytes */
+} __packed;
+
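
struct p_block_desc above pads its payload explicitly so the on-wire packet body stays a multiple of 8 bytes. A standalone sketch of that convention with a compile-time size check follows; the struct name is invented and the DRBD packet header member is omitted, so this illustrates the padding idea, not the actual wire layout.

#include <stdint.h>
#include <stdio.h>

struct block_desc {
        uint64_t sector;
        uint32_t blksize;
        uint32_t pad;           /* pad to a multiple of 8 bytes */
} __attribute__((packed));

/* catch anyone shrinking or growing the wire format by accident */
_Static_assert(sizeof(struct block_desc) % 8 == 0,
               "wire format must stay 8-byte aligned");

int main(void)
{
        printf("block_desc is %zu bytes\n", sizeof(struct block_desc));
        return 0;
}
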
 /* Valid values for the encoding field.
  * Bump proto version when changing this. */
 enum drbd_bitmap_code {
@@ -647,6 +650,7 @@ union p_polymorph {
         struct p_block_req       block_req;
        struct p_delay_probe93   delay_probe93;
        struct p_rs_uuid         rs_uuid;
+       struct p_block_desc      block_desc;
 } __packed;
 
 /**********************************************************************/
@@ -677,13 +681,6 @@ static inline enum drbd_thread_state get_t_state(struct drbd_thread *thi)
        return thi->t_state;
 }
 
-
-/*
- * Having this as the first member of a struct provides sort of "inheritance".
- * "derived" structs can be "drbd_queue_work()"ed.
- * The callback should know and cast back to the descendant struct.
- * drbd_request and drbd_epoch_entry are descendants of drbd_work.
- */
 struct drbd_work;
 typedef int (*drbd_work_cb)(struct drbd_conf *, struct drbd_work *, int cancel);
 struct drbd_work {
@@ -712,9 +709,6 @@ struct drbd_request {
         * starting a new epoch...
         */
 
-       /* up to here, the struct layout is identical to drbd_epoch_entry;
-        * we might be able to use that to our advantage...  */
-
        struct list_head tl_requests; /* ring list in the transfer log */
        struct bio *master_bio;       /* master bio pointer */
        unsigned long rq_state; /* see comments above _req_mod() */
@@ -831,7 +825,7 @@ enum {
        CRASHED_PRIMARY,        /* This node was a crashed primary.
                                 * Gets cleared when the state.conn
                                 * goes into C_CONNECTED state. */
-       WRITE_BM_AFTER_RESYNC,  /* A kmalloc() during resync failed */
+       NO_BARRIER_SUPP,        /* underlying block device doesn't implement barriers */
        CONSIDER_RESYNC,
 
        MD_NO_FUA,              /* Users wants us to not use FUA/FLUSH on meta data dev */
@@ -856,10 +850,37 @@ enum {
        GOT_PING_ACK,           /* set when we receive a ping_ack packet, misc wait gets woken */
        NEW_CUR_UUID,           /* Create new current UUID when thawing IO */
        AL_SUSPENDED,           /* Activity logging is currently suspended. */
+       AHEAD_TO_SYNC_SOURCE,   /* Ahead -> SyncSource queued */
 };
 
 struct drbd_bitmap; /* opaque for drbd_conf */
 
+/* definition of bits in bm_flags to be used in drbd_bm_lock
+ * and drbd_bitmap_io and friends. */
+enum bm_flag {
+       /* do we need to kfree, or vfree bm_pages? */
+       BM_P_VMALLOCED = 0x10000, /* internal use only, will be masked out */
+
+       /* currently locked for bulk operation */
+       BM_LOCKED_MASK = 0x7,
+
+       /* in detail, that is: */
+       BM_DONT_CLEAR = 0x1,
+       BM_DONT_SET   = 0x2,
+       BM_DONT_TEST  = 0x4,
+
+       /* (test bit, count bit) allowed (common case) */
+       BM_LOCKED_TEST_ALLOWED = 0x3,
+
+       /* testing bits, as well as setting new bits allowed, but clearing bits
+        * would be unexpected.  Used during bitmap receive.  Setting new bits
+        * requires sending of "out-of-sync" information, though. */
+       BM_LOCKED_SET_ALLOWED = 0x1,
+
+       /* clear is not expected while bitmap is locked for bulk operation */
+};
+
+
 /* TODO sort members for performance
  * MAYBE group them further */
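
Each BM_DONT_* bit in the enum above vetoes one class of bit operation while a bulk operation holds the bitmap lock, and the BM_LOCKED_* values are just named unions of vetoes: BM_LOCKED_TEST_ALLOWED (0x3) forbids set and clear, BM_LOCKED_SET_ALLOWED (0x1) forbids only clear. The tiny model below mirrors the `if (BM_DONT_TEST & b->bm_flags)` checks used throughout this patch; the names are invented for illustration.

#include <stdio.h>

enum flag {
        DONT_CLEAR = 0x1,
        DONT_SET   = 0x2,
        DONT_TEST  = 0x4,
        LOCKED_TEST_ALLOWED = DONT_CLEAR | DONT_SET,    /* 0x3 */
        LOCKED_SET_ALLOWED  = DONT_CLEAR,               /* 0x1 */
};

/* an operation is permitted unless its DONT_* bit is set */
static int op_allowed(unsigned int bm_flags, enum flag op)
{
        return !(bm_flags & op);
}

int main(void)
{
        unsigned int bm_flags = LOCKED_SET_ALLOWED;     /* e.g. bitmap receive */

        printf("set: %d  clear: %d  test: %d\n",
               op_allowed(bm_flags, DONT_SET),
               op_allowed(bm_flags, DONT_CLEAR),
               op_allowed(bm_flags, DONT_TEST));
        return 0;
}
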
 
@@ -925,6 +946,7 @@ struct drbd_md_io {
 struct bm_io_work {
        struct drbd_work w;
        char *why;
+       enum bm_flag flags;
        int (*io_fn)(struct drbd_conf *mdev);
        void (*done)(struct drbd_conf *mdev, int rv);
 };
@@ -963,9 +985,12 @@ struct drbd_conf {
        struct drbd_work  resync_work,
                          unplug_work,
                          go_diskless,
-                         md_sync_work;
+                         md_sync_work,
+                         start_resync_work;
        struct timer_list resync_timer;
        struct timer_list md_sync_timer;
+       struct timer_list start_resync_timer;
+       struct timer_list request_timer;
 #ifdef DRBD_DEBUG_MD_SYNC
        struct {
                unsigned int line;
@@ -1000,9 +1025,9 @@ struct drbd_conf {
        struct hlist_head *tl_hash;
        unsigned int tl_hash_s;
 
-       /* blocks to sync in this run [unit BM_BLOCK_SIZE] */
+       /* blocks to resync in this run [unit BM_BLOCK_SIZE] */
        unsigned long rs_total;
-       /* number of sync IOs that failed in this run */
+       /* number of resync blocks that failed in this run */
        unsigned long rs_failed;
        /* Syncer's start time [unit jiffies] */
        unsigned long rs_start;
@@ -1102,6 +1127,7 @@ struct drbd_conf {
        struct fifo_buffer rs_plan_s; /* correction values of resync planner */
        int rs_in_flight; /* resync sectors in flight (to proxy, in proxy and from proxy) */
        int rs_planed;    /* resync sectors already planned */
+       atomic_t ap_in_flight; /* App sectors in flight (waiting for ack) */
 };
 
 static inline struct drbd_conf *minor_to_mdev(unsigned int minor)
@@ -1163,14 +1189,19 @@ enum dds_flags {
 };
 
 extern void drbd_init_set_defaults(struct drbd_conf *mdev);
-extern int drbd_change_state(struct drbd_conf *mdev, enum chg_state_flags f,
-                       union drbd_state mask, union drbd_state val);
+extern enum drbd_state_rv drbd_change_state(struct drbd_conf *mdev,
+                                           enum chg_state_flags f,
+                                           union drbd_state mask,
+                                           union drbd_state val);
 extern void drbd_force_state(struct drbd_conf *, union drbd_state,
                        union drbd_state);
-extern int _drbd_request_state(struct drbd_conf *, union drbd_state,
-                       union drbd_state, enum chg_state_flags);
-extern int __drbd_set_state(struct drbd_conf *, union drbd_state,
-                           enum chg_state_flags, struct completion *done);
+extern enum drbd_state_rv _drbd_request_state(struct drbd_conf *,
+                                             union drbd_state,
+                                             union drbd_state,
+                                             enum chg_state_flags);
+extern enum drbd_state_rv __drbd_set_state(struct drbd_conf *, union drbd_state,
+                                          enum chg_state_flags,
+                                          struct completion *done);
 extern void print_st_err(struct drbd_conf *, union drbd_state,
                        union drbd_state, int);
 extern int  drbd_thread_start(struct drbd_thread *thi);
@@ -1195,7 +1226,7 @@ extern int drbd_send(struct drbd_conf *mdev, struct socket *sock,
 extern int drbd_send_protocol(struct drbd_conf *mdev);
 extern int drbd_send_uuids(struct drbd_conf *mdev);
 extern int drbd_send_uuids_skip_initial_sync(struct drbd_conf *mdev);
-extern int drbd_send_sync_uuid(struct drbd_conf *mdev, u64 val);
+extern int drbd_gen_and_send_sync_uuid(struct drbd_conf *mdev);
 extern int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags flags);
 extern int _drbd_send_state(struct drbd_conf *mdev);
 extern int drbd_send_state(struct drbd_conf *mdev);
@@ -1220,11 +1251,10 @@ extern int drbd_send_ack_dp(struct drbd_conf *mdev, enum drbd_packets cmd,
                        struct p_data *dp, int data_size);
 extern int drbd_send_ack_ex(struct drbd_conf *mdev, enum drbd_packets cmd,
                            sector_t sector, int blksize, u64 block_id);
+extern int drbd_send_oos(struct drbd_conf *mdev, struct drbd_request *req);
 extern int drbd_send_block(struct drbd_conf *mdev, enum drbd_packets cmd,
                           struct drbd_epoch_entry *e);
 extern int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req);
-extern int _drbd_send_barrier(struct drbd_conf *mdev,
-                       struct drbd_tl_epoch *barrier);
 extern int drbd_send_drequest(struct drbd_conf *mdev, int cmd,
                              sector_t sector, int size, u64 block_id);
 extern int drbd_send_drequest_csum(struct drbd_conf *mdev,
@@ -1235,14 +1265,13 @@ extern int drbd_send_ov_request(struct drbd_conf *mdev,sector_t sector,int size)
 
 extern int drbd_send_bitmap(struct drbd_conf *mdev);
 extern int _drbd_send_bitmap(struct drbd_conf *mdev);
-extern int drbd_send_sr_reply(struct drbd_conf *mdev, int retcode);
+extern int drbd_send_sr_reply(struct drbd_conf *mdev, enum drbd_state_rv retcode);
 extern void drbd_free_bc(struct drbd_backing_dev *ldev);
 extern void drbd_mdev_cleanup(struct drbd_conf *mdev);
+void drbd_print_uuids(struct drbd_conf *mdev, const char *text);
 
-/* drbd_meta-data.c (still in drbd_main.c) */
 extern void drbd_md_sync(struct drbd_conf *mdev);
 extern int  drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev);
-/* maybe define them below as inline? */
 extern void drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local);
 extern void _drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local);
 extern void drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local);
@@ -1261,10 +1290,12 @@ extern void drbd_md_mark_dirty_(struct drbd_conf *mdev,
 extern void drbd_queue_bitmap_io(struct drbd_conf *mdev,
                                 int (*io_fn)(struct drbd_conf *),
                                 void (*done)(struct drbd_conf *, int),
-                                char *why);
+                                char *why, enum bm_flag flags);
+extern int drbd_bitmap_io(struct drbd_conf *mdev,
+               int (*io_fn)(struct drbd_conf *),
+               char *why, enum bm_flag flags);
 extern int drbd_bmio_set_n_write(struct drbd_conf *mdev);
 extern int drbd_bmio_clear_n_write(struct drbd_conf *mdev);
-extern int drbd_bitmap_io(struct drbd_conf *mdev, int (*io_fn)(struct drbd_conf *), char *why);
 extern void drbd_go_diskless(struct drbd_conf *mdev);
 extern void drbd_ldev_destroy(struct drbd_conf *mdev);
 
@@ -1313,6 +1344,7 @@ struct bm_extent {
 
 #define BME_NO_WRITES  0  /* bm_extent.flags: no more requests on this one! */
 #define BME_LOCKED     1  /* bm_extent.flags: syncer active on this one. */
+#define BME_PRIORITY   2  /* finish resync IO on this extent ASAP! App IO waiting! */
 
 /* drbd_bitmap.c */
 /*
@@ -1390,7 +1422,9 @@ struct bm_extent {
  * you should use a 64bit OS for that much storage, anyway. */
 #define DRBD_MAX_SECTORS_FLEX BM_BIT_TO_SECT(0xffff7fff)
 #else
-#define DRBD_MAX_SECTORS_FLEX BM_BIT_TO_SECT(0x1LU << 32)
+/* we allow up to 1 PiB now on 64bit architecture with "flexible" meta data */
+#define DRBD_MAX_SECTORS_FLEX (1UL << 51)
+/* corresponds to (1UL << 38) bits right now. */
 #endif
 #endif
 
@@ -1398,7 +1432,7 @@ struct bm_extent {
  * With a value of 8 all IO in one 128K block make it to the same slot of the
  * hash table. */
 #define HT_SHIFT 8
-#define DRBD_MAX_SEGMENT_SIZE (1U<<(9+HT_SHIFT))
+#define DRBD_MAX_BIO_SIZE (1U<<(9+HT_SHIFT))
 
 #define DRBD_MAX_SIZE_H80_PACKET (1 << 15) /* The old header only allows packets up to 32 KiB of data */
 
@@ -1410,16 +1444,20 @@ extern int  drbd_bm_resize(struct drbd_conf *mdev, sector_t sectors, int set_new
 extern void drbd_bm_cleanup(struct drbd_conf *mdev);
 extern void drbd_bm_set_all(struct drbd_conf *mdev);
 extern void drbd_bm_clear_all(struct drbd_conf *mdev);
+/* set/clear/test only a few bits at a time */
 extern int  drbd_bm_set_bits(
                struct drbd_conf *mdev, unsigned long s, unsigned long e);
 extern int  drbd_bm_clear_bits(
                struct drbd_conf *mdev, unsigned long s, unsigned long e);
-/* bm_set_bits variant for use while holding drbd_bm_lock */
+extern int drbd_bm_count_bits(
+       struct drbd_conf *mdev, const unsigned long s, const unsigned long e);
+/* bm_set_bits variant for use while holding drbd_bm_lock,
+ * may process the whole bitmap in one go */
 extern void _drbd_bm_set_bits(struct drbd_conf *mdev,
                const unsigned long s, const unsigned long e);
 extern int  drbd_bm_test_bit(struct drbd_conf *mdev, unsigned long bitnr);
 extern int  drbd_bm_e_weight(struct drbd_conf *mdev, unsigned long enr);
-extern int  drbd_bm_write_sect(struct drbd_conf *mdev, unsigned long enr) __must_hold(local);
+extern int  drbd_bm_write_page(struct drbd_conf *mdev, unsigned int idx) __must_hold(local);
 extern int  drbd_bm_read(struct drbd_conf *mdev) __must_hold(local);
 extern int  drbd_bm_write(struct drbd_conf *mdev) __must_hold(local);
 extern unsigned long drbd_bm_ALe_set_all(struct drbd_conf *mdev,
@@ -1427,6 +1465,8 @@ extern unsigned long drbd_bm_ALe_set_all(struct drbd_conf *mdev,
 extern size_t       drbd_bm_words(struct drbd_conf *mdev);
 extern unsigned long drbd_bm_bits(struct drbd_conf *mdev);
 extern sector_t      drbd_bm_capacity(struct drbd_conf *mdev);
+
+#define DRBD_END_OF_BITMAP     (~(unsigned long)0)
 extern unsigned long drbd_bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo);
 /* bm_find_next variants for use while you hold drbd_bm_lock() */
 extern unsigned long _drbd_bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo);
@@ -1437,14 +1477,12 @@ extern int drbd_bm_rs_done(struct drbd_conf *mdev);
 /* for receive_bitmap */
 extern void drbd_bm_merge_lel(struct drbd_conf *mdev, size_t offset,
                size_t number, unsigned long *buffer);
-/* for _drbd_send_bitmap and drbd_bm_write_sect */
+/* for _drbd_send_bitmap */
 extern void drbd_bm_get_lel(struct drbd_conf *mdev, size_t offset,
                size_t number, unsigned long *buffer);
 
-extern void drbd_bm_lock(struct drbd_conf *mdev, char *why);
+extern void drbd_bm_lock(struct drbd_conf *mdev, char *why, enum bm_flag flags);
 extern void drbd_bm_unlock(struct drbd_conf *mdev);
-
-extern int drbd_bm_count_bits(struct drbd_conf *mdev, const unsigned long s, const unsigned long e);
 /* drbd_main.c */
 
 extern struct kmem_cache *drbd_request_cache;
@@ -1467,7 +1505,7 @@ extern void drbd_free_mdev(struct drbd_conf *mdev);
 extern int proc_details;
 
 /* drbd_req */
-extern int drbd_make_request_26(struct request_queue *q, struct bio *bio);
+extern int drbd_make_request(struct request_queue *q, struct bio *bio);
 extern int drbd_read_remote(struct drbd_conf *mdev, struct drbd_request *req);
 extern int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct bio_vec *bvec);
 extern int is_valid_ar_handle(struct drbd_request *, sector_t);
@@ -1482,8 +1520,9 @@ enum determine_dev_size { dev_size_error = -1, unchanged = 0, shrunk = 1, grew =
 extern enum determine_dev_size drbd_determin_dev_size(struct drbd_conf *, enum dds_flags) __must_hold(local);
 extern void resync_after_online_grow(struct drbd_conf *);
 extern void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int) __must_hold(local);
-extern int drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role,
-               int force);
+extern enum drbd_state_rv drbd_set_role(struct drbd_conf *mdev,
+                                       enum drbd_role new_role,
+                                       int force);
 extern enum drbd_disk_state drbd_try_outdate_peer(struct drbd_conf *mdev);
 extern void drbd_try_outdate_peer_async(struct drbd_conf *mdev);
 extern int drbd_khelper(struct drbd_conf *mdev, char *cmd);
@@ -1499,6 +1538,7 @@ extern int drbd_resync_finished(struct drbd_conf *mdev);
 extern int drbd_md_sync_page_io(struct drbd_conf *mdev,
                struct drbd_backing_dev *bdev, sector_t sector, int rw);
 extern void drbd_ov_oos_found(struct drbd_conf*, sector_t, int);
+extern void drbd_rs_controller_reset(struct drbd_conf *mdev);
 
 static inline void ov_oos_print(struct drbd_conf *mdev)
 {
@@ -1522,21 +1562,23 @@ extern int w_e_end_csum_rs_req(struct drbd_conf *, struct drbd_work *, int);
 extern int w_e_end_ov_reply(struct drbd_conf *, struct drbd_work *, int);
 extern int w_e_end_ov_req(struct drbd_conf *, struct drbd_work *, int);
 extern int w_ov_finished(struct drbd_conf *, struct drbd_work *, int);
-extern int w_resync_inactive(struct drbd_conf *, struct drbd_work *, int);
+extern int w_resync_timer(struct drbd_conf *, struct drbd_work *, int);
 extern int w_resume_next_sg(struct drbd_conf *, struct drbd_work *, int);
 extern int w_send_write_hint(struct drbd_conf *, struct drbd_work *, int);
-extern int w_make_resync_request(struct drbd_conf *, struct drbd_work *, int);
 extern int w_send_dblock(struct drbd_conf *, struct drbd_work *, int);
 extern int w_send_barrier(struct drbd_conf *, struct drbd_work *, int);
 extern int w_send_read_req(struct drbd_conf *, struct drbd_work *, int);
 extern int w_prev_work_done(struct drbd_conf *, struct drbd_work *, int);
 extern int w_e_reissue(struct drbd_conf *, struct drbd_work *, int);
 extern int w_restart_disk_io(struct drbd_conf *, struct drbd_work *, int);
+extern int w_send_oos(struct drbd_conf *, struct drbd_work *, int);
+extern int w_start_resync(struct drbd_conf *, struct drbd_work *, int);
 
 extern void resync_timer_fn(unsigned long data);
+extern void start_resync_timer_fn(unsigned long data);
 
 /* drbd_receiver.c */
-extern int drbd_rs_should_slow_down(struct drbd_conf *mdev);
+extern int drbd_rs_should_slow_down(struct drbd_conf *mdev, sector_t sector);
 extern int drbd_submit_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e,
                const unsigned rw, const int fault_type);
 extern int drbd_release_ee(struct drbd_conf *mdev, struct list_head *list);
@@ -1619,16 +1661,16 @@ extern int drbd_rs_del_all(struct drbd_conf *mdev);
 extern void drbd_rs_failed_io(struct drbd_conf *mdev,
                sector_t sector, int size);
 extern int drbd_al_read_log(struct drbd_conf *mdev, struct drbd_backing_dev *);
+extern void drbd_advance_rs_marks(struct drbd_conf *mdev, unsigned long still_to_go);
 extern void __drbd_set_in_sync(struct drbd_conf *mdev, sector_t sector,
                int size, const char *file, const unsigned int line);
 #define drbd_set_in_sync(mdev, sector, size) \
        __drbd_set_in_sync(mdev, sector, size, __FILE__, __LINE__)
-extern void __drbd_set_out_of_sync(struct drbd_conf *mdev, sector_t sector,
+extern int __drbd_set_out_of_sync(struct drbd_conf *mdev, sector_t sector,
                int size, const char *file, const unsigned int line);
 #define drbd_set_out_of_sync(mdev, sector, size) \
        __drbd_set_out_of_sync(mdev, sector, size, __FILE__, __LINE__)
 extern void drbd_al_apply_to_bm(struct drbd_conf *mdev);
-extern void drbd_al_to_on_disk_bm(struct drbd_conf *mdev);
 extern void drbd_al_shrink(struct drbd_conf *mdev);
 
 
@@ -1747,11 +1789,11 @@ static inline void drbd_state_unlock(struct drbd_conf *mdev)
        wake_up(&mdev->misc_wait);
 }
 
-static inline int _drbd_set_state(struct drbd_conf *mdev,
-                                  union drbd_state ns, enum chg_state_flags flags,
-                                  struct completion *done)
+static inline enum drbd_state_rv
+_drbd_set_state(struct drbd_conf *mdev, union drbd_state ns,
+               enum chg_state_flags flags, struct completion *done)
 {
-       int rv;
+       enum drbd_state_rv rv;
 
        read_lock(&global_state_lock);
        rv = __drbd_set_state(mdev, ns, flags, done);
@@ -1982,17 +2024,17 @@ static inline int drbd_send_ping_ack(struct drbd_conf *mdev)
 
 static inline void drbd_thread_stop(struct drbd_thread *thi)
 {
-       _drbd_thread_stop(thi, FALSE, TRUE);
+       _drbd_thread_stop(thi, false, true);
 }
 
 static inline void drbd_thread_stop_nowait(struct drbd_thread *thi)
 {
-       _drbd_thread_stop(thi, FALSE, FALSE);
+       _drbd_thread_stop(thi, false, false);
 }
 
 static inline void drbd_thread_restart_nowait(struct drbd_thread *thi)
 {
-       _drbd_thread_stop(thi, TRUE, FALSE);
+       _drbd_thread_stop(thi, true, false);
 }
 
 /* counts how many answer packets we expect from our peer,
@@ -2146,17 +2188,18 @@ extern int _get_ldev_if_state(struct drbd_conf *mdev, enum drbd_disk_state mins)
 static inline void drbd_get_syncer_progress(struct drbd_conf *mdev,
                unsigned long *bits_left, unsigned int *per_mil_done)
 {
-       /*
-        * this is to break it at compile time when we change that
-        * (we may feel 4TB maximum storage per drbd is not enough)
-        */
+       /* this is to break it at compile time when we change that, in case we
+        * want to support more than (1<<32) bits on a 32bit arch. */
        typecheck(unsigned long, mdev->rs_total);
 
        /* note: both rs_total and rs_left are in bits, i.e. in
         * units of BM_BLOCK_SIZE.
         * for the percentage, we don't care. */
 
-       *bits_left = drbd_bm_total_weight(mdev) - mdev->rs_failed;
+       if (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T)
+               *bits_left = mdev->ov_left;
+       else
+               *bits_left = drbd_bm_total_weight(mdev) - mdev->rs_failed;
        /* >> 10 to prevent overflow,
         * +1 to prevent division by zero */
        if (*bits_left > mdev->rs_total) {
@@ -2171,10 +2214,19 @@ static inline void drbd_get_syncer_progress(struct drbd_conf *mdev,
                                *bits_left, mdev->rs_total, mdev->rs_failed);
                *per_mil_done = 0;
        } else {
-               /* make sure the calculation happens in long context */
-               unsigned long tmp = 1000UL -
-                               (*bits_left >> 10)*1000UL
-                               / ((mdev->rs_total >> 10) + 1UL);
+               /* Make sure the division happens in long context.
+                * We allow up to one petabyte storage right now,
+                * at a granularity of 4k per bit that is 2**38 bits.
+                * After shift right and multiplication by 1000,
+                * this should still fit easily into a 32bit long,
+                * so we don't need a 64bit division on 32bit arch.
+                * Note: currently we don't support such large bitmaps on 32bit
+        * arch anyway, but no harm done to be prepared for it here.
+                */
+               unsigned int shift = mdev->rs_total >= (1ULL << 32) ? 16 : 10;
+               unsigned long left = *bits_left >> shift;
+               unsigned long total = 1UL + (mdev->rs_total >> shift);
+               unsigned long tmp = 1000UL - left * 1000UL/total;
                *per_mil_done = tmp;
        }
 }
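
The replacement arithmetic above dodges a 64-bit division on 32-bit machines: both operands are shifted right before the multiplication by 1000, and the shift grows from 10 to 16 once rs_total reaches 2^32 bits, so even for a 2^38-bit bitmap left * 1000 stays within 32 bits (2^22 * 1000 < 2^32). A userspace rendering of the computation, assuming a 64-bit build for the large example value (the function name is illustrative):

#include <stdio.h>

static unsigned int per_mil_done(unsigned long bits_left, unsigned long rs_total)
{
        /* pick a larger shift for very big bitmaps so left * 1000
         * still fits in 32 bits */
        unsigned int shift = rs_total >= (1ULL << 32) ? 16 : 10;
        unsigned long left = bits_left >> shift;
        unsigned long total = 1UL + (rs_total >> shift);

        return 1000UL - left * 1000UL / total;
}

int main(void)
{
        /* 2^38 bits total (1 PiB at 4k per bit), three quarters still to go */
        printf("%u per mil done\n", per_mil_done(3UL << 36, 1UL << 38));
        return 0;
}
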
@@ -2193,8 +2245,9 @@ static inline int drbd_get_max_buffers(struct drbd_conf *mdev)
        return mxb;
 }
 
-static inline int drbd_state_is_stable(union drbd_state s)
+static inline int drbd_state_is_stable(struct drbd_conf *mdev)
 {
+       union drbd_state s = mdev->state;
 
        /* DO NOT add a default clause, we want the compiler to warn us
         * for any newly introduced state we may have forgotten to add here */
@@ -2211,11 +2264,9 @@ static inline int drbd_state_is_stable(union drbd_state s)
        case C_VERIFY_T:
        case C_PAUSED_SYNC_S:
        case C_PAUSED_SYNC_T:
-               /* maybe stable, look at the disk state */
-               break;
-
-       /* no new io accepted during tansitional states
-        * like handshake or teardown */
+       case C_AHEAD:
+       case C_BEHIND:
+               /* transitional states, IO allowed */
        case C_DISCONNECTING:
        case C_UNCONNECTED:
        case C_TIMEOUT:
@@ -2226,7 +2277,15 @@ static inline int drbd_state_is_stable(union drbd_state s)
        case C_WF_REPORT_PARAMS:
        case C_STARTING_SYNC_S:
        case C_STARTING_SYNC_T:
+               break;
+
+               /* Allow IO in BM exchange states with new protocols */
        case C_WF_BITMAP_S:
+               if (mdev->agreed_pro_version < 96)
+                       return 0;
+               break;
+
+               /* no new io accepted in these states */
        case C_WF_BITMAP_T:
        case C_WF_SYNC_UUID:
        case C_MASK:
@@ -2261,41 +2320,47 @@ static inline int is_susp(union drbd_state s)
        return s.susp || s.susp_nod || s.susp_fen;
 }
 
-static inline int __inc_ap_bio_cond(struct drbd_conf *mdev)
+static inline bool may_inc_ap_bio(struct drbd_conf *mdev)
 {
        int mxb = drbd_get_max_buffers(mdev);
 
        if (is_susp(mdev->state))
-               return 0;
+               return false;
        if (test_bit(SUSPEND_IO, &mdev->flags))
-               return 0;
+               return false;
 
        /* to avoid potential deadlock or bitmap corruption,
         * in various places, we only allow new application io
         * to start during "stable" states. */
 
        /* no new io accepted when attaching or detaching the disk */
-       if (!drbd_state_is_stable(mdev->state))
-               return 0;
+       if (!drbd_state_is_stable(mdev))
+               return false;
 
        /* since some older kernels don't have atomic_add_unless,
         * and we are within the spinlock anyway, we have this workaround.  */
        if (atomic_read(&mdev->ap_bio_cnt) > mxb)
-               return 0;
+               return false;
        if (test_bit(BITMAP_IO, &mdev->flags))
-               return 0;
-       return 1;
+               return false;
+       return true;
 }
 
-/* I'd like to use wait_event_lock_irq,
- * but I'm not sure when it got introduced,
- * and not sure when it has 3 or 4 arguments */
-static inline void inc_ap_bio(struct drbd_conf *mdev, int count)
+static inline bool inc_ap_bio_cond(struct drbd_conf *mdev, int count)
 {
-       /* compare with after_state_ch,
-        * os.conn != C_WF_BITMAP_S && ns.conn == C_WF_BITMAP_S */
-       DEFINE_WAIT(wait);
+       bool rv = false;
+
+       spin_lock_irq(&mdev->req_lock);
+       rv = may_inc_ap_bio(mdev);
+       if (rv)
+               atomic_add(count, &mdev->ap_bio_cnt);
+       spin_unlock_irq(&mdev->req_lock);
+
+       return rv;
+}
 
+static inline void inc_ap_bio(struct drbd_conf *mdev, int count)
+{
        /* we wait here
         *    as long as the device is suspended
         *    until the bitmap is no longer on the fly during connection
@@ -2304,16 +2369,7 @@ static inline void inc_ap_bio(struct drbd_conf *mdev, int count)
         * to avoid races with the reconnect code,
         * we need to atomic_inc within the spinlock. */
 
-       spin_lock_irq(&mdev->req_lock);
-       while (!__inc_ap_bio_cond(mdev)) {
-               prepare_to_wait(&mdev->misc_wait, &wait, TASK_UNINTERRUPTIBLE);
-               spin_unlock_irq(&mdev->req_lock);
-               schedule();
-               finish_wait(&mdev->misc_wait, &wait);
-               spin_lock_irq(&mdev->req_lock);
-       }
-       atomic_add(count, &mdev->ap_bio_cnt);
-       spin_unlock_irq(&mdev->req_lock);
+       wait_event(mdev->misc_wait, inc_ap_bio_cond(mdev, count));
 }
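
A note on the rewrite above: the open-coded prepare_to_wait()/schedule() loop
becomes wait_event() over a helper that re-checks the condition and takes the
reference inside one locked section, so the check and the increment cannot
race. Below is a minimal userspace sketch of that check-and-commit-under-a-lock
pattern, using pthreads in place of the kernel's wait queues; all names are
hypothetical stand-ins, not DRBD's.

#include <pthread.h>
#include <stdbool.h>

/* toy stand-ins for mdev->req_lock / misc_wait / ap_bio_cnt */
static pthread_mutex_t req_lock  = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  misc_wait = PTHREAD_COND_INITIALIZER;
static int  ap_bio_cnt, max_buffers = 16;
static bool io_suspended;

/* like inc_ap_bio_cond(): test the condition and, when it holds,
 * take the reference in the same critical section */
static bool cond_and_commit(int count)
{
    bool ok = !io_suspended && ap_bio_cnt + count <= max_buffers;
    if (ok)
        ap_bio_cnt += count;
    return ok;
}

/* like inc_ap_bio(): sleep until the condition-plus-increment succeeds */
static void take_ref(int count)
{
    pthread_mutex_lock(&req_lock);
    while (!cond_and_commit(count))
        pthread_cond_wait(&misc_wait, &req_lock);
    pthread_mutex_unlock(&req_lock);
}

/* like dec_ap_bio(): drop the reference and wake all waiters */
static void drop_ref(int count)
{
    pthread_mutex_lock(&req_lock);
    ap_bio_cnt -= count;
    pthread_cond_broadcast(&misc_wait);
    pthread_mutex_unlock(&req_lock);
}

int main(void)
{
    take_ref(1);   /* succeeds immediately: 1 <= 16 */
    drop_ref(1);
    return 0;
}
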
 
 static inline void dec_ap_bio(struct drbd_conf *mdev)
@@ -2333,9 +2389,11 @@ static inline void dec_ap_bio(struct drbd_conf *mdev)
        }
 }
 
-static inline void drbd_set_ed_uuid(struct drbd_conf *mdev, u64 val)
+static inline int drbd_set_ed_uuid(struct drbd_conf *mdev, u64 val)
 {
+       int changed = mdev->ed_uuid != val;
        mdev->ed_uuid = val;
+       return changed;
 }
 
 static inline int seq_cmp(u32 a, u32 b)
index 8a43ce0edeed12f1007cfeb9474ed857fc8f39cd..dfc85f32d3177bb90bcc2b53f8d3921de82df544 100644 (file)
@@ -85,7 +85,8 @@ MODULE_AUTHOR("Philipp Reisner <phil@linbit.com>, "
 MODULE_DESCRIPTION("drbd - Distributed Replicated Block Device v" REL_VERSION);
 MODULE_VERSION(REL_VERSION);
 MODULE_LICENSE("GPL");
-MODULE_PARM_DESC(minor_count, "Maximum number of drbd devices (1-255)");
+MODULE_PARM_DESC(minor_count, "Maximum number of drbd devices ("
+                __stringify(DRBD_MINOR_COUNT_MIN) "-" __stringify(DRBD_MINOR_COUNT_MAX) ")");
 MODULE_ALIAS_BLOCKDEV_MAJOR(DRBD_MAJOR);
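
The MODULE_PARM_DESC change above derives the advertised range from
DRBD_MINOR_COUNT_MIN/MAX at compile time, so the help text can no longer drift
from the actual limits. It leans on the kernel's two-level __stringify() macro
plus string-literal concatenation; here is a standalone sketch (the 1 and 255
values are assumed from the old hard-coded "(1-255)" text):

#include <stdio.h>

/* two levels, so the inner macro sees the *expanded* argument */
#define __stringify_1(x) #x
#define __stringify(x)   __stringify_1(x)

#define DRBD_MINOR_COUNT_MIN 1
#define DRBD_MINOR_COUNT_MAX 255

/* adjacent string literals are concatenated at compile time */
static const char desc[] =
    "Maximum number of drbd devices ("
    __stringify(DRBD_MINOR_COUNT_MIN) "-"
    __stringify(DRBD_MINOR_COUNT_MAX) ")";

int main(void)
{
    puts(desc); /* prints: Maximum number of drbd devices (1-255) */
    return 0;
}
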
 
 #include <linux/moduleparam.h>
@@ -115,7 +116,7 @@ module_param(fault_devs, int, 0644);
 #endif
 
 /* module parameter, defined */
-unsigned int minor_count = 32;
+unsigned int minor_count = DRBD_MINOR_COUNT_DEF;
 int disable_sendpage;
 int allow_oos;
 unsigned int cn_idx = CN_IDX_DRBD;
@@ -335,6 +336,7 @@ bail:
        drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
 }
 
+
 /**
  * _tl_restart() - Walks the transfer log, and applies an action to all requests
  * @mdev:      DRBD device.
@@ -456,7 +458,7 @@ void tl_restart(struct drbd_conf *mdev, enum drbd_req_event what)
 }
 
 /**
- * cl_wide_st_chg() - TRUE if the state change is a cluster wide one
+ * cl_wide_st_chg() - true if the state change is a cluster wide one
  * @mdev:      DRBD device.
  * @os:                old (current) state.
  * @ns:                new (wanted) state.
@@ -473,12 +475,13 @@ static int cl_wide_st_chg(struct drbd_conf *mdev,
                (os.conn == C_CONNECTED && ns.conn == C_VERIFY_S);
 }
 
-int drbd_change_state(struct drbd_conf *mdev, enum chg_state_flags f,
-                     union drbd_state mask, union drbd_state val)
+enum drbd_state_rv
+drbd_change_state(struct drbd_conf *mdev, enum chg_state_flags f,
+                 union drbd_state mask, union drbd_state val)
 {
        unsigned long flags;
        union drbd_state os, ns;
-       int rv;
+       enum drbd_state_rv rv;
 
        spin_lock_irqsave(&mdev->req_lock, flags);
        os = mdev->state;
@@ -502,20 +505,22 @@ void drbd_force_state(struct drbd_conf *mdev,
        drbd_change_state(mdev, CS_HARD, mask, val);
 }
 
-static int is_valid_state(struct drbd_conf *mdev, union drbd_state ns);
-static int is_valid_state_transition(struct drbd_conf *,
-                                    union drbd_state, union drbd_state);
+static enum drbd_state_rv is_valid_state(struct drbd_conf *, union drbd_state);
+static enum drbd_state_rv is_valid_state_transition(struct drbd_conf *,
+                                                   union drbd_state,
+                                                   union drbd_state);
 static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state os,
                                       union drbd_state ns, const char **warn_sync_abort);
 int drbd_send_state_req(struct drbd_conf *,
                        union drbd_state, union drbd_state);
 
-static enum drbd_state_ret_codes _req_st_cond(struct drbd_conf *mdev,
-                                   union drbd_state mask, union drbd_state val)
+static enum drbd_state_rv
+_req_st_cond(struct drbd_conf *mdev, union drbd_state mask,
+            union drbd_state val)
 {
        union drbd_state os, ns;
        unsigned long flags;
-       int rv;
+       enum drbd_state_rv rv;
 
        if (test_and_clear_bit(CL_ST_CHG_SUCCESS, &mdev->flags))
                return SS_CW_SUCCESS;
@@ -536,7 +541,7 @@ static enum drbd_state_ret_codes _req_st_cond(struct drbd_conf *mdev,
                if (rv == SS_SUCCESS) {
                        rv = is_valid_state_transition(mdev, ns, os);
                        if (rv == SS_SUCCESS)
-                               rv = 0; /* cont waiting, otherwise fail. */
+                               rv = SS_UNKNOWN_ERROR; /* continue waiting, otherwise fail. */
                }
        }
        spin_unlock_irqrestore(&mdev->req_lock, flags);
@@ -554,14 +559,14 @@ static enum drbd_state_ret_codes _req_st_cond(struct drbd_conf *mdev,
  * Should not be called directly, use drbd_request_state() or
  * _drbd_request_state().
  */
-static int drbd_req_state(struct drbd_conf *mdev,
-                         union drbd_state mask, union drbd_state val,
-                         enum chg_state_flags f)
+static enum drbd_state_rv
+drbd_req_state(struct drbd_conf *mdev, union drbd_state mask,
+              union drbd_state val, enum chg_state_flags f)
 {
        struct completion done;
        unsigned long flags;
        union drbd_state os, ns;
-       int rv;
+       enum drbd_state_rv rv;
 
        init_completion(&done);
 
@@ -636,10 +641,11 @@ abort:
  * Cousin of drbd_request_state(), useful with the CS_WAIT_COMPLETE
  * flag, or when logging of failed state change requests is not desired.
  */
-int _drbd_request_state(struct drbd_conf *mdev,        union drbd_state mask,
-                       union drbd_state val,   enum chg_state_flags f)
+enum drbd_state_rv
+_drbd_request_state(struct drbd_conf *mdev, union drbd_state mask,
+                   union drbd_state val, enum chg_state_flags f)
 {
-       int rv;
+       enum drbd_state_rv rv;
 
        wait_event(mdev->state_wait,
                   (rv = drbd_req_state(mdev, mask, val, f)) != SS_IN_TRANSIENT_STATE);
@@ -663,8 +669,8 @@ static void print_st(struct drbd_conf *mdev, char *name, union drbd_state ns)
            );
 }
 
-void print_st_err(struct drbd_conf *mdev,
-       union drbd_state os, union drbd_state ns, int err)
+void print_st_err(struct drbd_conf *mdev, union drbd_state os,
+                 union drbd_state ns, enum drbd_state_rv err)
 {
        if (err == SS_IN_TRANSIENT_STATE)
                return;
@@ -674,32 +680,18 @@ void print_st_err(struct drbd_conf *mdev,
 }
 
 
-#define drbd_peer_str drbd_role_str
-#define drbd_pdsk_str drbd_disk_str
-
-#define drbd_susp_str(A)     ((A) ? "1" : "0")
-#define drbd_aftr_isp_str(A) ((A) ? "1" : "0")
-#define drbd_peer_isp_str(A) ((A) ? "1" : "0")
-#define drbd_user_isp_str(A) ((A) ? "1" : "0")
-
-#define PSC(A) \
-       ({ if (ns.A != os.A) { \
-               pbp += sprintf(pbp, #A "( %s -> %s ) ", \
-                             drbd_##A##_str(os.A), \
-                             drbd_##A##_str(ns.A)); \
-       } })
-
 /**
  * is_valid_state() - Returns an SS_ error code if ns is not valid
  * @mdev:      DRBD device.
  * @ns:                State to consider.
  */
-static int is_valid_state(struct drbd_conf *mdev, union drbd_state ns)
+static enum drbd_state_rv
+is_valid_state(struct drbd_conf *mdev, union drbd_state ns)
 {
        /* See drbd_state_sw_errors in drbd_strings.c */
 
        enum drbd_fencing_p fp;
-       int rv = SS_SUCCESS;
+       enum drbd_state_rv rv = SS_SUCCESS;
 
        fp = FP_DONT_CARE;
        if (get_ldev(mdev)) {
@@ -762,10 +754,11 @@ static int is_valid_state(struct drbd_conf *mdev, union drbd_state ns)
  * @ns:                new state.
  * @os:                old state.
  */
-static int is_valid_state_transition(struct drbd_conf *mdev,
-                                    union drbd_state ns, union drbd_state os)
+static enum drbd_state_rv
+is_valid_state_transition(struct drbd_conf *mdev, union drbd_state ns,
+                         union drbd_state os)
 {
-       int rv = SS_SUCCESS;
+       enum drbd_state_rv rv = SS_SUCCESS;
 
        if ((ns.conn == C_STARTING_SYNC_T || ns.conn == C_STARTING_SYNC_S) &&
            os.conn > C_CONNECTED)
@@ -800,6 +793,10 @@ static int is_valid_state_transition(struct drbd_conf *mdev,
            os.conn < C_CONNECTED)
                rv = SS_NEED_CONNECTION;
 
+       if ((ns.conn == C_SYNC_TARGET || ns.conn == C_SYNC_SOURCE)
+           && os.conn < C_WF_REPORT_PARAMS)
+               rv = SS_NEED_CONNECTION; /* No NetworkFailure -> SyncTarget etc... */
+
        return rv;
 }
 
@@ -817,6 +814,7 @@ static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state
                                       union drbd_state ns, const char **warn_sync_abort)
 {
        enum drbd_fencing_p fp;
+       enum drbd_disk_state disk_min, disk_max, pdsk_min, pdsk_max;
 
        fp = FP_DONT_CARE;
        if (get_ldev(mdev)) {
@@ -869,56 +867,6 @@ static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state
                ns.conn = C_CONNECTED;
        }
 
-       if (ns.conn >= C_CONNECTED &&
-           ((ns.disk == D_CONSISTENT || ns.disk == D_OUTDATED) ||
-            (ns.disk == D_NEGOTIATING && ns.conn == C_WF_BITMAP_T))) {
-               switch (ns.conn) {
-               case C_WF_BITMAP_T:
-               case C_PAUSED_SYNC_T:
-                       ns.disk = D_OUTDATED;
-                       break;
-               case C_CONNECTED:
-               case C_WF_BITMAP_S:
-               case C_SYNC_SOURCE:
-               case C_PAUSED_SYNC_S:
-                       ns.disk = D_UP_TO_DATE;
-                       break;
-               case C_SYNC_TARGET:
-                       ns.disk = D_INCONSISTENT;
-                       dev_warn(DEV, "Implicitly set disk state Inconsistent!\n");
-                       break;
-               }
-               if (os.disk == D_OUTDATED && ns.disk == D_UP_TO_DATE)
-                       dev_warn(DEV, "Implicitly set disk from Outdated to UpToDate\n");
-       }
-
-       if (ns.conn >= C_CONNECTED &&
-           (ns.pdsk == D_CONSISTENT || ns.pdsk == D_OUTDATED)) {
-               switch (ns.conn) {
-               case C_CONNECTED:
-               case C_WF_BITMAP_T:
-               case C_PAUSED_SYNC_T:
-               case C_SYNC_TARGET:
-                       ns.pdsk = D_UP_TO_DATE;
-                       break;
-               case C_WF_BITMAP_S:
-               case C_PAUSED_SYNC_S:
-                       /* remap any consistent state to D_OUTDATED,
-                        * but disallow "upgrade" of not even consistent states.
-                        */
-                       ns.pdsk =
-                               (D_DISKLESS < os.pdsk && os.pdsk < D_OUTDATED)
-                               ? os.pdsk : D_OUTDATED;
-                       break;
-               case C_SYNC_SOURCE:
-                       ns.pdsk = D_INCONSISTENT;
-                       dev_warn(DEV, "Implicitly set pdsk Inconsistent!\n");
-                       break;
-               }
-               if (os.pdsk == D_OUTDATED && ns.pdsk == D_UP_TO_DATE)
-                       dev_warn(DEV, "Implicitly set pdsk from Outdated to UpToDate\n");
-       }
-
        /* Connection breaks down before we finished "Negotiating" */
        if (ns.conn < C_CONNECTED && ns.disk == D_NEGOTIATING &&
            get_ldev_if_state(mdev, D_NEGOTIATING)) {
@@ -933,6 +881,94 @@ static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state
                put_ldev(mdev);
        }
 
+       /* D_CONSISTENT and D_OUTDATED vanish when we get connected */
+       if (ns.conn >= C_CONNECTED && ns.conn < C_AHEAD) {
+               if (ns.disk == D_CONSISTENT || ns.disk == D_OUTDATED)
+                       ns.disk = D_UP_TO_DATE;
+               if (ns.pdsk == D_CONSISTENT || ns.pdsk == D_OUTDATED)
+                       ns.pdsk = D_UP_TO_DATE;
+       }
+
+       /* Implications of the connection state on the disk states */
+       disk_min = D_DISKLESS;
+       disk_max = D_UP_TO_DATE;
+       pdsk_min = D_INCONSISTENT;
+       pdsk_max = D_UNKNOWN;
+       switch ((enum drbd_conns)ns.conn) {
+       case C_WF_BITMAP_T:
+       case C_PAUSED_SYNC_T:
+       case C_STARTING_SYNC_T:
+       case C_WF_SYNC_UUID:
+       case C_BEHIND:
+               disk_min = D_INCONSISTENT;
+               disk_max = D_OUTDATED;
+               pdsk_min = D_UP_TO_DATE;
+               pdsk_max = D_UP_TO_DATE;
+               break;
+       case C_VERIFY_S:
+       case C_VERIFY_T:
+               disk_min = D_UP_TO_DATE;
+               disk_max = D_UP_TO_DATE;
+               pdsk_min = D_UP_TO_DATE;
+               pdsk_max = D_UP_TO_DATE;
+               break;
+       case C_CONNECTED:
+               disk_min = D_DISKLESS;
+               disk_max = D_UP_TO_DATE;
+               pdsk_min = D_DISKLESS;
+               pdsk_max = D_UP_TO_DATE;
+               break;
+       case C_WF_BITMAP_S:
+       case C_PAUSED_SYNC_S:
+       case C_STARTING_SYNC_S:
+       case C_AHEAD:
+               disk_min = D_UP_TO_DATE;
+               disk_max = D_UP_TO_DATE;
+               pdsk_min = D_INCONSISTENT;
+               pdsk_max = D_CONSISTENT; /* D_OUTDATED would be nice. But explicit outdate necessary */
+               break;
+       case C_SYNC_TARGET:
+               disk_min = D_INCONSISTENT;
+               disk_max = D_INCONSISTENT;
+               pdsk_min = D_UP_TO_DATE;
+               pdsk_max = D_UP_TO_DATE;
+               break;
+       case C_SYNC_SOURCE:
+               disk_min = D_UP_TO_DATE;
+               disk_max = D_UP_TO_DATE;
+               pdsk_min = D_INCONSISTENT;
+               pdsk_max = D_INCONSISTENT;
+               break;
+       case C_STANDALONE:
+       case C_DISCONNECTING:
+       case C_UNCONNECTED:
+       case C_TIMEOUT:
+       case C_BROKEN_PIPE:
+       case C_NETWORK_FAILURE:
+       case C_PROTOCOL_ERROR:
+       case C_TEAR_DOWN:
+       case C_WF_CONNECTION:
+       case C_WF_REPORT_PARAMS:
+       case C_MASK:
+               break;
+       }
+       if (ns.disk > disk_max)
+               ns.disk = disk_max;
+
+       if (ns.disk < disk_min) {
+               dev_warn(DEV, "Implicitly set disk from %s to %s\n",
+                        drbd_disk_str(ns.disk), drbd_disk_str(disk_min));
+               ns.disk = disk_min;
+       }
+       if (ns.pdsk > pdsk_max)
+               ns.pdsk = pdsk_max;
+
+       if (ns.pdsk < pdsk_min) {
+               dev_warn(DEV, "Implicitly set pdsk from %s to %s\n",
+                        drbd_disk_str(ns.pdsk), drbd_disk_str(pdsk_min));
+               ns.pdsk = pdsk_min;
+       }
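
The table above replaces the two deleted switch blocks: each connection state
now implies a [min, max] window for the local and peer disk state, and ns.disk
and ns.pdsk are clamped into that window afterwards. A reduced userspace
sketch of the clamp step follows (states abbreviated; like the original, the
downgrade past max is silent and only the upgrade to min warns):

#include <stdio.h>

enum disk { D_DISKLESS, D_INCONSISTENT, D_OUTDATED, D_CONSISTENT,
            D_UP_TO_DATE };

static const char *disk_str[] = {
    "Diskless", "Inconsistent", "Outdated", "Consistent", "UpToDate"
};

/* clamp a disk state into the window a connection state implies */
static enum disk clamp_disk(enum disk d, enum disk min, enum disk max)
{
    if (d > max)
        return max;                            /* silent downgrade */
    if (d < min) {
        printf("Implicitly set disk from %s to %s\n",
               disk_str[d], disk_str[min]);    /* like dev_warn() */
        return min;
    }
    return d;
}

int main(void)
{
    /* e.g. a SyncTarget connection forces disk == D_INCONSISTENT */
    enum disk d = clamp_disk(D_UP_TO_DATE, D_INCONSISTENT, D_INCONSISTENT);
    printf("resulting disk state: %s\n", disk_str[d]);
    return 0;
}
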
+
        if (fp == FP_STONITH &&
            (ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.pdsk > D_OUTDATED) &&
            !(os.role == R_PRIMARY && os.conn < C_CONNECTED && os.pdsk > D_OUTDATED))
@@ -961,6 +997,10 @@ static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state
 /* helper for __drbd_set_state */
 static void set_ov_position(struct drbd_conf *mdev, enum drbd_conns cs)
 {
+       if (mdev->agreed_pro_version < 90)
+               mdev->ov_start_sector = 0;
+       mdev->rs_total = drbd_bm_bits(mdev);
+       mdev->ov_position = 0;
        if (cs == C_VERIFY_T) {
                /* starting online verify from an arbitrary position
                 * does not fit well into the existing protocol.
@@ -970,11 +1010,15 @@ static void set_ov_position(struct drbd_conf *mdev, enum drbd_conns cs)
                mdev->ov_start_sector = ~(sector_t)0;
        } else {
                unsigned long bit = BM_SECT_TO_BIT(mdev->ov_start_sector);
-               if (bit >= mdev->rs_total)
+               if (bit >= mdev->rs_total) {
                        mdev->ov_start_sector =
                                BM_BIT_TO_SECT(mdev->rs_total - 1);
+                       mdev->rs_total = 1;
+               } else
+                       mdev->rs_total -= bit;
                mdev->ov_position = mdev->ov_start_sector;
        }
+       mdev->ov_left = mdev->rs_total;
 }
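
set_ov_position() now initializes ov_start_sector, rs_total and ov_position
unconditionally and, on the verify-source side, clamps the requested start bit
against the bitmap size so that rs_total counts only the bits left to verify.
A sketch of that sector/bit arithmetic, assuming DRBD's usual 4 KiB bitmap
granularity (eight 512-byte sectors per bit; the macros here are simplified):

#include <stdio.h>
#include <stdint.h>

/* assumed granularity: one bitmap bit covers 4096 bytes = 8 sectors */
#define BM_SECT_PER_BIT   8ULL
#define BM_SECT_TO_BIT(s) ((s) / BM_SECT_PER_BIT)
#define BM_BIT_TO_SECT(b) ((b) * BM_SECT_PER_BIT)

int main(void)
{
    uint64_t bm_bits = 1024;          /* total bits in the bitmap */
    uint64_t ov_start_sector = 9000;  /* requested verify start sector */

    uint64_t rs_total = bm_bits;
    uint64_t bit = BM_SECT_TO_BIT(ov_start_sector);
    if (bit >= rs_total) {
        /* start lies beyond the end: verify just the last bit */
        ov_start_sector = BM_BIT_TO_SECT(rs_total - 1);
        rs_total = 1;
    } else {
        rs_total -= bit;              /* only the remainder is verified */
    }
    printf("start sector %llu, %llu bits to verify\n",
           (unsigned long long)ov_start_sector,
           (unsigned long long)rs_total);
    return 0;
}
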
 
 static void drbd_resume_al(struct drbd_conf *mdev)
@@ -992,12 +1036,12 @@ static void drbd_resume_al(struct drbd_conf *mdev)
  *
  * Caller needs to hold req_lock, and global_state_lock. Do not call directly.
  */
-int __drbd_set_state(struct drbd_conf *mdev,
-                   union drbd_state ns, enum chg_state_flags flags,
-                   struct completion *done)
+enum drbd_state_rv
+__drbd_set_state(struct drbd_conf *mdev, union drbd_state ns,
+                enum chg_state_flags flags, struct completion *done)
 {
        union drbd_state os;
-       int rv = SS_SUCCESS;
+       enum drbd_state_rv rv = SS_SUCCESS;
        const char *warn_sync_abort = NULL;
        struct after_state_chg_work *ascw;
 
@@ -1033,22 +1077,46 @@ int __drbd_set_state(struct drbd_conf *mdev,
                dev_warn(DEV, "%s aborted.\n", warn_sync_abort);
 
        {
-               char *pbp, pb[300];
-               pbp = pb;
-               *pbp = 0;
-               PSC(role);
-               PSC(peer);
-               PSC(conn);
-               PSC(disk);
-               PSC(pdsk);
-               if (is_susp(ns) != is_susp(os))
-                       pbp += sprintf(pbp, "susp( %s -> %s ) ",
-                                      drbd_susp_str(is_susp(os)),
-                                      drbd_susp_str(is_susp(ns)));
-               PSC(aftr_isp);
-               PSC(peer_isp);
-               PSC(user_isp);
-               dev_info(DEV, "%s\n", pb);
+       char *pbp, pb[300];
+       pbp = pb;
+       *pbp = 0;
+       if (ns.role != os.role)
+               pbp += sprintf(pbp, "role( %s -> %s ) ",
+                              drbd_role_str(os.role),
+                              drbd_role_str(ns.role));
+       if (ns.peer != os.peer)
+               pbp += sprintf(pbp, "peer( %s -> %s ) ",
+                              drbd_role_str(os.peer),
+                              drbd_role_str(ns.peer));
+       if (ns.conn != os.conn)
+               pbp += sprintf(pbp, "conn( %s -> %s ) ",
+                              drbd_conn_str(os.conn),
+                              drbd_conn_str(ns.conn));
+       if (ns.disk != os.disk)
+               pbp += sprintf(pbp, "disk( %s -> %s ) ",
+                              drbd_disk_str(os.disk),
+                              drbd_disk_str(ns.disk));
+       if (ns.pdsk != os.pdsk)
+               pbp += sprintf(pbp, "pdsk( %s -> %s ) ",
+                              drbd_disk_str(os.pdsk),
+                              drbd_disk_str(ns.pdsk));
+       if (is_susp(ns) != is_susp(os))
+               pbp += sprintf(pbp, "susp( %d -> %d ) ",
+                              is_susp(os),
+                              is_susp(ns));
+       if (ns.aftr_isp != os.aftr_isp)
+               pbp += sprintf(pbp, "aftr_isp( %d -> %d ) ",
+                              os.aftr_isp,
+                              ns.aftr_isp);
+       if (ns.peer_isp != os.peer_isp)
+               pbp += sprintf(pbp, "peer_isp( %d -> %d ) ",
+                              os.peer_isp,
+                              ns.peer_isp);
+       if (ns.user_isp != os.user_isp)
+               pbp += sprintf(pbp, "user_isp( %d -> %d ) ",
+                              os.user_isp,
+                              ns.user_isp);
+       dev_info(DEV, "%s\n", pb);
        }
 
        /* solve the race between becoming unconfigured,
@@ -1074,6 +1142,10 @@ int __drbd_set_state(struct drbd_conf *mdev,
                atomic_inc(&mdev->local_cnt);
 
        mdev->state = ns;
+
+       if (os.disk == D_ATTACHING && ns.disk >= D_NEGOTIATING)
+               drbd_print_uuids(mdev, "attached to UUIDs");
+
        wake_up(&mdev->misc_wait);
        wake_up(&mdev->state_wait);
 
@@ -1081,7 +1153,7 @@ int __drbd_set_state(struct drbd_conf *mdev,
        if ((os.conn == C_VERIFY_S || os.conn == C_VERIFY_T) &&
            ns.conn < C_CONNECTED) {
                mdev->ov_start_sector =
-                       BM_BIT_TO_SECT(mdev->rs_total - mdev->ov_left);
+                       BM_BIT_TO_SECT(drbd_bm_bits(mdev) - mdev->ov_left);
                dev_info(DEV, "Online Verify reached sector %llu\n",
                        (unsigned long long)mdev->ov_start_sector);
        }
@@ -1106,14 +1178,7 @@ int __drbd_set_state(struct drbd_conf *mdev,
                unsigned long now = jiffies;
                int i;
 
-               mdev->ov_position = 0;
-               mdev->rs_total = drbd_bm_bits(mdev);
-               if (mdev->agreed_pro_version >= 90)
-                       set_ov_position(mdev, ns.conn);
-               else
-                       mdev->ov_start_sector = 0;
-               mdev->ov_left = mdev->rs_total
-                             - BM_SECT_TO_BIT(mdev->ov_position);
+               set_ov_position(mdev, ns.conn);
                mdev->rs_start = now;
                mdev->rs_last_events = 0;
                mdev->rs_last_sect_ev = 0;
@@ -1121,10 +1186,12 @@ int __drbd_set_state(struct drbd_conf *mdev,
                mdev->ov_last_oos_start = 0;
 
                for (i = 0; i < DRBD_SYNC_MARKS; i++) {
-                       mdev->rs_mark_left[i] = mdev->rs_total;
+                       mdev->rs_mark_left[i] = mdev->ov_left;
                        mdev->rs_mark_time[i] = now;
                }
 
+               drbd_rs_controller_reset(mdev);
+
                if (ns.conn == C_VERIFY_S) {
                        dev_info(DEV, "Starting Online Verify from sector %llu\n",
                                        (unsigned long long)mdev->ov_position);
@@ -1228,6 +1295,26 @@ static void abw_start_sync(struct drbd_conf *mdev, int rv)
        }
 }
 
+int drbd_bitmap_io_from_worker(struct drbd_conf *mdev,
+               int (*io_fn)(struct drbd_conf *),
+               char *why, enum bm_flag flags)
+{
+       int rv;
+
+       D_ASSERT(current == mdev->worker.task);
+
+       /* open coded non-blocking drbd_suspend_io(mdev); */
+       set_bit(SUSPEND_IO, &mdev->flags);
+
+       drbd_bm_lock(mdev, why, flags);
+       rv = io_fn(mdev);
+       drbd_bm_unlock(mdev);
+
+       drbd_resume_io(mdev);
+
+       return rv;
+}
+
 /**
  * after_state_ch() - Perform after state change actions that may sleep
  * @mdev:      DRBD device.
@@ -1266,16 +1353,14 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
 
        nsm.i = -1;
        if (ns.susp_nod) {
-               if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED) {
-                       if (ns.conn == C_CONNECTED)
-                               what = resend, nsm.susp_nod = 0;
-                       else /* ns.conn > C_CONNECTED */
-                               dev_err(DEV, "Unexpected Resynd going on!\n");
-               }
+               if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED)
+                       what = resend;
 
                if (os.disk == D_ATTACHING && ns.disk > D_ATTACHING)
-                       what = restart_frozen_disk_io, nsm.susp_nod = 0;
+                       what = restart_frozen_disk_io;
 
+               if (what != nothing)
+                       nsm.susp_nod = 0;
        }
 
        if (ns.susp_fen) {
@@ -1306,13 +1391,30 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
                spin_unlock_irq(&mdev->req_lock);
        }
 
+       /* Became sync source.  With protocol >= 96, we still need to send out
+        * the sync uuid now. Need to do that before any drbd_send_state, or
+        * the other side may go "paused sync" before receiving the sync uuids,
+        * which is unexpected. */
+       if ((os.conn != C_SYNC_SOURCE && os.conn != C_PAUSED_SYNC_S) &&
+           (ns.conn == C_SYNC_SOURCE || ns.conn == C_PAUSED_SYNC_S) &&
+           mdev->agreed_pro_version >= 96 && get_ldev(mdev)) {
+               drbd_gen_and_send_sync_uuid(mdev);
+               put_ldev(mdev);
+       }
+
        /* Do not change the order of the if above and the two below... */
        if (os.pdsk == D_DISKLESS && ns.pdsk > D_DISKLESS) {      /* attach on the peer */
                drbd_send_uuids(mdev);
                drbd_send_state(mdev);
        }
-       if (os.conn != C_WF_BITMAP_S && ns.conn == C_WF_BITMAP_S)
-               drbd_queue_bitmap_io(mdev, &drbd_send_bitmap, NULL, "send_bitmap (WFBitMapS)");
+       /* No point in queuing send_bitmap if we don't have a connection
+        * anymore, so also check the _current_ state, not only the new state
+        * at the time this work was queued. */
+       if (os.conn != C_WF_BITMAP_S && ns.conn == C_WF_BITMAP_S &&
+           mdev->state.conn == C_WF_BITMAP_S)
+               drbd_queue_bitmap_io(mdev, &drbd_send_bitmap, NULL,
+                               "send_bitmap (WFBitMapS)",
+                               BM_LOCKED_TEST_ALLOWED);
 
        /* Lost contact to peer's copy of the data */
        if ((os.pdsk >= D_INCONSISTENT &&
@@ -1343,7 +1445,23 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
 
                /* D_DISKLESS Peer becomes secondary */
                if (os.peer == R_PRIMARY && ns.peer == R_SECONDARY)
-                       drbd_al_to_on_disk_bm(mdev);
+                       /* We may still be Primary ourselves.
+                        * No harm done if the bitmap still changes,
+                        * redirtied pages will follow later. */
+                       drbd_bitmap_io_from_worker(mdev, &drbd_bm_write,
+                               "demote diskless peer", BM_LOCKED_SET_ALLOWED);
+               put_ldev(mdev);
+       }
+
+       /* Write out all changed bits on demote.
+        * Though, no need to do that just yet
+        * if there is still a resync going on */
+       if (os.role == R_PRIMARY && ns.role == R_SECONDARY &&
+               mdev->state.conn <= C_CONNECTED && get_ldev(mdev)) {
+               /* No changes to the bitmap expected this time, so assert that,
+                * even though no harm would be done if it did change. */
+               drbd_bitmap_io_from_worker(mdev, &drbd_bm_write,
+                               "demote", BM_LOCKED_TEST_ALLOWED);
                put_ldev(mdev);
        }
 
@@ -1371,15 +1489,23 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
        if (os.conn == C_WF_REPORT_PARAMS && ns.conn >= C_CONNECTED)
                drbd_send_state(mdev);
 
+       if (os.conn != C_AHEAD && ns.conn == C_AHEAD)
+               drbd_send_state(mdev);
+
        /* We are in the process of starting a full sync... */
        if ((os.conn != C_STARTING_SYNC_T && ns.conn == C_STARTING_SYNC_T) ||
            (os.conn != C_STARTING_SYNC_S && ns.conn == C_STARTING_SYNC_S))
-               drbd_queue_bitmap_io(mdev, &drbd_bmio_set_n_write, &abw_start_sync, "set_n_write from StartingSync");
+               /* no other bitmap changes expected during this phase */
+               drbd_queue_bitmap_io(mdev,
+                       &drbd_bmio_set_n_write, &abw_start_sync,
+                       "set_n_write from StartingSync", BM_LOCKED_TEST_ALLOWED);
 
        /* We are invalidating ourselves... */
        if (os.conn < C_CONNECTED && ns.conn < C_CONNECTED &&
            os.disk > D_INCONSISTENT && ns.disk == D_INCONSISTENT)
-               drbd_queue_bitmap_io(mdev, &drbd_bmio_set_n_write, NULL, "set_n_write from invalidate");
+               /* other bitmap operations are expected during this phase */
+               drbd_queue_bitmap_io(mdev, &drbd_bmio_set_n_write, NULL,
+                       "set_n_write from invalidate", BM_LOCKED_MASK);
 
        /* first half of local IO error, failure to attach,
         * or administrative detach */
@@ -1434,8 +1560,6 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
 
                if (drbd_send_state(mdev))
                        dev_warn(DEV, "Notified peer that I'm now diskless.\n");
-               else
-                       dev_err(DEV, "Sending state for being diskless failed\n");
                /* corresponding get_ldev in __drbd_set_state
                 * this may finally trigger drbd_ldev_destroy. */
                put_ldev(mdev);
@@ -1459,6 +1583,19 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
        if (os.disk < D_UP_TO_DATE && os.conn >= C_SYNC_SOURCE && ns.conn == C_CONNECTED)
                drbd_send_state(mdev);
 
+       /* This triggers bitmap writeout of potentially still unwritten pages
+        * if the resync finished cleanly, or aborted because of peer disk
+        * failure, or because of connection loss.
+        * For resync aborted because of local disk failure, we cannot do
+        * any bitmap writeout anymore.
+        * No harm done if some bits change during this phase.
+        */
+       if (os.conn > C_CONNECTED && ns.conn <= C_CONNECTED && get_ldev(mdev)) {
+               drbd_queue_bitmap_io(mdev, &drbd_bm_write, NULL,
+                       "write from resync_finished", BM_LOCKED_SET_ALLOWED);
+               put_ldev(mdev);
+       }
+
        /* free tl_hash if we Got thawed and are C_STANDALONE */
        if (ns.conn == C_STANDALONE && !is_susp(ns) && mdev->tl_hash)
                drbd_free_tl_hash(mdev);
@@ -1559,7 +1696,7 @@ int drbd_thread_start(struct drbd_thread *thi)
                if (!try_module_get(THIS_MODULE)) {
                        dev_err(DEV, "Failed to get module reference in drbd_thread_start\n");
                        spin_unlock_irqrestore(&thi->t_lock, flags);
-                       return FALSE;
+                       return false;
                }
 
                init_completion(&thi->stop);
@@ -1576,7 +1713,7 @@ int drbd_thread_start(struct drbd_thread *thi)
                        dev_err(DEV, "Couldn't start thread\n");
 
                        module_put(THIS_MODULE);
-                       return FALSE;
+                       return false;
                }
                spin_lock_irqsave(&thi->t_lock, flags);
                thi->task = nt;
@@ -1596,7 +1733,7 @@ int drbd_thread_start(struct drbd_thread *thi)
                break;
        }
 
-       return TRUE;
+       return true;
 }
 
 
@@ -1694,8 +1831,8 @@ int _drbd_send_cmd(struct drbd_conf *mdev, struct socket *sock,
 {
        int sent, ok;
 
-       ERR_IF(!h) return FALSE;
-       ERR_IF(!size) return FALSE;
+       ERR_IF(!h) return false;
+       ERR_IF(!size) return false;
 
        h->magic   = BE_DRBD_MAGIC;
        h->command = cpu_to_be16(cmd);
@@ -1704,8 +1841,8 @@ int _drbd_send_cmd(struct drbd_conf *mdev, struct socket *sock,
        sent = drbd_send(mdev, sock, h, size, msg_flags);
 
        ok = (sent == size);
-       if (!ok)
-               dev_err(DEV, "short sent %s size=%d sent=%d\n",
+       if (!ok && !signal_pending(current))
+               dev_warn(DEV, "short sent %s size=%d sent=%d\n",
                    cmdname(cmd), (int)size, sent);
        return ok;
 }
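
The hunk above also changes the short-send diagnostic: it is demoted from
dev_err() to dev_warn() and skipped entirely while a signal is pending, since
an interrupted send is the expected case during teardown. A rough userspace
parallel using write(2), where EINTR plays the role of signal_pending()
(send_all() is a hypothetical helper, not a DRBD function):

#include <errno.h>
#include <stdio.h>
#include <unistd.h>

/* send a buffer; warn about short writes unless a signal interrupted us */
static int send_all(int fd, const void *buf, size_t size)
{
    ssize_t sent = write(fd, buf, size);
    int ok = (sent == (ssize_t)size);

    /* mirrors the patched condition: !ok && !signal_pending(current) */
    if (!ok && !(sent < 0 && errno == EINTR))
        fprintf(stderr, "short sent size=%zu sent=%zd\n", size, sent);
    return ok;
}

int main(void)
{
    static const char msg[] = "hello\n";
    return send_all(STDOUT_FILENO, msg, sizeof(msg) - 1) ? 0 : 1;
}
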
@@ -1840,7 +1977,7 @@ int drbd_send_protocol(struct drbd_conf *mdev)
                else {
                        dev_err(DEV, "--dry-run is not supported by peer");
                        kfree(p);
-                       return 0;
+                       return -1;
                }
        }
        p->conn_flags    = cpu_to_be32(cf);
@@ -1888,12 +2025,36 @@ int drbd_send_uuids_skip_initial_sync(struct drbd_conf *mdev)
        return _drbd_send_uuids(mdev, 8);
 }
 
+void drbd_print_uuids(struct drbd_conf *mdev, const char *text)
+{
+       if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
+               u64 *uuid = mdev->ldev->md.uuid;
+               dev_info(DEV, "%s %016llX:%016llX:%016llX:%016llX\n",
+                    text,
+                    (unsigned long long)uuid[UI_CURRENT],
+                    (unsigned long long)uuid[UI_BITMAP],
+                    (unsigned long long)uuid[UI_HISTORY_START],
+                    (unsigned long long)uuid[UI_HISTORY_END]);
+               put_ldev(mdev);
+       } else {
+               dev_info(DEV, "%s effective data uuid: %016llX\n",
+                               text,
+                               (unsigned long long)mdev->ed_uuid);
+       }
+}
 
-int drbd_send_sync_uuid(struct drbd_conf *mdev, u64 val)
+int drbd_gen_and_send_sync_uuid(struct drbd_conf *mdev)
 {
        struct p_rs_uuid p;
+       u64 uuid;
 
-       p.uuid = cpu_to_be64(val);
+       D_ASSERT(mdev->state.disk == D_UP_TO_DATE);
+
+       uuid = mdev->ldev->md.uuid[UI_BITMAP] + UUID_NEW_BM_OFFSET;
+       drbd_uuid_set(mdev, UI_BITMAP, uuid);
+       drbd_print_uuids(mdev, "updated sync UUID");
+       drbd_md_sync(mdev);
+       p.uuid = cpu_to_be64(uuid);
 
        return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_SYNC_UUID,
                             (struct p_header80 *)&p, sizeof(p));
@@ -1921,7 +2082,7 @@ int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags fl
        p.d_size = cpu_to_be64(d_size);
        p.u_size = cpu_to_be64(u_size);
        p.c_size = cpu_to_be64(trigger_reply ? 0 : drbd_get_capacity(mdev->this_bdev));
-       p.max_segment_size = cpu_to_be32(queue_max_segment_size(mdev->rq_queue));
+       p.max_bio_size = cpu_to_be32(queue_max_hw_sectors(mdev->rq_queue) << 9);
        p.queue_order_type = cpu_to_be16(q_order_type);
        p.dds_flags = cpu_to_be16(flags);
 
@@ -1972,7 +2133,7 @@ int drbd_send_state_req(struct drbd_conf *mdev,
                             (struct p_header80 *)&p, sizeof(p));
 }
 
-int drbd_send_sr_reply(struct drbd_conf *mdev, int retcode)
+int drbd_send_sr_reply(struct drbd_conf *mdev, enum drbd_state_rv retcode)
 {
        struct p_req_state_reply p;
 
@@ -2076,9 +2237,15 @@ int fill_bitmap_rle_bits(struct drbd_conf *mdev,
        return len;
 }
 
-enum { OK, FAILED, DONE }
+/**
+ * send_bitmap_rle_or_plain
+ *
+ * Return 0 when done, 1 when another iteration is needed, and a negative error
+ * code upon failure.
+ */
+static int
 send_bitmap_rle_or_plain(struct drbd_conf *mdev,
-       struct p_header80 *h, struct bm_xfer_ctx *c)
+                        struct p_header80 *h, struct bm_xfer_ctx *c)
 {
        struct p_compressed_bm *p = (void*)h;
        unsigned long num_words;
@@ -2088,7 +2255,7 @@ send_bitmap_rle_or_plain(struct drbd_conf *mdev,
        len = fill_bitmap_rle_bits(mdev, p, c);
 
        if (len < 0)
-               return FAILED;
+               return -EIO;
 
        if (len) {
                DCBP_set_code(p, RLE_VLI_Bits);
@@ -2118,11 +2285,14 @@ send_bitmap_rle_or_plain(struct drbd_conf *mdev,
                if (c->bit_offset > c->bm_bits)
                        c->bit_offset = c->bm_bits;
        }
-       ok = ok ? ((len == 0) ? DONE : OK) : FAILED;
-
-       if (ok == DONE)
-               INFO_bm_xfer_stats(mdev, "send", c);
-       return ok;
+       if (ok) {
+               if (len == 0) {
+                       INFO_bm_xfer_stats(mdev, "send", c);
+                       return 0;
+               } else
+                       return 1;
+       }
+       return -EIO;
 }
 
 /* See the comment at receive_bitmap() */
@@ -2130,16 +2300,16 @@ int _drbd_send_bitmap(struct drbd_conf *mdev)
 {
        struct bm_xfer_ctx c;
        struct p_header80 *p;
-       int ret;
+       int err;
 
-       ERR_IF(!mdev->bitmap) return FALSE;
+       ERR_IF(!mdev->bitmap) return false;
 
        /* maybe we should use some per thread scratch page,
         * and allocate that during initial device creation? */
        p = (struct p_header80 *) __get_free_page(GFP_NOIO);
        if (!p) {
                dev_err(DEV, "failed to allocate one page buffer in %s\n", __func__);
-               return FALSE;
+               return false;
        }
 
        if (get_ldev(mdev)) {
@@ -2165,11 +2335,11 @@ int _drbd_send_bitmap(struct drbd_conf *mdev)
        };
 
        do {
-               ret = send_bitmap_rle_or_plain(mdev, p, &c);
-       } while (ret == OK);
+               err = send_bitmap_rle_or_plain(mdev, p, &c);
+       } while (err > 0);
 
        free_page((unsigned long) p);
-       return (ret == DONE);
+       return err == 0;
 }
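
The private OK/FAILED/DONE enum gives way to the usual kernel convention of
0 = done, positive = call again, negative = error, which is what lets
_drbd_send_bitmap() above drive the whole transfer with a bare do/while.
A toy illustration of the convention (step() is a made-up stand-in for
send_bitmap_rle_or_plain()):

#include <errno.h>
#include <stdio.h>

/* 0 = done, 1 = call again, negative = error code */
static int step(int *remaining)
{
    if (*remaining < 0)
        return -EIO;   /* would be a failed send in drbd */
    if (*remaining == 0)
        return 0;      /* done; transfer stats would be logged here */
    (*remaining)--;
    return 1;          /* another chunk still to send */
}

int main(void)
{
    int remaining = 3, err;

    do {
        err = step(&remaining);
    } while (err > 0);

    printf("finished with %d (%s)\n", err, err == 0 ? "ok" : "error");
    return err == 0 ? 0 : 1;
}
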
 
 int drbd_send_bitmap(struct drbd_conf *mdev)
@@ -2192,7 +2362,7 @@ int drbd_send_b_ack(struct drbd_conf *mdev, u32 barrier_nr, u32 set_size)
        p.set_size = cpu_to_be32(set_size);
 
        if (mdev->state.conn < C_CONNECTED)
-               return FALSE;
+               return false;
        ok = drbd_send_cmd(mdev, USE_META_SOCKET, P_BARRIER_ACK,
                        (struct p_header80 *)&p, sizeof(p));
        return ok;
@@ -2220,7 +2390,7 @@ static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packets cmd,
        p.seq_num  = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
 
        if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED)
-               return FALSE;
+               return false;
        ok = drbd_send_cmd(mdev, USE_META_SOCKET, cmd,
                                (struct p_header80 *)&p, sizeof(p));
        return ok;
@@ -2326,8 +2496,8 @@ int drbd_send_ov_request(struct drbd_conf *mdev, sector_t sector, int size)
 }
 
 /* called on sndtimeo
- * returns FALSE if we should retry,
- * TRUE if we think connection is dead
+ * returns false if we should retry,
+ * true if we think the connection is dead
  */
 static int we_should_drop_the_connection(struct drbd_conf *mdev, struct socket *sock)
 {
@@ -2340,7 +2510,7 @@ static int we_should_drop_the_connection(struct drbd_conf *mdev, struct socket *
                || mdev->state.conn < C_CONNECTED;
 
        if (drop_it)
-               return TRUE;
+               return true;
 
        drop_it = !--mdev->ko_count;
        if (!drop_it) {
@@ -2531,13 +2701,39 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
        if (ok && dgs) {
                dgb = mdev->int_dig_out;
                drbd_csum_bio(mdev, mdev->integrity_w_tfm, req->master_bio, dgb);
-               ok = drbd_send(mdev, mdev->data.socket, dgb, dgs, 0);
+               ok = dgs == drbd_send(mdev, mdev->data.socket, dgb, dgs, 0);
        }
        if (ok) {
-               if (mdev->net_conf->wire_protocol == DRBD_PROT_A)
+               /* For protocol A, we have to memcpy the payload into
+                * socket buffers, as we may complete right away
+                * as soon as we handed it over to tcp, at which point the data
+                * pages may become invalid.
+                *
+                * With data integrity enabled, we copy it as well, so we can
+                * be sure that even if the bio pages may still be modified,
+                * it won't change the data on the wire; thus if the digest
+                * checks out ok after sending on this side, but does not
+                * match on the receiving side, we have certainly detected
+                * corruption elsewhere.
+                */
+               if (mdev->net_conf->wire_protocol == DRBD_PROT_A || dgs)
                        ok = _drbd_send_bio(mdev, req->master_bio);
                else
                        ok = _drbd_send_zc_bio(mdev, req->master_bio);
+
+               /* double check digest, sometimes buffers have been modified in flight. */
+               if (dgs > 0 && dgs <= 64) {
+                       /* 64 bytes (512 bits) is the largest digest size
+                        * currently supported in kernel crypto. */
+                       unsigned char digest[64];
+                       drbd_csum_bio(mdev, mdev->integrity_w_tfm, req->master_bio, digest);
+                       if (memcmp(mdev->int_dig_out, digest, dgs)) {
+                               dev_warn(DEV,
+                                       "Digest mismatch, buffer modified by upper layers during write: %llus +%u\n",
+                                       (unsigned long long)req->sector, req->size);
+                       }
+               } /* else if (dgs > 64) {
+                    ... Be noisy about digest too large ...
+               } */
        }
 
        drbd_put_data_sock(mdev);
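
With a data-integrity transform configured, the sender above now recomputes
the digest after the payload has been pushed out and compares it with the
digest sent first: a mismatch proves an upper layer modified the pages in
flight, not that the wire corrupted them. A userspace sketch of that
recompute-and-compare step, with a trivial XOR checksum standing in for the
configured crypto hash:

#include <stdio.h>
#include <string.h>
#include <stddef.h>

/* stand-in for drbd_csum_bio() with the configured transform */
static void csum(const unsigned char *buf, size_t len, unsigned char out[4])
{
    memset(out, 0, 4);
    for (size_t i = 0; i < len; i++)
        out[i % 4] ^= buf[i];
}

int main(void)
{
    unsigned char page[16] = "payload-payload";
    unsigned char dig_sent[4], dig_after[4];

    csum(page, sizeof(page), dig_sent);   /* digest that went on the wire */
    page[0] ^= 0xff;                      /* upper layer redirties the page */
    csum(page, sizeof(page), dig_after);  /* recompute after "sending" */

    if (memcmp(dig_sent, dig_after, sizeof(dig_sent)))
        fprintf(stderr, "Digest mismatch, buffer modified during write\n");
    return 0;
}
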
@@ -2587,7 +2783,7 @@ int drbd_send_block(struct drbd_conf *mdev, enum drbd_packets cmd,
        if (ok && dgs) {
                dgb = mdev->int_dig_out;
                drbd_csum_ee(mdev, mdev->integrity_w_tfm, e, dgb);
-               ok = drbd_send(mdev, mdev->data.socket, dgb, dgs, 0);
+               ok = dgs == drbd_send(mdev, mdev->data.socket, dgb, dgs, 0);
        }
        if (ok)
                ok = _drbd_send_zc_ee(mdev, e);
@@ -2597,6 +2793,16 @@ int drbd_send_block(struct drbd_conf *mdev, enum drbd_packets cmd,
        return ok;
 }
 
+int drbd_send_oos(struct drbd_conf *mdev, struct drbd_request *req)
+{
+       struct p_block_desc p;
+
+       p.sector  = cpu_to_be64(req->sector);
+       p.blksize = cpu_to_be32(req->size);
+
+       return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_OUT_OF_SYNC, &p.head, sizeof(p));
+}
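
drbd_send_oos() is new: a minimal packet carrying only the sector and length
of a request the peer should mark out of sync (used by the new Ahead/Behind
handling). Both fields are byte-swapped to big-endian before they hit the
wire; a sketch of the packing, with glibc's htobe64()/htobe32() standing in
for cpu_to_be64()/cpu_to_be32() (the struct layout is illustrative only, not
the real p_block_desc with its header):

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

/* hypothetical wire layout mirroring the sector/blksize payload */
struct block_desc {
    uint64_t sector;   /* big-endian on the wire */
    uint32_t blksize;  /* big-endian on the wire */
} __attribute__((packed));

int main(void)
{
    struct block_desc p = {
        .sector  = htobe64(123456789ULL), /* cpu_to_be64() equivalent */
        .blksize = htobe32(4096),         /* cpu_to_be32() equivalent */
    };
    /* a real sender would now hand &p to the data socket */
    printf("packed %zu bytes\n", sizeof(p));
    return 0;
}
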
+
 /*
   drbd_send distinguishes two cases:
 
@@ -2770,6 +2976,7 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
        atomic_set(&mdev->pp_in_use_by_net, 0);
        atomic_set(&mdev->rs_sect_in, 0);
        atomic_set(&mdev->rs_sect_ev, 0);
+       atomic_set(&mdev->ap_in_flight, 0);
 
        mutex_init(&mdev->md_io_mutex);
        mutex_init(&mdev->data.mutex);
@@ -2798,19 +3005,27 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
        INIT_LIST_HEAD(&mdev->unplug_work.list);
        INIT_LIST_HEAD(&mdev->go_diskless.list);
        INIT_LIST_HEAD(&mdev->md_sync_work.list);
+       INIT_LIST_HEAD(&mdev->start_resync_work.list);
        INIT_LIST_HEAD(&mdev->bm_io_work.w.list);
 
-       mdev->resync_work.cb  = w_resync_inactive;
+       mdev->resync_work.cb  = w_resync_timer;
        mdev->unplug_work.cb  = w_send_write_hint;
        mdev->go_diskless.cb  = w_go_diskless;
        mdev->md_sync_work.cb = w_md_sync;
        mdev->bm_io_work.w.cb = w_bitmap_io;
+       mdev->start_resync_work.cb = w_start_resync;
        init_timer(&mdev->resync_timer);
        init_timer(&mdev->md_sync_timer);
+       init_timer(&mdev->start_resync_timer);
+       init_timer(&mdev->request_timer);
        mdev->resync_timer.function = resync_timer_fn;
        mdev->resync_timer.data = (unsigned long) mdev;
        mdev->md_sync_timer.function = md_sync_timer_fn;
        mdev->md_sync_timer.data = (unsigned long) mdev;
+       mdev->start_resync_timer.function = start_resync_timer_fn;
+       mdev->start_resync_timer.data = (unsigned long) mdev;
+       mdev->request_timer.function = request_timer_fn;
+       mdev->request_timer.data = (unsigned long) mdev;
 
        init_waitqueue_head(&mdev->misc_wait);
        init_waitqueue_head(&mdev->state_wait);
@@ -2881,6 +3096,8 @@ void drbd_mdev_cleanup(struct drbd_conf *mdev)
        D_ASSERT(list_empty(&mdev->resync_work.list));
        D_ASSERT(list_empty(&mdev->unplug_work.list));
        D_ASSERT(list_empty(&mdev->go_diskless.list));
+
+       drbd_set_defaults(mdev);
 }
 
 
@@ -2923,7 +3140,7 @@ static void drbd_destroy_mempools(void)
 static int drbd_create_mempools(void)
 {
        struct page *page;
-       const int number = (DRBD_MAX_SEGMENT_SIZE/PAGE_SIZE) * minor_count;
+       const int number = (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * minor_count;
        int i;
 
        /* prepare our caches and mempools */
@@ -3087,11 +3304,20 @@ static void drbd_cleanup(void)
 
        unregister_reboot_notifier(&drbd_notifier);
 
+       /* first remove proc,
+        * drbdsetup uses it's presence to detect
+        * whether DRBD is loaded.
+        * If we would get stuck in proc removal,
+        * but have netlink already deregistered,
+        * some drbdsetup commands may wait forever
+        * for an answer.
+        */
+       if (drbd_proc)
+               remove_proc_entry("drbd", NULL);
+
        drbd_nl_cleanup();
 
        if (minor_table) {
-               if (drbd_proc)
-                       remove_proc_entry("drbd", NULL);
                i = minor_count;
                while (i--)
                        drbd_delete_device(i);
@@ -3119,7 +3345,7 @@ static int drbd_congested(void *congested_data, int bdi_bits)
        char reason = '-';
        int r = 0;
 
-       if (!__inc_ap_bio_cond(mdev)) {
+       if (!may_inc_ap_bio(mdev)) {
                /* DRBD has frozen IO */
                r = bdi_bits;
                reason = 'd';
@@ -3172,7 +3398,7 @@ struct drbd_conf *drbd_new_device(unsigned int minor)
                goto out_no_disk;
        mdev->vdisk = disk;
 
-       set_disk_ro(disk, TRUE);
+       set_disk_ro(disk, true);
 
        disk->queue = q;
        disk->major = DRBD_MAJOR;
@@ -3188,8 +3414,8 @@ struct drbd_conf *drbd_new_device(unsigned int minor)
        q->backing_dev_info.congested_fn = drbd_congested;
        q->backing_dev_info.congested_data = mdev;
 
-       blk_queue_make_request(q, drbd_make_request_26);
-       blk_queue_max_segment_size(q, DRBD_MAX_SEGMENT_SIZE);
+       blk_queue_make_request(q, drbd_make_request);
+       blk_queue_max_hw_sectors(q, DRBD_MAX_BIO_SIZE >> 9);
        blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
        blk_queue_merge_bvec(q, drbd_merge_bvec);
        q->queue_lock = &mdev->req_lock;
@@ -3251,6 +3477,7 @@ void drbd_free_mdev(struct drbd_conf *mdev)
        put_disk(mdev->vdisk);
        blk_cleanup_queue(mdev->rq_queue);
        free_cpumask_var(mdev->cpu_mask);
+       drbd_free_tl_hash(mdev);
        kfree(mdev);
 }
 
@@ -3266,7 +3493,7 @@ int __init drbd_init(void)
                return -EINVAL;
        }
 
-       if (1 > minor_count || minor_count > 255) {
+       if (minor_count < DRBD_MINOR_COUNT_MIN || minor_count > DRBD_MINOR_COUNT_MAX) {
                printk(KERN_ERR
                        "drbd: invalid minor_count (%d)\n", minor_count);
 #ifdef MODULE
@@ -3448,7 +3675,7 @@ void drbd_md_sync(struct drbd_conf *mdev)
        if (!drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE)) {
                /* this was a try anyways ... */
                dev_err(DEV, "meta data update failed!\n");
-               drbd_chk_io_error(mdev, 1, TRUE);
+               drbd_chk_io_error(mdev, 1, true);
        }
 
        /* Update mdev->ldev->md.la_size_sect,
@@ -3464,7 +3691,7 @@ void drbd_md_sync(struct drbd_conf *mdev)
  * @mdev:      DRBD device.
  * @bdev:      Device from which the meta data should be read in.
  *
- * Return 0 (NO_ERROR) on success, and an enum drbd_ret_codes in case
+ * Return 0 (NO_ERROR) on success, and an enum drbd_ret_code in case
  * something goes wrong.  Currently only: ERR_IO_MD_DISK, ERR_MD_INVALID.
  */
 int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
@@ -3534,28 +3761,6 @@ int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
        return rv;
 }
 
-static void debug_drbd_uuid(struct drbd_conf *mdev, enum drbd_uuid_index index)
-{
-       static char *uuid_str[UI_EXTENDED_SIZE] = {
-               [UI_CURRENT] = "CURRENT",
-               [UI_BITMAP] = "BITMAP",
-               [UI_HISTORY_START] = "HISTORY_START",
-               [UI_HISTORY_END] = "HISTORY_END",
-               [UI_SIZE] = "SIZE",
-               [UI_FLAGS] = "FLAGS",
-       };
-
-       if (index >= UI_EXTENDED_SIZE) {
-               dev_warn(DEV, " uuid_index >= EXTENDED_SIZE\n");
-               return;
-       }
-
-       dynamic_dev_dbg(DEV, " uuid[%s] now %016llX\n",
-                uuid_str[index],
-                (unsigned long long)mdev->ldev->md.uuid[index]);
-}
-
-
 /**
  * drbd_md_mark_dirty() - Mark meta data super block as dirty
  * @mdev:      DRBD device.
@@ -3585,10 +3790,8 @@ static void drbd_uuid_move_history(struct drbd_conf *mdev) __must_hold(local)
 {
        int i;
 
-       for (i = UI_HISTORY_START; i < UI_HISTORY_END; i++) {
+       for (i = UI_HISTORY_START; i < UI_HISTORY_END; i++)
                mdev->ldev->md.uuid[i+1] = mdev->ldev->md.uuid[i];
-               debug_drbd_uuid(mdev, i+1);
-       }
 }
 
 void _drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
@@ -3603,7 +3806,6 @@ void _drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
        }
 
        mdev->ldev->md.uuid[idx] = val;
-       debug_drbd_uuid(mdev, idx);
        drbd_md_mark_dirty(mdev);
 }
 
@@ -3613,7 +3815,6 @@ void drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
        if (mdev->ldev->md.uuid[idx]) {
                drbd_uuid_move_history(mdev);
                mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[idx];
-               debug_drbd_uuid(mdev, UI_HISTORY_START);
        }
        _drbd_uuid_set(mdev, idx, val);
 }
@@ -3628,14 +3829,16 @@ void drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
 void drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local)
 {
        u64 val;
+       unsigned long long bm_uuid = mdev->ldev->md.uuid[UI_BITMAP];
+
+       if (bm_uuid)
+               dev_warn(DEV, "bm UUID was already set: %llX\n", bm_uuid);
 
-       dev_info(DEV, "Creating new current UUID\n");
-       D_ASSERT(mdev->ldev->md.uuid[UI_BITMAP] == 0);
        mdev->ldev->md.uuid[UI_BITMAP] = mdev->ldev->md.uuid[UI_CURRENT];
-       debug_drbd_uuid(mdev, UI_BITMAP);
 
        get_random_bytes(&val, sizeof(u64));
        _drbd_uuid_set(mdev, UI_CURRENT, val);
+       drbd_print_uuids(mdev, "new current UUID");
        /* get it to stable storage _now_ */
        drbd_md_sync(mdev);
 }
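
drbd_uuid_new_current() now warns instead of asserting when a bitmap UUID is
already present, then rotates the current UUID into the bitmap slot and draws
a fresh random current UUID. A simplified userspace sketch of the rotation,
with rand() standing in for get_random_bytes():

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <time.h>

/* simplified UUID slots, mirroring UI_CURRENT / UI_BITMAP */
enum { UI_CURRENT, UI_BITMAP, UI_SLOTS };

static void uuid_new_current(uint64_t uuid[UI_SLOTS])
{
    if (uuid[UI_BITMAP])   /* warn, do not BUG, like the patched code */
        fprintf(stderr, "bm UUID was already set: %llX\n",
                (unsigned long long)uuid[UI_BITMAP]);

    /* rotate: the old current UUID becomes the bitmap UUID */
    uuid[UI_BITMAP] = uuid[UI_CURRENT];

    /* new random current UUID */
    uuid[UI_CURRENT] = ((uint64_t)rand() << 32) | (uint64_t)rand();
}

int main(void)
{
    uint64_t uuid[UI_SLOTS] = { 0xdeadbeefULL, 0 };
    srand((unsigned)time(NULL));
    uuid_new_current(uuid);
    printf("current %016llX bitmap %016llX\n",
           (unsigned long long)uuid[UI_CURRENT],
           (unsigned long long)uuid[UI_BITMAP]);
    return 0;
}
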
@@ -3649,16 +3852,12 @@ void drbd_uuid_set_bm(struct drbd_conf *mdev, u64 val) __must_hold(local)
                drbd_uuid_move_history(mdev);
                mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[UI_BITMAP];
                mdev->ldev->md.uuid[UI_BITMAP] = 0;
-               debug_drbd_uuid(mdev, UI_HISTORY_START);
-               debug_drbd_uuid(mdev, UI_BITMAP);
        } else {
-               if (mdev->ldev->md.uuid[UI_BITMAP])
-                       dev_warn(DEV, "bm UUID already set");
-
-               mdev->ldev->md.uuid[UI_BITMAP] = val;
-               mdev->ldev->md.uuid[UI_BITMAP] &= ~((u64)1);
+               unsigned long long bm_uuid = mdev->ldev->md.uuid[UI_BITMAP];
+               if (bm_uuid)
+                       dev_warn(DEV, "bm UUID was already set: %llX\n", bm_uuid);
 
-               debug_drbd_uuid(mdev, UI_BITMAP);
+               mdev->ldev->md.uuid[UI_BITMAP] = val & ~((u64)1);
        }
        drbd_md_mark_dirty(mdev);
 }
@@ -3714,15 +3913,19 @@ int drbd_bmio_clear_n_write(struct drbd_conf *mdev)
 static int w_bitmap_io(struct drbd_conf *mdev, struct drbd_work *w, int unused)
 {
        struct bm_io_work *work = container_of(w, struct bm_io_work, w);
-       int rv;
+       int rv = -EIO;
 
        D_ASSERT(atomic_read(&mdev->ap_bio_cnt) == 0);
 
-       drbd_bm_lock(mdev, work->why);
-       rv = work->io_fn(mdev);
-       drbd_bm_unlock(mdev);
+       if (get_ldev(mdev)) {
+               drbd_bm_lock(mdev, work->why, work->flags);
+               rv = work->io_fn(mdev);
+               drbd_bm_unlock(mdev);
+               put_ldev(mdev);
+       }
 
        clear_bit(BITMAP_IO, &mdev->flags);
+       smp_mb__after_clear_bit();
        wake_up(&mdev->misc_wait);
 
        if (work->done)
@@ -3730,6 +3933,7 @@ static int w_bitmap_io(struct drbd_conf *mdev, struct drbd_work *w, int unused)
 
        clear_bit(BITMAP_IO_QUEUED, &mdev->flags);
        work->why = NULL;
+       work->flags = 0;
 
        return 1;
 }
@@ -3784,7 +3988,7 @@ void drbd_go_diskless(struct drbd_conf *mdev)
 void drbd_queue_bitmap_io(struct drbd_conf *mdev,
                          int (*io_fn)(struct drbd_conf *),
                          void (*done)(struct drbd_conf *, int),
-                         char *why)
+                         char *why, enum bm_flag flags)
 {
        D_ASSERT(current == mdev->worker.task);
 
@@ -3798,15 +4002,15 @@ void drbd_queue_bitmap_io(struct drbd_conf *mdev,
        mdev->bm_io_work.io_fn = io_fn;
        mdev->bm_io_work.done = done;
        mdev->bm_io_work.why = why;
+       mdev->bm_io_work.flags = flags;
 
+       spin_lock_irq(&mdev->req_lock);
        set_bit(BITMAP_IO, &mdev->flags);
        if (atomic_read(&mdev->ap_bio_cnt) == 0) {
-               if (list_empty(&mdev->bm_io_work.w.list)) {
-                       set_bit(BITMAP_IO_QUEUED, &mdev->flags);
+               if (!test_and_set_bit(BITMAP_IO_QUEUED, &mdev->flags))
                        drbd_queue_work(&mdev->data.work, &mdev->bm_io_work.w);
-               } else
-                       dev_err(DEV, "FIXME avoided double queuing bm_io_work\n");
        }
+       spin_unlock_irq(&mdev->req_lock);
 }
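
Queueing the bitmap-IO work is now done under req_lock with test_and_set_bit(),
so only the 0 -> 1 transition of BITMAP_IO_QUEUED actually queues the work;
the old list_empty() probe and its "FIXME avoided double queuing" message are
gone. The same once-only idiom in userspace, with GCC's __atomic_test_and_set()
builtin in place of the kernel's test_and_set_bit():

#include <stdbool.h>
#include <stdio.h>

static unsigned char queued_flag;  /* stands in for BITMAP_IO_QUEUED */

/* queues the work only on the 0 -> 1 transition of the flag */
static bool try_queue_work(void)
{
    if (__atomic_test_and_set(&queued_flag, __ATOMIC_SEQ_CST))
        return false;   /* already queued, nothing to do */
    printf("work queued\n");
    return true;
}

int main(void)
{
    try_queue_work();   /* queues the work */
    try_queue_work();   /* silently skipped */
    return 0;
}
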
 
 /**
@@ -3818,19 +4022,22 @@ void drbd_queue_bitmap_io(struct drbd_conf *mdev,
  * freezes application IO while that the actual IO operations runs. This
  * functions MAY NOT be called from worker context.
  */
-int drbd_bitmap_io(struct drbd_conf *mdev, int (*io_fn)(struct drbd_conf *), char *why)
+int drbd_bitmap_io(struct drbd_conf *mdev, int (*io_fn)(struct drbd_conf *),
+               char *why, enum bm_flag flags)
 {
        int rv;
 
        D_ASSERT(current != mdev->worker.task);
 
-       drbd_suspend_io(mdev);
+       if ((flags & BM_LOCKED_SET_ALLOWED) == 0)
+               drbd_suspend_io(mdev);
 
-       drbd_bm_lock(mdev, why);
+       drbd_bm_lock(mdev, why, flags);
        rv = io_fn(mdev);
        drbd_bm_unlock(mdev);
 
-       drbd_resume_io(mdev);
+       if ((flags & BM_LOCKED_SET_ALLOWED) == 0)
+               drbd_resume_io(mdev);
 
        return rv;
 }
index fe81c851ca8800fdce14bccc3c7be032b7263669..03b29f78a37d78ac8955db9ae43ca8609b963d91 100644 (file)
@@ -288,10 +288,11 @@ void drbd_try_outdate_peer_async(struct drbd_conf *mdev)
                dev_err(DEV, "out of mem, failed to invoke fence-peer helper\n");
 }
 
-int drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
+enum drbd_state_rv
+drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
 {
        const int max_tries = 4;
-       int r = 0;
+       enum drbd_state_rv rv = SS_UNKNOWN_ERROR;
        int try = 0;
        int forced = 0;
        union drbd_state mask, val;
@@ -306,17 +307,17 @@ int drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
        val.i  = 0; val.role  = new_role;
 
        while (try++ < max_tries) {
-               r = _drbd_request_state(mdev, mask, val, CS_WAIT_COMPLETE);
+               rv = _drbd_request_state(mdev, mask, val, CS_WAIT_COMPLETE);
 
                /* in case we first succeeded to outdate,
                 * but now suddenly could establish a connection */
-               if (r == SS_CW_FAILED_BY_PEER && mask.pdsk != 0) {
+               if (rv == SS_CW_FAILED_BY_PEER && mask.pdsk != 0) {
                        val.pdsk = 0;
                        mask.pdsk = 0;
                        continue;
                }
 
-               if (r == SS_NO_UP_TO_DATE_DISK && force &&
+               if (rv == SS_NO_UP_TO_DATE_DISK && force &&
                    (mdev->state.disk < D_UP_TO_DATE &&
                     mdev->state.disk >= D_INCONSISTENT)) {
                        mask.disk = D_MASK;
@@ -325,7 +326,7 @@ int drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
                        continue;
                }
 
-               if (r == SS_NO_UP_TO_DATE_DISK &&
+               if (rv == SS_NO_UP_TO_DATE_DISK &&
                    mdev->state.disk == D_CONSISTENT && mask.pdsk == 0) {
                        D_ASSERT(mdev->state.pdsk == D_UNKNOWN);
                        nps = drbd_try_outdate_peer(mdev);
@@ -341,9 +342,9 @@ int drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
                        continue;
                }
 
-               if (r == SS_NOTHING_TO_DO)
+               if (rv == SS_NOTHING_TO_DO)
                        goto fail;
-               if (r == SS_PRIMARY_NOP && mask.pdsk == 0) {
+               if (rv == SS_PRIMARY_NOP && mask.pdsk == 0) {
                        nps = drbd_try_outdate_peer(mdev);
 
                        if (force && nps > D_OUTDATED) {
@@ -356,25 +357,24 @@ int drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
 
                        continue;
                }
-               if (r == SS_TWO_PRIMARIES) {
+               if (rv == SS_TWO_PRIMARIES) {
                        /* Maybe the peer is detected as dead very soon...
                           retry at most once more in this case. */
-                       __set_current_state(TASK_INTERRUPTIBLE);
-                       schedule_timeout((mdev->net_conf->ping_timeo+1)*HZ/10);
+                       schedule_timeout_interruptible((mdev->net_conf->ping_timeo+1)*HZ/10);
                        if (try < max_tries)
                                try = max_tries - 1;
                        continue;
                }
-               if (r < SS_SUCCESS) {
-                       r = _drbd_request_state(mdev, mask, val,
+               if (rv < SS_SUCCESS) {
+                       rv = _drbd_request_state(mdev, mask, val,
                                                CS_VERBOSE + CS_WAIT_COMPLETE);
-                       if (r < SS_SUCCESS)
+                       if (rv < SS_SUCCESS)
                                goto fail;
                }
                break;
        }
 
-       if (r < SS_SUCCESS)
+       if (rv < SS_SUCCESS)
                goto fail;
 
        if (forced)
@@ -384,7 +384,7 @@ int drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
        wait_event(mdev->misc_wait, atomic_read(&mdev->ap_pending_cnt) == 0);
 
        if (new_role == R_SECONDARY) {
-               set_disk_ro(mdev->vdisk, TRUE);
+               set_disk_ro(mdev->vdisk, true);
                if (get_ldev(mdev)) {
                        mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;
                        put_ldev(mdev);
@@ -394,7 +394,7 @@ int drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
                        mdev->net_conf->want_lose = 0;
                        put_net_conf(mdev);
                }
-               set_disk_ro(mdev->vdisk, FALSE);
+               set_disk_ro(mdev->vdisk, false);
                if (get_ldev(mdev)) {
                        if (((mdev->state.conn < C_CONNECTED ||
                               mdev->state.pdsk <= D_FAILED)
@@ -406,10 +406,8 @@ int drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
                }
        }
 
-       if ((new_role == R_SECONDARY) && get_ldev(mdev)) {
-               drbd_al_to_on_disk_bm(mdev);
-               put_ldev(mdev);
-       }
+       /* writeout of activity-log-covered areas of the bitmap
+        * to stable storage is already done in the after-state-change work */
 
        if (mdev->state.conn >= C_WF_REPORT_PARAMS) {
                /* if this was forced, we should consider sync */
@@ -423,7 +421,7 @@ int drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
        kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
  fail:
        mutex_unlock(&mdev->state_mutex);
-       return r;
+       return rv;
 }
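
The return type change keeps the existing numeric convention: all SS_* failure codes sort below SS_SUCCESS, so ordered comparisons still work. A sketch of the caller-side test (names as used in this file):

    enum drbd_state_rv rv;

    rv = drbd_set_role(mdev, R_PRIMARY, 0 /* not forced */);
    if (rv < SS_SUCCESS)
            /* every SS_* error code compares below SS_SUCCESS */
            dev_err(DEV, "failed to become primary: %d\n", rv);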
 
 static struct drbd_conf *ensure_mdev(int minor, int create)
@@ -528,17 +526,19 @@ static void drbd_md_set_sector_offsets(struct drbd_conf *mdev,
        }
 }
 
+/* input size is expected to be in KB */
 char *ppsize(char *buf, unsigned long long size)
 {
-       /* Needs 9 bytes at max. */
+       /* Needs 9 bytes at max including trailing NUL:
+        * -1ULL ==> "16384 EB" */
        static char units[] = { 'K', 'M', 'G', 'T', 'P', 'E' };
        int base = 0;
-       while (size >= 10000) {
+       while (size >= 10000 && base < sizeof(units)-1) {
                /* shift + round */
                size = (size >> 10) + !!(size & (1<<9));
                base++;
        }
-       sprintf(buf, "%lu %cB", (long)size, units[base]);
+       sprintf(buf, "%u %cB", (unsigned)size, units[base]);
 
        return buf;
 }
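
For illustration, a few inputs and the strings ppsize() produces (derived from the loop above; not part of this commit):

    char buf[10];                 /* 9 bytes needed at most, see comment */
    ppsize(buf, 2048ULL);         /* -> "2048 KB", below 10000, as-is    */
    ppsize(buf, 1048576ULL);      /* -> "1024 MB", one shift by 10       */
    ppsize(buf, ~0ULL);           /* -> "16384 EB", capped at units['E'] */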
@@ -642,11 +642,19 @@ enum determine_dev_size drbd_determin_dev_size(struct drbd_conf *mdev, enum dds_
                || prev_size       != mdev->ldev->md.md_size_sect;
 
        if (la_size_changed || md_moved) {
+               int err;
+
                drbd_al_shrink(mdev); /* All extents inactive. */
                dev_info(DEV, "Writing the whole bitmap, %s\n",
                         la_size_changed && md_moved ? "size changed and md moved" :
                         la_size_changed ? "size changed" : "md moved");
-               rv = drbd_bitmap_io(mdev, &drbd_bm_write, "size changed"); /* does drbd_resume_io() ! */
+               /* next line implicitly does drbd_suspend_io()+drbd_resume_io() */
+               err = drbd_bitmap_io(mdev, &drbd_bm_write,
+                               "size changed", BM_LOCKED_MASK);
+               if (err) {
+                       rv = dev_size_error;
+                       goto out;
+               }
                drbd_md_mark_dirty(mdev);
        }
 
@@ -765,22 +773,21 @@ static int drbd_check_al_size(struct drbd_conf *mdev)
        return 0;
 }
 
-void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_seg_s) __must_hold(local)
+void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_bio_size) __must_hold(local)
 {
        struct request_queue * const q = mdev->rq_queue;
        struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue;
        int max_segments = mdev->ldev->dc.max_bio_bvecs;
+       int max_hw_sectors = min(queue_max_hw_sectors(b), max_bio_size >> 9);
 
-       max_seg_s = min(queue_max_sectors(b) * queue_logical_block_size(b), max_seg_s);
-
-       blk_queue_max_hw_sectors(q, max_seg_s >> 9);
-       blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS);
-       blk_queue_max_segment_size(q, max_seg_s);
        blk_queue_logical_block_size(q, 512);
-       blk_queue_segment_boundary(q, PAGE_SIZE-1);
-       blk_stack_limits(&q->limits, &b->limits, 0);
+       blk_queue_max_hw_sectors(q, max_hw_sectors);
+       /* This is the workaround for "bio would need to, but cannot, be split" */
+       blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS);
+       blk_queue_segment_boundary(q, PAGE_CACHE_SIZE-1);
+       blk_queue_stack_limits(q, b);
 
-       dev_info(DEV, "max_segment_size ( = BIO size ) = %u\n", queue_max_segment_size(q));
+       dev_info(DEV, "max BIO size = %u\n", queue_max_hw_sectors(q) << 9);
 
        if (q->backing_dev_info.ra_pages != b->backing_dev_info.ra_pages) {
                dev_info(DEV, "Adjusting my ra_pages to backing device's (%lu -> %lu)\n",
@@ -850,7 +857,7 @@ static void drbd_suspend_al(struct drbd_conf *mdev)
 static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
                             struct drbd_nl_cfg_reply *reply)
 {
-       enum drbd_ret_codes retcode;
+       enum drbd_ret_code retcode;
        enum determine_dev_size dd;
        sector_t max_possible_sectors;
        sector_t min_md_device_sectors;
@@ -858,8 +865,8 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
        struct block_device *bdev;
        struct lru_cache *resync_lru = NULL;
        union drbd_state ns, os;
-       unsigned int max_seg_s;
-       int rv;
+       unsigned int max_bio_size;
+       enum drbd_state_rv rv;
        int cp_discovered = 0;
        int logical_block_size;
 
@@ -1005,9 +1012,10 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
        /* and for any other previously queued work */
        drbd_flush_workqueue(mdev);
 
-       retcode = _drbd_request_state(mdev, NS(disk, D_ATTACHING), CS_VERBOSE);
+       rv = _drbd_request_state(mdev, NS(disk, D_ATTACHING), CS_VERBOSE);
+       retcode = rv;  /* FIXME: Type mismatch. */
        drbd_resume_io(mdev);
-       if (retcode < SS_SUCCESS)
+       if (rv < SS_SUCCESS)
                goto fail;
 
        if (!get_ldev_if_state(mdev, D_ATTACHING))
@@ -1109,20 +1117,20 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
        mdev->read_cnt = 0;
        mdev->writ_cnt = 0;
 
-       max_seg_s = DRBD_MAX_SEGMENT_SIZE;
+       max_bio_size = DRBD_MAX_BIO_SIZE;
        if (mdev->state.conn == C_CONNECTED) {
                /* We are Primary, Connected, and now attach a new local
                 * backing store. We must not increase the user visible maximum
                 * bio size on this device to something the peer may not be
                 * able to handle. */
                if (mdev->agreed_pro_version < 94)
-                       max_seg_s = queue_max_segment_size(mdev->rq_queue);
+                       max_bio_size = queue_max_hw_sectors(mdev->rq_queue) << 9;
                else if (mdev->agreed_pro_version == 94)
-                       max_seg_s = DRBD_MAX_SIZE_H80_PACKET;
+                       max_bio_size = DRBD_MAX_SIZE_H80_PACKET;
                /* else: drbd 8.3.9 and later, stay with default */
        }
 
-       drbd_setup_queue_param(mdev, max_seg_s);
+       drbd_setup_queue_param(mdev, max_bio_size);
 
        /* If I am currently not R_PRIMARY,
         * but meta data primary indicator is set,
@@ -1154,12 +1162,14 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
        if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC)) {
                dev_info(DEV, "Assuming that all blocks are out of sync "
                     "(aka FullSync)\n");
-               if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write, "set_n_write from attaching")) {
+               if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write,
+                       "set_n_write from attaching", BM_LOCKED_MASK)) {
                        retcode = ERR_IO_MD_DISK;
                        goto force_diskless_dec;
                }
        } else {
-               if (drbd_bitmap_io(mdev, &drbd_bm_read, "read from attaching") < 0) {
+               if (drbd_bitmap_io(mdev, &drbd_bm_read,
+                       "read from attaching", BM_LOCKED_MASK) < 0) {
                        retcode = ERR_IO_MD_DISK;
                        goto force_diskless_dec;
                }
@@ -1167,7 +1177,11 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
 
        if (cp_discovered) {
                drbd_al_apply_to_bm(mdev);
-               drbd_al_to_on_disk_bm(mdev);
+               if (drbd_bitmap_io(mdev, &drbd_bm_write,
+                       "crashed primary apply AL", BM_LOCKED_MASK)) {
+                       retcode = ERR_IO_MD_DISK;
+                       goto force_diskless_dec;
+               }
        }
 
        if (_drbd_bm_total_weight(mdev) == drbd_bm_bits(mdev))
@@ -1279,7 +1293,7 @@ static int drbd_nl_net_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
                            struct drbd_nl_cfg_reply *reply)
 {
        int i, ns;
-       enum drbd_ret_codes retcode;
+       enum drbd_ret_code retcode;
        struct net_conf *new_conf = NULL;
        struct crypto_hash *tfm = NULL;
        struct crypto_hash *integrity_w_tfm = NULL;
@@ -1324,6 +1338,8 @@ static int drbd_nl_net_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
        new_conf->wire_protocol    = DRBD_PROT_C;
        new_conf->ping_timeo       = DRBD_PING_TIMEO_DEF;
        new_conf->rr_conflict      = DRBD_RR_CONFLICT_DEF;
+       new_conf->on_congestion    = DRBD_ON_CONGESTION_DEF;
+       new_conf->cong_extents     = DRBD_CONG_EXTENTS_DEF;
 
        if (!net_conf_from_tags(mdev, nlp->tag_list, new_conf)) {
                retcode = ERR_MANDATORY_TAG;
@@ -1345,6 +1361,11 @@ static int drbd_nl_net_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
                }
        }
 
+       if (new_conf->on_congestion != OC_BLOCK && new_conf->wire_protocol != DRBD_PROT_A) {
+               retcode = ERR_CONG_NOT_PROTO_A;
+               goto fail;
+       }
+
        if (mdev->state.role == R_PRIMARY && new_conf->want_lose) {
                retcode = ERR_DISCARD;
                goto fail;
@@ -1525,6 +1546,21 @@ static int drbd_nl_disconnect(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nl
                              struct drbd_nl_cfg_reply *reply)
 {
        int retcode;
+       struct disconnect dc;
+
+       memset(&dc, 0, sizeof(struct disconnect));
+       if (!disconnect_from_tags(mdev, nlp->tag_list, &dc)) {
+               retcode = ERR_MANDATORY_TAG;
+               goto fail;
+       }
+
+       if (dc.force) {
+               spin_lock_irq(&mdev->req_lock);
+               if (mdev->state.conn >= C_WF_CONNECTION)
+                       _drbd_set_state(_NS(mdev, conn, C_DISCONNECTING), CS_HARD, NULL);
+               spin_unlock_irq(&mdev->req_lock);
+               goto done;
+       }
 
        retcode = _drbd_request_state(mdev, NS(conn, C_DISCONNECTING), CS_ORDERED);
 
@@ -1842,6 +1878,10 @@ static int drbd_nl_invalidate(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nl
 {
        int retcode;
 
+       /* If there is still bitmap IO pending, probably because of a previous
+        * resync just being finished, wait for it before requesting a new resync. */
+       wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
+
        retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T), CS_ORDERED);
 
        if (retcode < SS_SUCCESS && retcode != SS_NEED_CONNECTION)
@@ -1877,6 +1917,10 @@ static int drbd_nl_invalidate_peer(struct drbd_conf *mdev, struct drbd_nl_cfg_re
 {
        int retcode;
 
+       /* If there is still bitmap IO pending, probably because of a previous
+        * resync just being finished, wait for it before requesting a new resync. */
+       wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
+
        retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_S), CS_ORDERED);
 
        if (retcode < SS_SUCCESS) {
@@ -1885,9 +1929,9 @@ static int drbd_nl_invalidate_peer(struct drbd_conf *mdev, struct drbd_nl_cfg_re
                           into a full resync. */
                        retcode = drbd_request_state(mdev, NS(pdsk, D_INCONSISTENT));
                        if (retcode >= SS_SUCCESS) {
-                               /* open coded drbd_bitmap_io() */
                                if (drbd_bitmap_io(mdev, &drbd_bmio_set_susp_al,
-                                                  "set_n_write from invalidate_peer"))
+                                       "set_n_write from invalidate_peer",
+                                       BM_LOCKED_SET_ALLOWED))
                                        retcode = ERR_IO_MD_DISK;
                        }
                } else
@@ -1914,9 +1958,17 @@ static int drbd_nl_resume_sync(struct drbd_conf *mdev, struct drbd_nl_cfg_req *n
                               struct drbd_nl_cfg_reply *reply)
 {
        int retcode = NO_ERROR;
+       union drbd_state s;
 
-       if (drbd_request_state(mdev, NS(user_isp, 0)) == SS_NOTHING_TO_DO)
-               retcode = ERR_PAUSE_IS_CLEAR;
+       if (drbd_request_state(mdev, NS(user_isp, 0)) == SS_NOTHING_TO_DO) {
+               s = mdev->state;
+               if (s.conn == C_PAUSED_SYNC_S || s.conn == C_PAUSED_SYNC_T) {
+                       retcode = s.aftr_isp ? ERR_PIC_AFTER_DEP :
+                                 s.peer_isp ? ERR_PIC_PEER_DEP : ERR_PAUSE_IS_CLEAR;
+               } else {
+                       retcode = ERR_PAUSE_IS_CLEAR;
+               }
+       }
 
        reply->ret_code = retcode;
        return 0;
@@ -2054,6 +2106,11 @@ static int drbd_nl_start_ov(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
                reply->ret_code = ERR_MANDATORY_TAG;
                return 0;
        }
+
+       /* If there is still bitmap IO pending, e.g. previous resync or verify
+        * just being finished, wait for it before requesting a new resync. */
+       wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
+
        /* w_make_ov_request expects position to be aligned */
        mdev->ov_start_sector = args.start_sector & ~BM_SECT_PER_BIT;
        reply->ret_code = drbd_request_state(mdev,NS(conn,C_VERIFY_S));
@@ -2097,7 +2154,8 @@ static int drbd_nl_new_c_uuid(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nl
        drbd_uuid_new_current(mdev); /* New current, previous to UI_BITMAP */
 
        if (args.clear_bm) {
-               err = drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write, "clear_n_write from new_c_uuid");
+               err = drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write,
+                       "clear_n_write from new_c_uuid", BM_LOCKED_MASK);
                if (err) {
                        dev_err(DEV, "Writing bitmap failed with %d\n",err);
                        retcode = ERR_IO_MD_DISK;
@@ -2105,6 +2163,7 @@ static int drbd_nl_new_c_uuid(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nl
                if (skip_initial_sync) {
                        drbd_send_uuids_skip_initial_sync(mdev);
                        _drbd_uuid_set(mdev, UI_BITMAP, 0);
+                       drbd_print_uuids(mdev, "cleared bitmap UUID");
                        spin_lock_irq(&mdev->req_lock);
                        _drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
                                        CS_VERBOSE, NULL);
@@ -2189,7 +2248,8 @@ static void drbd_connector_callback(struct cn_msg *req, struct netlink_skb_parms
                goto fail;
        }
 
-       if (nlp->packet_type >= P_nl_after_last_packet) {
+       if (nlp->packet_type >= P_nl_after_last_packet ||
+           nlp->packet_type == P_return_code_only) {
                retcode = ERR_PACKET_NR;
                goto fail;
        }
@@ -2205,7 +2265,7 @@ static void drbd_connector_callback(struct cn_msg *req, struct netlink_skb_parms
        reply_size += cm->reply_body_size;
 
        /* allocation not in the IO path, cqueue thread context */
-       cn_reply = kmalloc(reply_size, GFP_KERNEL);
+       cn_reply = kzalloc(reply_size, GFP_KERNEL);
        if (!cn_reply) {
                retcode = ERR_NOMEM;
                goto fail;
@@ -2213,7 +2273,7 @@ static void drbd_connector_callback(struct cn_msg *req, struct netlink_skb_parms
        reply = (struct drbd_nl_cfg_reply *) cn_reply->data;
 
        reply->packet_type =
-               cm->reply_body_size ? nlp->packet_type : P_nl_after_last_packet;
+               cm->reply_body_size ? nlp->packet_type : P_return_code_only;
        reply->minor = nlp->drbd_minor;
        reply->ret_code = NO_ERROR; /* Might be modified by cm->function. */
        /* reply->tag_list; might be modified by cm->function. */
@@ -2376,7 +2436,7 @@ void drbd_bcast_ee(struct drbd_conf *mdev,
        /* receiver thread context, which is not in the writeout path (of this node),
         * but may be in the writeout path of the _other_ node.
         * GFP_NOIO to avoid potential "distributed deadlock". */
-       cn_reply = kmalloc(
+       cn_reply = kzalloc(
                sizeof(struct cn_msg)+
                sizeof(struct drbd_nl_cfg_reply)+
                sizeof(struct dump_ee_tag_len_struct)+
@@ -2398,10 +2458,11 @@ void drbd_bcast_ee(struct drbd_conf *mdev,
        tl = tl_add_int(tl, T_ee_sector, &e->sector);
        tl = tl_add_int(tl, T_ee_block_id, &e->block_id);
 
+       /* dump the first 32k */
+       len = min_t(unsigned, e->size, 32 << 10);
        put_unaligned(T_ee_data, tl++);
-       put_unaligned(e->size, tl++);
+       put_unaligned(len, tl++);
 
-       len = e->size;
        page = e->pages;
        page_chain_for_each(page) {
                void *d = kmap_atomic(page, KM_USER0);
@@ -2410,6 +2471,8 @@ void drbd_bcast_ee(struct drbd_conf *mdev,
                kunmap_atomic(d, KM_USER0);
                tl = (unsigned short*)((char*)tl + l);
                len -= l;
+               if (len == 0)
+                       break;
        }
        put_unaligned(TT_END, tl++); /* Close the tag list */
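
The broadcast now caps the dumped payload at 32 KiB and leaves the page-chain walk as soon as len reaches zero, instead of relying on e->size to bound the loop. The bounded-copy pattern in isolation (a sketch; page_chain_for_each() and the tag layout are as in the code above):

    len = min_t(unsigned, e->size, 32 << 10);   /* dump at most 32 KiB */
    page_chain_for_each(page) {
            unsigned l = min_t(unsigned, len, PAGE_SIZE);
            /* ... kmap the page and copy l bytes into the tag list ... */
            len -= l;
            if (len == 0)
                    break;                      /* cap reached, stop early */
    }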
 
@@ -2508,6 +2571,7 @@ void drbd_nl_send_reply(struct cn_msg *req, int ret_code)
                (struct drbd_nl_cfg_reply *)cn_reply->data;
        int rr;
 
+       memset(buffer, 0, sizeof(buffer));
        cn_reply->id = req->id;
 
        cn_reply->seq = req->seq;
@@ -2515,6 +2579,7 @@ void drbd_nl_send_reply(struct cn_msg *req, int ret_code)
        cn_reply->len = sizeof(struct drbd_nl_cfg_reply);
        cn_reply->flags = 0;
 
+       reply->packet_type = P_return_code_only;
        reply->minor = ((struct drbd_nl_cfg_req *)req->data)->drbd_minor;
        reply->ret_code = ret_code;
 
index 7e6ac307e2dec3ba1c9de2f825d0b1c9cbbbf9af..2959cdfb77f556e0bed2a8131eb69c69cbfed84f 100644 (file)
@@ -34,6 +34,7 @@
 #include "drbd_int.h"
 
 static int drbd_proc_open(struct inode *inode, struct file *file);
+static int drbd_proc_release(struct inode *inode, struct file *file);
 
 
 struct proc_dir_entry *drbd_proc;
@@ -42,9 +43,22 @@ const struct file_operations drbd_proc_fops = {
        .open           = drbd_proc_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
-       .release        = single_release,
+       .release        = drbd_proc_release,
 };
 
+void seq_printf_with_thousands_grouping(struct seq_file *seq, long v)
+{
+       /* v is in kB/sec. We don't expect TiByte/sec yet. */
+       if (unlikely(v >= 1000000)) {
+               /* cool: > GiByte/s */
+               seq_printf(seq, "%ld,", v / 1000000);
+               v %= 1000000;
+               seq_printf(seq, "%03ld,%03ld", v/1000, v % 1000);
+       } else if (likely(v >= 1000))
+               seq_printf(seq, "%ld,%03ld", v/1000, v % 1000);
+       else
+               seq_printf(seq, "%ld", v);
+}
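
A few inputs and the strings the helper prints (derived from the branches above; the millions branch keeps the remainder so the lower groups stay correct):

    seq_printf_with_thousands_grouping(seq, 42);        /* "42"         */
    seq_printf_with_thousands_grouping(seq, 12345);     /* "12,345"     */
    seq_printf_with_thousands_grouping(seq, 12345678);  /* "12,345,678" */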
 
 /*lge
  * progress bars shamelessly adapted from driver/md/md.c
@@ -71,10 +85,15 @@ static void drbd_syncer_progress(struct drbd_conf *mdev, struct seq_file *seq)
                seq_printf(seq, ".");
        seq_printf(seq, "] ");
 
-       seq_printf(seq, "sync'ed:%3u.%u%% ", res / 10, res % 10);
-       /* if more than 1 GB display in MB */
-       if (mdev->rs_total > 0x100000L)
-               seq_printf(seq, "(%lu/%lu)M\n\t",
+       if (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T)
+               seq_printf(seq, "verified:");
+       else
+               seq_printf(seq, "sync'ed:");
+       seq_printf(seq, "%3u.%u%% ", res / 10, res % 10);
+
+       /* if more than a few GB, display in MB */
+       if (mdev->rs_total > (4UL << (30 - BM_BLOCK_SHIFT)))
+               seq_printf(seq, "(%lu/%lu)M",
                            (unsigned long) Bit2KB(rs_left >> 10),
                            (unsigned long) Bit2KB(mdev->rs_total >> 10));
        else
@@ -94,6 +113,7 @@ static void drbd_syncer_progress(struct drbd_conf *mdev, struct seq_file *seq)
        /* Rolling marks. last_mark+1 may just now be modified.  last_mark+2 is
         * at least (DRBD_SYNC_MARKS-2)*DRBD_SYNC_MARK_STEP old, and has at
         * least DRBD_SYNC_MARK_STEP time before it will be modified. */
+       /* ------------------------ ~18s average ------------------------ */
        i = (mdev->rs_last_mark + 2) % DRBD_SYNC_MARKS;
        dt = (jiffies - mdev->rs_mark_time[i]) / HZ;
        if (dt > (DRBD_SYNC_MARK_STEP * DRBD_SYNC_MARKS))
@@ -107,14 +127,24 @@ static void drbd_syncer_progress(struct drbd_conf *mdev, struct seq_file *seq)
        seq_printf(seq, "finish: %lu:%02lu:%02lu",
                rt / 3600, (rt % 3600) / 60, rt % 60);
 
-       /* current speed average over (SYNC_MARKS * SYNC_MARK_STEP) jiffies */
        dbdt = Bit2KB(db/dt);
-       if (dbdt > 1000)
-               seq_printf(seq, " speed: %ld,%03ld",
-                       dbdt/1000, dbdt % 1000);
-       else
-               seq_printf(seq, " speed: %ld", dbdt);
+       seq_printf(seq, " speed: ");
+       seq_printf_with_thousands_grouping(seq, dbdt);
+       seq_printf(seq, " (");
+       /* ------------------------- ~3s average ------------------------ */
+       if (proc_details >= 1) {
+               /* this is what drbd_rs_should_slow_down() uses */
+               i = (mdev->rs_last_mark + DRBD_SYNC_MARKS-1) % DRBD_SYNC_MARKS;
+               dt = (jiffies - mdev->rs_mark_time[i]) / HZ;
+               if (!dt)
+                       dt++;
+               db = mdev->rs_mark_left[i] - rs_left;
+               dbdt = Bit2KB(db/dt);
+               seq_printf_with_thousands_grouping(seq, dbdt);
+               seq_printf(seq, " -- ");
+       }
 
+       /* --------------------- long term average ---------------------- */
        /* mean speed since syncer started
         * we do account for PausedSync periods */
        dt = (jiffies - mdev->rs_start - mdev->rs_paused) / HZ;
@@ -122,20 +152,34 @@ static void drbd_syncer_progress(struct drbd_conf *mdev, struct seq_file *seq)
                dt = 1;
        db = mdev->rs_total - rs_left;
        dbdt = Bit2KB(db/dt);
-       if (dbdt > 1000)
-               seq_printf(seq, " (%ld,%03ld)",
-                       dbdt/1000, dbdt % 1000);
-       else
-               seq_printf(seq, " (%ld)", dbdt);
+       seq_printf_with_thousands_grouping(seq, dbdt);
+       seq_printf(seq, ")");
 
-       if (mdev->state.conn == C_SYNC_TARGET) {
-               if (mdev->c_sync_rate > 1000)
-                       seq_printf(seq, " want: %d,%03d",
-                                  mdev->c_sync_rate / 1000, mdev->c_sync_rate % 1000);
-               else
-                       seq_printf(seq, " want: %d", mdev->c_sync_rate);
+       if (mdev->state.conn == C_SYNC_TARGET ||
+           mdev->state.conn == C_VERIFY_S) {
+               seq_printf(seq, " want: ");
+               seq_printf_with_thousands_grouping(seq, mdev->c_sync_rate);
        }
        seq_printf(seq, " K/sec%s\n", stalled ? " (stalled)" : "");
+
+       if (proc_details >= 1) {
+               /* 64 bit:
+                * we convert to sectors in the display below. */
+               unsigned long bm_bits = drbd_bm_bits(mdev);
+               unsigned long bit_pos;
+               if (mdev->state.conn == C_VERIFY_S ||
+                   mdev->state.conn == C_VERIFY_T)
+                       bit_pos = bm_bits - mdev->ov_left;
+               else
+                       bit_pos = mdev->bm_resync_fo;
+               /* Total sectors may be slightly off for oddly
+                * sized devices. So what. */
+               seq_printf(seq,
+                       "\t%3d%% sector pos: %llu/%llu\n",
+                       (int)(bit_pos / (bm_bits/100+1)),
+                       (unsigned long long)bit_pos * BM_SECT_PER_BIT,
+                       (unsigned long long)bm_bits * BM_SECT_PER_BIT);
+       }
 }
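
The detail line scales a bitmap bit position to 512-byte sectors via BM_SECT_PER_BIT. A worked example, assuming the usual 4 KiB bitmap granularity (BM_BLOCK_SHIFT == 12, so BM_SECT_PER_BIT == 8); the numbers are illustrative:

    unsigned long bit_pos = 2500;    /* bits already processed          */
    unsigned long bm_bits = 10000;   /* total bitmap bits on the device */
    /* percentage: 2500 / (10000/100 + 1) = 24 (the +1 avoids div-by-0) */
    /* printed: " 24% sector pos: 20000/80000"   (bits * 8 sectors/bit) */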
 
 static void resync_dump_detail(struct seq_file *seq, struct lc_element *e)
@@ -232,20 +276,16 @@ static int drbd_seq_show(struct seq_file *seq, void *v)
                           mdev->epochs,
                           write_ordering_chars[mdev->write_ordering]
                        );
-                       seq_printf(seq, " oos:%lu\n",
-                                  Bit2KB(drbd_bm_total_weight(mdev)));
+                       seq_printf(seq, " oos:%llu\n",
+                                  Bit2KB((unsigned long long)
+                                          drbd_bm_total_weight(mdev)));
                }
                if (mdev->state.conn == C_SYNC_SOURCE ||
-                   mdev->state.conn == C_SYNC_TARGET)
+                   mdev->state.conn == C_SYNC_TARGET ||
+                   mdev->state.conn == C_VERIFY_S ||
+                   mdev->state.conn == C_VERIFY_T)
                        drbd_syncer_progress(mdev, seq);
 
-               if (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T)
-                       seq_printf(seq, "\t%3d%%      %lu/%lu\n",
-                                  (int)((mdev->rs_total-mdev->ov_left) /
-                                        (mdev->rs_total/100+1)),
-                                  mdev->rs_total - mdev->ov_left,
-                                  mdev->rs_total);
-
                if (proc_details >= 1 && get_ldev_if_state(mdev, D_FAILED)) {
                        lc_seq_printf_stats(seq, mdev->resync);
                        lc_seq_printf_stats(seq, mdev->act_log);
@@ -265,7 +305,15 @@ static int drbd_seq_show(struct seq_file *seq, void *v)
 
 static int drbd_proc_open(struct inode *inode, struct file *file)
 {
-       return single_open(file, drbd_seq_show, PDE(inode)->data);
+       if (try_module_get(THIS_MODULE))
+               return single_open(file, drbd_seq_show, PDE(inode)->data);
+       return -ENODEV;
+}
+
+static int drbd_proc_release(struct inode *inode, struct file *file)
+{
+       module_put(THIS_MODULE);
+       return single_release(inode, file);
 }
 
 /* PROC FS stuff end */
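
try_module_get() in open, paired with module_put() in release, pins the module while /proc/drbd is held open; plain single_release() alone would leave a window where the module text could be unloaded under an open file. The same pattern in isolation (a generic sketch; example_seq_show is a placeholder, not a drbd function):

    static int example_proc_open(struct inode *inode, struct file *file)
    {
            /* take a module reference for the lifetime of the open file */
            if (try_module_get(THIS_MODULE))
                    return single_open(file, example_seq_show, NULL);
            return -ENODEV; /* module is already being unloaded */
    }

    static int example_proc_release(struct inode *inode, struct file *file)
    {
            module_put(THIS_MODULE); /* drop the reference taken in open */
            return single_release(inode, file);
    }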
index 8e68be939debefb6e5960ed38c2dd42d67c3a18d..fe1564c7d8b6e835643939d18f3aba2bc9e6b60d 100644 (file)
@@ -277,7 +277,7 @@ static void drbd_pp_free(struct drbd_conf *mdev, struct page *page, int is_net)
        atomic_t *a = is_net ? &mdev->pp_in_use_by_net : &mdev->pp_in_use;
        int i;
 
-       if (drbd_pp_vacant > (DRBD_MAX_SEGMENT_SIZE/PAGE_SIZE)*minor_count)
+       if (drbd_pp_vacant > (DRBD_MAX_BIO_SIZE/PAGE_SIZE)*minor_count)
                i = page_chain_free(page);
        else {
                struct page *tmp;
@@ -319,7 +319,7 @@ struct drbd_epoch_entry *drbd_alloc_ee(struct drbd_conf *mdev,
        struct page *page;
        unsigned nr_pages = (data_size + PAGE_SIZE -1) >> PAGE_SHIFT;
 
-       if (FAULT_ACTIVE(mdev, DRBD_FAULT_AL_EE))
+       if (drbd_insert_fault(mdev, DRBD_FAULT_AL_EE))
                return NULL;
 
        e = mempool_alloc(drbd_ee_mempool, gfp_mask & ~__GFP_HIGHMEM);
@@ -725,16 +725,16 @@ static int drbd_socket_okay(struct drbd_conf *mdev, struct socket **sock)
        char tb[4];
 
        if (!*sock)
-               return FALSE;
+               return false;
 
        rr = drbd_recv_short(mdev, *sock, tb, 4, MSG_DONTWAIT | MSG_PEEK);
 
        if (rr > 0 || rr == -EAGAIN) {
-               return TRUE;
+               return true;
        } else {
                sock_release(*sock);
                *sock = NULL;
-               return FALSE;
+               return false;
        }
 }
 
@@ -768,8 +768,7 @@ static int drbd_connect(struct drbd_conf *mdev)
                        if (s || ++try >= 3)
                                break;
                        /* give the other side time to call bind() & listen() */
-                       __set_current_state(TASK_INTERRUPTIBLE);
-                       schedule_timeout(HZ / 10);
+                       schedule_timeout_interruptible(HZ / 10);
                }
 
                if (s) {
@@ -788,8 +787,7 @@ static int drbd_connect(struct drbd_conf *mdev)
                }
 
                if (sock && msock) {
-                       __set_current_state(TASK_INTERRUPTIBLE);
-                       schedule_timeout(HZ / 10);
+                       schedule_timeout_interruptible(HZ / 10);
                        ok = drbd_socket_okay(mdev, &sock);
                        ok = drbd_socket_okay(mdev, &msock) && ok;
                        if (ok)
@@ -906,7 +904,7 @@ retry:
                put_ldev(mdev);
        }
 
-       if (!drbd_send_protocol(mdev))
+       if (drbd_send_protocol(mdev) == -1)
                return -1;
        drbd_send_sync_param(mdev, &mdev->sync_conf);
        drbd_send_sizes(mdev, 0, 0);
@@ -914,6 +912,7 @@ retry:
        drbd_send_state(mdev);
        clear_bit(USE_DEGR_WFC_T, &mdev->flags);
        clear_bit(RESIZE_PENDING, &mdev->flags);
+       mod_timer(&mdev->request_timer, jiffies + HZ); /* just start it here. */
 
        return 1;
 
@@ -932,8 +931,9 @@ static int drbd_recv_header(struct drbd_conf *mdev, enum drbd_packets *cmd, unsi
 
        r = drbd_recv(mdev, h, sizeof(*h));
        if (unlikely(r != sizeof(*h))) {
-               dev_err(DEV, "short read expecting header on sock: r=%d\n", r);
-               return FALSE;
+               if (!signal_pending(current))
+                       dev_warn(DEV, "short read expecting header on sock: r=%d\n", r);
+               return false;
        }
 
        if (likely(h->h80.magic == BE_DRBD_MAGIC)) {
@@ -947,11 +947,11 @@ static int drbd_recv_header(struct drbd_conf *mdev, enum drbd_packets *cmd, unsi
                    be32_to_cpu(h->h80.magic),
                    be16_to_cpu(h->h80.command),
                    be16_to_cpu(h->h80.length));
-               return FALSE;
+               return false;
        }
        mdev->last_received = jiffies;
 
-       return TRUE;
+       return true;
 }
 
 static void drbd_flush(struct drbd_conf *mdev)
@@ -1074,6 +1074,16 @@ void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo)
  * @mdev:      DRBD device.
  * @e:         epoch entry
  * @rw:                flag field, see bio->bi_rw
+ *
+ * May spread the pages to multiple bios,
+ * depending on bio_add_page restrictions.
+ *
+ * Returns 0 if all bios have been submitted,
+ * -ENOMEM if we could not allocate enough bios,
+ * -ENOSPC (any better suggestion?) if we have not been able to bio_add_page a
+ *  single page to an empty bio (which should never happen and likely indicates
+ *  that the lower level IO stack is in some way broken). This has been observed
+ *  on certain Xen deployments.
  */
 /* TODO allocate from our own bio_set. */
 int drbd_submit_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e,
@@ -1086,6 +1096,7 @@ int drbd_submit_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e,
        unsigned ds = e->size;
        unsigned n_bios = 0;
        unsigned nr_pages = (ds + PAGE_SIZE -1) >> PAGE_SHIFT;
+       int err = -ENOMEM;
 
        /* In most cases, we will only need one bio.  But in case the lower
         * level restrictions happen to be different at this offset on this
@@ -1111,8 +1122,17 @@ next_bio:
        page_chain_for_each(page) {
                unsigned len = min_t(unsigned, ds, PAGE_SIZE);
                if (!bio_add_page(bio, page, len, 0)) {
-                       /* a single page must always be possible! */
-                       BUG_ON(bio->bi_vcnt == 0);
+                       /* A single page must always be possible!
+                        * But in case it fails anyways,
+                        * we deal with it, and complain (below). */
+                       if (bio->bi_vcnt == 0) {
+                               dev_err(DEV,
+                                       "bio_add_page failed for len=%u, "
+                                       "bi_vcnt=0 (bi_sector=%llu)\n",
+                                       len, (unsigned long long)bio->bi_sector);
+                               err = -ENOSPC;
+                               goto fail;
+                       }
                        goto next_bio;
                }
                ds -= len;
@@ -1138,7 +1158,7 @@ fail:
                bios = bios->bi_next;
                bio_put(bio);
        }
-       return -ENOMEM;
+       return err;
 }
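
With the new return values, callers treat any nonzero result as fatal for the connection rather than assuming -ENOMEM. A sketch of the caller-side pattern as used in recv_resync_read() and receive_Data() in this patch:

    if (drbd_submit_ee(mdev, e, WRITE, DRBD_FAULT_RS_WR) == 0)
            return true;
    /* -ENOMEM: could not allocate bios; -ENOSPC: bio_add_page refused
     * even a single page. Either way, drop the connection and retry. */
    dev_err(DEV, "submit failed, triggering re-connect\n");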
 
 static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
@@ -1160,7 +1180,7 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
        switch (mdev->write_ordering) {
        case WO_none:
                if (rv == FE_RECYCLED)
-                       return TRUE;
+                       return true;
 
                /* receiver context, in the writeout path of the other node.
                 * avoid potential distributed deadlock */
@@ -1188,10 +1208,10 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
                D_ASSERT(atomic_read(&epoch->active) == 0);
                D_ASSERT(epoch->flags == 0);
 
-               return TRUE;
+               return true;
        default:
                dev_err(DEV, "Strangeness in mdev->write_ordering %d\n", mdev->write_ordering);
-               return FALSE;
+               return false;
        }
 
        epoch->flags = 0;
@@ -1209,7 +1229,7 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
        }
        spin_unlock(&mdev->epoch_lock);
 
-       return TRUE;
+       return true;
 }
 
 /* used from receive_RSDataReply (recv_resync_read)
@@ -1231,21 +1251,25 @@ read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector, int data_size) __
        if (dgs) {
                rr = drbd_recv(mdev, dig_in, dgs);
                if (rr != dgs) {
-                       dev_warn(DEV, "short read receiving data digest: read %d expected %d\n",
-                            rr, dgs);
+                       if (!signal_pending(current))
+                               dev_warn(DEV,
+                                       "short read receiving data digest: read %d expected %d\n",
+                                       rr, dgs);
                        return NULL;
                }
        }
 
        data_size -= dgs;
 
+       ERR_IF(data_size == 0) return NULL;
        ERR_IF(data_size &  0x1ff) return NULL;
-       ERR_IF(data_size >  DRBD_MAX_SEGMENT_SIZE) return NULL;
+       ERR_IF(data_size >  DRBD_MAX_BIO_SIZE) return NULL;
 
        /* even though we trust our peer,
         * we sometimes have to double check. */
        if (sector + (data_size>>9) > capacity) {
-               dev_err(DEV, "capacity: %llus < sector: %llus + size: %u\n",
+               dev_err(DEV, "request from peer beyond end of local disk: "
+                       "capacity: %llus < sector: %llus + size: %u\n",
                        (unsigned long long)capacity,
                        (unsigned long long)sector, data_size);
                return NULL;
@@ -1264,15 +1288,16 @@ read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector, int data_size) __
                unsigned len = min_t(int, ds, PAGE_SIZE);
                data = kmap(page);
                rr = drbd_recv(mdev, data, len);
-               if (FAULT_ACTIVE(mdev, DRBD_FAULT_RECEIVE)) {
+               if (drbd_insert_fault(mdev, DRBD_FAULT_RECEIVE)) {
                        dev_err(DEV, "Fault injection: Corrupting data on receive\n");
                        data[0] = data[0] ^ (unsigned long)-1;
                }
                kunmap(page);
                if (rr != len) {
                        drbd_free_ee(mdev, e);
-                       dev_warn(DEV, "short read receiving data: read %d expected %d\n",
-                            rr, len);
+                       if (!signal_pending(current))
+                               dev_warn(DEV, "short read receiving data: read %d expected %d\n",
+                               rr, len);
                        return NULL;
                }
                ds -= rr;
@@ -1281,7 +1306,8 @@ read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector, int data_size) __
        if (dgs) {
                drbd_csum_ee(mdev, mdev->integrity_r_tfm, e, dig_vv);
                if (memcmp(dig_in, dig_vv, dgs)) {
-                       dev_err(DEV, "Digest integrity check FAILED.\n");
+                       dev_err(DEV, "Digest integrity check FAILED: %llus +%u\n",
+                               (unsigned long long)sector, data_size);
                        drbd_bcast_ee(mdev, "digest failed",
                                        dgs, dig_in, dig_vv, e);
                        drbd_free_ee(mdev, e);
@@ -1302,7 +1328,7 @@ static int drbd_drain_block(struct drbd_conf *mdev, int data_size)
        void *data;
 
        if (!data_size)
-               return TRUE;
+               return true;
 
        page = drbd_pp_alloc(mdev, 1, 1);
 
@@ -1311,8 +1337,10 @@ static int drbd_drain_block(struct drbd_conf *mdev, int data_size)
                rr = drbd_recv(mdev, data, min_t(int, data_size, PAGE_SIZE));
                if (rr != min_t(int, data_size, PAGE_SIZE)) {
                        rv = 0;
-                       dev_warn(DEV, "short read receiving data: read %d expected %d\n",
-                            rr, min_t(int, data_size, PAGE_SIZE));
+                       if (!signal_pending(current))
+                               dev_warn(DEV,
+                                       "short read receiving data: read %d expected %d\n",
+                                       rr, min_t(int, data_size, PAGE_SIZE));
                        break;
                }
                data_size -= rr;
@@ -1337,8 +1365,10 @@ static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req,
        if (dgs) {
                rr = drbd_recv(mdev, dig_in, dgs);
                if (rr != dgs) {
-                       dev_warn(DEV, "short read receiving data reply digest: read %d expected %d\n",
-                            rr, dgs);
+                       if (!signal_pending(current))
+                               dev_warn(DEV,
+                                       "short read receiving data reply digest: read %d expected %d\n",
+                                       rr, dgs);
                        return 0;
                }
        }
@@ -1359,9 +1389,10 @@ static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req,
                             expect);
                kunmap(bvec->bv_page);
                if (rr != expect) {
-                       dev_warn(DEV, "short read receiving data reply: "
-                            "read %d expected %d\n",
-                            rr, expect);
+                       if (!signal_pending(current))
+                               dev_warn(DEV, "short read receiving data reply: "
+                                       "read %d expected %d\n",
+                                       rr, expect);
                        return 0;
                }
                data_size -= rr;
@@ -1425,11 +1456,10 @@ static int recv_resync_read(struct drbd_conf *mdev, sector_t sector, int data_si
 
        atomic_add(data_size >> 9, &mdev->rs_sect_ev);
        if (drbd_submit_ee(mdev, e, WRITE, DRBD_FAULT_RS_WR) == 0)
-               return TRUE;
+               return true;
 
-       /* drbd_submit_ee currently fails for one reason only:
-        * not being able to allocate enough bios.
-        * Is dropping the connection going to help? */
+       /* don't care for the reason here */
+       dev_err(DEV, "submit failed, triggering re-connect\n");
        spin_lock_irq(&mdev->req_lock);
        list_del(&e->w.list);
        spin_unlock_irq(&mdev->req_lock);
@@ -1437,7 +1467,7 @@ static int recv_resync_read(struct drbd_conf *mdev, sector_t sector, int data_si
        drbd_free_ee(mdev, e);
 fail:
        put_ldev(mdev);
-       return FALSE;
+       return false;
 }
 
 static int receive_DataReply(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
@@ -1454,7 +1484,7 @@ static int receive_DataReply(struct drbd_conf *mdev, enum drbd_packets cmd, unsi
        spin_unlock_irq(&mdev->req_lock);
        if (unlikely(!req)) {
                dev_err(DEV, "Got a corrupt block_id/sector pair(1).\n");
-               return FALSE;
+               return false;
        }
 
        /* hlist_del(&req->colision) is done in _req_may_be_done, to avoid
@@ -1611,15 +1641,15 @@ static int drbd_wait_peer_seq(struct drbd_conf *mdev, const u32 packet_seq)
        return ret;
 }
 
-static unsigned long write_flags_to_bio(struct drbd_conf *mdev, u32 dpf)
+/* see also bio_flags_to_wire()
+ * DRBD_REQ_*, because we need to semantically map the flags to data packet
+ * flags and back. We may replicate to other kernel versions. */
+static unsigned long wire_flags_to_bio(struct drbd_conf *mdev, u32 dpf)
 {
-       if (mdev->agreed_pro_version >= 95)
-               return  (dpf & DP_RW_SYNC ? REQ_SYNC : 0) |
-                       (dpf & DP_FUA ? REQ_FUA : 0) |
-                       (dpf & DP_FLUSH ? REQ_FUA : 0) |
-                       (dpf & DP_DISCARD ? REQ_DISCARD : 0);
-       else
-               return dpf & DP_RW_SYNC ? REQ_SYNC : 0;
+       return  (dpf & DP_RW_SYNC ? REQ_SYNC : 0) |
+               (dpf & DP_FUA ? REQ_FUA : 0) |
+               (dpf & DP_FLUSH ? REQ_FLUSH : 0) |
+               (dpf & DP_DISCARD ? REQ_DISCARD : 0);
 }
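
wire_flags_to_bio() is the receive-side inverse of the bio_flags_to_wire() helper named in the comment (the sending side is not part of this hunk). Note the behavior fix visible above: DP_FLUSH now maps to REQ_FLUSH, where the old code mapped it to REQ_FUA. A short illustration:

    u32 dpf = DP_FUA | DP_FLUSH;               /* as received on the wire */
    unsigned long rw = wire_flags_to_bio(mdev, dpf);
    /* rw == REQ_FUA | REQ_FLUSH; previously DP_FLUSH also yielded REQ_FUA */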
 
 /* mirrored write */
@@ -1632,9 +1662,6 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
        u32 dp_flags;
 
        if (!get_ldev(mdev)) {
-               if (__ratelimit(&drbd_ratelimit_state))
-                       dev_err(DEV, "Can not write mirrored data block "
-                           "to local disk.\n");
                spin_lock(&mdev->peer_seq_lock);
                if (mdev->peer_seq+1 == be32_to_cpu(p->seq_num))
                        mdev->peer_seq++;
@@ -1654,23 +1681,23 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
        e = read_in_block(mdev, p->block_id, sector, data_size);
        if (!e) {
                put_ldev(mdev);
-               return FALSE;
+               return false;
        }
 
        e->w.cb = e_end_block;
 
+       dp_flags = be32_to_cpu(p->dp_flags);
+       rw |= wire_flags_to_bio(mdev, dp_flags);
+
+       if (dp_flags & DP_MAY_SET_IN_SYNC)
+               e->flags |= EE_MAY_SET_IN_SYNC;
+
        spin_lock(&mdev->epoch_lock);
        e->epoch = mdev->current_epoch;
        atomic_inc(&e->epoch->epoch_size);
        atomic_inc(&e->epoch->active);
        spin_unlock(&mdev->epoch_lock);
 
-       dp_flags = be32_to_cpu(p->dp_flags);
-       rw |= write_flags_to_bio(mdev, dp_flags);
-
-       if (dp_flags & DP_MAY_SET_IN_SYNC)
-               e->flags |= EE_MAY_SET_IN_SYNC;
-
        /* I'm the receiver, I do hold a net_cnt reference. */
        if (!mdev->net_conf->two_primaries) {
                spin_lock_irq(&mdev->req_lock);
@@ -1773,7 +1800,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
                                put_ldev(mdev);
                                wake_asender(mdev);
                                finish_wait(&mdev->misc_wait, &wait);
-                               return TRUE;
+                               return true;
                        }
 
                        if (signal_pending(current)) {
@@ -1829,11 +1856,10 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
        }
 
        if (drbd_submit_ee(mdev, e, rw, DRBD_FAULT_DT_WR) == 0)
-               return TRUE;
+               return true;
 
-       /* drbd_submit_ee currently fails for one reason only:
-        * not being able to allocate enough bios.
-        * Is dropping the connection going to help? */
+       /* don't care for the reason here */
+       dev_err(DEV, "submit failed, triggering re-connect\n");
        spin_lock_irq(&mdev->req_lock);
        list_del(&e->w.list);
        hlist_del_init(&e->colision);
@@ -1842,12 +1868,10 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
                drbd_al_complete_io(mdev, e->sector);
 
 out_interrupted:
-       /* yes, the epoch_size now is imbalanced.
-        * but we drop the connection anyways, so we don't have a chance to
-        * receive a barrier... atomic_inc(&mdev->epoch_size); */
+       drbd_may_finish_epoch(mdev, e->epoch, EV_PUT + EV_CLEANUP);
        put_ldev(mdev);
        drbd_free_ee(mdev, e);
-       return FALSE;
+       return false;
 }
 
 /* We may throttle resync, if the lower device seems to be busy,
@@ -1861,10 +1885,11 @@ out_interrupted:
  * The current sync rate used here uses only the most recent two step marks,
  * to have a short time average so we can react faster.
  */
-int drbd_rs_should_slow_down(struct drbd_conf *mdev)
+int drbd_rs_should_slow_down(struct drbd_conf *mdev, sector_t sector)
 {
        struct gendisk *disk = mdev->ldev->backing_bdev->bd_contains->bd_disk;
        unsigned long db, dt, dbdt;
+       struct lc_element *tmp;
        int curr_events;
        int throttle = 0;
 
@@ -1872,9 +1897,22 @@ int drbd_rs_should_slow_down(struct drbd_conf *mdev)
        if (mdev->sync_conf.c_min_rate == 0)
                return 0;
 
+       spin_lock_irq(&mdev->al_lock);
+       tmp = lc_find(mdev->resync, BM_SECT_TO_EXT(sector));
+       if (tmp) {
+               struct bm_extent *bm_ext = lc_entry(tmp, struct bm_extent, lce);
+               if (test_bit(BME_PRIORITY, &bm_ext->flags)) {
+                       spin_unlock_irq(&mdev->al_lock);
+                       return 0;
+               }
+               /* Do not slow down if app IO is already waiting for this extent */
+       }
+       spin_unlock_irq(&mdev->al_lock);
+
        curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
                      (int)part_stat_read(&disk->part0, sectors[1]) -
                        atomic_read(&mdev->rs_sect_ev);
+
        if (!mdev->rs_last_events || curr_events - mdev->rs_last_events > 64) {
                unsigned long rs_left;
                int i;
@@ -1883,8 +1921,12 @@ int drbd_rs_should_slow_down(struct drbd_conf *mdev)
 
                /* sync speed average over the last 2*DRBD_SYNC_MARK_STEP,
                 * approx. */
-               i = (mdev->rs_last_mark + DRBD_SYNC_MARKS-2) % DRBD_SYNC_MARKS;
-               rs_left = drbd_bm_total_weight(mdev) - mdev->rs_failed;
+               i = (mdev->rs_last_mark + DRBD_SYNC_MARKS-1) % DRBD_SYNC_MARKS;
+
+               if (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T)
+                       rs_left = mdev->ov_left;
+               else
+                       rs_left = drbd_bm_total_weight(mdev) - mdev->rs_failed;
 
                dt = ((long)jiffies - (long)mdev->rs_mark_time[i]) / HZ;
                if (!dt)
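
The hunk is truncated here; the remainder of the function follows the same short-time-average pattern as the ~3s average added to drbd_proc.c above. A sketch of how the comparison against the configured minimum rate plausibly concludes (an assumption, not verbatim from this diff):

    if (!dt)
            dt++;
    db = mdev->rs_mark_left[i] - rs_left;    /* bits done since that mark */
    dbdt = Bit2KB(db/dt);                    /* short-time rate in kB/sec */
    if (dbdt > mdev->sync_conf.c_min_rate)   /* already above the floor?  */
            throttle = 1;                    /* yield to application IO   */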
@@ -1912,15 +1954,15 @@ static int receive_DataRequest(struct drbd_conf *mdev, enum drbd_packets cmd, un
        sector = be64_to_cpu(p->sector);
        size   = be32_to_cpu(p->blksize);
 
-       if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_SEGMENT_SIZE) {
+       if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_BIO_SIZE) {
                dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
                                (unsigned long long)sector, size);
-               return FALSE;
+               return false;
        }
        if (sector + (size>>9) > capacity) {
                dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
                                (unsigned long long)sector, size);
-               return FALSE;
+               return false;
        }
 
        if (!get_ldev_if_state(mdev, D_UP_TO_DATE)) {
@@ -1957,7 +1999,7 @@ static int receive_DataRequest(struct drbd_conf *mdev, enum drbd_packets cmd, un
        e = drbd_alloc_ee(mdev, p->block_id, sector, size, GFP_NOIO);
        if (!e) {
                put_ldev(mdev);
-               return FALSE;
+               return false;
        }
 
        switch (cmd) {
@@ -1970,6 +2012,8 @@ static int receive_DataRequest(struct drbd_conf *mdev, enum drbd_packets cmd, un
        case P_RS_DATA_REQUEST:
                e->w.cb = w_e_end_rsdata_req;
                fault_type = DRBD_FAULT_RS_RD;
+               /* used in the sector offset progress display */
+               mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
                break;
 
        case P_OV_REPLY:
@@ -1991,7 +2035,11 @@ static int receive_DataRequest(struct drbd_conf *mdev, enum drbd_packets cmd, un
                if (cmd == P_CSUM_RS_REQUEST) {
                        D_ASSERT(mdev->agreed_pro_version >= 89);
                        e->w.cb = w_e_end_csum_rs_req;
+                       /* used in the sector offset progress display */
+                       mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
                } else if (cmd == P_OV_REPLY) {
+                       /* track progress, we may need to throttle */
+                       atomic_add(size >> 9, &mdev->rs_sect_in);
                        e->w.cb = w_e_end_ov_reply;
                        dec_rs_pending(mdev);
                        /* drbd_rs_begin_io done when we sent this request,
@@ -2003,9 +2051,16 @@ static int receive_DataRequest(struct drbd_conf *mdev, enum drbd_packets cmd, un
        case P_OV_REQUEST:
                if (mdev->ov_start_sector == ~(sector_t)0 &&
                    mdev->agreed_pro_version >= 90) {
+                       unsigned long now = jiffies;
+                       int i;
                        mdev->ov_start_sector = sector;
                        mdev->ov_position = sector;
-                       mdev->ov_left = mdev->rs_total - BM_SECT_TO_BIT(sector);
+                       mdev->ov_left = drbd_bm_bits(mdev) - BM_SECT_TO_BIT(sector);
+                       mdev->rs_total = mdev->ov_left;
+                       for (i = 0; i < DRBD_SYNC_MARKS; i++) {
+                               mdev->rs_mark_left[i] = mdev->ov_left;
+                               mdev->rs_mark_time[i] = now;
+                       }
                        dev_info(DEV, "Online Verify start sector: %llu\n",
                                        (unsigned long long)sector);
                }
@@ -2042,9 +2097,9 @@ static int receive_DataRequest(struct drbd_conf *mdev, enum drbd_packets cmd, un
         * we would also throttle its application reads.
         * In that case, throttling is done on the SyncTarget only.
         */
-       if (mdev->state.peer != R_PRIMARY && drbd_rs_should_slow_down(mdev))
-               msleep(100);
-       if (drbd_rs_begin_io(mdev, e->sector))
+       if (mdev->state.peer != R_PRIMARY && drbd_rs_should_slow_down(mdev, sector))
+               schedule_timeout_uninterruptible(HZ/10);
+       if (drbd_rs_begin_io(mdev, sector))
                goto out_free_e;
 
 submit_for_resync:
@@ -2057,11 +2112,10 @@ submit:
        spin_unlock_irq(&mdev->req_lock);
 
        if (drbd_submit_ee(mdev, e, READ, fault_type) == 0)
-               return TRUE;
+               return true;
 
-       /* drbd_submit_ee currently fails for one reason only:
-        * not being able to allocate enough bios.
-        * Is dropping the connection going to help? */
+       /* don't care for the reason here */
+       dev_err(DEV, "submit failed, triggering re-connect\n");
        spin_lock_irq(&mdev->req_lock);
        list_del(&e->w.list);
        spin_unlock_irq(&mdev->req_lock);
@@ -2070,7 +2124,7 @@ submit:
 out_free_e:
        put_ldev(mdev);
        drbd_free_ee(mdev, e);
-       return FALSE;
+       return false;
 }
 
 static int drbd_asb_recover_0p(struct drbd_conf *mdev) __must_hold(local)
@@ -2147,10 +2201,7 @@ static int drbd_asb_recover_0p(struct drbd_conf *mdev) __must_hold(local)
 
 static int drbd_asb_recover_1p(struct drbd_conf *mdev) __must_hold(local)
 {
-       int self, peer, hg, rv = -100;
-
-       self = mdev->ldev->md.uuid[UI_BITMAP] & 1;
-       peer = mdev->p_uuid[UI_BITMAP] & 1;
+       int hg, rv = -100;
 
        switch (mdev->net_conf->after_sb_1p) {
        case ASB_DISCARD_YOUNGER_PRI:
@@ -2177,12 +2228,14 @@ static int drbd_asb_recover_1p(struct drbd_conf *mdev) __must_hold(local)
        case ASB_CALL_HELPER:
                hg = drbd_asb_recover_0p(mdev);
                if (hg == -1 && mdev->state.role == R_PRIMARY) {
-                       self = drbd_set_role(mdev, R_SECONDARY, 0);
+                       enum drbd_state_rv rv2;
+
+                       drbd_set_role(mdev, R_SECONDARY, 0);
                         /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
                          * we might be here in C_WF_REPORT_PARAMS which is transient.
                          * we do not need to wait for the after state change work either. */
-                       self = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
-                       if (self != SS_SUCCESS) {
+                       rv2 = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
+                       if (rv2 != SS_SUCCESS) {
                                drbd_khelper(mdev, "pri-lost-after-sb");
                        } else {
                                dev_warn(DEV, "Successfully gave up primary role.\n");
@@ -2197,10 +2250,7 @@ static int drbd_asb_recover_1p(struct drbd_conf *mdev) __must_hold(local)
 
 static int drbd_asb_recover_2p(struct drbd_conf *mdev) __must_hold(local)
 {
-       int self, peer, hg, rv = -100;
-
-       self = mdev->ldev->md.uuid[UI_BITMAP] & 1;
-       peer = mdev->p_uuid[UI_BITMAP] & 1;
+       int hg, rv = -100;
 
        switch (mdev->net_conf->after_sb_2p) {
        case ASB_DISCARD_YOUNGER_PRI:
@@ -2220,11 +2270,13 @@ static int drbd_asb_recover_2p(struct drbd_conf *mdev) __must_hold(local)
        case ASB_CALL_HELPER:
                hg = drbd_asb_recover_0p(mdev);
                if (hg == -1) {
+                       enum drbd_state_rv rv2;
+
                         /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
                          * we might be here in C_WF_REPORT_PARAMS which is transient.
                          * we do not need to wait for the after state change work either. */
-                       self = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
-                       if (self != SS_SUCCESS) {
+                       rv2 = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
+                       if (rv2 != SS_SUCCESS) {
                                drbd_khelper(mdev, "pri-lost-after-sb");
                        } else {
                                dev_warn(DEV, "Successfully gave up primary role.\n");
@@ -2263,6 +2315,8 @@ static void drbd_uuid_dump(struct drbd_conf *mdev, char *text, u64 *uuid,
    -2  C_SYNC_TARGET set BitMap
  -100  after split brain, disconnect
 -1000  unrelated data
+-1091   requires proto 91
+-1096   requires proto 96
  */
 static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(local)
 {
@@ -2292,7 +2346,7 @@ static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(l
                if (mdev->p_uuid[UI_BITMAP] == (u64)0 && mdev->ldev->md.uuid[UI_BITMAP] != (u64)0) {
 
                        if (mdev->agreed_pro_version < 91)
-                               return -1001;
+                               return -1091;
 
                        if ((mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) &&
                            (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1))) {
@@ -2313,7 +2367,7 @@ static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(l
                if (mdev->ldev->md.uuid[UI_BITMAP] == (u64)0 && mdev->p_uuid[UI_BITMAP] != (u64)0) {
 
                        if (mdev->agreed_pro_version < 91)
-                               return -1001;
+                               return -1091;
 
                        if ((mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_BITMAP] & ~((u64)1)) &&
                            (mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1))) {
@@ -2358,17 +2412,22 @@ static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(l
        *rule_nr = 51;
        peer = mdev->p_uuid[UI_HISTORY_START] & ~((u64)1);
        if (self == peer) {
-               self = mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1);
-               peer = mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1);
-               if (self == peer) {
+               if (mdev->agreed_pro_version < 96 ?
+                   (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) ==
+                   (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1)) :
+                   peer + UUID_NEW_BM_OFFSET == (mdev->p_uuid[UI_BITMAP] & ~((u64)1))) {
                        /* The last P_SYNC_UUID did not get through. Undo the last start of
                           resync as sync source modifications of the peer's UUIDs. */
 
                        if (mdev->agreed_pro_version < 91)
-                               return -1001;
+                               return -1091;
 
                        mdev->p_uuid[UI_BITMAP] = mdev->p_uuid[UI_HISTORY_START];
                        mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_HISTORY_START + 1];
+
+                       dev_info(DEV, "Did not get last syncUUID packet, corrected:\n");
+                       drbd_uuid_dump(mdev, "peer", mdev->p_uuid, mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
+
                        return -1;
                }
        }
@@ -2390,20 +2449,20 @@ static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(l
        *rule_nr = 71;
        self = mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1);
        if (self == peer) {
-               self = mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1);
-               peer = mdev->p_uuid[UI_HISTORY_START] & ~((u64)1);
-               if (self == peer) {
+               if (mdev->agreed_pro_version < 96 ?
+                   (mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) ==
+                   (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) :
+                   self + UUID_NEW_BM_OFFSET == (mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1))) {
                        /* The last P_SYNC_UUID did not get through. Undo the last start of
                           resync as sync source modifications of our UUIDs. */
 
                        if (mdev->agreed_pro_version < 91)
-                               return -1001;
+                               return -1091;
 
                        _drbd_uuid_set(mdev, UI_BITMAP, mdev->ldev->md.uuid[UI_HISTORY_START]);
                        _drbd_uuid_set(mdev, UI_HISTORY_START, mdev->ldev->md.uuid[UI_HISTORY_START + 1]);
 
-                       dev_info(DEV, "Undid last start of resync:\n");
-
+                       dev_info(DEV, "Last syncUUID did not get through, corrected:\n");
                        drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
                                       mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);
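
Note: the proto >= 96 branches in rules 51 and 71 rely on how a sync source
generates a fresh bitmap UUID when a resync starts: the new value is the old
one bumped by UUID_NEW_BM_OFFSET, so a lost P_SYNC_UUID can be detected from
the UUIDs alone.  A minimal sketch of the relation being tested (illustrative
only; UUID_NEW_BM_OFFSET is defined elsewhere in drbd, and masking bit 0
strips the flag bit, as in the surrounding code):

	static inline int bm_uuid_was_bumped(u64 prev_uuid, u64 cur_bm_uuid)
	{
		return (prev_uuid & ~(u64)1) + UUID_NEW_BM_OFFSET ==
		       (cur_bm_uuid & ~(u64)1);
	}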
 
@@ -2466,8 +2525,8 @@ static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_rol
                dev_alert(DEV, "Unrelated data, aborting!\n");
                return C_MASK;
        }
-       if (hg == -1001) {
-               dev_alert(DEV, "To resolve this both sides have to support at least protocol\n");
+       if (hg < -1000) {
+               dev_alert(DEV, "To resolve this both sides have to support at least protocol %d\n", -hg - 1000);
                return C_MASK;
        }
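
Note: return codes below -1000 fold the required protocol version into the
error value; -hg - 1000 recovers it, e.g. hg == -1091 prints 91 and
hg == -1096 prints 96, matching the -1091/-1096 codes documented above
drbd_uuid_compare().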
 
@@ -2566,7 +2625,8 @@ static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_rol
 
        if (abs(hg) >= 2) {
                dev_info(DEV, "Writing the whole bitmap, full sync required after drbd_sync_handshake.\n");
-               if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write, "set_n_write from sync_handshake"))
+               if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write, "set_n_write from sync_handshake",
+                                       BM_LOCKED_SET_ALLOWED))
                        return C_MASK;
        }
 
@@ -2660,7 +2720,7 @@ static int receive_protocol(struct drbd_conf *mdev, enum drbd_packets cmd, unsig
                unsigned char *my_alg = mdev->net_conf->integrity_alg;
 
                if (drbd_recv(mdev, p_integrity_alg, data_size) != data_size)
-                       return FALSE;
+                       return false;
 
                p_integrity_alg[SHARED_SECRET_MAX-1] = 0;
                if (strcmp(p_integrity_alg, my_alg)) {
@@ -2671,11 +2731,11 @@ static int receive_protocol(struct drbd_conf *mdev, enum drbd_packets cmd, unsig
                     my_alg[0] ? my_alg : (unsigned char *)"<not-used>");
        }
 
-       return TRUE;
+       return true;
 
 disconnect:
        drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
-       return FALSE;
+       return false;
 }
 
 /* helper function
@@ -2707,7 +2767,7 @@ struct crypto_hash *drbd_crypto_alloc_digest_safe(const struct drbd_conf *mdev,
 
 static int receive_SyncParam(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int packet_size)
 {
-       int ok = TRUE;
+       int ok = true;
        struct p_rs_param_95 *p = &mdev->data.rbuf.rs_param_95;
        unsigned int header_size, data_size, exp_max_sz;
        struct crypto_hash *verify_tfm = NULL;
@@ -2725,7 +2785,7 @@ static int receive_SyncParam(struct drbd_conf *mdev, enum drbd_packets cmd, unsi
        if (packet_size > exp_max_sz) {
                dev_err(DEV, "SyncParam packet too long: received %u, expected <= %u bytes\n",
                    packet_size, exp_max_sz);
-               return FALSE;
+               return false;
        }
 
        if (apv <= 88) {
@@ -2745,7 +2805,7 @@ static int receive_SyncParam(struct drbd_conf *mdev, enum drbd_packets cmd, unsi
        memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);
 
        if (drbd_recv(mdev, &p->head.payload, header_size) != header_size)
-               return FALSE;
+               return false;
 
        mdev->sync_conf.rate      = be32_to_cpu(p->rate);
 
@@ -2755,11 +2815,11 @@ static int receive_SyncParam(struct drbd_conf *mdev, enum drbd_packets cmd, unsi
                                dev_err(DEV, "verify-alg too long, "
                                    "peer wants %u, accepting only %u byte\n",
                                                data_size, SHARED_SECRET_MAX);
-                               return FALSE;
+                               return false;
                        }
 
                        if (drbd_recv(mdev, p->verify_alg, data_size) != data_size)
-                               return FALSE;
+                               return false;
 
                        /* we expect NUL terminated string */
                        /* but just in case someone tries to be evil */
@@ -2853,7 +2913,7 @@ disconnect:
        /* but free the verify_tfm again, if csums_tfm did not work out */
        crypto_free_hash(verify_tfm);
        drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
-       return FALSE;
+       return false;
 }
 
 static void drbd_setup_order_type(struct drbd_conf *mdev, int peer)
@@ -2879,7 +2939,7 @@ static int receive_sizes(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
 {
        struct p_sizes *p = &mdev->data.rbuf.sizes;
        enum determine_dev_size dd = unchanged;
-       unsigned int max_seg_s;
+       unsigned int max_bio_size;
        sector_t p_size, p_usize, my_usize;
        int ldsc = 0; /* local disk size changed */
        enum dds_flags ddsf;
@@ -2890,7 +2950,7 @@ static int receive_sizes(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
        if (p_size == 0 && mdev->state.disk == D_DISKLESS) {
                dev_err(DEV, "some backing storage is needed\n");
                drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
-               return FALSE;
+               return false;
        }
 
        /* just store the peer's disk size for now.
@@ -2927,18 +2987,17 @@ static int receive_sizes(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
                        drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
                        mdev->ldev->dc.disk_size = my_usize;
                        put_ldev(mdev);
-                       return FALSE;
+                       return false;
                }
                put_ldev(mdev);
        }
-#undef min_not_zero
 
        ddsf = be16_to_cpu(p->dds_flags);
        if (get_ldev(mdev)) {
                dd = drbd_determin_dev_size(mdev, ddsf);
                put_ldev(mdev);
                if (dd == dev_size_error)
-                       return FALSE;
+                       return false;
                drbd_md_sync(mdev);
        } else {
                /* I am diskless, need to accept the peer's size. */
@@ -2952,14 +3011,14 @@ static int receive_sizes(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
                }
 
                if (mdev->agreed_pro_version < 94)
-                       max_seg_s = be32_to_cpu(p->max_segment_size);
+                       max_bio_size = be32_to_cpu(p->max_bio_size);
                else if (mdev->agreed_pro_version == 94)
-                       max_seg_s = DRBD_MAX_SIZE_H80_PACKET;
+                       max_bio_size = DRBD_MAX_SIZE_H80_PACKET;
                else /* drbd 8.3.8 onwards */
-                       max_seg_s = DRBD_MAX_SEGMENT_SIZE;
+                       max_bio_size = DRBD_MAX_BIO_SIZE;
 
-               if (max_seg_s != queue_max_segment_size(mdev->rq_queue))
-                       drbd_setup_queue_param(mdev, max_seg_s);
+               if (max_bio_size != queue_max_hw_sectors(mdev->rq_queue) << 9)
+                       drbd_setup_queue_param(mdev, max_bio_size);
 
                drbd_setup_order_type(mdev, be16_to_cpu(p->queue_order_type));
                put_ldev(mdev);
@@ -2985,14 +3044,14 @@ static int receive_sizes(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
                }
        }
 
-       return TRUE;
+       return true;
 }
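
Note on the three max_bio_size branches above: peers older than protocol 94
announce their own limit, which can be used directly; protocol 94 peers are
clamped to DRBD_MAX_SIZE_H80_PACKET, since the 16-bit length field of the old
p_header80 cannot describe larger data packets; from protocol 95 on both
sides use the fixed DRBD_MAX_BIO_SIZE (the exact constants live in
drbd_int.h).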
 
 static int receive_uuids(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
 {
        struct p_uuids *p = &mdev->data.rbuf.uuids;
        u64 *p_uuid;
-       int i;
+       int i, updated_uuids = 0;
 
        p_uuid = kmalloc(sizeof(u64)*UI_EXTENDED_SIZE, GFP_NOIO);
 
@@ -3009,7 +3068,7 @@ static int receive_uuids(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
                dev_err(DEV, "Can only connect to data with current UUID=%016llX\n",
                    (unsigned long long)mdev->ed_uuid);
                drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
-               return FALSE;
+               return false;
        }
 
        if (get_ldev(mdev)) {
@@ -3021,19 +3080,21 @@ static int receive_uuids(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
                if (skip_initial_sync) {
                        dev_info(DEV, "Accepted new current UUID, preparing to skip initial sync\n");
                        drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write,
-                                       "clear_n_write from receive_uuids");
+                                       "clear_n_write from receive_uuids",
+                                       BM_LOCKED_TEST_ALLOWED);
                        _drbd_uuid_set(mdev, UI_CURRENT, p_uuid[UI_CURRENT]);
                        _drbd_uuid_set(mdev, UI_BITMAP, 0);
                        _drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
                                        CS_VERBOSE, NULL);
                        drbd_md_sync(mdev);
+                       updated_uuids = 1;
                }
                put_ldev(mdev);
        } else if (mdev->state.disk < D_INCONSISTENT &&
                   mdev->state.role == R_PRIMARY) {
                /* I am a diskless primary, the peer just created a new current UUID
                   for me. */
-               drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);
+               updated_uuids = drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);
        }
 
        /* Before we test for the disk state, we should wait until an eventually
@@ -3042,9 +3103,12 @@ static int receive_uuids(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
           new disk state... */
        wait_event(mdev->misc_wait, !test_bit(CLUSTER_ST_CHANGE, &mdev->flags));
        if (mdev->state.conn >= C_CONNECTED && mdev->state.disk < D_INCONSISTENT)
-               drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);
+               updated_uuids |= drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);
 
-       return TRUE;
+       if (updated_uuids)
+               drbd_print_uuids(mdev, "receiver updated UUIDs to");
+
+       return true;
 }
 
 /**
@@ -3081,7 +3145,7 @@ static int receive_req_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsi
 {
        struct p_req_state *p = &mdev->data.rbuf.req_state;
        union drbd_state mask, val;
-       int rv;
+       enum drbd_state_rv rv;
 
        mask.i = be32_to_cpu(p->mask);
        val.i = be32_to_cpu(p->val);
@@ -3089,7 +3153,7 @@ static int receive_req_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsi
        if (test_bit(DISCARD_CONCURRENT, &mdev->flags) &&
            test_bit(CLUSTER_ST_CHANGE, &mdev->flags)) {
                drbd_send_sr_reply(mdev, SS_CONCURRENT_ST_CHG);
-               return TRUE;
+               return true;
        }
 
        mask = convert_state(mask);
@@ -3100,7 +3164,7 @@ static int receive_req_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsi
        drbd_send_sr_reply(mdev, rv);
        drbd_md_sync(mdev);
 
-       return TRUE;
+       return true;
 }
 
 static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
@@ -3145,7 +3209,7 @@ static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
                         peer_state.conn == C_CONNECTED) {
                        if (drbd_bm_total_weight(mdev) <= mdev->rs_failed)
                                drbd_resync_finished(mdev);
-                       return TRUE;
+                       return true;
                }
        }
 
@@ -3161,6 +3225,9 @@ static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
        if (ns.conn == C_WF_REPORT_PARAMS)
                ns.conn = C_CONNECTED;
 
+       if (peer_state.conn == C_AHEAD)
+               ns.conn = C_BEHIND;
+
        if (mdev->p_uuid && peer_state.disk >= D_NEGOTIATING &&
            get_ldev_if_state(mdev, D_NEGOTIATING)) {
                int cr; /* consider resync */
@@ -3195,10 +3262,10 @@ static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
                                real_peer_disk = D_DISKLESS;
                        } else {
                                if (test_and_clear_bit(CONN_DRY_RUN, &mdev->flags))
-                                       return FALSE;
+                                       return false;
                                D_ASSERT(os.conn == C_WF_REPORT_PARAMS);
                                drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
-                               return FALSE;
+                               return false;
                        }
                }
        }
@@ -3223,7 +3290,7 @@ static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
                drbd_uuid_new_current(mdev);
                clear_bit(NEW_CUR_UUID, &mdev->flags);
                drbd_force_state(mdev, NS2(conn, C_PROTOCOL_ERROR, susp, 0));
-               return FALSE;
+               return false;
        }
        rv = _drbd_set_state(mdev, ns, cs_flags, NULL);
        ns = mdev->state;
@@ -3231,7 +3298,7 @@ static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
 
        if (rv < SS_SUCCESS) {
                drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
-               return FALSE;
+               return false;
        }
 
        if (os.conn > C_WF_REPORT_PARAMS) {
@@ -3249,7 +3316,7 @@ static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
 
        drbd_md_sync(mdev); /* update connected indicator, la_size, ... */
 
-       return TRUE;
+       return true;
 }
 
 static int receive_sync_uuid(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
@@ -3258,6 +3325,7 @@ static int receive_sync_uuid(struct drbd_conf *mdev, enum drbd_packets cmd, unsi
 
        wait_event(mdev->misc_wait,
                   mdev->state.conn == C_WF_SYNC_UUID ||
+                  mdev->state.conn == C_BEHIND ||
                   mdev->state.conn < C_CONNECTED ||
                   mdev->state.disk < D_NEGOTIATING);
 
@@ -3269,32 +3337,42 @@ static int receive_sync_uuid(struct drbd_conf *mdev, enum drbd_packets cmd, unsi
                _drbd_uuid_set(mdev, UI_CURRENT, be64_to_cpu(p->uuid));
                _drbd_uuid_set(mdev, UI_BITMAP, 0UL);
 
+               drbd_print_uuids(mdev, "updated sync uuid");
                drbd_start_resync(mdev, C_SYNC_TARGET);
 
                put_ldev(mdev);
        } else
                dev_err(DEV, "Ignoring SyncUUID packet!\n");
 
-       return TRUE;
+       return true;
 }
 
-enum receive_bitmap_ret { OK, DONE, FAILED };
-
-static enum receive_bitmap_ret
+/**
+ * receive_bitmap_plain - receive one chunk of a plain, uncompressed bitmap
+ *
+ * Return 0 when done, 1 when another iteration is needed, and a negative error
+ * code upon failure.
+ */
+static int
 receive_bitmap_plain(struct drbd_conf *mdev, unsigned int data_size,
                     unsigned long *buffer, struct bm_xfer_ctx *c)
 {
        unsigned num_words = min_t(size_t, BM_PACKET_WORDS, c->bm_words - c->word_offset);
        unsigned want = num_words * sizeof(long);
+       int err;
 
        if (want != data_size) {
                dev_err(DEV, "%s:want (%u) != data_size (%u)\n", __func__, want, data_size);
-               return FAILED;
+               return -EIO;
        }
        if (want == 0)
-               return DONE;
-       if (drbd_recv(mdev, buffer, want) != want)
-               return FAILED;
+               return 0;
+       err = drbd_recv(mdev, buffer, want);
+       if (err != want) {
+               if (err >= 0)
+                       err = -EIO;
+               return err;
+       }
 
        drbd_bm_merge_lel(mdev, c->word_offset, num_words, buffer);
 
@@ -3303,10 +3381,16 @@ receive_bitmap_plain(struct drbd_conf *mdev, unsigned int data_size,
        if (c->bit_offset > c->bm_bits)
                c->bit_offset = c->bm_bits;
 
-       return OK;
+       return 1;
 }
 
-static enum receive_bitmap_ret
+/**
+ * recv_bm_rle_bits - decode RLE encoded bitmap bits from a compressed packet
+ *
+ * Return 0 when done, 1 when another iteration is needed, and a negative error
+ * code upon failure.
+ */
+static int
 recv_bm_rle_bits(struct drbd_conf *mdev,
                struct p_compressed_bm *p,
                struct bm_xfer_ctx *c)
@@ -3326,18 +3410,18 @@ recv_bm_rle_bits(struct drbd_conf *mdev,
 
        bits = bitstream_get_bits(&bs, &look_ahead, 64);
        if (bits < 0)
-               return FAILED;
+               return -EIO;
 
        for (have = bits; have > 0; s += rl, toggle = !toggle) {
                bits = vli_decode_bits(&rl, look_ahead);
                if (bits <= 0)
-                       return FAILED;
+                       return -EIO;
 
                if (toggle) {
                        e = s + rl -1;
                        if (e >= c->bm_bits) {
                                dev_err(DEV, "bitmap overflow (e:%lu) while decoding bm RLE packet\n", e);
-                               return FAILED;
+                               return -EIO;
                        }
                        _drbd_bm_set_bits(mdev, s, e);
                }
@@ -3347,14 +3431,14 @@ recv_bm_rle_bits(struct drbd_conf *mdev,
                                have, bits, look_ahead,
                                (unsigned int)(bs.cur.b - p->code),
                                (unsigned int)bs.buf_len);
-                       return FAILED;
+                       return -EIO;
                }
                look_ahead >>= bits;
                have -= bits;
 
                bits = bitstream_get_bits(&bs, &tmp, 64 - have);
                if (bits < 0)
-                       return FAILED;
+                       return -EIO;
                look_ahead |= tmp << have;
                have += bits;
        }
@@ -3362,10 +3446,16 @@ recv_bm_rle_bits(struct drbd_conf *mdev,
        c->bit_offset = s;
        bm_xfer_ctx_bit_to_word_offset(c);
 
-       return (s == c->bm_bits) ? DONE : OK;
+       return (s != c->bm_bits);
 }
 
-static enum receive_bitmap_ret
+/**
+ * decode_bitmap_c - dispatch decoding of a compressed bitmap packet
+ *
+ * Return 0 when done, 1 when another iteration is needed, and a negative error
+ * code upon failure.
+ */
+static int
 decode_bitmap_c(struct drbd_conf *mdev,
                struct p_compressed_bm *p,
                struct bm_xfer_ctx *c)
@@ -3379,7 +3469,7 @@ decode_bitmap_c(struct drbd_conf *mdev,
 
        dev_err(DEV, "receive_bitmap_c: unknown encoding %u\n", p->encoding);
        drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
-       return FAILED;
+       return -EIO;
 }
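
All three helpers now share one convention: 0 means done, a positive value
means another iteration is needed, and a negative value is an -errno style
failure.  A minimal consumer sketch, with a hypothetical receive_chunk()
standing in for the helpers above:

	static int consume_all(struct drbd_conf *mdev)
	{
		int err;

		do
			err = receive_chunk(mdev); /* hypothetical: 0 done, 1 more, <0 error */
		while (err > 0);

		return err; /* 0 on success, negative on failure */
	}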
 
 void INFO_bm_xfer_stats(struct drbd_conf *mdev,
@@ -3428,13 +3518,13 @@ static int receive_bitmap(struct drbd_conf *mdev, enum drbd_packets cmd, unsigne
 {
        struct bm_xfer_ctx c;
        void *buffer;
-       enum receive_bitmap_ret ret;
-       int ok = FALSE;
+       int err;
+       int ok = false;
        struct p_header80 *h = &mdev->data.rbuf.header.h80;
 
-       wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_bio_cnt));
-
-       drbd_bm_lock(mdev, "receive bitmap");
+       drbd_bm_lock(mdev, "receive bitmap", BM_LOCKED_SET_ALLOWED);
+       /* you are supposed to send additional out-of-sync information
+        * if you actually set bits during this phase */
 
        /* maybe we should use some per thread scratch page,
         * and allocate that during initial device creation? */
@@ -3449,9 +3539,9 @@ static int receive_bitmap(struct drbd_conf *mdev, enum drbd_packets cmd, unsigne
                .bm_words = drbd_bm_words(mdev),
        };
 
-       do {
+       for (;;) {
                if (cmd == P_BITMAP) {
-                       ret = receive_bitmap_plain(mdev, data_size, buffer, &c);
+                       err = receive_bitmap_plain(mdev, data_size, buffer, &c);
                } else if (cmd == P_COMPRESSED_BITMAP) {
                        /* MAYBE: sanity check that we speak proto >= 90,
                         * and the feature is enabled! */
@@ -3468,9 +3558,9 @@ static int receive_bitmap(struct drbd_conf *mdev, enum drbd_packets cmd, unsigne
                                goto out;
                        if (data_size <= (sizeof(*p) - sizeof(p->head))) {
                                dev_err(DEV, "ReportCBitmap packet too small (l:%u)\n", data_size);
-                               return FAILED;
+                               goto out;
                        }
-                       ret = decode_bitmap_c(mdev, p, &c);
+                       err = decode_bitmap_c(mdev, p, &c);
                } else {
                        dev_warn(DEV, "receive_bitmap: cmd neither ReportBitMap nor ReportCBitMap (is 0x%x)", cmd);
                        goto out;
@@ -3479,24 +3569,26 @@ static int receive_bitmap(struct drbd_conf *mdev, enum drbd_packets cmd, unsigne
                c.packets[cmd == P_BITMAP]++;
                c.bytes[cmd == P_BITMAP] += sizeof(struct p_header80) + data_size;
 
-               if (ret != OK)
+               if (err <= 0) {
+                       if (err < 0)
+                               goto out;
                        break;
-
+               }
                if (!drbd_recv_header(mdev, &cmd, &data_size))
                        goto out;
-       } while (ret == OK);
-       if (ret == FAILED)
-               goto out;
+       }
 
        INFO_bm_xfer_stats(mdev, "receive", &c);
 
        if (mdev->state.conn == C_WF_BITMAP_T) {
+               enum drbd_state_rv rv;
+
                ok = !drbd_send_bitmap(mdev);
                if (!ok)
                        goto out;
                /* Omit CS_ORDERED with this state transition to avoid deadlocks. */
-               ok = _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
-               D_ASSERT(ok == SS_SUCCESS);
+               rv = _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
+               D_ASSERT(rv == SS_SUCCESS);
        } else if (mdev->state.conn != C_WF_BITMAP_S) {
                /* admin may have requested C_DISCONNECTING,
                 * other threads may have noticed network errors */
@@ -3504,7 +3596,7 @@ static int receive_bitmap(struct drbd_conf *mdev, enum drbd_packets cmd, unsigne
                    drbd_conn_str(mdev->state.conn));
        }
 
-       ok = TRUE;
+       ok = true;
  out:
        drbd_bm_unlock(mdev);
        if (ok && mdev->state.conn == C_WF_BITMAP_S)
@@ -3538,7 +3630,26 @@ static int receive_UnplugRemote(struct drbd_conf *mdev, enum drbd_packets cmd, u
         * with the data requests being unplugged */
        drbd_tcp_quickack(mdev->data.socket);
 
-       return TRUE;
+       return true;
+}
+
+static int receive_out_of_sync(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
+{
+       struct p_block_desc *p = &mdev->data.rbuf.block_desc;
+
+       switch (mdev->state.conn) {
+       case C_WF_SYNC_UUID:
+       case C_WF_BITMAP_T:
+       case C_BEHIND:
+               break;
+       default:
+               dev_err(DEV, "ASSERT FAILED cstate = %s, expected: WFSyncUUID|WFBitMapT|Behind\n",
+                               drbd_conn_str(mdev->state.conn));
+       }
+
+       drbd_set_out_of_sync(mdev, be64_to_cpu(p->sector), be32_to_cpu(p->blksize));
+
+       return true;
 }
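
Note: P_OUT_OF_SYNC carries only a block descriptor, no data.  While the peer
runs C_AHEAD it stops mirroring writes and instead reports which areas it
dirtied; drbd_set_out_of_sync() records them in the bitmap so the later
Ahead/Behind resync covers them.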
 
 typedef int (*drbd_cmd_handler_f)(struct drbd_conf *, enum drbd_packets cmd, unsigned int to_receive);
@@ -3571,6 +3682,7 @@ static struct data_cmd drbd_cmd_handler[] = {
        [P_OV_REPLY]        = { 1, sizeof(struct p_block_req), receive_DataRequest },
        [P_CSUM_RS_REQUEST] = { 1, sizeof(struct p_block_req), receive_DataRequest },
        [P_DELAY_PROBE]     = { 0, sizeof(struct p_delay_probe93), receive_skip },
+       [P_OUT_OF_SYNC]     = { 0, sizeof(struct p_block_desc), receive_out_of_sync },
        /* anything missing from this table is in
         * the asender_tbl, see get_asender_cmd */
        [P_MAX_CMD]         = { 0, 0, NULL },
@@ -3610,7 +3722,8 @@ static void drbdd(struct drbd_conf *mdev)
                if (shs) {
                        rv = drbd_recv(mdev, &header->h80.payload, shs);
                        if (unlikely(rv != shs)) {
-                               dev_err(DEV, "short read while reading sub header: rv=%d\n", rv);
+                               if (!signal_pending(current))
+                                       dev_warn(DEV, "short read while reading sub header: rv=%d\n", rv);
                                goto err_out;
                        }
                }
@@ -3682,9 +3795,6 @@ static void drbd_disconnect(struct drbd_conf *mdev)
 
        if (mdev->state.conn == C_STANDALONE)
                return;
-       if (mdev->state.conn >= C_WF_CONNECTION)
-               dev_err(DEV, "ASSERT FAILED cstate = %s, expected < WFConnection\n",
-                               drbd_conn_str(mdev->state.conn));
 
        /* asender does not clean up anything. it must not interfere, either */
        drbd_thread_stop(&mdev->asender);
@@ -3713,6 +3823,8 @@ static void drbd_disconnect(struct drbd_conf *mdev)
        atomic_set(&mdev->rs_pending_cnt, 0);
        wake_up(&mdev->misc_wait);
 
+       del_timer(&mdev->request_timer);
+
        /* make sure syncer is stopped and w_resume_next_sg queued */
        del_timer_sync(&mdev->resync_timer);
        resync_timer_fn((unsigned long)mdev);
@@ -3758,13 +3870,6 @@ static void drbd_disconnect(struct drbd_conf *mdev)
        if (os.conn == C_DISCONNECTING) {
                wait_event(mdev->net_cnt_wait, atomic_read(&mdev->net_cnt) == 0);
 
-               if (!is_susp(mdev->state)) {
-                       /* we must not free the tl_hash
-                        * while application io is still on the fly */
-                       wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_bio_cnt));
-                       drbd_free_tl_hash(mdev);
-               }
-
                crypto_free_hash(mdev->cram_hmac_tfm);
                mdev->cram_hmac_tfm = NULL;
 
@@ -3773,6 +3878,10 @@ static void drbd_disconnect(struct drbd_conf *mdev)
                drbd_request_state(mdev, NS(conn, C_STANDALONE));
        }
 
+       /* serialize with bitmap writeout triggered by the state change,
+        * if any. */
+       wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
+
        /* tcp_close and release of sendpage pages can be deferred.  I don't
         * want to use SO_LINGER, because apparently it can be deferred for
         * more than 20 seconds (longest time I checked).
@@ -3873,7 +3982,8 @@ static int drbd_do_handshake(struct drbd_conf *mdev)
        rv = drbd_recv(mdev, &p->head.payload, expect);
 
        if (rv != expect) {
-               dev_err(DEV, "short read receiving handshake packet: l=%u\n", rv);
+               if (!signal_pending(current))
+                       dev_warn(DEV, "short read receiving handshake packet: l=%u\n", rv);
                return 0;
        }
 
@@ -3975,7 +4085,8 @@ static int drbd_do_auth(struct drbd_conf *mdev)
        rv = drbd_recv(mdev, peers_ch, length);
 
        if (rv != length) {
-               dev_err(DEV, "short read AuthChallenge: l=%u\n", rv);
+               if (!signal_pending(current))
+                       dev_warn(DEV, "short read AuthChallenge: l=%u\n", rv);
                rv = 0;
                goto fail;
        }
@@ -4022,7 +4133,8 @@ static int drbd_do_auth(struct drbd_conf *mdev)
        rv = drbd_recv(mdev, response , resp_size);
 
        if (rv != resp_size) {
-               dev_err(DEV, "short read receiving AuthResponse: l=%u\n", rv);
+               if (!signal_pending(current))
+                       dev_warn(DEV, "short read receiving AuthResponse: l=%u\n", rv);
                rv = 0;
                goto fail;
        }
@@ -4074,8 +4186,7 @@ int drbdd_init(struct drbd_thread *thi)
                h = drbd_connect(mdev);
                if (h == 0) {
                        drbd_disconnect(mdev);
-                       __set_current_state(TASK_INTERRUPTIBLE);
-                       schedule_timeout(HZ);
+                       schedule_timeout_interruptible(HZ);
                }
                if (h == -1) {
                        dev_warn(DEV, "Discarding network configuration.\n");
@@ -4113,7 +4224,7 @@ static int got_RqSReply(struct drbd_conf *mdev, struct p_header80 *h)
        }
        wake_up(&mdev->state_wait);
 
-       return TRUE;
+       return true;
 }
 
 static int got_Ping(struct drbd_conf *mdev, struct p_header80 *h)
@@ -4129,7 +4240,7 @@ static int got_PingAck(struct drbd_conf *mdev, struct p_header80 *h)
        if (!test_and_set_bit(GOT_PING_ACK, &mdev->flags))
                wake_up(&mdev->misc_wait);
 
-       return TRUE;
+       return true;
 }
 
 static int got_IsInSync(struct drbd_conf *mdev, struct p_header80 *h)
@@ -4152,7 +4263,7 @@ static int got_IsInSync(struct drbd_conf *mdev, struct p_header80 *h)
        dec_rs_pending(mdev);
        atomic_add(blksize >> 9, &mdev->rs_sect_in);
 
-       return TRUE;
+       return true;
 }
 
 /* when we receive the ACK for a write request,
@@ -4176,8 +4287,6 @@ static struct drbd_request *_ack_id_to_req(struct drbd_conf *mdev,
                        return req;
                }
        }
-       dev_err(DEV, "_ack_id_to_req: failed to find req %p, sector %llus in list\n",
-               (void *)(unsigned long)id, (unsigned long long)sector);
        return NULL;
 }
 
@@ -4195,15 +4304,17 @@ static int validate_req_change_req_state(struct drbd_conf *mdev,
        req = validator(mdev, id, sector);
        if (unlikely(!req)) {
                spin_unlock_irq(&mdev->req_lock);
-               dev_err(DEV, "%s: got a corrupt block_id/sector pair\n", func);
-               return FALSE;
+
+               dev_err(DEV, "%s: failed to find req %p, sector %llus\n", func,
+                       (void *)(unsigned long)id, (unsigned long long)sector);
+               return false;
        }
        __req_mod(req, what, &m);
        spin_unlock_irq(&mdev->req_lock);
 
        if (m.bio)
                complete_master_bio(mdev, &m);
-       return TRUE;
+       return true;
 }
 
 static int got_BlockAck(struct drbd_conf *mdev, struct p_header80 *h)
@@ -4218,7 +4329,7 @@ static int got_BlockAck(struct drbd_conf *mdev, struct p_header80 *h)
        if (is_syncer_block_id(p->block_id)) {
                drbd_set_in_sync(mdev, sector, blksize);
                dec_rs_pending(mdev);
-               return TRUE;
+               return true;
        }
        switch (be16_to_cpu(h->command)) {
        case P_RS_WRITE_ACK:
@@ -4239,7 +4350,7 @@ static int got_BlockAck(struct drbd_conf *mdev, struct p_header80 *h)
                break;
        default:
                D_ASSERT(0);
-               return FALSE;
+               return false;
        }
 
        return validate_req_change_req_state(mdev, p->block_id, sector,
@@ -4250,20 +4361,44 @@ static int got_NegAck(struct drbd_conf *mdev, struct p_header80 *h)
 {
        struct p_block_ack *p = (struct p_block_ack *)h;
        sector_t sector = be64_to_cpu(p->sector);
-
-       if (__ratelimit(&drbd_ratelimit_state))
-               dev_warn(DEV, "Got NegAck packet. Peer is in troubles?\n");
+       int size = be32_to_cpu(p->blksize);
+       struct drbd_request *req;
+       struct bio_and_error m;
 
        update_peer_seq(mdev, be32_to_cpu(p->seq_num));
 
        if (is_syncer_block_id(p->block_id)) {
-               int size = be32_to_cpu(p->blksize);
                dec_rs_pending(mdev);
                drbd_rs_failed_io(mdev, sector, size);
-               return TRUE;
+               return true;
        }
-       return validate_req_change_req_state(mdev, p->block_id, sector,
-               _ack_id_to_req, __func__ , neg_acked);
+
+       spin_lock_irq(&mdev->req_lock);
+       req = _ack_id_to_req(mdev, p->block_id, sector);
+       if (!req) {
+               spin_unlock_irq(&mdev->req_lock);
+               if (mdev->net_conf->wire_protocol == DRBD_PROT_A ||
+                   mdev->net_conf->wire_protocol == DRBD_PROT_B) {
+                       /* Protocol A has no P_WRITE_ACKs, but has P_NEG_ACKs.
+                          The master bio might already be completed, therefore the
+                          request is no longer in the collision hash.
+                          => Do not try to validate block_id as request. */
+                       /* In Protocol B we might already have got a P_RECV_ACK
+                          but then get a P_NEG_ACK afterwards. */
+                       drbd_set_out_of_sync(mdev, sector, size);
+                       return true;
+               } else {
+                       dev_err(DEV, "%s: failed to find req %p, sector %llus\n", __func__,
+                               (void *)(unsigned long)p->block_id, (unsigned long long)sector);
+                       return false;
+               }
+       }
+       __req_mod(req, neg_acked, &m);
+       spin_unlock_irq(&mdev->req_lock);
+
+       if (m.bio)
+               complete_master_bio(mdev, &m);
+       return true;
 }
 
 static int got_NegDReply(struct drbd_conf *mdev, struct p_header80 *h)
@@ -4294,11 +4429,20 @@ static int got_NegRSDReply(struct drbd_conf *mdev, struct p_header80 *h)
 
        if (get_ldev_if_state(mdev, D_FAILED)) {
                drbd_rs_complete_io(mdev, sector);
-               drbd_rs_failed_io(mdev, sector, size);
+               switch (be16_to_cpu(h->command)) {
+               case P_NEG_RS_DREPLY:
+                       drbd_rs_failed_io(mdev, sector, size);
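+                       /* fall through */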
+               case P_RS_CANCEL:
+                       break;
+               default:
+                       D_ASSERT(0);
+                       put_ldev(mdev);
+                       return false;
+               }
                put_ldev(mdev);
        }
 
-       return TRUE;
+       return true;
 }
 
 static int got_BarrierAck(struct drbd_conf *mdev, struct p_header80 *h)
@@ -4307,7 +4451,14 @@ static int got_BarrierAck(struct drbd_conf *mdev, struct p_header80 *h)
 
        tl_release(mdev, p->barrier, be32_to_cpu(p->set_size));
 
-       return TRUE;
+       if (mdev->state.conn == C_AHEAD &&
+           atomic_read(&mdev->ap_in_flight) == 0 &&
+           !test_and_set_bit(AHEAD_TO_SYNC_SOURCE, &mdev->current_epoch->flags)) {
+               mdev->start_resync_timer.expires = jiffies + HZ;
+               add_timer(&mdev->start_resync_timer);
+       }
+
+       return true;
 }
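
Note: this is a one-shot arm pattern: the first BarrierAck to observe a
drained pipe (ap_in_flight == 0) while C_AHEAD wins the test_and_set_bit()
race and schedules the Ahead-to-SyncSource transition one second out.
Sketched generically below (illustrative names; mod_timer() is equivalent to
the expires + add_timer() pair used above):

	/* assumes <linux/timer.h>, <linux/bitops.h>, <asm/atomic.h> */
	#define ARMED 0
	static atomic_t in_flight;
	static unsigned long flag_word;
	static struct timer_list resync_timer;

	static void maybe_arm_resync(void)
	{
		/* only the first caller to see a drained pipe arms the timer */
		if (atomic_read(&in_flight) == 0 &&
		    !test_and_set_bit(ARMED, &flag_word))
			mod_timer(&resync_timer, jiffies + HZ); /* fires in ~1 s */
	}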
 
 static int got_OVResult(struct drbd_conf *mdev, struct p_header80 *h)
@@ -4328,12 +4479,18 @@ static int got_OVResult(struct drbd_conf *mdev, struct p_header80 *h)
                ov_oos_print(mdev);
 
        if (!get_ldev(mdev))
-               return TRUE;
+               return true;
 
        drbd_rs_complete_io(mdev, sector);
        dec_rs_pending(mdev);
 
-       if (--mdev->ov_left == 0) {
+       --mdev->ov_left;
+
+       /* let's advance progress step marks only for every other megabyte */
+       if ((mdev->ov_left & 0x200) == 0x200)
+               drbd_advance_rs_marks(mdev, mdev->ov_left);
+
+       if (mdev->ov_left == 0) {
                w = kmalloc(sizeof(*w), GFP_NOIO);
                if (w) {
                        w->cb = w_ov_finished;
@@ -4345,12 +4502,12 @@ static int got_OVResult(struct drbd_conf *mdev, struct p_header80 *h)
                }
        }
        put_ldev(mdev);
-       return TRUE;
+       return true;
 }
 
 static int got_skip(struct drbd_conf *mdev, struct p_header80 *h)
 {
-       return TRUE;
+       return true;
 }
 
 struct asender_cmd {
@@ -4378,6 +4535,7 @@ static struct asender_cmd *get_asender_cmd(int cmd)
        [P_STATE_CHG_REPLY] = { sizeof(struct p_req_state_reply), got_RqSReply },
        [P_RS_IS_IN_SYNC]   = { sizeof(struct p_block_ack), got_IsInSync },
        [P_DELAY_PROBE]     = { sizeof(struct p_delay_probe93), got_skip },
+       [P_RS_CANCEL]       = { sizeof(struct p_block_ack), got_NegRSDReply},
        [P_MAX_CMD]         = { 0, NULL },
        };
        if (cmd > P_MAX_CMD || asender_tbl[cmd].process == NULL)
index ad3fc6228f27924d3523976fd3a5a4486c3a5b4a..5c0c8be1bb0ae144e4d82ef9d3da257ca06af99d 100644 (file)
@@ -140,9 +140,14 @@ static void _about_to_complete_local_write(struct drbd_conf *mdev,
        struct hlist_node *n;
        struct hlist_head *slot;
 
-       /* before we can signal completion to the upper layers,
-        * we may need to close the current epoch */
+       /* Before we can signal completion to the upper layers,
+        * we may need to close the current epoch.
+        * We can skip this, if this request has not even been sent, because we
+        * did not have a fully established connection yet/anymore, during
+        * bitmap exchange, or while we are C_AHEAD due to congestion policy.
+        */
        if (mdev->state.conn >= C_CONNECTED &&
+           (s & RQ_NET_SENT) != 0 &&
            req->epoch == mdev->newest_tle->br_number)
                queue_barrier(mdev);
 
@@ -440,7 +445,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
                req->rq_state |= RQ_LOCAL_COMPLETED;
                req->rq_state &= ~RQ_LOCAL_PENDING;
 
-               __drbd_chk_io_error(mdev, FALSE);
+               __drbd_chk_io_error(mdev, false);
                _req_may_be_done_not_susp(req, m);
                put_ldev(mdev);
                break;
@@ -461,7 +466,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 
                D_ASSERT(!(req->rq_state & RQ_NET_MASK));
 
-               __drbd_chk_io_error(mdev, FALSE);
+               __drbd_chk_io_error(mdev, false);
                put_ldev(mdev);
 
                /* no point in retrying if there is no good remote data,
@@ -545,6 +550,14 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 
                break;
 
+       case queue_for_send_oos:
+               req->rq_state |= RQ_NET_QUEUED;
+               req->w.cb =  w_send_oos;
+               drbd_queue_work(&mdev->data.work, &req->w);
+               break;
+
+       case oos_handed_to_network:
+               /* actually the same */
        case send_canceled:
                /* treat it the same */
        case send_failed:
@@ -558,6 +571,9 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 
        case handed_over_to_network:
                /* assert something? */
+               if (bio_data_dir(req->master_bio) == WRITE)
+                       atomic_add(req->size>>9, &mdev->ap_in_flight);
+
                if (bio_data_dir(req->master_bio) == WRITE &&
                    mdev->net_conf->wire_protocol == DRBD_PROT_A) {
                        /* this is what is dangerous about protocol A:
@@ -591,6 +607,9 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
                        dec_ap_pending(mdev);
                req->rq_state &= ~(RQ_NET_OK|RQ_NET_PENDING);
                req->rq_state |= RQ_NET_DONE;
+               if (req->rq_state & RQ_NET_SENT && req->rq_state & RQ_WRITE)
+                       atomic_sub(req->size>>9, &mdev->ap_in_flight);
+
                /* if it is still queued, we may not complete it here.
                 * it will be canceled soon. */
                if (!(req->rq_state & RQ_NET_QUEUED))
@@ -628,14 +647,17 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
                req->rq_state |= RQ_NET_OK;
                D_ASSERT(req->rq_state & RQ_NET_PENDING);
                dec_ap_pending(mdev);
+               atomic_sub(req->size>>9, &mdev->ap_in_flight);
                req->rq_state &= ~RQ_NET_PENDING;
                _req_may_be_done_not_susp(req, m);
                break;
 
        case neg_acked:
                /* assert something? */
-               if (req->rq_state & RQ_NET_PENDING)
+               if (req->rq_state & RQ_NET_PENDING) {
                        dec_ap_pending(mdev);
+                       atomic_sub(req->size>>9, &mdev->ap_in_flight);
+               }
                req->rq_state &= ~(RQ_NET_OK|RQ_NET_PENDING);
 
                req->rq_state |= RQ_NET_DONE;
@@ -690,8 +712,11 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
                        dev_err(DEV, "FIXME (barrier_acked but pending)\n");
                        list_move(&req->tl_requests, &mdev->out_of_sequence_requests);
                }
-               D_ASSERT(req->rq_state & RQ_NET_SENT);
-               req->rq_state |= RQ_NET_DONE;
+               if ((req->rq_state & RQ_NET_MASK) != 0) {
+                       req->rq_state |= RQ_NET_DONE;
+                       if (mdev->net_conf->wire_protocol == DRBD_PROT_A)
+                               atomic_sub(req->size>>9, &mdev->ap_in_flight);
+               }
                _req_may_be_done(req, m); /* Allowed while state.susp */
                break;
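
Note on the ap_in_flight accounting added throughout this hunk set: it is
incremented once per write in handed_over_to_network, and decremented exactly
once by whatever ends that write's network life: the positive ack, neg_acked,
or, for protocol A (which never sees per-request acks), the barrier_acked
case above.  The counter thus tracks write sectors the peer has not yet
accounted for, which is what the congestion check and the BarrierAck
Ahead-transition consult.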
 
@@ -738,14 +763,14 @@ static int drbd_may_do_local_read(struct drbd_conf *mdev, sector_t sector, int s
        return 0 == drbd_bm_count_bits(mdev, sbnr, ebnr);
 }
 
-static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio)
+static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio, unsigned long start_time)
 {
        const int rw = bio_rw(bio);
        const int size = bio->bi_size;
        const sector_t sector = bio->bi_sector;
        struct drbd_tl_epoch *b = NULL;
        struct drbd_request *req;
-       int local, remote;
+       int local, remote, send_oos = 0;
        int err = -EIO;
        int ret = 0;
 
@@ -759,6 +784,7 @@ static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio)
                bio_endio(bio, -ENOMEM);
                return 0;
        }
+       req->start_time = start_time;
 
        local = get_ldev(mdev);
        if (!local) {
@@ -808,9 +834,9 @@ static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio)
                drbd_al_begin_io(mdev, sector);
        }
 
-       remote = remote && (mdev->state.pdsk == D_UP_TO_DATE ||
-                           (mdev->state.pdsk == D_INCONSISTENT &&
-                            mdev->state.conn >= C_CONNECTED));
+       remote = remote && drbd_should_do_remote(mdev->state);
+       send_oos = rw == WRITE && drbd_should_send_oos(mdev->state);
+       D_ASSERT(!(remote && send_oos));
 
        if (!(local || remote) && !is_susp(mdev->state)) {
                if (__ratelimit(&drbd_ratelimit_state))
@@ -824,7 +850,7 @@ static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio)
         * but there is a race between testing the bit and pointer outside the
         * spinlock, and grabbing the spinlock.
         * if we lost that race, we retry.  */
-       if (rw == WRITE && remote &&
+       if (rw == WRITE && (remote || send_oos) &&
            mdev->unused_spare_tle == NULL &&
            test_bit(CREATE_BARRIER, &mdev->flags)) {
 allocate_barrier:
@@ -842,18 +868,19 @@ allocate_barrier:
        if (is_susp(mdev->state)) {
                /* If we got suspended, use the retry mechanism of
                   generic_make_request() to restart processing of this
-                  bio. In the next call to drbd_make_request_26
+                  bio. In the next call to drbd_make_request
                   we sleep in inc_ap_bio() */
                ret = 1;
                spin_unlock_irq(&mdev->req_lock);
                goto fail_free_complete;
        }
 
-       if (remote) {
-               remote = (mdev->state.pdsk == D_UP_TO_DATE ||
-                           (mdev->state.pdsk == D_INCONSISTENT &&
-                            mdev->state.conn >= C_CONNECTED));
-               if (!remote)
+       if (remote || send_oos) {
+               remote = drbd_should_do_remote(mdev->state);
+               send_oos = rw == WRITE && drbd_should_send_oos(mdev->state);
+               D_ASSERT(!(remote && send_oos));
+
+               if (!(remote || send_oos))
                        dev_warn(DEV, "lost connection while grabbing the req_lock!\n");
                if (!(local || remote)) {
                        dev_err(DEV, "IO ERROR: neither local nor remote disk\n");
@@ -866,7 +893,7 @@ allocate_barrier:
                mdev->unused_spare_tle = b;
                b = NULL;
        }
-       if (rw == WRITE && remote &&
+       if (rw == WRITE && (remote || send_oos) &&
            mdev->unused_spare_tle == NULL &&
            test_bit(CREATE_BARRIER, &mdev->flags)) {
                /* someone closed the current epoch
@@ -889,7 +916,7 @@ allocate_barrier:
         * barrier packet.  To get the write ordering right, we only have to
         * make sure that, if this is a write request and it triggered a
         * barrier packet, this request is queued within the same spinlock. */
-       if (remote && mdev->unused_spare_tle &&
+       if ((remote || send_oos) && mdev->unused_spare_tle &&
            test_and_clear_bit(CREATE_BARRIER, &mdev->flags)) {
                _tl_add_barrier(mdev, mdev->unused_spare_tle);
                mdev->unused_spare_tle = NULL;
@@ -937,6 +964,34 @@ allocate_barrier:
                                ? queue_for_net_write
                                : queue_for_net_read);
        }
+       if (send_oos && drbd_set_out_of_sync(mdev, sector, size))
+               _req_mod(req, queue_for_send_oos);
+
+       if (remote &&
+           mdev->net_conf->on_congestion != OC_BLOCK && mdev->agreed_pro_version >= 96) {
+               int congested = 0;
+
+               if (mdev->net_conf->cong_fill &&
+                   atomic_read(&mdev->ap_in_flight) >= mdev->net_conf->cong_fill) {
+                       dev_info(DEV, "Congestion-fill threshold reached\n");
+                       congested = 1;
+               }
+
+               if (mdev->act_log->used >= mdev->net_conf->cong_extents) {
+                       dev_info(DEV, "Congestion-extents threshold reached\n");
+                       congested = 1;
+               }
+
+               if (congested) {
+                       queue_barrier(mdev); /* last barrier, after mirrored writes */
+
+                       if (mdev->net_conf->on_congestion == OC_PULL_AHEAD)
+                               _drbd_set_state(_NS(mdev, conn, C_AHEAD), 0, NULL);
+                       else  /* mdev->net_conf->on_congestion == OC_DISCONNECT */
+                               _drbd_set_state(_NS(mdev, conn, C_DISCONNECTING), 0, NULL);
+               }
+       }
+
        spin_unlock_irq(&mdev->req_lock);
        kfree(b); /* if someone else has beaten us to it... */
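
The two thresholds correspond to the congestion-fill and congestion-extents
settings of the net section, and on-congestion picks OC_BLOCK, OC_PULL_AHEAD
or OC_DISCONNECT; note the code also requires agreed_pro_version >= 96.  A
configuration sketch (option names as documented for drbd 8.3.9 and later;
values purely illustrative):

	net {
		protocol A;                # pull-ahead targets async replication
		on-congestion pull-ahead;  # or: block (default), disconnect
		congestion-fill 400M;      # -> net_conf->cong_fill
		congestion-extents 1000;   # -> net_conf->cong_extents
	}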
 
@@ -949,9 +1004,9 @@ allocate_barrier:
                 * stable storage, and this is a WRITE, we may not even submit
                 * this bio. */
                if (get_ldev(mdev)) {
-                       if (FAULT_ACTIVE(mdev, rw == WRITE ? DRBD_FAULT_DT_WR
-                                            : rw == READ  ? DRBD_FAULT_DT_RD
-                                            :               DRBD_FAULT_DT_RA))
+                       if (drbd_insert_fault(mdev,   rw == WRITE ? DRBD_FAULT_DT_WR
+                                                   : rw == READ  ? DRBD_FAULT_DT_RD
+                                                   :               DRBD_FAULT_DT_RA))
                                bio_endio(req->private_bio, -EIO);
                        else
                                generic_make_request(req->private_bio);
@@ -1018,16 +1073,19 @@ static int drbd_fail_request_early(struct drbd_conf *mdev, int is_write)
        return 0;
 }
 
-int drbd_make_request_26(struct request_queue *q, struct bio *bio)
+int drbd_make_request(struct request_queue *q, struct bio *bio)
 {
        unsigned int s_enr, e_enr;
        struct drbd_conf *mdev = (struct drbd_conf *) q->queuedata;
+       unsigned long start_time;
 
        if (drbd_fail_request_early(mdev, bio_data_dir(bio) & WRITE)) {
                bio_endio(bio, -EPERM);
                return 0;
        }
 
+       start_time = jiffies;
+
        /*
         * what we "blindly" assume:
         */
@@ -1042,12 +1100,12 @@ int drbd_make_request_26(struct request_queue *q, struct bio *bio)
 
        if (likely(s_enr == e_enr)) {
                inc_ap_bio(mdev, 1);
-               return drbd_make_request_common(mdev, bio);
+               return drbd_make_request_common(mdev, bio, start_time);
        }
 
        /* can this bio be split generically?
         * Maybe add our own split-arbitrary-bios function. */
-       if (bio->bi_vcnt != 1 || bio->bi_idx != 0 || bio->bi_size > DRBD_MAX_SEGMENT_SIZE) {
+       if (bio->bi_vcnt != 1 || bio->bi_idx != 0 || bio->bi_size > DRBD_MAX_BIO_SIZE) {
                /* rather error out here than BUG in bio_split */
                dev_err(DEV, "bio would need to, but cannot, be split: "
                    "(vcnt=%u,idx=%u,size=%u,sector=%llu)\n",
@@ -1069,11 +1127,7 @@ int drbd_make_request_26(struct request_queue *q, struct bio *bio)
                const int sps = 1 << HT_SHIFT; /* sectors per slot */
                const int mask = sps - 1;
                const sector_t first_sectors = sps - (sect & mask);
-               bp = bio_split(bio,
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
-                               bio_split_pool,
-#endif
-                               first_sectors);
+               bp = bio_split(bio, first_sectors);
 
                /* we need to get a "reference count" (ap_bio_cnt)
                 * to avoid races with the disconnect/reconnect/suspend code.
@@ -1084,10 +1138,10 @@ int drbd_make_request_26(struct request_queue *q, struct bio *bio)
 
                D_ASSERT(e_enr == s_enr + 1);
 
-               while (drbd_make_request_common(mdev, &bp->bio1))
+               while (drbd_make_request_common(mdev, &bp->bio1, start_time))
                        inc_ap_bio(mdev, 1);
 
-               while (drbd_make_request_common(mdev, &bp->bio2))
+               while (drbd_make_request_common(mdev, &bp->bio2, start_time))
                        inc_ap_bio(mdev, 1);
 
                dec_ap_bio(mdev);
@@ -1098,7 +1152,7 @@ int drbd_make_request_26(struct request_queue *q, struct bio *bio)
 }
 
 /* This is called by bio_add_page().  With this function we reduce
- * the number of BIOs that span over multiple DRBD_MAX_SEGMENT_SIZEs
+ * the number of BIOs that span over multiple DRBD_MAX_BIO_SIZEs
  * units (was AL_EXTENTs).
  *
  * we do the calculation within the lower 32bit of the byte offsets,
@@ -1108,7 +1162,7 @@ int drbd_make_request_26(struct request_queue *q, struct bio *bio)
  * As long as the BIO is empty we have to allow at least one bvec,
  * regardless of size and offset.  so the resulting bio may still
  * cross extent boundaries.  those are dealt with (bio_split) in
- * drbd_make_request_26.
+ * drbd_make_request.
  */
 int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct bio_vec *bvec)
 {
@@ -1118,8 +1172,8 @@ int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct
        unsigned int bio_size = bvm->bi_size;
        int limit, backing_limit;
 
-       limit = DRBD_MAX_SEGMENT_SIZE
-             - ((bio_offset & (DRBD_MAX_SEGMENT_SIZE-1)) + bio_size);
+       limit = DRBD_MAX_BIO_SIZE
+             - ((bio_offset & (DRBD_MAX_BIO_SIZE-1)) + bio_size);
        if (limit < 0)
                limit = 0;
        if (bio_size == 0) {
@@ -1136,3 +1190,42 @@ int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct
        }
        return limit;
 }
+
+void request_timer_fn(unsigned long data)
+{
+       struct drbd_conf *mdev = (struct drbd_conf *) data;
+       struct drbd_request *req; /* oldest request */
+       struct list_head *le;
+       unsigned long et = 0; /* effective timeout = ko_count * timeout */
+
+       if (get_net_conf(mdev)) {
+               et = mdev->net_conf->timeout*HZ/10 * mdev->net_conf->ko_count;
+               put_net_conf(mdev);
+       }
+       if (!et || mdev->state.conn < C_WF_REPORT_PARAMS)
+               return; /* Recurring timer stopped */
+
+       spin_lock_irq(&mdev->req_lock);
+       le = &mdev->oldest_tle->requests;
+       if (list_empty(le)) {
+               spin_unlock_irq(&mdev->req_lock);
+               mod_timer(&mdev->request_timer, jiffies + et);
+               return;
+       }
+
+       le = le->prev;
+       req = list_entry(le, struct drbd_request, tl_requests);
+       if (time_is_before_eq_jiffies(req->start_time + et)) {
+               if (req->rq_state & RQ_NET_PENDING) {
+                       dev_warn(DEV, "Remote failed to finish a request within ko-count * timeout\n");
+                       _drbd_set_state(_NS(mdev, conn, C_TIMEOUT), CS_VERBOSE, NULL);
+               } else {
+                       dev_warn(DEV, "Local backing block device frozen?\n");
+                       mod_timer(&mdev->request_timer, jiffies + et);
+               }
+       } else {
+               mod_timer(&mdev->request_timer, req->start_time + et);
+       }
+
+       spin_unlock_irq(&mdev->req_lock);
+}
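request_timer_fn() only fires if somebody arms it; the init and arming sites are outside this hunk, so the following is a sketch under the assumption that the usual timer_list wiring is used (names follow the fields referenced above):

        /* sketch: one-time setup, e.g. during device init */
        init_timer(&mdev->request_timer);
        mdev->request_timer.function = request_timer_fn;
        mdev->request_timer.data = (unsigned long) mdev;

        /* sketch: arm once a connection is established; the function
         * then re-arms itself until the effective timeout is unset or
         * the connection state drops below C_WF_REPORT_PARAMS */
        mod_timer(&mdev->request_timer, jiffies + HZ);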
diff --git a/drivers/block/drbd/drbd_req.h b/drivers/block/drbd/drbd_req.h
index ab2bd09d54b4bfc9fbf7bf74c2e46dbbeb389db5..32e2c3e6a8134220943873c671cb7ac6adf83a10 100644 (file)
@@ -82,14 +82,16 @@ enum drbd_req_event {
        to_be_submitted,
 
        /* XXX yes, now I am inconsistent...
-        * these two are not "events" but "actions"
+        * these are not "events" but "actions"
         * oh, well... */
        queue_for_net_write,
        queue_for_net_read,
+       queue_for_send_oos,
 
        send_canceled,
        send_failed,
        handed_over_to_network,
+       oos_handed_to_network,
        connection_lost_while_pending,
        read_retry_remote_canceled,
        recv_acked_by_peer,
@@ -289,7 +291,6 @@ static inline struct drbd_request *drbd_req_new(struct drbd_conf *mdev,
                req->epoch       = 0;
                req->sector      = bio_src->bi_sector;
                req->size        = bio_src->bi_size;
-               req->start_time  = jiffies;
                INIT_HLIST_NODE(&req->colision);
                INIT_LIST_HEAD(&req->tl_requests);
                INIT_LIST_HEAD(&req->w.list);
@@ -321,6 +322,7 @@ extern int __req_mod(struct drbd_request *req, enum drbd_req_event what,
                struct bio_and_error *m);
 extern void complete_master_bio(struct drbd_conf *mdev,
                struct bio_and_error *m);
+extern void request_timer_fn(unsigned long data);
 
 /* use this if you don't want to deal with calling complete_master_bio()
  * outside the spinlock, e.g. when walking some list on cleanup. */
@@ -338,23 +340,43 @@ static inline int _req_mod(struct drbd_request *req, enum drbd_req_event what)
        return rv;
 }
 
-/* completion of master bio is outside of spinlock.
- * If you need it irqsave, do it your self!
- * Which means: don't use from bio endio callback. */
+/* completion of master bio is outside of our spinlock.
+ * We still may or may not be inside some irqs disabled section
+ * of the lower level driver completion callback, so we need to
+ * spin_lock_irqsave here. */
 static inline int req_mod(struct drbd_request *req,
                enum drbd_req_event what)
 {
+       unsigned long flags;
        struct drbd_conf *mdev = req->mdev;
        struct bio_and_error m;
        int rv;
 
-       spin_lock_irq(&mdev->req_lock);
+       spin_lock_irqsave(&mdev->req_lock, flags);
        rv = __req_mod(req, what, &m);
-       spin_unlock_irq(&mdev->req_lock);
+       spin_unlock_irqrestore(&mdev->req_lock, flags);
 
        if (m.bio)
                complete_master_bio(mdev, &m);
 
        return rv;
 }
+
+static inline bool drbd_should_do_remote(union drbd_state s)
+{
+       return s.pdsk == D_UP_TO_DATE ||
+               (s.pdsk >= D_INCONSISTENT &&
+                s.conn >= C_WF_BITMAP_T &&
+                s.conn < C_AHEAD);
+       /* Before proto 96 that was >= CONNECTED instead of >= C_WF_BITMAP_T.
+          That is equivalent since before 96 IO was frozen in the C_WF_BITMAP*
+          states. */
+}
+static inline bool drbd_should_send_oos(union drbd_state s)
+{
+       return s.conn == C_AHEAD || s.conn == C_WF_BITMAP_S;
+       /* pdsk = D_INCONSISTENT as a consequence. Protocol 96 check not necessary
+          since we enter state C_AHEAD only if proto >= 96 */
+}
+
 #endif
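Taken together, the two new helpers partition the write path by connection state; a sketch of how a call site might use them (illustrative only, the real call site in drbd_req.c is not part of this hunk):

        union drbd_state s = mdev->state;

        if (drbd_should_do_remote(s))
                _req_mod(req, queue_for_net_write);   /* mirror the payload */
        else if (drbd_should_send_oos(s))
                _req_mod(req, queue_for_send_oos);    /* only tell the peer the block is dirty */
        /* else: purely local IO */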
diff --git a/drivers/block/drbd/drbd_strings.c b/drivers/block/drbd/drbd_strings.c
index 85179e1fb50a62ff6d7593e3204a4d19d835e6b3..c44a2a6027724ddaf373664728bf6b2b592b68b7 100644 (file)
@@ -48,6 +48,8 @@ static const char *drbd_conn_s_names[] = {
        [C_PAUSED_SYNC_T]    = "PausedSyncT",
        [C_VERIFY_S]         = "VerifyS",
        [C_VERIFY_T]         = "VerifyT",
+       [C_AHEAD]            = "Ahead",
+       [C_BEHIND]           = "Behind",
 };
 
 static const char *drbd_role_s_names[] = {
@@ -92,7 +94,7 @@ static const char *drbd_state_sw_errors[] = {
 const char *drbd_conn_str(enum drbd_conns s)
 {
        /* enums are unsigned... */
-       return s > C_PAUSED_SYNC_T ? "TOO_LARGE" : drbd_conn_s_names[s];
+       return s > C_BEHIND ? "TOO_LARGE" : drbd_conn_s_names[s];
 }
 
 const char *drbd_role_str(enum drbd_role s)
@@ -105,7 +107,7 @@ const char *drbd_disk_str(enum drbd_disk_state s)
        return s > D_UP_TO_DATE    ? "TOO_LARGE" : drbd_disk_s_names[s];
 }
 
-const char *drbd_set_st_err_str(enum drbd_state_ret_codes err)
+const char *drbd_set_st_err_str(enum drbd_state_rv err)
 {
        return err <= SS_AFTER_LAST_ERROR ? "TOO_SMALL" :
               err > SS_TWO_PRIMARIES ? "TOO_LARGE"
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index e027446590d3752d63d2301c7d65e17d749f7b13..f7e6c92f8d03d001c1a452fe92f8d03 100644 (file)
 #include "drbd_req.h"
 
 static int w_make_ov_request(struct drbd_conf *mdev, struct drbd_work *w, int cancel);
+static int w_make_resync_request(struct drbd_conf *mdev,
+                                struct drbd_work *w, int cancel);
 
 
 
-/* defined here:
-   drbd_md_io_complete
-   drbd_endio_sec
-   drbd_endio_pri
-
- * more endio handlers:
-   atodb_endio in drbd_actlog.c
-   drbd_bm_async_io_complete in drbd_bitmap.c
-
+/* endio handlers:
+ *   drbd_md_io_complete (defined here)
+ *   drbd_endio_pri (defined here)
+ *   drbd_endio_sec (defined here)
+ *   bm_async_io_complete (defined in drbd_bitmap.c)
+ *
  * For all these callbacks, note the following:
  * The callbacks will be called in irq context by the IDE drivers,
  * and in Softirqs/Tasklets/BH context by the SCSI drivers.
@@ -94,7 +93,7 @@ void drbd_endio_read_sec_final(struct drbd_epoch_entry *e) __releases(local)
        if (list_empty(&mdev->read_ee))
                wake_up(&mdev->ee_wait);
        if (test_bit(__EE_WAS_ERROR, &e->flags))
-               __drbd_chk_io_error(mdev, FALSE);
+               __drbd_chk_io_error(mdev, false);
        spin_unlock_irqrestore(&mdev->req_lock, flags);
 
        drbd_queue_work(&mdev->data.work, &e->w);
@@ -137,7 +136,7 @@ static void drbd_endio_write_sec_final(struct drbd_epoch_entry *e) __releases(lo
                : list_empty(&mdev->active_ee);
 
        if (test_bit(__EE_WAS_ERROR, &e->flags))
-               __drbd_chk_io_error(mdev, FALSE);
+               __drbd_chk_io_error(mdev, false);
        spin_unlock_irqrestore(&mdev->req_lock, flags);
 
        if (is_syncer_req)
@@ -163,14 +162,15 @@ void drbd_endio_sec(struct bio *bio, int error)
        int uptodate = bio_flagged(bio, BIO_UPTODATE);
        int is_write = bio_data_dir(bio) == WRITE;
 
-       if (error)
+       if (error && __ratelimit(&drbd_ratelimit_state))
                dev_warn(DEV, "%s: error=%d s=%llus\n",
                                is_write ? "write" : "read", error,
                                (unsigned long long)e->sector);
        if (!error && !uptodate) {
-               dev_warn(DEV, "%s: setting error to -EIO s=%llus\n",
-                               is_write ? "write" : "read",
-                               (unsigned long long)e->sector);
+               if (__ratelimit(&drbd_ratelimit_state))
+                       dev_warn(DEV, "%s: setting error to -EIO s=%llus\n",
+                                       is_write ? "write" : "read",
+                                       (unsigned long long)e->sector);
                /* strange behavior of some lower level drivers...
                 * fail the request by clearing the uptodate flag,
                 * but do not return any error?! */
@@ -250,13 +250,6 @@ int w_read_retry_remote(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
        return w_send_read_req(mdev, w, 0);
 }
 
-int w_resync_inactive(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
-{
-       ERR_IF(cancel) return 1;
-       dev_err(DEV, "resync inactive, but callback triggered??\n");
-       return 1; /* Simply ignore this! */
-}
-
 void drbd_csum_ee(struct drbd_conf *mdev, struct crypto_hash *tfm, struct drbd_epoch_entry *e, void *digest)
 {
        struct hash_desc desc;
@@ -355,7 +348,7 @@ static int read_for_csum(struct drbd_conf *mdev, sector_t sector, int size)
        if (!get_ldev(mdev))
                return -EIO;
 
-       if (drbd_rs_should_slow_down(mdev))
+       if (drbd_rs_should_slow_down(mdev, sector))
                goto defer;
 
        /* GFP_TRY, because if there is no memory available right now, this may
@@ -373,9 +366,10 @@ static int read_for_csum(struct drbd_conf *mdev, sector_t sector, int size)
        if (drbd_submit_ee(mdev, e, READ, DRBD_FAULT_RS_RD) == 0)
                return 0;
 
-       /* drbd_submit_ee currently fails for one reason only:
-        * not being able to allocate enough bios.
-        * Is dropping the connection going to help? */
+       /* If it failed because of ENOMEM, retry should help.  If it failed
+        * because bio_add_page failed (probably broken lower level driver),
+        * retry may or may not help.
+        * If it does not, you may need to force disconnect. */
        spin_lock_irq(&mdev->req_lock);
        list_del(&e->w.list);
        spin_unlock_irq(&mdev->req_lock);
@@ -386,26 +380,25 @@ defer:
        return -EAGAIN;
 }
 
-void resync_timer_fn(unsigned long data)
+int w_resync_timer(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
 {
-       struct drbd_conf *mdev = (struct drbd_conf *) data;
-       int queue;
-
-       queue = 1;
        switch (mdev->state.conn) {
        case C_VERIFY_S:
-               mdev->resync_work.cb = w_make_ov_request;
+               w_make_ov_request(mdev, w, cancel);
                break;
        case C_SYNC_TARGET:
-               mdev->resync_work.cb = w_make_resync_request;
+               w_make_resync_request(mdev, w, cancel);
                break;
-       default:
-               queue = 0;
-               mdev->resync_work.cb = w_resync_inactive;
        }
 
-       /* harmless race: list_empty outside data.work.q_lock */
-       if (list_empty(&mdev->resync_work.list) && queue)
+       return 1;
+}
+
+void resync_timer_fn(unsigned long data)
+{
+       struct drbd_conf *mdev = (struct drbd_conf *) data;
+
+       if (list_empty(&mdev->resync_work.list))
                drbd_queue_work(&mdev->data.work, &mdev->resync_work);
 }
 
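With w_resync_inactive gone, the timer no longer swaps callbacks; resync_work is expected to point at w_resync_timer permanently. A sketch of the wiring this implies (the actual assignment happens at init time, elsewhere in the series):

        mdev->resync_work.cb = w_resync_timer;
        init_timer(&mdev->resync_timer);
        mdev->resync_timer.function = resync_timer_fn;
        mdev->resync_timer.data = (unsigned long) mdev;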
@@ -438,7 +431,7 @@ static void fifo_add_val(struct fifo_buffer *fb, int value)
                fb->values[i] += value;
 }
 
-int drbd_rs_controller(struct drbd_conf *mdev)
+static int drbd_rs_controller(struct drbd_conf *mdev)
 {
        unsigned int sect_in;  /* Number of sectors that came in since the last turn */
        unsigned int want;     /* The number of sectors we want in the proxy */
@@ -492,29 +485,36 @@ int drbd_rs_controller(struct drbd_conf *mdev)
        return req_sect;
 }
 
-int w_make_resync_request(struct drbd_conf *mdev,
-               struct drbd_work *w, int cancel)
+static int drbd_rs_number_requests(struct drbd_conf *mdev)
+{
+       int number;
+       if (mdev->rs_plan_s.size) { /* mdev->sync_conf.c_plan_ahead */
+               number = drbd_rs_controller(mdev) >> (BM_BLOCK_SHIFT - 9);
+               mdev->c_sync_rate = number * HZ * (BM_BLOCK_SIZE / 1024) / SLEEP_TIME;
+       } else {
+               mdev->c_sync_rate = mdev->sync_conf.rate;
+               number = SLEEP_TIME * mdev->c_sync_rate  / ((BM_BLOCK_SIZE / 1024) * HZ);
+       }
+
+       /* ignore the amount of pending requests, the resync controller should
+        * throttle down to incoming reply rate soon enough anyways. */
+       return number;
+}
+
+static int w_make_resync_request(struct drbd_conf *mdev,
+                                struct drbd_work *w, int cancel)
 {
        unsigned long bit;
        sector_t sector;
        const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
-       int max_segment_size;
-       int number, rollback_i, size, pe, mx;
+       int max_bio_size;
+       int number, rollback_i, size;
        int align, queued, sndbuf;
        int i = 0;
 
        if (unlikely(cancel))
                return 1;
 
-       if (unlikely(mdev->state.conn < C_CONNECTED)) {
-               dev_err(DEV, "Confused in w_make_resync_request()! cstate < Connected");
-               return 0;
-       }
-
-       if (mdev->state.conn != C_SYNC_TARGET)
-               dev_err(DEV, "%s in w_make_resync_request\n",
-                       drbd_conn_str(mdev->state.conn));
-
        if (mdev->rs_total == 0) {
                /* empty resync? */
                drbd_resync_finished(mdev);
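To put numbers on the fixed-rate branch of drbd_rs_number_requests() added above (assuming SLEEP_TIME is HZ/10 and BM_BLOCK_SIZE is 4096, as defined elsewhere in DRBD): number = SLEEP_TIME * rate / ((BM_BLOCK_SIZE/1024) * HZ) reduces to rate/40, so a configured rate of 10240 KiB/s yields 256 requests per 100 ms tick, and 256 * 4 KiB * 10 ticks/s = 10240 KiB/s, matching the configured rate.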
@@ -527,49 +527,19 @@ int w_make_resync_request(struct drbd_conf *mdev,
                   to continue resync with a broken disk makes no sense at
                   all */
                dev_err(DEV, "Disk broke down during resync!\n");
-               mdev->resync_work.cb = w_resync_inactive;
                return 1;
        }
 
        /* starting with drbd 8.3.8, we can handle multi-bio EEs,
         * if it should be necessary */
-       max_segment_size =
-               mdev->agreed_pro_version < 94 ? queue_max_segment_size(mdev->rq_queue) :
-               mdev->agreed_pro_version < 95 ? DRBD_MAX_SIZE_H80_PACKET : DRBD_MAX_SEGMENT_SIZE;
+       max_bio_size =
+               mdev->agreed_pro_version < 94 ? queue_max_hw_sectors(mdev->rq_queue) << 9 :
+               mdev->agreed_pro_version < 95 ? DRBD_MAX_SIZE_H80_PACKET : DRBD_MAX_BIO_SIZE;
 
-       if (mdev->rs_plan_s.size) { /* mdev->sync_conf.c_plan_ahead */
-               number = drbd_rs_controller(mdev) >> (BM_BLOCK_SHIFT - 9);
-               mdev->c_sync_rate = number * HZ * (BM_BLOCK_SIZE / 1024) / SLEEP_TIME;
-       } else {
-               mdev->c_sync_rate = mdev->sync_conf.rate;
-               number = SLEEP_TIME * mdev->c_sync_rate  / ((BM_BLOCK_SIZE / 1024) * HZ);
-       }
-
-       /* Throttle resync on lower level disk activity, which may also be
-        * caused by application IO on Primary/SyncTarget.
-        * Keep this after the call to drbd_rs_controller, as that assumes
-        * to be called as precisely as possible every SLEEP_TIME,
-        * and would be confused otherwise. */
-       if (drbd_rs_should_slow_down(mdev))
+       number = drbd_rs_number_requests(mdev);
+       if (number == 0)
                goto requeue;
 
-       mutex_lock(&mdev->data.mutex);
-       if (mdev->data.socket)
-               mx = mdev->data.socket->sk->sk_rcvbuf / sizeof(struct p_block_req);
-       else
-               mx = 1;
-       mutex_unlock(&mdev->data.mutex);
-
-       /* For resync rates >160MB/sec, allow more pending RS requests */
-       if (number > mx)
-               mx = number;
-
-       /* Limit the number of pending RS requests to no more than the peer's receive buffer */
-       pe = atomic_read(&mdev->rs_pending_cnt);
-       if ((pe + number) > mx) {
-               number = mx - pe;
-       }
-
        for (i = 0; i < number; i++) {
                /* Stop generating RS requests, when half of the send buffer is filled */
                mutex_lock(&mdev->data.mutex);
@@ -588,16 +558,16 @@ next_sector:
                size = BM_BLOCK_SIZE;
                bit  = drbd_bm_find_next(mdev, mdev->bm_resync_fo);
 
-               if (bit == -1UL) {
+               if (bit == DRBD_END_OF_BITMAP) {
                        mdev->bm_resync_fo = drbd_bm_bits(mdev);
-                       mdev->resync_work.cb = w_resync_inactive;
                        put_ldev(mdev);
                        return 1;
                }
 
                sector = BM_BIT_TO_SECT(bit);
 
-               if (drbd_try_rs_begin_io(mdev, sector)) {
+               if (drbd_rs_should_slow_down(mdev, sector) ||
+                   drbd_try_rs_begin_io(mdev, sector)) {
                        mdev->bm_resync_fo = bit;
                        goto requeue;
                }
@@ -608,7 +578,7 @@ next_sector:
                        goto next_sector;
                }
 
-#if DRBD_MAX_SEGMENT_SIZE > BM_BLOCK_SIZE
+#if DRBD_MAX_BIO_SIZE > BM_BLOCK_SIZE
                /* try to find some adjacent bits.
                 * we stop if we have already the maximum req size.
                 *
@@ -618,7 +588,7 @@ next_sector:
                align = 1;
                rollback_i = i;
                for (;;) {
-                       if (size + BM_BLOCK_SIZE > max_segment_size)
+                       if (size + BM_BLOCK_SIZE > max_bio_size)
                                break;
 
                        /* Be always aligned */
@@ -685,7 +655,6 @@ next_sector:
                 * resync data block, and the last bit is cleared.
                 * until then resync "work" is "inactive" ...
                 */
-               mdev->resync_work.cb = w_resync_inactive;
                put_ldev(mdev);
                return 1;
        }
@@ -706,27 +675,18 @@ static int w_make_ov_request(struct drbd_conf *mdev, struct drbd_work *w, int ca
        if (unlikely(cancel))
                return 1;
 
-       if (unlikely(mdev->state.conn < C_CONNECTED)) {
-               dev_err(DEV, "Confused in w_make_ov_request()! cstate < Connected");
-               return 0;
-       }
-
-       number = SLEEP_TIME*mdev->sync_conf.rate / ((BM_BLOCK_SIZE/1024)*HZ);
-       if (atomic_read(&mdev->rs_pending_cnt) > number)
-               goto requeue;
-
-       number -= atomic_read(&mdev->rs_pending_cnt);
+       number = drbd_rs_number_requests(mdev);
 
        sector = mdev->ov_position;
        for (i = 0; i < number; i++) {
                if (sector >= capacity) {
-                       mdev->resync_work.cb = w_resync_inactive;
                        return 1;
                }
 
                size = BM_BLOCK_SIZE;
 
-               if (drbd_try_rs_begin_io(mdev, sector)) {
+               if (drbd_rs_should_slow_down(mdev, sector) ||
+                   drbd_try_rs_begin_io(mdev, sector)) {
                        mdev->ov_position = sector;
                        goto requeue;
                }
@@ -744,11 +704,33 @@ static int w_make_ov_request(struct drbd_conf *mdev, struct drbd_work *w, int ca
        mdev->ov_position = sector;
 
  requeue:
+       mdev->rs_in_flight += (i << (BM_BLOCK_SHIFT - 9));
        mod_timer(&mdev->resync_timer, jiffies + SLEEP_TIME);
        return 1;
 }
 
 
+void start_resync_timer_fn(unsigned long data)
+{
+       struct drbd_conf *mdev = (struct drbd_conf *) data;
+
+       drbd_queue_work(&mdev->data.work, &mdev->start_resync_work);
+}
+
+int w_start_resync(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
+{
+       if (atomic_read(&mdev->unacked_cnt) || atomic_read(&mdev->rs_pending_cnt)) {
+               dev_warn(DEV, "w_start_resync later...\n");
+               mdev->start_resync_timer.expires = jiffies + HZ/10;
+               add_timer(&mdev->start_resync_timer);
+               return 1;
+       }
+
+       drbd_start_resync(mdev, C_SYNC_SOURCE);
+       clear_bit(AHEAD_TO_SYNC_SOURCE, &mdev->current_epoch->flags);
+       return 1;
+}
+
 int w_ov_finished(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
 {
        kfree(w);
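w_start_resync and start_resync_timer_fn form a small deferral loop: the worker re-arms the timer until unacked_cnt and rs_pending_cnt drain to zero. A sketch of the one-time wiring this assumes (the init site is not in this hunk):

        mdev->start_resync_work.cb = w_start_resync;
        init_timer(&mdev->start_resync_timer);
        mdev->start_resync_timer.function = start_resync_timer_fn;
        mdev->start_resync_timer.data = (unsigned long) mdev;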
@@ -782,6 +764,7 @@ int drbd_resync_finished(struct drbd_conf *mdev)
        union drbd_state os, ns;
        struct drbd_work *w;
        char *khelper_cmd = NULL;
+       int verify_done = 0;
 
        /* Remove all elements from the resync LRU. Since future actions
         * might set bits in the (main) bitmap, then the entries in the
@@ -792,8 +775,7 @@ int drbd_resync_finished(struct drbd_conf *mdev)
                 * queue (or even the read operations for those packets
                 * is not finished by now).   Retry in 100ms. */
 
-               __set_current_state(TASK_INTERRUPTIBLE);
-               schedule_timeout(HZ / 10);
+               schedule_timeout_interruptible(HZ / 10);
                w = kmalloc(sizeof(struct drbd_work), GFP_ATOMIC);
                if (w) {
                        w->cb = w_resync_finished;
@@ -818,6 +800,8 @@ int drbd_resync_finished(struct drbd_conf *mdev)
        spin_lock_irq(&mdev->req_lock);
        os = mdev->state;
 
+       verify_done = (os.conn == C_VERIFY_S || os.conn == C_VERIFY_T);
+
        /* This protects us against multiple calls (that can happen in the presence
           of application IO), and against connectivity loss just before we arrive here. */
        if (os.conn <= C_CONNECTED)
@@ -827,8 +811,7 @@ int drbd_resync_finished(struct drbd_conf *mdev)
        ns.conn = C_CONNECTED;
 
        dev_info(DEV, "%s done (total %lu sec; paused %lu sec; %lu K/sec)\n",
-            (os.conn == C_VERIFY_S || os.conn == C_VERIFY_T) ?
-            "Online verify " : "Resync",
+            verify_done ? "Online verify " : "Resync",
             dt + mdev->rs_paused, mdev->rs_paused, dbdt);
 
        n_oos = drbd_bm_total_weight(mdev);
@@ -886,14 +869,18 @@ int drbd_resync_finished(struct drbd_conf *mdev)
                        }
                }
 
-               drbd_uuid_set_bm(mdev, 0UL);
-
-               if (mdev->p_uuid) {
-                       /* Now the two UUID sets are equal, update what we
-                        * know of the peer. */
-                       int i;
-                       for (i = UI_CURRENT ; i <= UI_HISTORY_END ; i++)
-                               mdev->p_uuid[i] = mdev->ldev->md.uuid[i];
+               if (!(os.conn == C_VERIFY_S || os.conn == C_VERIFY_T)) {
+                       /* for verify runs, we don't update uuids here,
+                        * so there would be nothing to report. */
+                       drbd_uuid_set_bm(mdev, 0UL);
+                       drbd_print_uuids(mdev, "updated UUIDs");
+                       if (mdev->p_uuid) {
+                               /* Now the two UUID sets are equal, update what we
+                                * know of the peer. */
+                               int i;
+                               for (i = UI_CURRENT ; i <= UI_HISTORY_END ; i++)
+                                       mdev->p_uuid[i] = mdev->ldev->md.uuid[i];
+                       }
                }
        }
 
@@ -905,15 +892,11 @@ out:
        mdev->rs_total  = 0;
        mdev->rs_failed = 0;
        mdev->rs_paused = 0;
-       mdev->ov_start_sector = 0;
+       if (verify_done)
+               mdev->ov_start_sector = 0;
 
        drbd_md_sync(mdev);
 
-       if (test_and_clear_bit(WRITE_BM_AFTER_RESYNC, &mdev->flags)) {
-               dev_info(DEV, "Writing the whole bitmap\n");
-               drbd_queue_bitmap_io(mdev, &drbd_bm_write, NULL, "write from resync_finished");
-       }
-
        if (khelper_cmd)
                drbd_khelper(mdev, khelper_cmd);
 
@@ -994,7 +977,9 @@ int w_e_end_rsdata_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
                put_ldev(mdev);
        }
 
-       if (likely((e->flags & EE_WAS_ERROR) == 0)) {
+       if (mdev->state.conn == C_AHEAD) {
+               ok = drbd_send_ack(mdev, P_RS_CANCEL, e);
+       } else if (likely((e->flags & EE_WAS_ERROR) == 0)) {
                if (likely(mdev->state.pdsk >= D_INCONSISTENT)) {
                        inc_rs_pending(mdev);
                        ok = drbd_send_block(mdev, P_RS_DATA_REPLY, e);
@@ -1096,25 +1081,27 @@ int w_e_end_ov_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
        if (unlikely(cancel))
                goto out;
 
-       if (unlikely((e->flags & EE_WAS_ERROR) != 0))
-               goto out;
-
        digest_size = crypto_hash_digestsize(mdev->verify_tfm);
-       /* FIXME if this allocation fails, online verify will not terminate! */
        digest = kmalloc(digest_size, GFP_NOIO);
-       if (digest) {
-               drbd_csum_ee(mdev, mdev->verify_tfm, e, digest);
-               inc_rs_pending(mdev);
-               ok = drbd_send_drequest_csum(mdev, e->sector, e->size,
-                                            digest, digest_size, P_OV_REPLY);
-               if (!ok)
-                       dec_rs_pending(mdev);
-               kfree(digest);
+       if (!digest) {
+               ok = 0; /* terminate the connection in case the allocation failed */
+               goto out;
        }
 
+       if (likely(!(e->flags & EE_WAS_ERROR)))
+               drbd_csum_ee(mdev, mdev->verify_tfm, e, digest);
+       else
+               memset(digest, 0, digest_size);
+
+       inc_rs_pending(mdev);
+       ok = drbd_send_drequest_csum(mdev, e->sector, e->size,
+                                    digest, digest_size, P_OV_REPLY);
+       if (!ok)
+               dec_rs_pending(mdev);
+       kfree(digest);
+
 out:
        drbd_free_ee(mdev, e);
-
        dec_unacked(mdev);
 
        return ok;
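Net effect of the rewrite above: a local read error during online verify no longer silently skips the block. The reply now carries an all-zero digest, the peer's digest comparison fails, the block is marked out of sync, and ov_left still counts down to zero so drbd_resync_finished() is reached, which is what resolves the old "online verify will not terminate" FIXME.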
@@ -1129,7 +1116,6 @@ void drbd_ov_oos_found(struct drbd_conf *mdev, sector_t sector, int size)
                mdev->ov_last_oos_size = size>>9;
        }
        drbd_set_out_of_sync(mdev, sector, size);
-       set_bit(WRITE_BM_AFTER_RESYNC, &mdev->flags);
 }
 
 int w_e_end_ov_reply(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
@@ -1165,10 +1151,6 @@ int w_e_end_ov_reply(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
                        eq = !memcmp(digest, di->digest, digest_size);
                        kfree(digest);
                }
-       } else {
-               ok = drbd_send_ack(mdev, P_NEG_RS_DREPLY, e);
-               if (__ratelimit(&drbd_ratelimit_state))
-                       dev_err(DEV, "Sending NegDReply. I guess it gets messy.\n");
        }
 
        dec_unacked(mdev);
@@ -1182,7 +1164,13 @@ int w_e_end_ov_reply(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
 
        drbd_free_ee(mdev, e);
 
-       if (--mdev->ov_left == 0) {
+       --mdev->ov_left;
+
+       /* let's advance progress step marks only for every other megabyte */
+       if ((mdev->ov_left & 0x200) == 0x200)
+               drbd_advance_rs_marks(mdev, mdev->ov_left);
+
+       if (mdev->ov_left == 0) {
                ov_oos_print(mdev);
                drbd_resync_finished(mdev);
        }
@@ -1235,6 +1223,22 @@ int w_send_write_hint(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
        return drbd_send_short_cmd(mdev, P_UNPLUG_REMOTE);
 }
 
+int w_send_oos(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
+{
+       struct drbd_request *req = container_of(w, struct drbd_request, w);
+       int ok;
+
+       if (unlikely(cancel)) {
+               req_mod(req, send_canceled);
+               return 1;
+       }
+
+       ok = drbd_send_oos(mdev, req);
+       req_mod(req, oos_handed_to_network);
+
+       return ok;
+}
+
 /**
  * w_send_dblock() - Worker callback to send a P_DATA packet in order to mirror a write request
  * @mdev:      DRBD device.
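Requests reach the new w_send_oos worker through the queue_for_send_oos action; a sketch of the hand-off as the state machine presumably performs it (the __req_mod() case lives in drbd_req.c, outside this hunk):

        /* sketch: inside __req_mod(), case queue_for_send_oos */
        req->w.cb = w_send_oos;
        drbd_queue_work(&mdev->data.work, &req->w);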
@@ -1430,6 +1434,17 @@ int drbd_alter_sa(struct drbd_conf *mdev, int na)
        return retcode;
 }
 
+void drbd_rs_controller_reset(struct drbd_conf *mdev)
+{
+       atomic_set(&mdev->rs_sect_in, 0);
+       atomic_set(&mdev->rs_sect_ev, 0);
+       mdev->rs_in_flight = 0;
+       mdev->rs_planed = 0;
+       spin_lock(&mdev->peer_seq_lock);
+       fifo_set(&mdev->rs_plan_s, 0);
+       spin_unlock(&mdev->peer_seq_lock);
+}
+
 /**
  * drbd_start_resync() - Start the resync process
  * @mdev:      DRBD device.
@@ -1443,13 +1458,18 @@ void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side)
        union drbd_state ns;
        int r;
 
-       if (mdev->state.conn >= C_SYNC_SOURCE) {
+       if (mdev->state.conn >= C_SYNC_SOURCE && mdev->state.conn < C_AHEAD) {
                dev_err(DEV, "Resync already running!\n");
                return;
        }
 
-       /* In case a previous resync run was aborted by an IO error/detach on the peer. */
-       drbd_rs_cancel_all(mdev);
+       if (mdev->state.conn < C_AHEAD) {
+               /* In case a previous resync run was aborted by an IO error/detach on the peer. */
+               drbd_rs_cancel_all(mdev);
+               /* This should be done when we abort the resync. We definitely do not
+                  want to have this for connections going back and forth between
+                  Ahead/Behind and SyncSource/SyncTarget */
+       }
 
        if (side == C_SYNC_TARGET) {
                /* Since application IO was locked out during C_WF_BITMAP_T and
@@ -1463,6 +1483,20 @@ void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side)
                        drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
                        return;
                }
+       } else /* C_SYNC_SOURCE */ {
+               r = drbd_khelper(mdev, "before-resync-source");
+               r = (r >> 8) & 0xff;
+               if (r > 0) {
+                       if (r == 3) {
+                               dev_info(DEV, "before-resync-source handler returned %d, "
+                                        "ignoring. Old userland tools?\n", r);
+                       } else {
+                               dev_info(DEV, "before-resync-source handler returned %d, "
+                                        "dropping connection.\n", r);
+                               drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
+                               return;
+                       }
+               }
        }
 
        drbd_state_lock(mdev);
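The (r >> 8) & 0xff in the new before-resync-source branch mirrors userspace WEXITSTATUS(): drbd_khelper() hands back a wait()-style status word, so the handler's exit code sits in bits 8..15. A sketch, assuming that convention:

        int status = drbd_khelper(mdev, "before-resync-source");
        int exit_code = (status >> 8) & 0xff;   /* ~ WEXITSTATUS(status) */
        /* exit_code == 3: tolerated (old userland tools);
         * any other non-zero exit code drops the connection */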
@@ -1472,18 +1506,6 @@ void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side)
                return;
        }
 
-       if (side == C_SYNC_TARGET) {
-               mdev->bm_resync_fo = 0;
-       } else /* side == C_SYNC_SOURCE */ {
-               u64 uuid;
-
-               get_random_bytes(&uuid, sizeof(u64));
-               drbd_uuid_set(mdev, UI_BITMAP, uuid);
-               drbd_send_sync_uuid(mdev, uuid);
-
-               D_ASSERT(mdev->state.disk == D_UP_TO_DATE);
-       }
-
        write_lock_irq(&global_state_lock);
        ns = mdev->state;
 
@@ -1521,13 +1543,24 @@ void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side)
                _drbd_pause_after(mdev);
        }
        write_unlock_irq(&global_state_lock);
-       put_ldev(mdev);
 
        if (r == SS_SUCCESS) {
                dev_info(DEV, "Began resync as %s (will sync %lu KB [%lu bits set]).\n",
                     drbd_conn_str(ns.conn),
                     (unsigned long) mdev->rs_total << (BM_BLOCK_SHIFT-10),
                     (unsigned long) mdev->rs_total);
+               if (side == C_SYNC_TARGET)
+                       mdev->bm_resync_fo = 0;
+
+               /* Since protocol 96, we must serialize drbd_gen_and_send_sync_uuid
+                * with w_send_oos, or the sync target will get confused as to
+                * how many bits to resync.  We cannot do that always, because for an
+                * empty resync and protocol < 95, we need to do it here, as we call
+                * drbd_resync_finished from here in that case.
+                * We call drbd_gen_and_send_sync_uuid here for protocol < 96,
+                * and from after_state_ch otherwise. */
+               if (side == C_SYNC_SOURCE && mdev->agreed_pro_version < 96)
+                       drbd_gen_and_send_sync_uuid(mdev);
 
                if (mdev->agreed_pro_version < 95 && mdev->rs_total == 0) {
                        /* This still has a race (about when exactly the peers
@@ -1547,13 +1580,7 @@ void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side)
                        drbd_resync_finished(mdev);
                }
 
-               atomic_set(&mdev->rs_sect_in, 0);
-               atomic_set(&mdev->rs_sect_ev, 0);
-               mdev->rs_in_flight = 0;
-               mdev->rs_planed = 0;
-               spin_lock(&mdev->peer_seq_lock);
-               fifo_set(&mdev->rs_plan_s, 0);
-               spin_unlock(&mdev->peer_seq_lock);
+               drbd_rs_controller_reset(mdev);
                /* ns.conn may already be != mdev->state.conn,
                 * we may have been paused in between, or become paused until
                 * the timer triggers.
@@ -1563,6 +1590,7 @@ void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side)
 
                drbd_md_sync(mdev);
        }
+       put_ldev(mdev);
        drbd_state_unlock(mdev);
 }
 
diff --git a/drivers/block/drbd/drbd_wrappers.h b/drivers/block/drbd/drbd_wrappers.h
index 53586fa5ae1b098686e6171ea821fa21dc1bdb1f..151f1a37478f954360cd4fe91767922acf9ec1e7 100644 (file)
@@ -39,7 +39,7 @@ static inline void drbd_generic_make_request(struct drbd_conf *mdev,
                return;
        }
 
-       if (FAULT_ACTIVE(mdev, fault_type))
+       if (drbd_insert_fault(mdev, fault_type))
                bio_endio(bio, -EIO);
        else
                generic_make_request(bio);
diff --git a/include/linux/drbd.h b/include/linux/drbd.h
index ef44c7a0638cdaaf087a65e01476aca73d29239b..d18d673ebc78e90a617670c80ca93e0f854ed70f 100644 (file)
 
 
 extern const char *drbd_buildtag(void);
-#define REL_VERSION "8.3.9"
+#define REL_VERSION "8.3.10"
 #define API_VERSION 88
 #define PRO_VERSION_MIN 86
-#define PRO_VERSION_MAX 95
+#define PRO_VERSION_MAX 96
 
 
 enum drbd_io_error_p {
@@ -96,8 +96,14 @@ enum drbd_on_no_data {
        OND_SUSPEND_IO
 };
 
+enum drbd_on_congestion {
+       OC_BLOCK,
+       OC_PULL_AHEAD,
+       OC_DISCONNECT,
+};
+
 /* KEEP the order, do not delete or insert. Only append. */
-enum drbd_ret_codes {
+enum drbd_ret_code {
        ERR_CODE_BASE           = 100,
        NO_ERROR                = 101,
        ERR_LOCAL_ADDR          = 102,
@@ -146,6 +152,9 @@ enum drbd_ret_codes {
        ERR_PERM                = 152,
        ERR_NEED_APV_93         = 153,
        ERR_STONITH_AND_PROT_A  = 154,
+       ERR_CONG_NOT_PROTO_A    = 155,
+       ERR_PIC_AFTER_DEP       = 156,
+       ERR_PIC_PEER_DEP        = 157,
 
        /* insert new ones above this line */
        AFTER_LAST_ERR_CODE
@@ -199,6 +208,10 @@ enum drbd_conns {
        C_VERIFY_T,
        C_PAUSED_SYNC_S,
        C_PAUSED_SYNC_T,
+
+       C_AHEAD,
+       C_BEHIND,
+
        C_MASK = 31
 };
 
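Appending C_AHEAD/C_BEHIND (rather than inserting) keeps existing on-wire values stable, and C_MASK = 31 implies the connection state must fit a 5-bit field in union drbd_state. An illustrative compile-time guard, not present in the tree:

        /* sketch: would trip if the enum ever outgrew the on-wire field;
         * BUILD_BUG_ON() must appear inside a function */
        BUILD_BUG_ON(C_BEHIND > C_MASK);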
@@ -259,7 +272,7 @@ union drbd_state {
        unsigned int i;
 };
 
-enum drbd_state_ret_codes {
+enum drbd_state_rv {
        SS_CW_NO_NEED = 4,
        SS_CW_SUCCESS = 3,
        SS_NOTHING_TO_DO = 2,
@@ -290,7 +303,7 @@ enum drbd_state_ret_codes {
 extern const char *drbd_conn_str(enum drbd_conns);
 extern const char *drbd_role_str(enum drbd_role);
 extern const char *drbd_disk_str(enum drbd_disk_state);
-extern const char *drbd_set_st_err_str(enum drbd_state_ret_codes);
+extern const char *drbd_set_st_err_str(enum drbd_state_rv);
 
 #define SHARED_SECRET_MAX 64
 
diff --git a/include/linux/drbd_limits.h b/include/linux/drbd_limits.h
index 4ac33f34b77e478d42cd4038d1259965a7434742..bb264a5732de7257727d66caeea01c13927e4790 100644 (file)
@@ -16,7 +16,8 @@
 #define DEBUG_RANGE_CHECK 0
 
 #define DRBD_MINOR_COUNT_MIN 1
-#define DRBD_MINOR_COUNT_MAX 255
+#define DRBD_MINOR_COUNT_MAX 256
+#define DRBD_MINOR_COUNT_DEF 32
 
 #define DRBD_DIALOG_REFRESH_MIN 0
 #define DRBD_DIALOG_REFRESH_MAX 600
 #define DRBD_AFTER_SB_2P_DEF ASB_DISCONNECT
 #define DRBD_RR_CONFLICT_DEF ASB_DISCONNECT
 #define DRBD_ON_NO_DATA_DEF OND_IO_ERROR
+#define DRBD_ON_CONGESTION_DEF OC_BLOCK
 
 #define DRBD_MAX_BIO_BVECS_MIN 0
 #define DRBD_MAX_BIO_BVECS_MAX 128
 #define DRBD_C_MIN_RATE_MAX     (4 << 20)
 #define DRBD_C_MIN_RATE_DEF     4096
 
+#define DRBD_CONG_FILL_MIN     0
+#define DRBD_CONG_FILL_MAX     (10<<21) /* 10GByte in sectors */
+#define DRBD_CONG_FILL_DEF     0
+
+#define DRBD_CONG_EXTENTS_MIN  DRBD_AL_EXTENTS_MIN
+#define DRBD_CONG_EXTENTS_MAX  DRBD_AL_EXTENTS_MAX
+#define DRBD_CONG_EXTENTS_DEF  DRBD_AL_EXTENTS_DEF
+
 #undef RANGE
 #endif
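Sanity check on the new bound: cong_fill is measured in 512-byte sectors, and 10 * 2^30 bytes / 2^9 bytes per sector = 10 * 2^21 sectors, which is exactly the (10<<21) above.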
diff --git a/include/linux/drbd_nl.h b/include/linux/drbd_nl.h
index ade91107c9a5031f186257844dd8bfa03d95b572..ab6159e4fcf0c6122cf840aef801fef822b0ab46 100644 (file)
@@ -56,6 +56,9 @@ NL_PACKET(net_conf, 5,
        NL_INTEGER(     39,     T_MAY_IGNORE,   rr_conflict)
        NL_INTEGER(     40,     T_MAY_IGNORE,   ping_timeo)
        NL_INTEGER(     67,     T_MAY_IGNORE,   rcvbuf_size)
+       NL_INTEGER(     81,     T_MAY_IGNORE,   on_congestion)
+       NL_INTEGER(     82,     T_MAY_IGNORE,   cong_fill)
+       NL_INTEGER(     83,     T_MAY_IGNORE,   cong_extents)
          /* 59 addr_family was available in GIT, never released */
        NL_BIT(         60,     T_MANDATORY,    mind_af)
        NL_BIT(         27,     T_MAY_IGNORE,   want_lose)
@@ -66,7 +69,9 @@ NL_PACKET(net_conf, 5,
        NL_BIT(         70,     T_MANDATORY,    dry_run)
 )
 
-NL_PACKET(disconnect, 6, )
+NL_PACKET(disconnect, 6,
+       NL_BIT(         84,     T_MAY_IGNORE,   force)
+)
 
 NL_PACKET(resize, 7,
        NL_INT64(               29,     T_MAY_IGNORE,   resize_size)
@@ -143,9 +148,13 @@ NL_PACKET(new_c_uuid, 26,
        NL_BIT(         63,     T_MANDATORY,    clear_bm)
 )
 
+#ifdef NL_RESPONSE
+NL_RESPONSE(return_code_only, 27)
+#endif
+
 #undef NL_PACKET
 #undef NL_INTEGER
 #undef NL_INT64
 #undef NL_BIT
 #undef NL_STRING
-
+#undef NL_RESPONSE
diff --git a/include/linux/drbd_tag_magic.h b/include/linux/drbd_tag_magic.h
index fcdff8410e99ddad7f1dac756bd915aefce5c494..f14a165e82dc162ef0306d6dc8d9d7266a816387 100644 (file)
@@ -7,6 +7,7 @@
 /* declare packet_type enums */
 enum packet_types {
 #define NL_PACKET(name, number, fields) P_ ## name = number,
+#define NL_RESPONSE(name, number) P_ ## name = number,
 #define NL_INTEGER(pn, pr, member)
 #define NL_INT64(pn, pr, member)
 #define NL_BIT(pn, pr, member)
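drbd_nl.h is an X-macro header: every includer defines NL_PACKET and friends before including it, and the new NL_RESPONSE hook extends that contract (hence the header now #undefs it for the next includer, as the drbd_nl.h hunk above shows). Purely as an illustration of the technique, a second expansion could generate one struct per packet; the details below are a sketch, not the tree's actual expansion:

/* sketch: re-including the same header with struct-generating macros */
#define NL_PACKET(name, number, fields) struct name { fields };
#define NL_RESPONSE(name, number)       /* responses carry no payload */
#define NL_INTEGER(pn, pr, member)      int member;
#define NL_INT64(pn, pr, member)        __u64 member;
#define NL_BIT(pn, pr, member)          unsigned member:1;
#define NL_STRING(pn, pr, member, len)  unsigned char member[len]; int member ## _len;
#include <linux/drbd_nl.h>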