drbd: when receiving P_TRIM, zero-out partial unaligned chunks
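The diff below is the drbd_nl.c side of that work: the backing device's discard capabilities are re-examined when queue parameters are set up (drbd_reconsider_queue_parameters), and the resync discard granularity is sanitized against those capabilities (sanitize_disk_conf). The actual handling of a received P_TRIM is not in this file. As a rough, self-contained illustration of the idea named in the subject, the following userspace C sketch splits a trimmed byte range into an unaligned head and tail to be zeroed and an aligned middle that can be discarded; every identifier in it is made up for illustration and none is DRBD's. A second sketch after the sanitize_disk_conf hunk below walks through the granularity clamping that function performs.

/*
 * Illustrative sketch only (not DRBD code): split a trim request into a
 * zeroed head, an aligned middle that may be discarded, and a zeroed tail.
 * "granularity" stands for the backing device's discard granularity in bytes.
 */
#include <stdint.h>
#include <stdio.h>

struct trim_split {
	uint64_t zero_head;   /* bytes to zero before the first aligned boundary */
	uint64_t discard_len; /* aligned middle that may be discarded */
	uint64_t zero_tail;   /* bytes to zero after the last aligned boundary */
};

static struct trim_split split_trim(uint64_t start, uint64_t len, uint64_t granularity)
{
	struct trim_split s = { 0, 0, 0 };
	uint64_t first = (start + granularity - 1) / granularity * granularity;
	uint64_t last  = (start + len) / granularity * granularity;

	if (last <= first) {
		/* The range never covers a full chunk: zero all of it. */
		s.zero_head = len;
		return s;
	}
	s.zero_head   = first - start;
	s.discard_len = last - first;
	s.zero_tail   = start + len - last;
	return s;
}

int main(void)
{
	/* e.g. a 1 MiB trim starting 4 KiB into a device with 64 KiB granularity */
	struct trim_split s = split_trim(4096, 1 << 20, 64 << 10);

	printf("zero %llu, discard %llu, zero %llu\n",
	       (unsigned long long)s.zero_head,
	       (unsigned long long)s.discard_len,
	       (unsigned long long)s.zero_tail);
	return 0;
}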
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index 0bac9c8246bc40cf47c330aa8db7cb7410932aaf..8d757d6f21e79f291838eecfb1c36d89a6f38b15 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -343,7 +343,7 @@ int drbd_khelper(struct drbd_device *device, char *cmd)
                         (char[20]) { }, /* address family */
                         (char[60]) { }, /* address */
                        NULL };
-       char mb[12];
+       char mb[14];
        char *argv[] = {usermode_helper, cmd, mb, NULL };
        struct drbd_connection *connection = first_peer_device(device)->connection;
        struct sib_info sib;
@@ -352,7 +352,7 @@ int drbd_khelper(struct drbd_device *device, char *cmd)
        if (current == connection->worker.task)
                set_bit(CALLBACK_PENDING, &connection->flags);
 
-       snprintf(mb, 12, "minor-%d", device_to_minor(device));
+       snprintf(mb, 14, "minor-%d", device_to_minor(device));
        setup_khelper_env(connection, envp);
 
        /* The helper may take some time.
@@ -1161,13 +1161,17 @@ static void drbd_setup_queue_param(struct drbd_device *device, struct drbd_backi
        unsigned int max_hw_sectors = max_bio_size >> 9;
        unsigned int max_segments = 0;
        struct request_queue *b = NULL;
+       struct disk_conf *dc;
+       bool discard_zeroes_if_aligned = true;
 
        if (bdev) {
                b = bdev->backing_bdev->bd_disk->queue;
 
                max_hw_sectors = min(queue_max_hw_sectors(b), max_bio_size >> 9);
                rcu_read_lock();
-               max_segments = rcu_dereference(device->ldev->disk_conf)->max_bio_bvecs;
+               dc = rcu_dereference(device->ldev->disk_conf);
+               max_segments = dc->max_bio_bvecs;
+               discard_zeroes_if_aligned = dc->discard_zeroes_if_aligned;
                rcu_read_unlock();
 
                blk_set_stacking_limits(&q->limits);
@@ -1185,7 +1189,7 @@ static void drbd_setup_queue_param(struct drbd_device *device, struct drbd_backi
 
                blk_queue_max_discard_sectors(q, DRBD_MAX_DISCARD_SECTORS);
 
-               if (blk_queue_discard(b) &&
+               if (blk_queue_discard(b) && (b->limits.discard_zeroes_data || discard_zeroes_if_aligned) &&
                    (connection->cstate < C_CONNECTED || connection->agreed_features & FF_TRIM)) {
                        /* We don't care, stacking below should fix it for the local device.
                         * Whether or not it is a suitable granularity on the remote device
@@ -1216,7 +1220,7 @@ static void drbd_setup_queue_param(struct drbd_device *device, struct drbd_backi
        }
 }
 
-void drbd_reconsider_max_bio_size(struct drbd_device *device, struct drbd_backing_dev *bdev)
+void drbd_reconsider_queue_parameters(struct drbd_device *device, struct drbd_backing_dev *bdev)
 {
        unsigned int now, new, local, peer;
 
@@ -1348,6 +1352,40 @@ static bool write_ordering_changed(struct disk_conf *a, struct disk_conf *b)
                a->disk_drain != b->disk_drain;
 }
 
+static void sanitize_disk_conf(struct drbd_device *device, struct disk_conf *disk_conf,
+                              struct drbd_backing_dev *nbc)
+{
+       struct request_queue * const q = nbc->backing_bdev->bd_disk->queue;
+
+       if (disk_conf->al_extents < DRBD_AL_EXTENTS_MIN)
+               disk_conf->al_extents = DRBD_AL_EXTENTS_MIN;
+       if (disk_conf->al_extents > drbd_al_extents_max(nbc))
+               disk_conf->al_extents = drbd_al_extents_max(nbc);
+
+       if (!blk_queue_discard(q) || !q->limits.discard_zeroes_data) {
+               disk_conf->rs_discard_granularity = 0; /* disable feature */
+               drbd_info(device, "rs_discard_granularity feature disabled\n");
+       }
+
+       if (disk_conf->rs_discard_granularity) {
+               int orig_value = disk_conf->rs_discard_granularity;
+               int remainder;
+
+               if (q->limits.discard_granularity > disk_conf->rs_discard_granularity)
+                       disk_conf->rs_discard_granularity = q->limits.discard_granularity;
+
+               remainder = disk_conf->rs_discard_granularity % q->limits.discard_granularity;
+               disk_conf->rs_discard_granularity += remainder;
+
+               if (disk_conf->rs_discard_granularity > q->limits.max_discard_sectors << 9)
+                       disk_conf->rs_discard_granularity = q->limits.max_discard_sectors << 9;
+
+               if (disk_conf->rs_discard_granularity != orig_value)
+                       drbd_info(device, "rs_discard_granularity changed to %d\n",
+                                 disk_conf->rs_discard_granularity);
+       }
+}
+
 int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
 {
        struct drbd_config_context adm_ctx;
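Illustrative aside, not part of the patch: the clamping that sanitize_disk_conf() applies to rs_discard_granularity can be followed with concrete numbers. The standalone sketch below mirrors its three steps against the backing queue's limits: raise the requested value to at least the device's discard_granularity, bump it by the remainder modulo that granularity, and cap it at max_discard_sectors expressed in bytes. The values used are examples only.

/* Userspace walk-through of the rs_discard_granularity clamping above. */
#include <stdio.h>

int main(void)
{
	unsigned int rs_discard_granularity = 16 << 10;  /* requested: 16 KiB */
	unsigned int discard_granularity    = 64 << 10;  /* backing device: 64 KiB */
	unsigned int max_discard_bytes      = 8 << 20;   /* max_discard_sectors << 9 */
	unsigned int remainder;

	/* never go below the backing device's discard granularity */
	if (discard_granularity > rs_discard_granularity)
		rs_discard_granularity = discard_granularity;

	/* bump by the remainder w.r.t. the backing granularity, as the patch does */
	remainder = rs_discard_granularity % discard_granularity;
	rs_discard_granularity += remainder;

	/* never exceed the largest discard the device accepts */
	if (rs_discard_granularity > max_discard_bytes)
		rs_discard_granularity = max_discard_bytes;

	printf("effective rs_discard_granularity: %u bytes\n", rs_discard_granularity);
	return 0;
}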
@@ -1395,10 +1433,7 @@ int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
        if (!expect(new_disk_conf->resync_rate >= 1))
                new_disk_conf->resync_rate = 1;
 
-       if (new_disk_conf->al_extents < DRBD_AL_EXTENTS_MIN)
-               new_disk_conf->al_extents = DRBD_AL_EXTENTS_MIN;
-       if (new_disk_conf->al_extents > drbd_al_extents_max(device->ldev))
-               new_disk_conf->al_extents = drbd_al_extents_max(device->ldev);
+       sanitize_disk_conf(device, new_disk_conf, device->ldev);
 
        if (new_disk_conf->c_plan_ahead > DRBD_C_PLAN_AHEAD_MAX)
                new_disk_conf->c_plan_ahead = DRBD_C_PLAN_AHEAD_MAX;
@@ -1457,6 +1492,9 @@ int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
        if (write_ordering_changed(old_disk_conf, new_disk_conf))
                drbd_bump_write_ordering(device->resource, NULL, WO_BDEV_FLUSH);
 
+       if (old_disk_conf->discard_zeroes_if_aligned != new_disk_conf->discard_zeroes_if_aligned)
+               drbd_reconsider_queue_parameters(device, device->ldev);
+
        drbd_md_sync(device);
 
        if (device->state.conn >= C_CONNECTED) {
@@ -1693,10 +1731,7 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
        if (retcode != NO_ERROR)
                goto fail;
 
-       if (new_disk_conf->al_extents < DRBD_AL_EXTENTS_MIN)
-               new_disk_conf->al_extents = DRBD_AL_EXTENTS_MIN;
-       if (new_disk_conf->al_extents > drbd_al_extents_max(nbc))
-               new_disk_conf->al_extents = drbd_al_extents_max(nbc);
+       sanitize_disk_conf(device, new_disk_conf, nbc);
 
        if (drbd_get_max_capacity(nbc) < new_disk_conf->disk_size) {
                drbd_err(device, "max capacity %llu smaller than disk size %llu\n",
@@ -1838,7 +1873,7 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
        device->read_cnt = 0;
        device->writ_cnt = 0;
 
-       drbd_reconsider_max_bio_size(device, device->ldev);
+       drbd_reconsider_queue_parameters(device, device->ldev);
 
        /* If I am currently not R_PRIMARY,
         * but meta data primary indicator is set,