drivers/block/drbd/drbd_main.c
1 /*
2    drbd.c
3
4    This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
5
6    Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
7    Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
8    Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
9
10    Thanks to Carter Burden, Bart Grantham and Gennadiy Nerubayev
11    from Logicworks, Inc. for making SDP replication support possible.
12
13    drbd is free software; you can redistribute it and/or modify
14    it under the terms of the GNU General Public License as published by
15    the Free Software Foundation; either version 2, or (at your option)
16    any later version.
17
18    drbd is distributed in the hope that it will be useful,
19    but WITHOUT ANY WARRANTY; without even the implied warranty of
20    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
21    GNU General Public License for more details.
22
23    You should have received a copy of the GNU General Public License
24    along with drbd; see the file COPYING.  If not, write to
25    the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
26
27  */
28
29 #include <linux/module.h>
30 #include <linux/drbd.h>
31 #include <asm/uaccess.h>
32 #include <asm/types.h>
33 #include <net/sock.h>
34 #include <linux/ctype.h>
35 #include <linux/mutex.h>
36 #include <linux/fs.h>
37 #include <linux/file.h>
38 #include <linux/proc_fs.h>
39 #include <linux/init.h>
40 #include <linux/mm.h>
41 #include <linux/memcontrol.h>
42 #include <linux/mm_inline.h>
43 #include <linux/slab.h>
44 #include <linux/random.h>
45 #include <linux/reboot.h>
46 #include <linux/notifier.h>
47 #include <linux/kthread.h>
48
49 #define __KERNEL_SYSCALLS__
50 #include <linux/unistd.h>
51 #include <linux/vmalloc.h>
52
53 #include <linux/drbd_limits.h>
54 #include "drbd_int.h"
55 #include "drbd_req.h" /* only for _req_mod in tl_release and tl_clear */
56
57 #include "drbd_vli.h"
58
59 struct after_state_chg_work {
60         struct drbd_work w;
61         union drbd_state os;
62         union drbd_state ns;
63         enum chg_state_flags flags;
64         struct completion *done;
65 };
66
67 static DEFINE_MUTEX(drbd_main_mutex);
68 int drbdd_init(struct drbd_thread *);
69 int drbd_worker(struct drbd_thread *);
70 int drbd_asender(struct drbd_thread *);
71
72 int drbd_init(void);
73 static int drbd_open(struct block_device *bdev, fmode_t mode);
74 static int drbd_release(struct gendisk *gd, fmode_t mode);
75 static int w_after_state_ch(struct drbd_conf *mdev, struct drbd_work *w, int unused);
76 static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
77                            union drbd_state ns, enum chg_state_flags flags);
78 static int w_md_sync(struct drbd_conf *mdev, struct drbd_work *w, int unused);
79 static void md_sync_timer_fn(unsigned long data);
80 static int w_bitmap_io(struct drbd_conf *mdev, struct drbd_work *w, int unused);
81 static int w_go_diskless(struct drbd_conf *mdev, struct drbd_work *w, int unused);
82
83 MODULE_AUTHOR("Philipp Reisner <phil@linbit.com>, "
84               "Lars Ellenberg <lars@linbit.com>");
85 MODULE_DESCRIPTION("drbd - Distributed Replicated Block Device v" REL_VERSION);
86 MODULE_VERSION(REL_VERSION);
87 MODULE_LICENSE("GPL");
88 MODULE_PARM_DESC(minor_count, "Maximum number of drbd devices ("
89                  __stringify(DRBD_MINOR_COUNT_MIN) "-" __stringify(DRBD_MINOR_COUNT_MAX) ")");
90 MODULE_ALIAS_BLOCKDEV_MAJOR(DRBD_MAJOR);
91
92 #include <linux/moduleparam.h>
93 /* allow_open_on_secondary */
94 MODULE_PARM_DESC(allow_oos, "DONT USE!");
95 /* thanks to these macros, if compiled into the kernel (not-module),
96  * this becomes the boot parameter drbd.minor_count */
97 module_param(minor_count, uint, 0444);
98 module_param(disable_sendpage, bool, 0644);
99 module_param(allow_oos, bool, 0);
100 module_param(cn_idx, uint, 0444);
101 module_param(proc_details, int, 0644);
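/* Illustrative note (added, not part of the original source): because of the
 * module_param() declarations above, these settings also work as boot
 * parameters with a "drbd." prefix when DRBD is built into the kernel, e.g.
 * a hypothetical command line fragment:
 *
 *     drbd.minor_count=8 drbd.disable_sendpage=1
 *
 * The values are examples only; the valid range of minor_count is given by
 * DRBD_MINOR_COUNT_MIN/_MAX from drbd_limits.h (see MODULE_PARM_DESC above).
 */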
102
103 #ifdef CONFIG_DRBD_FAULT_INJECTION
104 int enable_faults;
105 int fault_rate;
106 static int fault_count;
107 int fault_devs;
108 /* bitmap of enabled faults */
109 module_param(enable_faults, int, 0664);
110 /* fault rate % value - applies to all enabled faults */
111 module_param(fault_rate, int, 0664);
112 /* count of faults inserted */
113 module_param(fault_count, int, 0664);
114 /* bitmap of devices to insert faults on */
115 module_param(fault_devs, int, 0644);
116 #endif
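/* Illustrative note (added, not part of the original source): when built with
 * CONFIG_DRBD_FAULT_INJECTION, the writable parameters above are also
 * reachable at runtime through sysfs, e.g. (hypothetical values):
 *
 *     echo 16 > /sys/module/drbd/parameters/enable_faults  # one fault type
 *     echo 10 > /sys/module/drbd/parameters/fault_rate     # ~10% fault rate
 *     echo 1  > /sys/module/drbd/parameters/fault_devs     # minor 0 only
 *
 * fault_count reports how many faults have been inserted so far.
 */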
117
118 /* module parameter, defined */
119 unsigned int minor_count = DRBD_MINOR_COUNT_DEF;
120 int disable_sendpage;
121 int allow_oos;
122 unsigned int cn_idx = CN_IDX_DRBD;
123 int proc_details;       /* Detail level in /proc/drbd */
124
125 /* Module parameter for setting the user mode helper program
126  * to run. Default is /sbin/drbdadm */
127 char usermode_helper[80] = "/sbin/drbdadm";
128
129 module_param_string(usermode_helper, usermode_helper, sizeof(usermode_helper), 0644);
130
131 /* in 2.6.x, our device mapping and config info contains our virtual gendisks
132  * as member "struct gendisk *vdisk;"
133  */
134 struct drbd_conf **minor_table;
135
136 struct kmem_cache *drbd_request_cache;
137 struct kmem_cache *drbd_ee_cache;       /* epoch entries */
138 struct kmem_cache *drbd_bm_ext_cache;   /* bitmap extents */
139 struct kmem_cache *drbd_al_ext_cache;   /* activity log extents */
140 mempool_t *drbd_request_mempool;
141 mempool_t *drbd_ee_mempool;
142
143 /* I do not use a standard mempool, because:
144    1) I want to hand out the pre-allocated objects first.
145    2) I want to be able to interrupt sleeping allocation with a signal.
146    Note: This is a singly linked list; the next pointer is the private
147          member of struct page.
148  */
149 struct page *drbd_pp_pool;
150 spinlock_t   drbd_pp_lock;
151 int          drbd_pp_vacant;
152 wait_queue_head_t drbd_pp_wait;
153
154 DEFINE_RATELIMIT_STATE(drbd_ratelimit_state, 5 * HZ, 5);
155
156 static const struct block_device_operations drbd_ops = {
157         .owner =   THIS_MODULE,
158         .open =    drbd_open,
159         .release = drbd_release,
160 };
161
162 #define ARRY_SIZE(A) (sizeof(A)/sizeof(A[0]))
163
164 #ifdef __CHECKER__
165 /* When checking with sparse, and this is an inline function, sparse will
166    give tons of false positives. When this is a real function, sparse works.
167  */
168 int _get_ldev_if_state(struct drbd_conf *mdev, enum drbd_disk_state mins)
169 {
170         int io_allowed;
171
172         atomic_inc(&mdev->local_cnt);
173         io_allowed = (mdev->state.disk >= mins);
174         if (!io_allowed) {
175                 if (atomic_dec_and_test(&mdev->local_cnt))
176                         wake_up(&mdev->misc_wait);
177         }
178         return io_allowed;
179 }
180
181 #endif
182
183 /**
184  * DOC: The transfer log
185  *
186  * The transfer log is a singly linked list of &struct drbd_tl_epoch objects.
187  * mdev->newest_tle points to the head, mdev->oldest_tle points to the tail
188  * of the list. There is always at least one &struct drbd_tl_epoch object.
189  *
190  * Each &struct drbd_tl_epoch has a circular doubly linked list of requests
191  * attached.
192  */
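/* Illustrative sketch (added for clarity, not part of the original source):
 * the epoch objects are chained from oldest to newest through their ->next
 * pointers, the newest epoch's ->next being NULL:
 *
 *     mdev->oldest_tle --> epoch --> ... --> mdev->newest_tle --> NULL
 *
 * Each epoch's ->requests list head links the struct drbd_request objects of
 * that write epoch via their tl_requests member (see tl_release() below).
 */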
193 static int tl_init(struct drbd_conf *mdev)
194 {
195         struct drbd_tl_epoch *b;
196
197         /* during device minor initialization, we may well use GFP_KERNEL */
198         b = kmalloc(sizeof(struct drbd_tl_epoch), GFP_KERNEL);
199         if (!b)
200                 return 0;
201         INIT_LIST_HEAD(&b->requests);
202         INIT_LIST_HEAD(&b->w.list);
203         b->next = NULL;
204         b->br_number = 4711;
205         b->n_writes = 0;
206         b->w.cb = NULL; /* if this is != NULL, we need to dec_ap_pending in tl_clear */
207
208         mdev->oldest_tle = b;
209         mdev->newest_tle = b;
210         INIT_LIST_HEAD(&mdev->out_of_sequence_requests);
211
212         mdev->tl_hash = NULL;
213         mdev->tl_hash_s = 0;
214
215         return 1;
216 }
217
218 static void tl_cleanup(struct drbd_conf *mdev)
219 {
220         D_ASSERT(mdev->oldest_tle == mdev->newest_tle);
221         D_ASSERT(list_empty(&mdev->out_of_sequence_requests));
222         kfree(mdev->oldest_tle);
223         mdev->oldest_tle = NULL;
224         kfree(mdev->unused_spare_tle);
225         mdev->unused_spare_tle = NULL;
226         kfree(mdev->tl_hash);
227         mdev->tl_hash = NULL;
228         mdev->tl_hash_s = 0;
229 }
230
231 /**
232  * _tl_add_barrier() - Adds a barrier to the transfer log
233  * @mdev:       DRBD device.
234  * @new:        Barrier to be added before the current head of the TL.
235  *
236  * The caller must hold the req_lock.
237  */
238 void _tl_add_barrier(struct drbd_conf *mdev, struct drbd_tl_epoch *new)
239 {
240         struct drbd_tl_epoch *newest_before;
241
242         INIT_LIST_HEAD(&new->requests);
243         INIT_LIST_HEAD(&new->w.list);
244         new->w.cb = NULL; /* if this is != NULL, we need to dec_ap_pending in tl_clear */
245         new->next = NULL;
246         new->n_writes = 0;
247
248         newest_before = mdev->newest_tle;
249         /* never send a barrier number == 0, because that is special-cased
250          * when using TCQ for our write ordering code */
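        /* (Clarifying note, not in the original source: the barrier number is
         * unsigned (it is compared against an unsigned int and printed with
         * %u in tl_release()), so after 0xffffffff the increment wraps to 0;
         * the GNU "x ?: y" shorthand below then substitutes 1, skipping the
         * reserved value 0.) */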
251         new->br_number = (newest_before->br_number+1) ?: 1;
252         if (mdev->newest_tle != new) {
253                 mdev->newest_tle->next = new;
254                 mdev->newest_tle = new;
255         }
256 }
257
258 /**
259  * tl_release() - Free or recycle the oldest &struct drbd_tl_epoch object of the TL
260  * @mdev:       DRBD device.
261  * @barrier_nr: Expected identifier of the DRBD write barrier packet.
262  * @set_size:   Expected number of requests before that barrier.
263  *
264  * In case the passed barrier_nr or set_size does not match the oldest
265  * &struct drbd_tl_epoch objects this function will cause a termination
266  * of the connection.
267  */
268 void tl_release(struct drbd_conf *mdev, unsigned int barrier_nr,
269                        unsigned int set_size)
270 {
271         struct drbd_tl_epoch *b, *nob; /* next old barrier */
272         struct list_head *le, *tle;
273         struct drbd_request *r;
274
275         spin_lock_irq(&mdev->req_lock);
276
277         b = mdev->oldest_tle;
278
279         /* first some paranoia code */
280         if (b == NULL) {
281                 dev_err(DEV, "BAD! BarrierAck #%u received, but no epoch in tl!?\n",
282                         barrier_nr);
283                 goto bail;
284         }
285         if (b->br_number != barrier_nr) {
286                 dev_err(DEV, "BAD! BarrierAck #%u received, expected #%u!\n",
287                         barrier_nr, b->br_number);
288                 goto bail;
289         }
290         if (b->n_writes != set_size) {
291                 dev_err(DEV, "BAD! BarrierAck #%u received with n_writes=%u, expected n_writes=%u!\n",
292                         barrier_nr, set_size, b->n_writes);
293                 goto bail;
294         }
295
296         /* Clean up list of requests processed during current epoch */
297         list_for_each_safe(le, tle, &b->requests) {
298                 r = list_entry(le, struct drbd_request, tl_requests);
299                 _req_mod(r, barrier_acked);
300         }
301         /* There could be requests on the list waiting for completion
302            of the write to the local disk. To avoid corruptions of
303            slab's data structures we have to remove the list's head.
304
305            Also there could have been a barrier ack out of sequence, overtaking
306            the write acks - which would be a bug and would violate write ordering.
307            To not deadlock in case we lose connection while such requests are
308            still pending, we need some way to find them for the
309            _req_mod(connection_lost_while_pending).
310
311            These have been list_move'd to the out_of_sequence_requests list in
312            _req_mod(, barrier_acked) above.
313            */
314         list_del_init(&b->requests);
315
316         nob = b->next;
317         if (test_and_clear_bit(CREATE_BARRIER, &mdev->flags)) {
318                 _tl_add_barrier(mdev, b);
319                 if (nob)
320                         mdev->oldest_tle = nob;
321                 /* if nob == NULL, b was the only barrier and becomes the new
322                    barrier. Therefore mdev->oldest_tle already points to b */
323         } else {
324                 D_ASSERT(nob != NULL);
325                 mdev->oldest_tle = nob;
326                 kfree(b);
327         }
328
329         spin_unlock_irq(&mdev->req_lock);
330         dec_ap_pending(mdev);
331
332         return;
333
334 bail:
335         spin_unlock_irq(&mdev->req_lock);
336         drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
337 }
338
339
340 /* In C_AHEAD mode only out_of_sync packets are sent for requests. Detach
341  * those requests from the newest barrier when changing to another cstate.
342  *
343  * That headless list vanishes when the last request has finished its write
344  * or sent its out_of_sync packet.  */
345 static void tl_forget(struct drbd_conf *mdev)
346 {
347         struct drbd_tl_epoch *b;
348
349         if (test_bit(CREATE_BARRIER, &mdev->flags))
350                 return;
351
352         b = mdev->newest_tle;
353         list_del(&b->requests);
354         _tl_add_barrier(mdev, b);
355 }
356
357 /**
358  * _tl_restart() - Walks the transfer log, and applies an action to all requests
359  * @mdev:       DRBD device.
360  * @what:       The action/event to perform with all request objects
361  *
362  * @what might be one of connection_lost_while_pending, resend, fail_frozen_disk_io,
363  * restart_frozen_disk_io.
364  */
365 static void _tl_restart(struct drbd_conf *mdev, enum drbd_req_event what)
366 {
367         struct drbd_tl_epoch *b, *tmp, **pn;
368         struct list_head *le, *tle, carry_reads;
369         struct drbd_request *req;
370         int rv, n_writes, n_reads;
371
372         b = mdev->oldest_tle;
373         pn = &mdev->oldest_tle;
374         while (b) {
375                 n_writes = 0;
376                 n_reads = 0;
377                 INIT_LIST_HEAD(&carry_reads);
378                 list_for_each_safe(le, tle, &b->requests) {
379                         req = list_entry(le, struct drbd_request, tl_requests);
380                         rv = _req_mod(req, what);
381
382                         n_writes += (rv & MR_WRITE) >> MR_WRITE_SHIFT;
383                         n_reads  += (rv & MR_READ) >> MR_READ_SHIFT;
384                 }
385                 tmp = b->next;
386
387                 if (n_writes) {
388                         if (what == resend) {
389                                 b->n_writes = n_writes;
390                                 if (b->w.cb == NULL) {
391                                         b->w.cb = w_send_barrier;
392                                         inc_ap_pending(mdev);
393                                         set_bit(CREATE_BARRIER, &mdev->flags);
394                                 }
395
396                                 drbd_queue_work(&mdev->data.work, &b->w);
397                         }
398                         pn = &b->next;
399                 } else {
400                         if (n_reads)
401                                 list_add(&carry_reads, &b->requests);
402                         /* there could still be requests on that ring list,
403                          * in case local io is still pending */
404                         list_del(&b->requests);
405
406                         /* dec_ap_pending corresponding to queue_barrier.
407                          * the newest barrier may not have been queued yet,
408                          * in which case w.cb is still NULL. */
409                         if (b->w.cb != NULL)
410                                 dec_ap_pending(mdev);
411
412                         if (b == mdev->newest_tle) {
413                                 /* recycle, but reinit! */
414                                 D_ASSERT(tmp == NULL);
415                                 INIT_LIST_HEAD(&b->requests);
416                                 list_splice(&carry_reads, &b->requests);
417                                 INIT_LIST_HEAD(&b->w.list);
418                                 b->w.cb = NULL;
419                                 b->br_number = net_random();
420                                 b->n_writes = 0;
421
422                                 *pn = b;
423                                 break;
424                         }
425                         *pn = tmp;
426                         kfree(b);
427                 }
428                 b = tmp;
429                 list_splice(&carry_reads, &b->requests);
430         }
431 }
432
433
434 /**
435  * tl_clear() - Clears all requests and &struct drbd_tl_epoch objects out of the TL
436  * @mdev:       DRBD device.
437  *
438  * This is called after the connection to the peer was lost. The storage covered
439  * by the requests on the transfer log gets marked as out of sync. Called from the
440  * receiver thread and the worker thread.
441  */
442 void tl_clear(struct drbd_conf *mdev)
443 {
444         struct list_head *le, *tle;
445         struct drbd_request *r;
446
447         spin_lock_irq(&mdev->req_lock);
448
449         _tl_restart(mdev, connection_lost_while_pending);
450
451         /* we expect this list to be empty. */
452         D_ASSERT(list_empty(&mdev->out_of_sequence_requests));
453
454         /* but just in case, clean it up anyways! */
455         list_for_each_safe(le, tle, &mdev->out_of_sequence_requests) {
456                 r = list_entry(le, struct drbd_request, tl_requests);
457                 /* It would be nice to complete outside of spinlock.
458                  * But this is easier for now. */
459                 _req_mod(r, connection_lost_while_pending);
460         }
461
462         /* ensure bit indicating barrier is required is clear */
463         clear_bit(CREATE_BARRIER, &mdev->flags);
464
465         memset(mdev->app_reads_hash, 0, APP_R_HSIZE*sizeof(void *));
466
467         spin_unlock_irq(&mdev->req_lock);
468 }
469
470 void tl_restart(struct drbd_conf *mdev, enum drbd_req_event what)
471 {
472         spin_lock_irq(&mdev->req_lock);
473         _tl_restart(mdev, what);
474         spin_unlock_irq(&mdev->req_lock);
475 }
476
477 /**
478  * cl_wide_st_chg() - true if the state change is a cluster wide one
479  * @mdev:       DRBD device.
480  * @os:         old (current) state.
481  * @ns:         new (wanted) state.
482  */
483 static int cl_wide_st_chg(struct drbd_conf *mdev,
484                           union drbd_state os, union drbd_state ns)
485 {
486         return (os.conn >= C_CONNECTED && ns.conn >= C_CONNECTED &&
487                  ((os.role != R_PRIMARY && ns.role == R_PRIMARY) ||
488                   (os.conn != C_STARTING_SYNC_T && ns.conn == C_STARTING_SYNC_T) ||
489                   (os.conn != C_STARTING_SYNC_S && ns.conn == C_STARTING_SYNC_S) ||
490                   (os.disk != D_DISKLESS && ns.disk == D_DISKLESS))) ||
491                 (os.conn >= C_CONNECTED && ns.conn == C_DISCONNECTING) ||
492                 (os.conn == C_CONNECTED && ns.conn == C_VERIFY_S);
493 }
494
495 enum drbd_state_rv
496 drbd_change_state(struct drbd_conf *mdev, enum chg_state_flags f,
497                   union drbd_state mask, union drbd_state val)
498 {
499         unsigned long flags;
500         union drbd_state os, ns;
501         enum drbd_state_rv rv;
502
503         spin_lock_irqsave(&mdev->req_lock, flags);
504         os = mdev->state;
505         ns.i = (os.i & ~mask.i) | val.i;
506         rv = _drbd_set_state(mdev, ns, f, NULL);
507         ns = mdev->state;
508         spin_unlock_irqrestore(&mdev->req_lock, flags);
509
510         return rv;
511 }
512
513 /**
514  * drbd_force_state() - Impose a change which happens outside our control on our state
515  * @mdev:       DRBD device.
516  * @mask:       mask of state bits to change.
517  * @val:        value of new state bits.
518  */
519 void drbd_force_state(struct drbd_conf *mdev,
520         union drbd_state mask, union drbd_state val)
521 {
522         drbd_change_state(mdev, CS_HARD, mask, val);
523 }
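/* Usage example (added note, not part of the original source), taken from
 * tl_release() earlier in this file, where a broken barrier ack forces the
 * connection state:
 *
 *     drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
 *
 * The NS() helper from the DRBD headers expands to the mask/val pair for a
 * single state field, so only that field is modified.
 */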
524
525 static enum drbd_state_rv is_valid_state(struct drbd_conf *, union drbd_state);
526 static enum drbd_state_rv is_valid_state_transition(struct drbd_conf *,
527                                                     union drbd_state,
528                                                     union drbd_state);
529 static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state os,
530                                        union drbd_state ns, const char **warn_sync_abort);
531 int drbd_send_state_req(struct drbd_conf *,
532                         union drbd_state, union drbd_state);
533
534 static enum drbd_state_rv
535 _req_st_cond(struct drbd_conf *mdev, union drbd_state mask,
536              union drbd_state val)
537 {
538         union drbd_state os, ns;
539         unsigned long flags;
540         enum drbd_state_rv rv;
541
542         if (test_and_clear_bit(CL_ST_CHG_SUCCESS, &mdev->flags))
543                 return SS_CW_SUCCESS;
544
545         if (test_and_clear_bit(CL_ST_CHG_FAIL, &mdev->flags))
546                 return SS_CW_FAILED_BY_PEER;
547
548         rv = 0;
549         spin_lock_irqsave(&mdev->req_lock, flags);
550         os = mdev->state;
551         ns.i = (os.i & ~mask.i) | val.i;
552         ns = sanitize_state(mdev, os, ns, NULL);
553
554         if (!cl_wide_st_chg(mdev, os, ns))
555                 rv = SS_CW_NO_NEED;
556         if (!rv) {
557                 rv = is_valid_state(mdev, ns);
558                 if (rv == SS_SUCCESS) {
559                         rv = is_valid_state_transition(mdev, ns, os);
560                         if (rv == SS_SUCCESS)
561                                 rv = SS_UNKNOWN_ERROR; /* cont waiting, otherwise fail. */
562                 }
563         }
564         spin_unlock_irqrestore(&mdev->req_lock, flags);
565
566         return rv;
567 }
568
569 /**
570  * drbd_req_state() - Perform a possibly cluster-wide state change
571  * @mdev:       DRBD device.
572  * @mask:       mask of state bits to change.
573  * @val:        value of new state bits.
574  * @f:          flags
575  *
576  * Should not be called directly, use drbd_request_state() or
577  * _drbd_request_state().
578  */
579 static enum drbd_state_rv
580 drbd_req_state(struct drbd_conf *mdev, union drbd_state mask,
581                union drbd_state val, enum chg_state_flags f)
582 {
583         struct completion done;
584         unsigned long flags;
585         union drbd_state os, ns;
586         enum drbd_state_rv rv;
587
588         init_completion(&done);
589
590         if (f & CS_SERIALIZE)
591                 mutex_lock(&mdev->state_mutex);
592
593         spin_lock_irqsave(&mdev->req_lock, flags);
594         os = mdev->state;
595         ns.i = (os.i & ~mask.i) | val.i;
596         ns = sanitize_state(mdev, os, ns, NULL);
597
598         if (cl_wide_st_chg(mdev, os, ns)) {
599                 rv = is_valid_state(mdev, ns);
600                 if (rv == SS_SUCCESS)
601                         rv = is_valid_state_transition(mdev, ns, os);
602                 spin_unlock_irqrestore(&mdev->req_lock, flags);
603
604                 if (rv < SS_SUCCESS) {
605                         if (f & CS_VERBOSE)
606                                 print_st_err(mdev, os, ns, rv);
607                         goto abort;
608                 }
609
610                 drbd_state_lock(mdev);
611                 if (!drbd_send_state_req(mdev, mask, val)) {
612                         drbd_state_unlock(mdev);
613                         rv = SS_CW_FAILED_BY_PEER;
614                         if (f & CS_VERBOSE)
615                                 print_st_err(mdev, os, ns, rv);
616                         goto abort;
617                 }
618
619                 wait_event(mdev->state_wait,
620                         (rv = _req_st_cond(mdev, mask, val)));
621
622                 if (rv < SS_SUCCESS) {
623                         drbd_state_unlock(mdev);
624                         if (f & CS_VERBOSE)
625                                 print_st_err(mdev, os, ns, rv);
626                         goto abort;
627                 }
628                 spin_lock_irqsave(&mdev->req_lock, flags);
629                 os = mdev->state;
630                 ns.i = (os.i & ~mask.i) | val.i;
631                 rv = _drbd_set_state(mdev, ns, f, &done);
632                 drbd_state_unlock(mdev);
633         } else {
634                 rv = _drbd_set_state(mdev, ns, f, &done);
635         }
636
637         spin_unlock_irqrestore(&mdev->req_lock, flags);
638
639         if (f & CS_WAIT_COMPLETE && rv == SS_SUCCESS) {
640                 D_ASSERT(current != mdev->worker.task);
641                 wait_for_completion(&done);
642         }
643
644 abort:
645         if (f & CS_SERIALIZE)
646                 mutex_unlock(&mdev->state_mutex);
647
648         return rv;
649 }
650
651 /**
652  * _drbd_request_state() - Request a state change (with flags)
653  * @mdev:       DRBD device.
654  * @mask:       mask of state bits to change.
655  * @val:        value of new state bits.
656  * @f:          flags
657  *
658  * Cousin of drbd_request_state(), useful with the CS_WAIT_COMPLETE
659  * flag, or when logging of failed state change requests is not desired.
660  */
661 enum drbd_state_rv
662 _drbd_request_state(struct drbd_conf *mdev, union drbd_state mask,
663                     union drbd_state val, enum chg_state_flags f)
664 {
665         enum drbd_state_rv rv;
666
667         wait_event(mdev->state_wait,
668                    (rv = drbd_req_state(mdev, mask, val, f)) != SS_IN_TRANSIENT_STATE);
669
670         return rv;
671 }
672
673 static void print_st(struct drbd_conf *mdev, char *name, union drbd_state ns)
674 {
675         dev_err(DEV, " %s = { cs:%s ro:%s/%s ds:%s/%s %c%c%c%c }\n",
676             name,
677             drbd_conn_str(ns.conn),
678             drbd_role_str(ns.role),
679             drbd_role_str(ns.peer),
680             drbd_disk_str(ns.disk),
681             drbd_disk_str(ns.pdsk),
682             is_susp(ns) ? 's' : 'r',
683             ns.aftr_isp ? 'a' : '-',
684             ns.peer_isp ? 'p' : '-',
685             ns.user_isp ? 'u' : '-'
686             );
687 }
688
689 void print_st_err(struct drbd_conf *mdev, union drbd_state os,
690                   union drbd_state ns, enum drbd_state_rv err)
691 {
692         if (err == SS_IN_TRANSIENT_STATE)
693                 return;
694         dev_err(DEV, "State change failed: %s\n", drbd_set_st_err_str(err));
695         print_st(mdev, " state", os);
696         print_st(mdev, "wanted", ns);
697 }
698
699
700 /**
701  * is_valid_state() - Returns an SS_ error code if ns is not valid
702  * @mdev:       DRBD device.
703  * @ns:         State to consider.
704  */
705 static enum drbd_state_rv
706 is_valid_state(struct drbd_conf *mdev, union drbd_state ns)
707 {
708         /* See drbd_state_sw_errors in drbd_strings.c */
709
710         enum drbd_fencing_p fp;
711         enum drbd_state_rv rv = SS_SUCCESS;
712
713         fp = FP_DONT_CARE;
714         if (get_ldev(mdev)) {
715                 fp = mdev->ldev->dc.fencing;
716                 put_ldev(mdev);
717         }
718
719         if (get_net_conf(mdev)) {
720                 if (!mdev->net_conf->two_primaries &&
721                     ns.role == R_PRIMARY && ns.peer == R_PRIMARY)
722                         rv = SS_TWO_PRIMARIES;
723                 put_net_conf(mdev);
724         }
725
726         if (rv <= 0)
727                 /* already found a reason to abort */;
728         else if (ns.role == R_SECONDARY && mdev->open_cnt)
729                 rv = SS_DEVICE_IN_USE;
730
731         else if (ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.disk < D_UP_TO_DATE)
732                 rv = SS_NO_UP_TO_DATE_DISK;
733
734         else if (fp >= FP_RESOURCE &&
735                  ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.pdsk >= D_UNKNOWN)
736                 rv = SS_PRIMARY_NOP;
737
738         else if (ns.role == R_PRIMARY && ns.disk <= D_INCONSISTENT && ns.pdsk <= D_INCONSISTENT)
739                 rv = SS_NO_UP_TO_DATE_DISK;
740
741         else if (ns.conn > C_CONNECTED && ns.disk < D_INCONSISTENT)
742                 rv = SS_NO_LOCAL_DISK;
743
744         else if (ns.conn > C_CONNECTED && ns.pdsk < D_INCONSISTENT)
745                 rv = SS_NO_REMOTE_DISK;
746
747         else if (ns.conn > C_CONNECTED && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE)
748                 rv = SS_NO_UP_TO_DATE_DISK;
749
750         else if ((ns.conn == C_CONNECTED ||
751                   ns.conn == C_WF_BITMAP_S ||
752                   ns.conn == C_SYNC_SOURCE ||
753                   ns.conn == C_PAUSED_SYNC_S) &&
754                   ns.disk == D_OUTDATED)
755                 rv = SS_CONNECTED_OUTDATES;
756
757         else if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
758                  (mdev->sync_conf.verify_alg[0] == 0))
759                 rv = SS_NO_VERIFY_ALG;
760
761         else if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
762                   mdev->agreed_pro_version < 88)
763                 rv = SS_NOT_SUPPORTED;
764
765         return rv;
766 }
767
768 /**
769  * is_valid_state_transition() - Returns an SS_ error code if the state transition is not possible
770  * @mdev:       DRBD device.
771  * @ns:         new state.
772  * @os:         old state.
773  */
774 static enum drbd_state_rv
775 is_valid_state_transition(struct drbd_conf *mdev, union drbd_state ns,
776                           union drbd_state os)
777 {
778         enum drbd_state_rv rv = SS_SUCCESS;
779
780         if ((ns.conn == C_STARTING_SYNC_T || ns.conn == C_STARTING_SYNC_S) &&
781             os.conn > C_CONNECTED)
782                 rv = SS_RESYNC_RUNNING;
783
784         if (ns.conn == C_DISCONNECTING && os.conn == C_STANDALONE)
785                 rv = SS_ALREADY_STANDALONE;
786
787         if (ns.disk > D_ATTACHING && os.disk == D_DISKLESS)
788                 rv = SS_IS_DISKLESS;
789
790         if (ns.conn == C_WF_CONNECTION && os.conn < C_UNCONNECTED)
791                 rv = SS_NO_NET_CONFIG;
792
793         if (ns.disk == D_OUTDATED && os.disk < D_OUTDATED && os.disk != D_ATTACHING)
794                 rv = SS_LOWER_THAN_OUTDATED;
795
796         if (ns.conn == C_DISCONNECTING && os.conn == C_UNCONNECTED)
797                 rv = SS_IN_TRANSIENT_STATE;
798
799         if (ns.conn == os.conn && ns.conn == C_WF_REPORT_PARAMS)
800                 rv = SS_IN_TRANSIENT_STATE;
801
802         if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) && os.conn < C_CONNECTED)
803                 rv = SS_NEED_CONNECTION;
804
805         if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
806             ns.conn != os.conn && os.conn > C_CONNECTED)
807                 rv = SS_RESYNC_RUNNING;
808
809         if ((ns.conn == C_STARTING_SYNC_S || ns.conn == C_STARTING_SYNC_T) &&
810             os.conn < C_CONNECTED)
811                 rv = SS_NEED_CONNECTION;
812
813         if ((ns.conn == C_SYNC_TARGET || ns.conn == C_SYNC_SOURCE)
814             && os.conn < C_WF_REPORT_PARAMS)
815                 rv = SS_NEED_CONNECTION; /* No NetworkFailure -> SyncTarget etc... */
816
817         return rv;
818 }
819
820 /**
821  * sanitize_state() - Resolves implicitly necessary additional changes to a state transition
822  * @mdev:       DRBD device.
823  * @os:         old state.
824  * @ns:         new state.
825  * @warn_sync_abort:
826  *
827  * When we lose the connection, we have to set the state of the peer's disk (pdsk)
828  * to D_UNKNOWN. This rule and many more along those lines are in this function.
829  */
830 static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state os,
831                                        union drbd_state ns, const char **warn_sync_abort)
832 {
833         enum drbd_fencing_p fp;
834         enum drbd_disk_state disk_min, disk_max, pdsk_min, pdsk_max;
835
836         fp = FP_DONT_CARE;
837         if (get_ldev(mdev)) {
838                 fp = mdev->ldev->dc.fencing;
839                 put_ldev(mdev);
840         }
841
842         /* Do not allow a network error to configure a device's network part */
843         if ((ns.conn >= C_TIMEOUT && ns.conn <= C_TEAR_DOWN) &&
844             os.conn <= C_DISCONNECTING)
845                 ns.conn = os.conn;
846
847         /* After a network error (+C_TEAR_DOWN) only C_UNCONNECTED or C_DISCONNECTING can follow.
848          * If you try to go into some Sync* state, that shall fail (elsewhere). */
849         if (os.conn >= C_TIMEOUT && os.conn <= C_TEAR_DOWN &&
850             ns.conn != C_UNCONNECTED && ns.conn != C_DISCONNECTING && ns.conn <= C_TEAR_DOWN)
851                 ns.conn = os.conn;
852
853         /* we cannot fail (again) if we already detached */
854         if (ns.disk == D_FAILED && os.disk == D_DISKLESS)
855                 ns.disk = D_DISKLESS;
856
857         /* if we are only D_ATTACHING yet,
858          * we can (and should) go directly to D_DISKLESS. */
859         if (ns.disk == D_FAILED && os.disk == D_ATTACHING)
860                 ns.disk = D_DISKLESS;
861
862         /* After C_DISCONNECTING only C_STANDALONE may follow */
863         if (os.conn == C_DISCONNECTING && ns.conn != C_STANDALONE)
864                 ns.conn = os.conn;
865
866         if (ns.conn < C_CONNECTED) {
867                 ns.peer_isp = 0;
868                 ns.peer = R_UNKNOWN;
869                 if (ns.pdsk > D_UNKNOWN || ns.pdsk < D_INCONSISTENT)
870                         ns.pdsk = D_UNKNOWN;
871         }
872
873         /* Clear the aftr_isp when becoming unconfigured */
874         if (ns.conn == C_STANDALONE && ns.disk == D_DISKLESS && ns.role == R_SECONDARY)
875                 ns.aftr_isp = 0;
876
877         /* Abort resync if a disk fails/detaches */
878         if (os.conn > C_CONNECTED && ns.conn > C_CONNECTED &&
879             (ns.disk <= D_FAILED || ns.pdsk <= D_FAILED)) {
880                 if (warn_sync_abort)
881                         *warn_sync_abort =
882                                 os.conn == C_VERIFY_S || os.conn == C_VERIFY_T ?
883                                 "Online-verify" : "Resync";
884                 ns.conn = C_CONNECTED;
885         }
886
887         /* Connection breaks down before we finished "Negotiating" */
888         if (ns.conn < C_CONNECTED && ns.disk == D_NEGOTIATING &&
889             get_ldev_if_state(mdev, D_NEGOTIATING)) {
890                 if (mdev->ed_uuid == mdev->ldev->md.uuid[UI_CURRENT]) {
891                         ns.disk = mdev->new_state_tmp.disk;
892                         ns.pdsk = mdev->new_state_tmp.pdsk;
893                 } else {
894                         dev_alert(DEV, "Connection lost while negotiating, no data!\n");
895                         ns.disk = D_DISKLESS;
896                         ns.pdsk = D_UNKNOWN;
897                 }
898                 put_ldev(mdev);
899         }
900
901         /* D_CONSISTENT and D_OUTDATED vanish when we get connected */
902         if (ns.conn >= C_CONNECTED && ns.conn < C_AHEAD) {
903                 if (ns.disk == D_CONSISTENT || ns.disk == D_OUTDATED)
904                         ns.disk = D_UP_TO_DATE;
905                 if (ns.pdsk == D_CONSISTENT || ns.pdsk == D_OUTDATED)
906                         ns.pdsk = D_UP_TO_DATE;
907         }
908
909         /* Implications of the connection state on the disk states */
910         disk_min = D_DISKLESS;
911         disk_max = D_UP_TO_DATE;
912         pdsk_min = D_INCONSISTENT;
913         pdsk_max = D_UNKNOWN;
914         switch ((enum drbd_conns)ns.conn) {
915         case C_WF_BITMAP_T:
916         case C_PAUSED_SYNC_T:
917         case C_STARTING_SYNC_T:
918         case C_WF_SYNC_UUID:
919         case C_BEHIND:
920                 disk_min = D_INCONSISTENT;
921                 disk_max = D_OUTDATED;
922                 pdsk_min = D_UP_TO_DATE;
923                 pdsk_max = D_UP_TO_DATE;
924                 break;
925         case C_VERIFY_S:
926         case C_VERIFY_T:
927                 disk_min = D_UP_TO_DATE;
928                 disk_max = D_UP_TO_DATE;
929                 pdsk_min = D_UP_TO_DATE;
930                 pdsk_max = D_UP_TO_DATE;
931                 break;
932         case C_CONNECTED:
933                 disk_min = D_DISKLESS;
934                 disk_max = D_UP_TO_DATE;
935                 pdsk_min = D_DISKLESS;
936                 pdsk_max = D_UP_TO_DATE;
937                 break;
938         case C_WF_BITMAP_S:
939         case C_PAUSED_SYNC_S:
940         case C_STARTING_SYNC_S:
941         case C_AHEAD:
942                 disk_min = D_UP_TO_DATE;
943                 disk_max = D_UP_TO_DATE;
944                 pdsk_min = D_INCONSISTENT;
945                 pdsk_max = D_CONSISTENT; /* D_OUTDATED would be nice. But explicit outdate necessary*/
946                 break;
947         case C_SYNC_TARGET:
948                 disk_min = D_INCONSISTENT;
949                 disk_max = D_INCONSISTENT;
950                 pdsk_min = D_UP_TO_DATE;
951                 pdsk_max = D_UP_TO_DATE;
952                 break;
953         case C_SYNC_SOURCE:
954                 disk_min = D_UP_TO_DATE;
955                 disk_max = D_UP_TO_DATE;
956                 pdsk_min = D_INCONSISTENT;
957                 pdsk_max = D_INCONSISTENT;
958                 break;
959         case C_STANDALONE:
960         case C_DISCONNECTING:
961         case C_UNCONNECTED:
962         case C_TIMEOUT:
963         case C_BROKEN_PIPE:
964         case C_NETWORK_FAILURE:
965         case C_PROTOCOL_ERROR:
966         case C_TEAR_DOWN:
967         case C_WF_CONNECTION:
968         case C_WF_REPORT_PARAMS:
969         case C_MASK:
970                 break;
971         }
972         if (ns.disk > disk_max)
973                 ns.disk = disk_max;
974
975         if (ns.disk < disk_min) {
976                 dev_warn(DEV, "Implicitly set disk from %s to %s\n",
977                          drbd_disk_str(ns.disk), drbd_disk_str(disk_min));
978                 ns.disk = disk_min;
979         }
980         if (ns.pdsk > pdsk_max)
981                 ns.pdsk = pdsk_max;
982
983         if (ns.pdsk < pdsk_min) {
984                 dev_warn(DEV, "Implicitly set pdsk from %s to %s\n",
985                          drbd_disk_str(ns.pdsk), drbd_disk_str(pdsk_min));
986                 ns.pdsk = pdsk_min;
987         }
988
989         if (fp == FP_STONITH &&
990             (ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.pdsk > D_OUTDATED) &&
991             !(os.role == R_PRIMARY && os.conn < C_CONNECTED && os.pdsk > D_OUTDATED))
992                 ns.susp_fen = 1; /* Suspend IO while fence-peer handler runs (peer lost) */
993
994         if (mdev->sync_conf.on_no_data == OND_SUSPEND_IO &&
995             (ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE) &&
996             !(os.role == R_PRIMARY && os.disk < D_UP_TO_DATE && os.pdsk < D_UP_TO_DATE))
997                 ns.susp_nod = 1; /* Suspend IO while no data available (no accessible data) */
998
999         if (ns.aftr_isp || ns.peer_isp || ns.user_isp) {
1000                 if (ns.conn == C_SYNC_SOURCE)
1001                         ns.conn = C_PAUSED_SYNC_S;
1002                 if (ns.conn == C_SYNC_TARGET)
1003                         ns.conn = C_PAUSED_SYNC_T;
1004         } else {
1005                 if (ns.conn == C_PAUSED_SYNC_S)
1006                         ns.conn = C_SYNC_SOURCE;
1007                 if (ns.conn == C_PAUSED_SYNC_T)
1008                         ns.conn = C_SYNC_TARGET;
1009         }
1010
1011         return ns;
1012 }
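/* Worked example (added note, not part of the original source): if a
 * transition to C_SYNC_TARGET is requested while ns.disk is still
 * D_UP_TO_DATE, the clamping above (disk_max == D_INCONSISTENT for
 * C_SYNC_TARGET) silently lowers ns.disk to D_INCONSISTENT, matching the
 * semantics of being a resync target; a too-low pdsk would instead be raised
 * to pdsk_min with a warning.
 */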
1013
1014 /* helper for __drbd_set_state */
1015 static void set_ov_position(struct drbd_conf *mdev, enum drbd_conns cs)
1016 {
1017         if (mdev->agreed_pro_version < 90)
1018                 mdev->ov_start_sector = 0;
1019         mdev->rs_total = drbd_bm_bits(mdev);
1020         mdev->ov_position = 0;
1021         if (cs == C_VERIFY_T) {
1022                 /* starting online verify from an arbitrary position
1023                  * does not fit well into the existing protocol.
1024                  * on C_VERIFY_T, we initialize ov_left and friends
1025                  * implicitly in receive_DataRequest once the
1026                  * first P_OV_REQUEST is received */
1027                 mdev->ov_start_sector = ~(sector_t)0;
1028         } else {
1029                 unsigned long bit = BM_SECT_TO_BIT(mdev->ov_start_sector);
1030                 if (bit >= mdev->rs_total) {
1031                         mdev->ov_start_sector =
1032                                 BM_BIT_TO_SECT(mdev->rs_total - 1);
1033                         mdev->rs_total = 1;
1034                 } else
1035                         mdev->rs_total -= bit;
1036                 mdev->ov_position = mdev->ov_start_sector;
1037         }
1038         mdev->ov_left = mdev->rs_total;
1039 }
1040
1041 static void drbd_resume_al(struct drbd_conf *mdev)
1042 {
1043         if (test_and_clear_bit(AL_SUSPENDED, &mdev->flags))
1044                 dev_info(DEV, "Resumed AL updates\n");
1045 }
1046
1047 /**
1048  * __drbd_set_state() - Set a new DRBD state
1049  * @mdev:       DRBD device.
1050  * @ns:         new state.
1051  * @flags:      Flags
1052  * @done:       Optional completion that will be completed once after_state_ch() has finished
1053  *
1054  * Caller needs to hold req_lock, and global_state_lock. Do not call directly.
1055  */
1056 enum drbd_state_rv
1057 __drbd_set_state(struct drbd_conf *mdev, union drbd_state ns,
1058                  enum chg_state_flags flags, struct completion *done)
1059 {
1060         union drbd_state os;
1061         enum drbd_state_rv rv = SS_SUCCESS;
1062         const char *warn_sync_abort = NULL;
1063         struct after_state_chg_work *ascw;
1064
1065         os = mdev->state;
1066
1067         ns = sanitize_state(mdev, os, ns, &warn_sync_abort);
1068
1069         if (ns.i == os.i)
1070                 return SS_NOTHING_TO_DO;
1071
1072         if (!(flags & CS_HARD)) {
1073                 /*  pre-state-change checks ; only look at ns  */
1074                 /* See drbd_state_sw_errors in drbd_strings.c */
1075
1076                 rv = is_valid_state(mdev, ns);
1077                 if (rv < SS_SUCCESS) {
1078                         /* If the old state was illegal as well, then let
1079                            this happen...*/
1080
1081                         if (is_valid_state(mdev, os) == rv)
1082                                 rv = is_valid_state_transition(mdev, ns, os);
1083                 } else
1084                         rv = is_valid_state_transition(mdev, ns, os);
1085         }
1086
1087         if (rv < SS_SUCCESS) {
1088                 if (flags & CS_VERBOSE)
1089                         print_st_err(mdev, os, ns, rv);
1090                 return rv;
1091         }
1092
1093         if (warn_sync_abort)
1094                 dev_warn(DEV, "%s aborted.\n", warn_sync_abort);
1095
1096         {
1097         char *pbp, pb[300];
1098         pbp = pb;
1099         *pbp = 0;
1100         if (ns.role != os.role)
1101                 pbp += sprintf(pbp, "role( %s -> %s ) ",
1102                                drbd_role_str(os.role),
1103                                drbd_role_str(ns.role));
1104         if (ns.peer != os.peer)
1105                 pbp += sprintf(pbp, "peer( %s -> %s ) ",
1106                                drbd_role_str(os.peer),
1107                                drbd_role_str(ns.peer));
1108         if (ns.conn != os.conn)
1109                 pbp += sprintf(pbp, "conn( %s -> %s ) ",
1110                                drbd_conn_str(os.conn),
1111                                drbd_conn_str(ns.conn));
1112         if (ns.disk != os.disk)
1113                 pbp += sprintf(pbp, "disk( %s -> %s ) ",
1114                                drbd_disk_str(os.disk),
1115                                drbd_disk_str(ns.disk));
1116         if (ns.pdsk != os.pdsk)
1117                 pbp += sprintf(pbp, "pdsk( %s -> %s ) ",
1118                                drbd_disk_str(os.pdsk),
1119                                drbd_disk_str(ns.pdsk));
1120         if (is_susp(ns) != is_susp(os))
1121                 pbp += sprintf(pbp, "susp( %d -> %d ) ",
1122                                is_susp(os),
1123                                is_susp(ns));
1124         if (ns.aftr_isp != os.aftr_isp)
1125                 pbp += sprintf(pbp, "aftr_isp( %d -> %d ) ",
1126                                os.aftr_isp,
1127                                ns.aftr_isp);
1128         if (ns.peer_isp != os.peer_isp)
1129                 pbp += sprintf(pbp, "peer_isp( %d -> %d ) ",
1130                                os.peer_isp,
1131                                ns.peer_isp);
1132         if (ns.user_isp != os.user_isp)
1133                 pbp += sprintf(pbp, "user_isp( %d -> %d ) ",
1134                                os.user_isp,
1135                                ns.user_isp);
1136         dev_info(DEV, "%s\n", pb);
1137         }
1138
1139         /* solve the race between becoming unconfigured,
1140          * worker doing the cleanup, and
1141          * admin reconfiguring us:
1142          * on (re)configure, first set CONFIG_PENDING,
1143          * then wait for a potentially exiting worker,
1144          * start the worker, and schedule one no_op.
1145          * then proceed with configuration.
1146          */
1147         if (ns.disk == D_DISKLESS &&
1148             ns.conn == C_STANDALONE &&
1149             ns.role == R_SECONDARY &&
1150             !test_and_set_bit(CONFIG_PENDING, &mdev->flags))
1151                 set_bit(DEVICE_DYING, &mdev->flags);
1152
1153         /* if we are going -> D_FAILED or D_DISKLESS, grab one extra reference
1154          * on the ldev here, to be sure the transition -> D_DISKLESS resp.
1155          * drbd_ldev_destroy() won't happen before our corresponding
1156          * after_state_ch works run, where we put_ldev again. */
1157         if ((os.disk != D_FAILED && ns.disk == D_FAILED) ||
1158             (os.disk != D_DISKLESS && ns.disk == D_DISKLESS))
1159                 atomic_inc(&mdev->local_cnt);
1160
1161         mdev->state = ns;
1162         wake_up(&mdev->misc_wait);
1163         wake_up(&mdev->state_wait);
1164
1165         /* aborted verify run. log the last position */
1166         if ((os.conn == C_VERIFY_S || os.conn == C_VERIFY_T) &&
1167             ns.conn < C_CONNECTED) {
1168                 mdev->ov_start_sector =
1169                         BM_BIT_TO_SECT(drbd_bm_bits(mdev) - mdev->ov_left);
1170                 dev_info(DEV, "Online Verify reached sector %llu\n",
1171                         (unsigned long long)mdev->ov_start_sector);
1172         }
1173
1174         if ((os.conn == C_PAUSED_SYNC_T || os.conn == C_PAUSED_SYNC_S) &&
1175             (ns.conn == C_SYNC_TARGET  || ns.conn == C_SYNC_SOURCE)) {
1176                 dev_info(DEV, "Syncer continues.\n");
1177                 mdev->rs_paused += (long)jiffies
1178                                   -(long)mdev->rs_mark_time[mdev->rs_last_mark];
1179                 if (ns.conn == C_SYNC_TARGET)
1180                         mod_timer(&mdev->resync_timer, jiffies);
1181         }
1182
1183         if ((os.conn == C_SYNC_TARGET  || os.conn == C_SYNC_SOURCE) &&
1184             (ns.conn == C_PAUSED_SYNC_T || ns.conn == C_PAUSED_SYNC_S)) {
1185                 dev_info(DEV, "Resync suspended\n");
1186                 mdev->rs_mark_time[mdev->rs_last_mark] = jiffies;
1187         }
1188
1189         if (os.conn == C_CONNECTED &&
1190             (ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T)) {
1191                 unsigned long now = jiffies;
1192                 int i;
1193
1194                 set_ov_position(mdev, ns.conn);
1195                 mdev->rs_start = now;
1196                 mdev->rs_last_events = 0;
1197                 mdev->rs_last_sect_ev = 0;
1198                 mdev->ov_last_oos_size = 0;
1199                 mdev->ov_last_oos_start = 0;
1200
1201                 for (i = 0; i < DRBD_SYNC_MARKS; i++) {
1202                         mdev->rs_mark_left[i] = mdev->ov_left;
1203                         mdev->rs_mark_time[i] = now;
1204                 }
1205
1206                 drbd_rs_controller_reset(mdev);
1207
1208                 if (ns.conn == C_VERIFY_S) {
1209                         dev_info(DEV, "Starting Online Verify from sector %llu\n",
1210                                         (unsigned long long)mdev->ov_position);
1211                         mod_timer(&mdev->resync_timer, jiffies);
1212                 }
1213         }
1214
1215         if (get_ldev(mdev)) {
1216                 u32 mdf = mdev->ldev->md.flags & ~(MDF_CONSISTENT|MDF_PRIMARY_IND|
1217                                                  MDF_CONNECTED_IND|MDF_WAS_UP_TO_DATE|
1218                                                  MDF_PEER_OUT_DATED|MDF_CRASHED_PRIMARY);
1219
1220                 if (test_bit(CRASHED_PRIMARY, &mdev->flags))
1221                         mdf |= MDF_CRASHED_PRIMARY;
1222                 if (mdev->state.role == R_PRIMARY ||
1223                     (mdev->state.pdsk < D_INCONSISTENT && mdev->state.peer == R_PRIMARY))
1224                         mdf |= MDF_PRIMARY_IND;
1225                 if (mdev->state.conn > C_WF_REPORT_PARAMS)
1226                         mdf |= MDF_CONNECTED_IND;
1227                 if (mdev->state.disk > D_INCONSISTENT)
1228                         mdf |= MDF_CONSISTENT;
1229                 if (mdev->state.disk > D_OUTDATED)
1230                         mdf |= MDF_WAS_UP_TO_DATE;
1231                 if (mdev->state.pdsk <= D_OUTDATED && mdev->state.pdsk >= D_INCONSISTENT)
1232                         mdf |= MDF_PEER_OUT_DATED;
1233                 if (mdf != mdev->ldev->md.flags) {
1234                         mdev->ldev->md.flags = mdf;
1235                         drbd_md_mark_dirty(mdev);
1236                 }
1237                 if (os.disk < D_CONSISTENT && ns.disk >= D_CONSISTENT)
1238                         drbd_set_ed_uuid(mdev, mdev->ldev->md.uuid[UI_CURRENT]);
1239                 put_ldev(mdev);
1240         }
1241
1242         /* Peer was forced D_UP_TO_DATE & R_PRIMARY, consider resyncing */
1243         if (os.disk == D_INCONSISTENT && os.pdsk == D_INCONSISTENT &&
1244             os.peer == R_SECONDARY && ns.peer == R_PRIMARY)
1245                 set_bit(CONSIDER_RESYNC, &mdev->flags);
1246
1247         /* Receiver should clean up itself */
1248         if (os.conn != C_DISCONNECTING && ns.conn == C_DISCONNECTING)
1249                 drbd_thread_stop_nowait(&mdev->receiver);
1250
1251         /* Now the receiver finished cleaning up itself, it should die */
1252         if (os.conn != C_STANDALONE && ns.conn == C_STANDALONE)
1253                 drbd_thread_stop_nowait(&mdev->receiver);
1254
1255         /* Upon network failure, we need to restart the receiver. */
1256         if (os.conn > C_TEAR_DOWN &&
1257             ns.conn <= C_TEAR_DOWN && ns.conn >= C_TIMEOUT)
1258                 drbd_thread_restart_nowait(&mdev->receiver);
1259
1260         /* Resume AL writing if we get a connection */
1261         if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED)
1262                 drbd_resume_al(mdev);
1263
1264         if (os.conn == C_AHEAD && ns.conn != C_AHEAD)
1265                 tl_forget(mdev);
1266
1267         ascw = kmalloc(sizeof(*ascw), GFP_ATOMIC);
1268         if (ascw) {
1269                 ascw->os = os;
1270                 ascw->ns = ns;
1271                 ascw->flags = flags;
1272                 ascw->w.cb = w_after_state_ch;
1273                 ascw->done = done;
1274                 drbd_queue_work(&mdev->data.work, &ascw->w);
1275         } else {
1276                 dev_warn(DEV, "Could not kmalloc an ascw\n");
1277         }
1278
1279         return rv;
1280 }
1281
1282 static int w_after_state_ch(struct drbd_conf *mdev, struct drbd_work *w, int unused)
1283 {
1284         struct after_state_chg_work *ascw =
1285                 container_of(w, struct after_state_chg_work, w);
1286         after_state_ch(mdev, ascw->os, ascw->ns, ascw->flags);
1287         if (ascw->flags & CS_WAIT_COMPLETE) {
1288                 D_ASSERT(ascw->done != NULL);
1289                 complete(ascw->done);
1290         }
1291         kfree(ascw);
1292
1293         return 1;
1294 }
1295
1296 static void abw_start_sync(struct drbd_conf *mdev, int rv)
1297 {
1298         if (rv) {
1299                 dev_err(DEV, "Writing the bitmap failed, not starting resync.\n");
1300                 _drbd_request_state(mdev, NS(conn, C_CONNECTED), CS_VERBOSE);
1301                 return;
1302         }
1303
1304         switch (mdev->state.conn) {
1305         case C_STARTING_SYNC_T:
1306                 _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
1307                 break;
1308         case C_STARTING_SYNC_S:
1309                 drbd_start_resync(mdev, C_SYNC_SOURCE);
1310                 break;
1311         }
1312 }
1313
1314 int drbd_bitmap_io_from_worker(struct drbd_conf *mdev, int (*io_fn)(struct drbd_conf *), char *why)
1315 {
1316         int rv;
1317
1318         D_ASSERT(current == mdev->worker.task);
1319
1320         /* open coded non-blocking drbd_suspend_io(mdev); */
1321         set_bit(SUSPEND_IO, &mdev->flags);
1322         if (!is_susp(mdev->state))
1323                 D_ASSERT(atomic_read(&mdev->ap_bio_cnt) == 0);
1324
1325         drbd_bm_lock(mdev, why);
1326         rv = io_fn(mdev);
1327         drbd_bm_unlock(mdev);
1328
1329         drbd_resume_io(mdev);
1330
1331         return rv;
1332 }
1333
1334 /**
1335  * after_state_ch() - Perform after state change actions that may sleep
1336  * @mdev:       DRBD device.
1337  * @os:         old state.
1338  * @ns:         new state.
1339  * @flags:      Flags
1340  */
1341 static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
1342                            union drbd_state ns, enum chg_state_flags flags)
1343 {
1344         enum drbd_fencing_p fp;
1345         enum drbd_req_event what = nothing;
1346         union drbd_state nsm = (union drbd_state){ .i = -1 };
1347
1348         if (os.conn != C_CONNECTED && ns.conn == C_CONNECTED) {
1349                 clear_bit(CRASHED_PRIMARY, &mdev->flags);
1350                 if (mdev->p_uuid)
1351                         mdev->p_uuid[UI_FLAGS] &= ~((u64)2);
1352         }
1353
1354         fp = FP_DONT_CARE;
1355         if (get_ldev(mdev)) {
1356                 fp = mdev->ldev->dc.fencing;
1357                 put_ldev(mdev);
1358         }
1359
1360         /* Inform userspace about the change... */
1361         drbd_bcast_state(mdev, ns);
1362
1363         if (!(os.role == R_PRIMARY && os.disk < D_UP_TO_DATE && os.pdsk < D_UP_TO_DATE) &&
1364             (ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE))
1365                 drbd_khelper(mdev, "pri-on-incon-degr");
1366
1367         /* Here we have the actions that are performed after a
1368            state change. This function might sleep */
1369
1370         nsm.i = -1;
1371         if (ns.susp_nod) {
1372                 if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED)
1373                         what = resend;
1374
1375                 if (os.disk == D_ATTACHING && ns.disk > D_ATTACHING)
1376                         what = restart_frozen_disk_io;
1377
1378                 if (what != nothing)
1379                         nsm.susp_nod = 0;
1380         }
1381
1382         if (ns.susp_fen) {
1383                 /* case1: The outdate peer handler is successful: */
1384                 if (os.pdsk > D_OUTDATED  && ns.pdsk <= D_OUTDATED) {
1385                         tl_clear(mdev);
1386                         if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
1387                                 drbd_uuid_new_current(mdev);
1388                                 clear_bit(NEW_CUR_UUID, &mdev->flags);
1389                         }
1390                         spin_lock_irq(&mdev->req_lock);
1391                         _drbd_set_state(_NS(mdev, susp_fen, 0), CS_VERBOSE, NULL);
1392                         spin_unlock_irq(&mdev->req_lock);
1393                 }
1394                 /* case2: The connection was established again: */
1395                 if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED) {
1396                         clear_bit(NEW_CUR_UUID, &mdev->flags);
1397                         what = resend;
1398                         nsm.susp_fen = 0;
1399                 }
1400         }
1401
1402         if (what != nothing) {
1403                 spin_lock_irq(&mdev->req_lock);
1404                 _tl_restart(mdev, what);
1405                 nsm.i &= mdev->state.i;
1406                 _drbd_set_state(mdev, nsm, CS_VERBOSE, NULL);
1407                 spin_unlock_irq(&mdev->req_lock);
1408         }
1409
1410         /* Became sync source.  With protocol >= 96, we still need to send out
1411          * the sync uuid now. Need to do that before any drbd_send_state, or
1412          * the other side may go "paused sync" before receiving the sync uuids,
1413          * which is unexpected. */
1414         if ((os.conn != C_SYNC_SOURCE && os.conn != C_PAUSED_SYNC_S) &&
1415             (ns.conn == C_SYNC_SOURCE || ns.conn == C_PAUSED_SYNC_S) &&
1416             mdev->agreed_pro_version >= 96 && get_ldev(mdev)) {
1417                 drbd_gen_and_send_sync_uuid(mdev);
1418                 put_ldev(mdev);
1419         }
1420
1421         /* Do not change the order of the if above and the two below... */
1422         if (os.pdsk == D_DISKLESS && ns.pdsk > D_DISKLESS) {      /* attach on the peer */
1423                 drbd_send_uuids(mdev);
1424                 drbd_send_state(mdev);
1425         }
1426         if (os.conn != C_WF_BITMAP_S && ns.conn == C_WF_BITMAP_S)
1427                 drbd_queue_bitmap_io(mdev, &drbd_send_bitmap, NULL, "send_bitmap (WFBitMapS)");
1428
1429         /* Lost contact to peer's copy of the data */
1430         if ((os.pdsk >= D_INCONSISTENT &&
1431              os.pdsk != D_UNKNOWN &&
1432              os.pdsk != D_OUTDATED)
1433         &&  (ns.pdsk < D_INCONSISTENT ||
1434              ns.pdsk == D_UNKNOWN ||
1435              ns.pdsk == D_OUTDATED)) {
1436                 if (get_ldev(mdev)) {
1437                         if ((ns.role == R_PRIMARY || ns.peer == R_PRIMARY) &&
1438                             mdev->ldev->md.uuid[UI_BITMAP] == 0 && ns.disk >= D_UP_TO_DATE) {
1439                                 if (is_susp(mdev->state)) {
1440                                         set_bit(NEW_CUR_UUID, &mdev->flags);
1441                                 } else {
1442                                         drbd_uuid_new_current(mdev);
1443                                         drbd_send_uuids(mdev);
1444                                 }
1445                         }
1446                         put_ldev(mdev);
1447                 }
1448         }
1449
1450         if (ns.pdsk < D_INCONSISTENT && get_ldev(mdev)) {
1451                 if (ns.peer == R_PRIMARY && mdev->ldev->md.uuid[UI_BITMAP] == 0) {
1452                         drbd_uuid_new_current(mdev);
1453                         drbd_send_uuids(mdev);
1454                 }
1455
1456                 /* D_DISKLESS Peer becomes secondary */
1457                 if (os.peer == R_PRIMARY && ns.peer == R_SECONDARY)
1458                         drbd_bitmap_io_from_worker(mdev, &drbd_bm_write, "demote diskless peer");
1459                 put_ldev(mdev);
1460         }
1461
1462         /* Write out all changed bits on demote.
1463          * Though, no need to do that just yet
1464          * if there is a resync going on still */
1465         if (os.role == R_PRIMARY && ns.role == R_SECONDARY &&
1466                 mdev->state.conn <= C_CONNECTED && get_ldev(mdev)) {
1467                 drbd_bitmap_io_from_worker(mdev, &drbd_bm_write, "demote");
1468                 put_ldev(mdev);
1469         }
1470
1471         /* Last part of the attaching process ... */
1472         if (ns.conn >= C_CONNECTED &&
1473             os.disk == D_ATTACHING && ns.disk == D_NEGOTIATING) {
1474                 drbd_send_sizes(mdev, 0, 0);  /* to start sync... */
1475                 drbd_send_uuids(mdev);
1476                 drbd_send_state(mdev);
1477         }
1478
1479         /* We want to pause/continue resync, tell peer. */
1480         if (ns.conn >= C_CONNECTED &&
1481              ((os.aftr_isp != ns.aftr_isp) ||
1482               (os.user_isp != ns.user_isp)))
1483                 drbd_send_state(mdev);
1484
1485         /* In case one of the isp bits got set, suspend other devices. */
1486         if ((!os.aftr_isp && !os.peer_isp && !os.user_isp) &&
1487             (ns.aftr_isp || ns.peer_isp || ns.user_isp))
1488                 suspend_other_sg(mdev);
1489
1490         /* Make sure the peer gets informed about any state changes
1491            (ISP bits) that happened while we were in WFReportParams. */
1492         if (os.conn == C_WF_REPORT_PARAMS && ns.conn >= C_CONNECTED)
1493                 drbd_send_state(mdev);
1494
1495         if (os.conn != C_AHEAD && ns.conn == C_AHEAD)
1496                 drbd_send_state(mdev);
1497
1498         /* We are in the process of starting a full sync... */
1499         if ((os.conn != C_STARTING_SYNC_T && ns.conn == C_STARTING_SYNC_T) ||
1500             (os.conn != C_STARTING_SYNC_S && ns.conn == C_STARTING_SYNC_S))
1501                 drbd_queue_bitmap_io(mdev, &drbd_bmio_set_n_write, &abw_start_sync, "set_n_write from StartingSync");
1502
1503         /* We are invalidating ourselves... */
1504         if (os.conn < C_CONNECTED && ns.conn < C_CONNECTED &&
1505             os.disk > D_INCONSISTENT && ns.disk == D_INCONSISTENT)
1506                 drbd_queue_bitmap_io(mdev, &drbd_bmio_set_n_write, NULL, "set_n_write from invalidate");
1507
1508         /* first half of local IO error, failure to attach,
1509          * or administrative detach */
1510         if (os.disk != D_FAILED && ns.disk == D_FAILED) {
1511                 enum drbd_io_error_p eh;
1512                 int was_io_error;
1513                 /* corresponding get_ldev was in __drbd_set_state, to serialize
1514                  * our cleanup here with the transition to D_DISKLESS,
1515                  * so it is safe to dereference ldev here. */
1516                 eh = mdev->ldev->dc.on_io_error;
1517                 was_io_error = test_and_clear_bit(WAS_IO_ERROR, &mdev->flags);
1518
1519                 /* current state still has to be D_FAILED,
1520                  * there is only one way out: to D_DISKLESS,
1521                  * and that may only happen after our put_ldev below. */
1522                 if (mdev->state.disk != D_FAILED)
1523                         dev_err(DEV,
1524                                 "ASSERT FAILED: disk is %s during detach\n",
1525                                 drbd_disk_str(mdev->state.disk));
1526
1527                 if (drbd_send_state(mdev))
1528                         dev_warn(DEV, "Notified peer that I am detaching my disk\n");
1529                 else
1530                         dev_err(DEV, "Sending state for detaching disk failed\n");
1531
1532                 drbd_rs_cancel_all(mdev);
1533
1534                 /* In case we want to get something to stable storage still,
1535                  * this may be the last chance.
1536                  * Following put_ldev may transition to D_DISKLESS. */
1537                 drbd_md_sync(mdev);
1538                 put_ldev(mdev);
1539
1540                 if (was_io_error && eh == EP_CALL_HELPER)
1541                         drbd_khelper(mdev, "local-io-error");
1542         }
1543
1544         /* second half of local IO error, failure to attach,
1545          * or administrative detach,
1546          * after local_cnt references have reached zero again */
1547         if (os.disk != D_DISKLESS && ns.disk == D_DISKLESS) {
1548                 /* We must still be diskless,
1549                  * re-attach has to be serialized with this! */
1550                 if (mdev->state.disk != D_DISKLESS)
1551                         dev_err(DEV,
1552                                 "ASSERT FAILED: disk is %s while going diskless\n",
1553                                 drbd_disk_str(mdev->state.disk));
1554
1555                 mdev->rs_total = 0;
1556                 mdev->rs_failed = 0;
1557                 atomic_set(&mdev->rs_pending_cnt, 0);
1558
1559                 if (drbd_send_state(mdev))
1560                         dev_warn(DEV, "Notified peer that I'm now diskless.\n");
1561                 else
1562                         dev_err(DEV, "Sending state for being diskless failed\n");
1563                 /* corresponding get_ldev in __drbd_set_state
1564                  * this may finally trigger drbd_ldev_destroy. */
1565                 put_ldev(mdev);
1566         }
1567
1568         /* Disks got bigger while they were detached */
1569         if (ns.disk > D_NEGOTIATING && ns.pdsk > D_NEGOTIATING &&
1570             test_and_clear_bit(RESYNC_AFTER_NEG, &mdev->flags)) {
1571                 if (ns.conn == C_CONNECTED)
1572                         resync_after_online_grow(mdev);
1573         }
1574
1575         /* A resync finished or aborted, wake paused devices... */
1576         if ((os.conn > C_CONNECTED && ns.conn <= C_CONNECTED) ||
1577             (os.peer_isp && !ns.peer_isp) ||
1578             (os.user_isp && !ns.user_isp))
1579                 resume_next_sg(mdev);
1580
1581         /* sync target done with resync.  Explicitly notify peer, even though
1582          * it should (at least for non-empty resyncs) already know itself. */
1583         if (os.disk < D_UP_TO_DATE && os.conn >= C_SYNC_SOURCE && ns.conn == C_CONNECTED)
1584                 drbd_send_state(mdev);
1585
1586         if (os.conn > C_CONNECTED && ns.conn <= C_CONNECTED)
1587                 drbd_queue_bitmap_io(mdev, &drbd_bm_write, NULL, "write from resync_finished");
1588
1589         /* free tl_hash if we got thawed and are C_STANDALONE */
1590         if (ns.conn == C_STANDALONE && !is_susp(ns) && mdev->tl_hash)
1591                 drbd_free_tl_hash(mdev);
1592
1593         /* Upon network configuration, we need to start the receiver */
1594         if (os.conn == C_STANDALONE && ns.conn == C_UNCONNECTED)
1595                 drbd_thread_start(&mdev->receiver);
1596
1597         /* Terminate worker thread if we are unconfigured - it will be
1598            restarted as needed... */
1599         if (ns.disk == D_DISKLESS &&
1600             ns.conn == C_STANDALONE &&
1601             ns.role == R_SECONDARY) {
1602                 if (os.aftr_isp != ns.aftr_isp)
1603                         resume_next_sg(mdev);
1604                 /* set in __drbd_set_state, unless CONFIG_PENDING was set */
1605                 if (test_bit(DEVICE_DYING, &mdev->flags))
1606                         drbd_thread_stop_nowait(&mdev->worker);
1607         }
1608
1609         drbd_md_sync(mdev);
1610 }
1611
1612
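/* Common kthread entry point for all DRBD threads: runs thi->function,
 * restarts it if the thread state was meanwhile set to Restarting, and
 * drops the module reference taken in drbd_thread_start() on exit. */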
1613 static int drbd_thread_setup(void *arg)
1614 {
1615         struct drbd_thread *thi = (struct drbd_thread *) arg;
1616         struct drbd_conf *mdev = thi->mdev;
1617         unsigned long flags;
1618         int retval;
1619
1620 restart:
1621         retval = thi->function(thi);
1622
1623         spin_lock_irqsave(&thi->t_lock, flags);
1624
1625         /* if the receiver has been "Exiting", the last thing it did
1626          * was set the conn state to "StandAlone",
1627          * if now a re-connect request comes in, conn state goes C_UNCONNECTED,
1628          * and receiver thread will be "started".
1629          * drbd_thread_start needs to set "Restarting" in that case.
1630          * t_state check and assignment needs to be within the same spinlock,
1631          * so either thread_start sees Exiting, and can remap to Restarting,
1632          * or thread_start sees None, and can proceed as normal.
1633          */
1634
1635         if (thi->t_state == Restarting) {
1636                 dev_info(DEV, "Restarting %s\n", current->comm);
1637                 thi->t_state = Running;
1638                 spin_unlock_irqrestore(&thi->t_lock, flags);
1639                 goto restart;
1640         }
1641
1642         thi->task = NULL;
1643         thi->t_state = None;
1644         smp_mb();
1645         complete(&thi->stop);
1646         spin_unlock_irqrestore(&thi->t_lock, flags);
1647
1648         dev_info(DEV, "Terminating %s\n", current->comm);
1649
1650         /* Release mod reference taken when thread was started */
1651         module_put(THIS_MODULE);
1652         return retval;
1653 }
1654
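/* Initialize a drbd_thread structure; the actual kernel thread is created
 * later by drbd_thread_start(). */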
1655 static void drbd_thread_init(struct drbd_conf *mdev, struct drbd_thread *thi,
1656                       int (*func) (struct drbd_thread *))
1657 {
1658         spin_lock_init(&thi->t_lock);
1659         thi->task    = NULL;
1660         thi->t_state = None;
1661         thi->function = func;
1662         thi->mdev = mdev;
1663 }
1664
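/**
 * drbd_thread_start() - Start (or schedule a restart of) a DRBD thread
 * @thi:        receiver, asender or worker thread of a DRBD device.
 *
 * If the thread is not running, take a module reference and create the
 * kthread; if it is currently Exiting, mark it Restarting so that
 * drbd_thread_setup() loops once more.  Returns true on success.
 */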
1665 int drbd_thread_start(struct drbd_thread *thi)
1666 {
1667         struct drbd_conf *mdev = thi->mdev;
1668         struct task_struct *nt;
1669         unsigned long flags;
1670
1671         const char *me =
1672                 thi == &mdev->receiver ? "receiver" :
1673                 thi == &mdev->asender  ? "asender"  :
1674                 thi == &mdev->worker   ? "worker"   : "NONSENSE";
1675
1676         /* is used from state engine doing drbd_thread_stop_nowait,
1677          * while holding the req lock irqsave */
1678         spin_lock_irqsave(&thi->t_lock, flags);
1679
1680         switch (thi->t_state) {
1681         case None:
1682                 dev_info(DEV, "Starting %s thread (from %s [%d])\n",
1683                                 me, current->comm, current->pid);
1684
1685                 /* Get ref on module for thread - this is released when thread exits */
1686                 if (!try_module_get(THIS_MODULE)) {
1687                         dev_err(DEV, "Failed to get module reference in drbd_thread_start\n");
1688                         spin_unlock_irqrestore(&thi->t_lock, flags);
1689                         return false;
1690                 }
1691
1692                 init_completion(&thi->stop);
1693                 D_ASSERT(thi->task == NULL);
1694                 thi->reset_cpu_mask = 1;
1695                 thi->t_state = Running;
1696                 spin_unlock_irqrestore(&thi->t_lock, flags);
1697                 flush_signals(current); /* otherwise we may get -ERESTARTNOINTR */
1698
1699                 nt = kthread_create(drbd_thread_setup, (void *) thi,
1700                                     "drbd%d_%s", mdev_to_minor(mdev), me);
1701
1702                 if (IS_ERR(nt)) {
1703                         dev_err(DEV, "Couldn't start thread\n");
1704
1705                         module_put(THIS_MODULE);
1706                         return false;
1707                 }
1708                 spin_lock_irqsave(&thi->t_lock, flags);
1709                 thi->task = nt;
1710                 thi->t_state = Running;
1711                 spin_unlock_irqrestore(&thi->t_lock, flags);
1712                 wake_up_process(nt);
1713                 break;
1714         case Exiting:
1715                 thi->t_state = Restarting;
1716                 dev_info(DEV, "Restarting %s thread (from %s [%d])\n",
1717                                 me, current->comm, current->pid);
1718                 /* fall through */
1719         case Running:
1720         case Restarting:
1721         default:
1722                 spin_unlock_irqrestore(&thi->t_lock, flags);
1723                 break;
1724         }
1725
1726         return true;
1727 }
1728
1729
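/* Ask a DRBD thread to leave its main loop: set its state to Exiting (or
 * Restarting), kick it with DRBD_SIGKILL, and optionally wait for it to
 * complete.  If it is not running at all, a requested restart simply
 * starts it. */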
1730 void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait)
1731 {
1732         unsigned long flags;
1733
1734         enum drbd_thread_state ns = restart ? Restarting : Exiting;
1735
1736         /* may be called from state engine, holding the req lock irqsave */
1737         spin_lock_irqsave(&thi->t_lock, flags);
1738
1739         if (thi->t_state == None) {
1740                 spin_unlock_irqrestore(&thi->t_lock, flags);
1741                 if (restart)
1742                         drbd_thread_start(thi);
1743                 return;
1744         }
1745
1746         if (thi->t_state != ns) {
1747                 if (thi->task == NULL) {
1748                         spin_unlock_irqrestore(&thi->t_lock, flags);
1749                         return;
1750                 }
1751
1752                 thi->t_state = ns;
1753                 smp_mb();
1754                 init_completion(&thi->stop);
1755                 if (thi->task != current)
1756                         force_sig(DRBD_SIGKILL, thi->task);
1757
1758         }
1759
1760         spin_unlock_irqrestore(&thi->t_lock, flags);
1761
1762         if (wait)
1763                 wait_for_completion(&thi->stop);
1764 }
1765
1766 #ifdef CONFIG_SMP
1767 /**
1768  * drbd_calc_cpu_mask() - Generate CPU masks, spread over all CPUs
1769  * @mdev:       DRBD device.
1770  *
1771  * Forces all threads of a device onto the same CPU. This is beneficial for
1772  * DRBD's performance. May be overridden by the user's configuration.
1773  */
1774 void drbd_calc_cpu_mask(struct drbd_conf *mdev)
1775 {
1776         int ord, cpu;
1777
1778         /* user override. */
1779         if (cpumask_weight(mdev->cpu_mask))
1780                 return;
1781
1782         ord = mdev_to_minor(mdev) % cpumask_weight(cpu_online_mask);
1783         for_each_online_cpu(cpu) {
1784                 if (ord-- == 0) {
1785                         cpumask_set_cpu(cpu, mdev->cpu_mask);
1786                         return;
1787                 }
1788         }
1789         /* should not be reached */
1790         cpumask_setall(mdev->cpu_mask);
1791 }
1792
1793 /**
1794  * drbd_thread_current_set_cpu() - modifies the cpu mask of the _current_ thread
1795  * @mdev:       DRBD device.
1796  *
1797  * Call this in the "main loop" of _all_ threads; no mutex is needed, since current
1798  * won't die prematurely.
1799  */
1800 void drbd_thread_current_set_cpu(struct drbd_conf *mdev)
1801 {
1802         struct task_struct *p = current;
1803         struct drbd_thread *thi =
1804                 p == mdev->asender.task  ? &mdev->asender  :
1805                 p == mdev->receiver.task ? &mdev->receiver :
1806                 p == mdev->worker.task   ? &mdev->worker   :
1807                 NULL;
1808         ERR_IF(thi == NULL)
1809                 return;
1810         if (!thi->reset_cpu_mask)
1811                 return;
1812         thi->reset_cpu_mask = 0;
1813         set_cpus_allowed_ptr(p, mdev->cpu_mask);
1814 }
1815 #endif
1816
1817 /* the appropriate socket mutex must be held already */
1818 int _drbd_send_cmd(struct drbd_conf *mdev, struct socket *sock,
1819                           enum drbd_packets cmd, struct p_header80 *h,
1820                           size_t size, unsigned msg_flags)
1821 {
1822         int sent, ok;
1823
1824         ERR_IF(!h) return false;
1825         ERR_IF(!size) return false;
1826
1827         h->magic   = BE_DRBD_MAGIC;
1828         h->command = cpu_to_be16(cmd);
1829         h->length  = cpu_to_be16(size-sizeof(struct p_header80));
1830
1831         sent = drbd_send(mdev, sock, h, size, msg_flags);
1832
1833         ok = (sent == size);
1834         if (!ok)
1835                 dev_err(DEV, "short sent %s size=%d sent=%d\n",
1836                     cmdname(cmd), (int)size, sent);
1837         return ok;
1838 }
1839
1840 /* don't pass the socket. we may only look at it
1841  * when we hold the appropriate socket mutex.
1842  */
1843 int drbd_send_cmd(struct drbd_conf *mdev, int use_data_socket,
1844                   enum drbd_packets cmd, struct p_header80 *h, size_t size)
1845 {
1846         int ok = 0;
1847         struct socket *sock;
1848
1849         if (use_data_socket) {
1850                 mutex_lock(&mdev->data.mutex);
1851                 sock = mdev->data.socket;
1852         } else {
1853                 mutex_lock(&mdev->meta.mutex);
1854                 sock = mdev->meta.socket;
1855         }
1856
1857         /* drbd_disconnect() could have called drbd_free_sock()
1858          * while we were waiting in down()... */
1859         if (likely(sock != NULL))
1860                 ok = _drbd_send_cmd(mdev, sock, cmd, h, size, 0);
1861
1862         if (use_data_socket)
1863                 mutex_unlock(&mdev->data.mutex);
1864         else
1865                 mutex_unlock(&mdev->meta.mutex);
1866         return ok;
1867 }
1868
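/* Send a p_header80 followed by @size bytes of @data over the data socket. */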
1869 int drbd_send_cmd2(struct drbd_conf *mdev, enum drbd_packets cmd, char *data,
1870                    size_t size)
1871 {
1872         struct p_header80 h;
1873         int ok;
1874
1875         h.magic   = BE_DRBD_MAGIC;
1876         h.command = cpu_to_be16(cmd);
1877         h.length  = cpu_to_be16(size);
1878
1879         if (!drbd_get_data_sock(mdev))
1880                 return 0;
1881
1882         ok = (sizeof(h) ==
1883                 drbd_send(mdev, mdev->data.socket, &h, sizeof(h), 0));
1884         ok = ok && (size ==
1885                 drbd_send(mdev, mdev->data.socket, data, size, 0));
1886
1887         drbd_put_data_sock(mdev);
1888
1889         return ok;
1890 }
1891
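/* Send the syncer configuration (resync rate, dynamic resync controller
 * settings, verify and csums algorithms) to the peer; the packet size
 * depends on the agreed protocol version. */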
1892 int drbd_send_sync_param(struct drbd_conf *mdev, struct syncer_conf *sc)
1893 {
1894         struct p_rs_param_95 *p;
1895         struct socket *sock;
1896         int size, rv;
1897         const int apv = mdev->agreed_pro_version;
1898
1899         size = apv <= 87 ? sizeof(struct p_rs_param)
1900                 : apv == 88 ? sizeof(struct p_rs_param)
1901                         + strlen(mdev->sync_conf.verify_alg) + 1
1902                 : apv <= 94 ? sizeof(struct p_rs_param_89)
1903                 : /* apv >= 95 */ sizeof(struct p_rs_param_95);
1904
1905         /* used from admin command context and receiver/worker context.
1906          * to avoid kmalloc, grab the socket right here,
1907          * then use the pre-allocated sbuf there */
1908         mutex_lock(&mdev->data.mutex);
1909         sock = mdev->data.socket;
1910
1911         if (likely(sock != NULL)) {
1912                 enum drbd_packets cmd = apv >= 89 ? P_SYNC_PARAM89 : P_SYNC_PARAM;
1913
1914                 p = &mdev->data.sbuf.rs_param_95;
1915
1916                 /* initialize verify_alg and csums_alg */
1917                 memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);
1918
1919                 p->rate = cpu_to_be32(sc->rate);
1920                 p->c_plan_ahead = cpu_to_be32(sc->c_plan_ahead);
1921                 p->c_delay_target = cpu_to_be32(sc->c_delay_target);
1922                 p->c_fill_target = cpu_to_be32(sc->c_fill_target);
1923                 p->c_max_rate = cpu_to_be32(sc->c_max_rate);
1924
1925                 if (apv >= 88)
1926                         strcpy(p->verify_alg, mdev->sync_conf.verify_alg);
1927                 if (apv >= 89)
1928                         strcpy(p->csums_alg, mdev->sync_conf.csums_alg);
1929
1930                 rv = _drbd_send_cmd(mdev, sock, cmd, &p->head, size, 0);
1931         } else
1932                 rv = 0; /* not ok */
1933
1934         mutex_unlock(&mdev->data.mutex);
1935
1936         return rv;
1937 }
1938
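/* Send the negotiated connection parameters (wire protocol, after-split-brain
 * policies, two-primaries setting, connection flags and, with protocol >= 87,
 * the integrity algorithm) to the peer. */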
1939 int drbd_send_protocol(struct drbd_conf *mdev)
1940 {
1941         struct p_protocol *p;
1942         int size, cf, rv;
1943
1944         size = sizeof(struct p_protocol);
1945
1946         if (mdev->agreed_pro_version >= 87)
1947                 size += strlen(mdev->net_conf->integrity_alg) + 1;
1948
1949         /* we must not recurse into our own queue,
1950          * as that is blocked during handshake */
1951         p = kmalloc(size, GFP_NOIO);
1952         if (p == NULL)
1953                 return 0;
1954
1955         p->protocol      = cpu_to_be32(mdev->net_conf->wire_protocol);
1956         p->after_sb_0p   = cpu_to_be32(mdev->net_conf->after_sb_0p);
1957         p->after_sb_1p   = cpu_to_be32(mdev->net_conf->after_sb_1p);
1958         p->after_sb_2p   = cpu_to_be32(mdev->net_conf->after_sb_2p);
1959         p->two_primaries = cpu_to_be32(mdev->net_conf->two_primaries);
1960
1961         cf = 0;
1962         if (mdev->net_conf->want_lose)
1963                 cf |= CF_WANT_LOSE;
1964         if (mdev->net_conf->dry_run) {
1965                 if (mdev->agreed_pro_version >= 92)
1966                         cf |= CF_DRY_RUN;
1967                 else {
1968                         dev_err(DEV, "--dry-run is not supported by peer");
1969                         kfree(p);
1970                         return 0;
1971                 }
1972         }
1973         p->conn_flags    = cpu_to_be32(cf);
1974
1975         if (mdev->agreed_pro_version >= 87)
1976                 strcpy(p->integrity_alg, mdev->net_conf->integrity_alg);
1977
1978         rv = drbd_send_cmd(mdev, USE_DATA_SOCKET, P_PROTOCOL,
1979                            (struct p_header80 *)p, size);
1980         kfree(p);
1981         return rv;
1982 }
1983
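/* Send our UUID set, the number of bits currently set in the bitmap, and
 * the given UUID flags to the peer (P_UUIDS). */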
1984 int _drbd_send_uuids(struct drbd_conf *mdev, u64 uuid_flags)
1985 {
1986         struct p_uuids p;
1987         int i;
1988
1989         if (!get_ldev_if_state(mdev, D_NEGOTIATING))
1990                 return 1;
1991
1992         for (i = UI_CURRENT; i < UI_SIZE; i++)
1993                 p.uuid[i] = mdev->ldev ? cpu_to_be64(mdev->ldev->md.uuid[i]) : 0;
1994
1995         mdev->comm_bm_set = drbd_bm_total_weight(mdev);
1996         p.uuid[UI_SIZE] = cpu_to_be64(mdev->comm_bm_set);
1997         uuid_flags |= mdev->net_conf->want_lose ? 1 : 0;
1998         uuid_flags |= test_bit(CRASHED_PRIMARY, &mdev->flags) ? 2 : 0;
1999         uuid_flags |= mdev->new_state_tmp.disk == D_INCONSISTENT ? 4 : 0;
2000         p.uuid[UI_FLAGS] = cpu_to_be64(uuid_flags);
2001
2002         put_ldev(mdev);
2003
2004         return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_UUIDS,
2005                              (struct p_header80 *)&p, sizeof(p));
2006 }
2007
2008 int drbd_send_uuids(struct drbd_conf *mdev)
2009 {
2010         return _drbd_send_uuids(mdev, 0);
2011 }
2012
2013 int drbd_send_uuids_skip_initial_sync(struct drbd_conf *mdev)
2014 {
2015         return _drbd_send_uuids(mdev, 8);
2016 }
2017
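/* Generate a fresh random bitmap UUID, write it to the meta data and send
 * it to the peer as the sync UUID (P_SYNC_UUID). */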
2018 int drbd_gen_and_send_sync_uuid(struct drbd_conf *mdev)
2019 {
2020         struct p_rs_uuid p;
2021         u64 uuid;
2022
2023         D_ASSERT(mdev->state.disk == D_UP_TO_DATE);
2024
2025         get_random_bytes(&uuid, sizeof(u64));
2026         drbd_uuid_set(mdev, UI_BITMAP, uuid);
2027         drbd_md_sync(mdev);
2028         p.uuid = cpu_to_be64(uuid);
2029
2030         return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_SYNC_UUID,
2031                              (struct p_header80 *)&p, sizeof(p));
2032 }
2033
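/* Report our backing device size, the configured user size, the current
 * capacity and queue limits to the peer (P_SIZES).  With @trigger_reply set,
 * the current capacity is reported as zero. */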
2034 int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags flags)
2035 {
2036         struct p_sizes p;
2037         sector_t d_size, u_size;
2038         int q_order_type;
2039         int ok;
2040
2041         if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
2042                 D_ASSERT(mdev->ldev->backing_bdev);
2043                 d_size = drbd_get_max_capacity(mdev->ldev);
2044                 u_size = mdev->ldev->dc.disk_size;
2045                 q_order_type = drbd_queue_order_type(mdev);
2046                 put_ldev(mdev);
2047         } else {
2048                 d_size = 0;
2049                 u_size = 0;
2050                 q_order_type = QUEUE_ORDERED_NONE;
2051         }
2052
2053         p.d_size = cpu_to_be64(d_size);
2054         p.u_size = cpu_to_be64(u_size);
2055         p.c_size = cpu_to_be64(trigger_reply ? 0 : drbd_get_capacity(mdev->this_bdev));
2056         p.max_bio_size = cpu_to_be32(queue_max_hw_sectors(mdev->rq_queue) << 9);
2057         p.queue_order_type = cpu_to_be16(q_order_type);
2058         p.dds_flags = cpu_to_be16(flags);
2059
2060         ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, P_SIZES,
2061                            (struct p_header80 *)&p, sizeof(p));
2062         return ok;
2063 }
2064
2065 /**
2066  * drbd_send_state() - Sends the drbd state to the peer
2067  * @mdev:       DRBD device.
2068  */
2069 int drbd_send_state(struct drbd_conf *mdev)
2070 {
2071         struct socket *sock;
2072         struct p_state p;
2073         int ok = 0;
2074
2075         /* Grab state lock so we won't send state if we're in the middle
2076          * of a cluster wide state change on another thread */
2077         drbd_state_lock(mdev);
2078
2079         mutex_lock(&mdev->data.mutex);
2080
2081         p.state = cpu_to_be32(mdev->state.i); /* Within the send mutex */
2082         sock = mdev->data.socket;
2083
2084         if (likely(sock != NULL)) {
2085                 ok = _drbd_send_cmd(mdev, sock, P_STATE,
2086                                     (struct p_header80 *)&p, sizeof(p), 0);
2087         }
2088
2089         mutex_unlock(&mdev->data.mutex);
2090
2091         drbd_state_unlock(mdev);
2092         return ok;
2093 }
2094
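/* Ask the peer to take part in a state change, described as a mask/value
 * pair (P_STATE_CHG_REQ). */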
2095 int drbd_send_state_req(struct drbd_conf *mdev,
2096         union drbd_state mask, union drbd_state val)
2097 {
2098         struct p_req_state p;
2099
2100         p.mask    = cpu_to_be32(mask.i);
2101         p.val     = cpu_to_be32(val.i);
2102
2103         return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_STATE_CHG_REQ,
2104                              (struct p_header80 *)&p, sizeof(p));
2105 }
2106
2107 int drbd_send_sr_reply(struct drbd_conf *mdev, enum drbd_state_rv retcode)
2108 {
2109         struct p_req_state_reply p;
2110
2111         p.retcode    = cpu_to_be32(retcode);
2112
2113         return drbd_send_cmd(mdev, USE_META_SOCKET, P_STATE_CHG_REPLY,
2114                              (struct p_header80 *)&p, sizeof(p));
2115 }
2116
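/* Run-length encode a chunk of the bitmap into @p using VLI codes.
 * Returns the number of code bytes written, 0 if RLE is not to be used or
 * did not compress well enough, or -1 if the bitmap changed under us. */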
2117 int fill_bitmap_rle_bits(struct drbd_conf *mdev,
2118         struct p_compressed_bm *p,
2119         struct bm_xfer_ctx *c)
2120 {
2121         struct bitstream bs;
2122         unsigned long plain_bits;
2123         unsigned long tmp;
2124         unsigned long rl;
2125         unsigned len;
2126         unsigned toggle;
2127         int bits;
2128
2129         /* may we use this feature? */
2130         if ((mdev->sync_conf.use_rle == 0) ||
2131                 (mdev->agreed_pro_version < 90))
2132                         return 0;
2133
2134         if (c->bit_offset >= c->bm_bits)
2135                 return 0; /* nothing to do. */
2136
2137         /* use at most this many bytes */
2138         bitstream_init(&bs, p->code, BM_PACKET_VLI_BYTES_MAX, 0);
2139         memset(p->code, 0, BM_PACKET_VLI_BYTES_MAX);
2140         /* plain bits covered in this code string */
2141         plain_bits = 0;
2142
2143         /* p->encoding & 0x80 stores whether the first run length is set.
2144          * bit offset is implicit.
2145          * start with toggle == 2 to be able to tell the first iteration */
2146         toggle = 2;
2147
2148         /* see how many plain bits we can stuff into one packet
2149          * using RLE and VLI. */
2150         do {
2151                 tmp = (toggle == 0) ? _drbd_bm_find_next_zero(mdev, c->bit_offset)
2152                                     : _drbd_bm_find_next(mdev, c->bit_offset);
2153                 if (tmp == -1UL)
2154                         tmp = c->bm_bits;
2155                 rl = tmp - c->bit_offset;
2156
2157                 if (toggle == 2) { /* first iteration */
2158                         if (rl == 0) {
2159                                 /* the first checked bit was set,
2160                                  * store start value, */
2161                                 DCBP_set_start(p, 1);
2162                                 /* but skip encoding of zero run length */
2163                                 toggle = !toggle;
2164                                 continue;
2165                         }
2166                         DCBP_set_start(p, 0);
2167                 }
2168
2169                 /* paranoia: catch zero runlength.
2170                  * can only happen if bitmap is modified while we scan it. */
2171                 if (rl == 0) {
2172                         dev_err(DEV, "unexpected zero runlength while encoding bitmap "
2173                             "t:%u bo:%lu\n", toggle, c->bit_offset);
2174                         return -1;
2175                 }
2176
2177                 bits = vli_encode_bits(&bs, rl);
2178                 if (bits == -ENOBUFS) /* buffer full */
2179                         break;
2180                 if (bits <= 0) {
2181                         dev_err(DEV, "error while encoding bitmap: %d\n", bits);
2182                         return 0;
2183                 }
2184
2185                 toggle = !toggle;
2186                 plain_bits += rl;
2187                 c->bit_offset = tmp;
2188         } while (c->bit_offset < c->bm_bits);
2189
2190         len = bs.cur.b - p->code + !!bs.cur.bit;
2191
2192         if (plain_bits < (len << 3)) {
2193                 /* incompressible with this method.
2194                  * we need to rewind both word and bit position. */
2195                 c->bit_offset -= plain_bits;
2196                 bm_xfer_ctx_bit_to_word_offset(c);
2197                 c->bit_offset = c->word_offset * BITS_PER_LONG;
2198                 return 0;
2199         }
2200
2201         /* RLE + VLI was able to compress it just fine.
2202          * update c->word_offset. */
2203         bm_xfer_ctx_bit_to_word_offset(c);
2204
2205         /* store pad_bits */
2206         DCBP_set_pad_bits(p, (8 - bs.cur.bit) & 0x7);
2207
2208         return len;
2209 }
2210
2211 /**
2212  * send_bitmap_rle_or_plain
2213  *
2214  * Return 0 when done, 1 when another iteration is needed, and a negative error
2215  * code upon failure.
2216  */
2217 static int
2218 send_bitmap_rle_or_plain(struct drbd_conf *mdev,
2219                          struct p_header80 *h, struct bm_xfer_ctx *c)
2220 {
2221         struct p_compressed_bm *p = (void*)h;
2222         unsigned long num_words;
2223         int len;
2224         int ok;
2225
2226         len = fill_bitmap_rle_bits(mdev, p, c);
2227
2228         if (len < 0)
2229                 return -EIO;
2230
2231         if (len) {
2232                 DCBP_set_code(p, RLE_VLI_Bits);
2233                 ok = _drbd_send_cmd(mdev, mdev->data.socket, P_COMPRESSED_BITMAP, h,
2234                         sizeof(*p) + len, 0);
2235
2236                 c->packets[0]++;
2237                 c->bytes[0] += sizeof(*p) + len;
2238
2239                 if (c->bit_offset >= c->bm_bits)
2240                         len = 0; /* DONE */
2241         } else {
2242                 /* was not compressible.
2243                  * send a buffer full of plain text bits instead. */
2244                 num_words = min_t(size_t, BM_PACKET_WORDS, c->bm_words - c->word_offset);
2245                 len = num_words * sizeof(long);
2246                 if (len)
2247                         drbd_bm_get_lel(mdev, c->word_offset, num_words, (unsigned long*)h->payload);
2248                 ok = _drbd_send_cmd(mdev, mdev->data.socket, P_BITMAP,
2249                                    h, sizeof(struct p_header80) + len, 0);
2250                 c->word_offset += num_words;
2251                 c->bit_offset = c->word_offset * BITS_PER_LONG;
2252
2253                 c->packets[1]++;
2254                 c->bytes[1] += sizeof(struct p_header80) + len;
2255
2256                 if (c->bit_offset > c->bm_bits)
2257                         c->bit_offset = c->bm_bits;
2258         }
2259         if (ok) {
2260                 if (len == 0) {
2261                         INFO_bm_xfer_stats(mdev, "send", c);
2262                         return 0;
2263                 } else
2264                         return 1;
2265         }
2266         return -EIO;
2267 }
2268
2269 /* See the comment at receive_bitmap() */
2270 int _drbd_send_bitmap(struct drbd_conf *mdev)
2271 {
2272         struct bm_xfer_ctx c;
2273         struct p_header80 *p;
2274         int err;
2275
2276         ERR_IF(!mdev->bitmap) return false;
2277
2278         /* maybe we should use some per thread scratch page,
2279          * and allocate that during initial device creation? */
2280         p = (struct p_header80 *) __get_free_page(GFP_NOIO);
2281         if (!p) {
2282                 dev_err(DEV, "failed to allocate one page buffer in %s\n", __func__);
2283                 return false;
2284         }
2285
2286         if (get_ldev(mdev)) {
2287                 if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC)) {
2288                         dev_info(DEV, "Writing the whole bitmap, MDF_FullSync was set.\n");
2289                         drbd_bm_set_all(mdev);
2290                         if (drbd_bm_write(mdev)) {
2291                                 /* write_bm did fail! Leave full sync flag set in Meta P_DATA
2292                                  * but otherwise process as per normal - need to tell other
2293                                  * side that a full resync is required! */
2294                                 dev_err(DEV, "Failed to write bitmap to disk!\n");
2295                         } else {
2296                                 drbd_md_clear_flag(mdev, MDF_FULL_SYNC);
2297                                 drbd_md_sync(mdev);
2298                         }
2299                 }
2300                 put_ldev(mdev);
2301         }
2302
2303         c = (struct bm_xfer_ctx) {
2304                 .bm_bits = drbd_bm_bits(mdev),
2305                 .bm_words = drbd_bm_words(mdev),
2306         };
2307
2308         do {
2309                 err = send_bitmap_rle_or_plain(mdev, p, &c);
2310         } while (err > 0);
2311
2312         free_page((unsigned long) p);
2313         return err == 0;
2314 }
2315
2316 int drbd_send_bitmap(struct drbd_conf *mdev)
2317 {
2318         int err;
2319
2320         if (!drbd_get_data_sock(mdev))
2321                 return -1;
2322         err = !_drbd_send_bitmap(mdev);
2323         drbd_put_data_sock(mdev);
2324         return err;
2325 }
2326
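/* Acknowledge a barrier: tell the peer how many write requests of epoch
 * @barrier_nr we have committed to disk (P_BARRIER_ACK). */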
2327 int drbd_send_b_ack(struct drbd_conf *mdev, u32 barrier_nr, u32 set_size)
2328 {
2329         int ok;
2330         struct p_barrier_ack p;
2331
2332         p.barrier  = barrier_nr;
2333         p.set_size = cpu_to_be32(set_size);
2334
2335         if (mdev->state.conn < C_CONNECTED)
2336                 return false;
2337         ok = drbd_send_cmd(mdev, USE_META_SOCKET, P_BARRIER_ACK,
2338                         (struct p_header80 *)&p, sizeof(p));
2339         return ok;
2340 }
2341
2342 /**
2343  * _drbd_send_ack() - Sends an ack packet
2344  * @mdev:       DRBD device.
2345  * @cmd:        Packet command code.
2346  * @sector:     sector, needs to be in big endian byte order
2347  * @blksize:    size in byte, needs to be in big endian byte order
2348  * @block_id:   Id, big endian byte order
2349  */
2350 static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packets cmd,
2351                           u64 sector,
2352                           u32 blksize,
2353                           u64 block_id)
2354 {
2355         int ok;
2356         struct p_block_ack p;
2357
2358         p.sector   = sector;
2359         p.block_id = block_id;
2360         p.blksize  = blksize;
2361         p.seq_num  = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
2362
2363         if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED)
2364                 return false;
2365         ok = drbd_send_cmd(mdev, USE_META_SOCKET, cmd,
2366                                 (struct p_header80 *)&p, sizeof(p));
2367         return ok;
2368 }
2369
2370 /* dp->sector and dp->block_id already/still in network byte order,
2371  * data_size is payload size according to dp->head,
2372  * and may need to be corrected for digest size. */
2373 int drbd_send_ack_dp(struct drbd_conf *mdev, enum drbd_packets cmd,
2374                      struct p_data *dp, int data_size)
2375 {
2376         data_size -= (mdev->agreed_pro_version >= 87 && mdev->integrity_r_tfm) ?
2377                 crypto_hash_digestsize(mdev->integrity_r_tfm) : 0;
2378         return _drbd_send_ack(mdev, cmd, dp->sector, cpu_to_be32(data_size),
2379                               dp->block_id);
2380 }
2381
2382 int drbd_send_ack_rp(struct drbd_conf *mdev, enum drbd_packets cmd,
2383                      struct p_block_req *rp)
2384 {
2385         return _drbd_send_ack(mdev, cmd, rp->sector, rp->blksize, rp->block_id);
2386 }
2387
2388 /**
2389  * drbd_send_ack() - Sends an ack packet
2390  * @mdev:       DRBD device.
2391  * @cmd:        Packet command code.
2392  * @e:          Epoch entry.
2393  */
2394 int drbd_send_ack(struct drbd_conf *mdev,
2395         enum drbd_packets cmd, struct drbd_epoch_entry *e)
2396 {
2397         return _drbd_send_ack(mdev, cmd,
2398                               cpu_to_be64(e->sector),
2399                               cpu_to_be32(e->size),
2400                               e->block_id);
2401 }
2402
2403 /* This function misuses the block_id field to signal if the blocks
2404  * are in sync or not. */
2405 int drbd_send_ack_ex(struct drbd_conf *mdev, enum drbd_packets cmd,
2406                      sector_t sector, int blksize, u64 block_id)
2407 {
2408         return _drbd_send_ack(mdev, cmd,
2409                               cpu_to_be64(sector),
2410                               cpu_to_be32(blksize),
2411                               cpu_to_be64(block_id));
2412 }
2413
2414 int drbd_send_drequest(struct drbd_conf *mdev, int cmd,
2415                        sector_t sector, int size, u64 block_id)
2416 {
2417         int ok;
2418         struct p_block_req p;
2419
2420         p.sector   = cpu_to_be64(sector);
2421         p.block_id = block_id;
2422         p.blksize  = cpu_to_be32(size);
2423
2424         ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, cmd,
2425                                 (struct p_header80 *)&p, sizeof(p));
2426         return ok;
2427 }
2428
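/* Send a block request packet followed by @digest_size bytes of checksum
 * data over the data socket (used for checksum based resync and online
 * verify). */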
2429 int drbd_send_drequest_csum(struct drbd_conf *mdev,
2430                             sector_t sector, int size,
2431                             void *digest, int digest_size,
2432                             enum drbd_packets cmd)
2433 {
2434         int ok;
2435         struct p_block_req p;
2436
2437         p.sector   = cpu_to_be64(sector);
2438         p.block_id = BE_DRBD_MAGIC + 0xbeef;
2439         p.blksize  = cpu_to_be32(size);
2440
2441         p.head.magic   = BE_DRBD_MAGIC;
2442         p.head.command = cpu_to_be16(cmd);
2443         p.head.length  = cpu_to_be16(sizeof(p) - sizeof(struct p_header80) + digest_size);
2444
2445         mutex_lock(&mdev->data.mutex);
2446
2447         ok = (sizeof(p) == drbd_send(mdev, mdev->data.socket, &p, sizeof(p), 0));
2448         ok = ok && (digest_size == drbd_send(mdev, mdev->data.socket, digest, digest_size, 0));
2449
2450         mutex_unlock(&mdev->data.mutex);
2451
2452         return ok;
2453 }
2454
2455 int drbd_send_ov_request(struct drbd_conf *mdev, sector_t sector, int size)
2456 {
2457         int ok;
2458         struct p_block_req p;
2459
2460         p.sector   = cpu_to_be64(sector);
2461         p.block_id = BE_DRBD_MAGIC + 0xbabe;
2462         p.blksize  = cpu_to_be32(size);
2463
2464         ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, P_OV_REQUEST,
2465                            (struct p_header80 *)&p, sizeof(p));
2466         return ok;
2467 }
2468
2469 /* called on sndtimeo
2470  * returns false if we should retry,
2471  * true if we think connection is dead
2472  */
2473 static int we_should_drop_the_connection(struct drbd_conf *mdev, struct socket *sock)
2474 {
2475         int drop_it;
2476         /* long elapsed = (long)(jiffies - mdev->last_received); */
2477
2478         drop_it =   mdev->meta.socket == sock
2479                 || !mdev->asender.task
2480                 || get_t_state(&mdev->asender) != Running
2481                 || mdev->state.conn < C_CONNECTED;
2482
2483         if (drop_it)
2484                 return true;
2485
2486         drop_it = !--mdev->ko_count;
2487         if (!drop_it) {
2488                 dev_err(DEV, "[%s/%d] sock_sendmsg time expired, ko = %u\n",
2489                        current->comm, current->pid, mdev->ko_count);
2490                 request_ping(mdev);
2491         }
2492
2493         return drop_it; /* && (mdev->state == R_PRIMARY) */;
2494 }
2495
2496 /* The idea of sendpage seems to be to put some kind of reference
2497  * to the page into the skb, and to hand it over to the NIC. In
2498  * this process get_page() gets called.
2499  *
2500  * As soon as the page was really sent over the network put_page()
2501  * gets called by some part of the network layer. [ NIC driver? ]
2502  *
2503  * [ get_page() / put_page() increment/decrement the count. If count
2504  *   reaches 0 the page will be freed. ]
2505  *
2506  * This works nicely with pages from FSs.
2507  * But this means that in protocol A we might signal IO completion too early!
2508  *
2509  * In order not to corrupt data during a resync we must make sure
2510  * that we do not reuse our own buffer pages (EEs) too early, therefore
2511  * we have the net_ee list.
2512  *
2513  * XFS still seems to have problems: it submits pages with page_count == 0!
2514  * As a workaround, we disable sendpage on pages
2515  * with page_count == 0 or PageSlab.
2516  */
2517 static int _drbd_no_send_page(struct drbd_conf *mdev, struct page *page,
2518                    int offset, size_t size, unsigned msg_flags)
2519 {
2520         int sent = drbd_send(mdev, mdev->data.socket, kmap(page) + offset, size, msg_flags);
2521         kunmap(page);
2522         if (sent == size)
2523                 mdev->send_cnt += size>>9;
2524         return sent == size;
2525 }
2526
2527 static int _drbd_send_page(struct drbd_conf *mdev, struct page *page,
2528                     int offset, size_t size, unsigned msg_flags)
2529 {
2530         mm_segment_t oldfs = get_fs();
2531         int sent, ok;
2532         int len = size;
2533
2534         /* e.g. XFS meta- & log-data is in slab pages, which have a
2535          * page_count of 0 and/or have PageSlab() set.
2536          * we cannot use send_page for those, as that does get_page();
2537          * put_page(); and would cause either a VM_BUG directly, or
2538          * __page_cache_release a page that would actually still be referenced
2539          * by someone, leading to some obscure delayed Oops somewhere else. */
2540         if (disable_sendpage || (page_count(page) < 1) || PageSlab(page))
2541                 return _drbd_no_send_page(mdev, page, offset, size, msg_flags);
2542
2543         msg_flags |= MSG_NOSIGNAL;
2544         drbd_update_congested(mdev);
2545         set_fs(KERNEL_DS);
2546         do {
2547                 sent = mdev->data.socket->ops->sendpage(mdev->data.socket, page,
2548                                                         offset, len,
2549                                                         msg_flags);
2550                 if (sent == -EAGAIN) {
2551                         if (we_should_drop_the_connection(mdev,
2552                                                           mdev->data.socket))
2553                                 break;
2554                         else
2555                                 continue;
2556                 }
2557                 if (sent <= 0) {
2558                         dev_warn(DEV, "%s: size=%d len=%d sent=%d\n",
2559                              __func__, (int)size, len, sent);
2560                         break;
2561                 }
2562                 len    -= sent;
2563                 offset += sent;
2564         } while (len > 0 /* THINK && mdev->cstate >= C_CONNECTED*/);
2565         set_fs(oldfs);
2566         clear_bit(NET_CONGESTED, &mdev->flags);
2567
2568         ok = (len == 0);
2569         if (likely(ok))
2570                 mdev->send_cnt += size>>9;
2571         return ok;
2572 }
2573
2574 static int _drbd_send_bio(struct drbd_conf *mdev, struct bio *bio)
2575 {
2576         struct bio_vec *bvec;
2577         int i;
2578         /* hint all but last page with MSG_MORE */
2579         __bio_for_each_segment(bvec, bio, i, 0) {
2580                 if (!_drbd_no_send_page(mdev, bvec->bv_page,
2581                                      bvec->bv_offset, bvec->bv_len,
2582                                      i == bio->bi_vcnt -1 ? 0 : MSG_MORE))
2583                         return 0;
2584         }
2585         return 1;
2586 }
2587
2588 static int _drbd_send_zc_bio(struct drbd_conf *mdev, struct bio *bio)
2589 {
2590         struct bio_vec *bvec;
2591         int i;
2592         /* hint all but last page with MSG_MORE */
2593         __bio_for_each_segment(bvec, bio, i, 0) {
2594                 if (!_drbd_send_page(mdev, bvec->bv_page,
2595                                      bvec->bv_offset, bvec->bv_len,
2596                                      i == bio->bi_vcnt -1 ? 0 : MSG_MORE))
2597                         return 0;
2598         }
2599         return 1;
2600 }
2601
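/* Zero-copy send of an epoch entry's page chain via sendpage. */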
2602 static int _drbd_send_zc_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e)
2603 {
2604         struct page *page = e->pages;
2605         unsigned len = e->size;
2606         /* hint all but last page with MSG_MORE */
2607         page_chain_for_each(page) {
2608                 unsigned l = min_t(unsigned, len, PAGE_SIZE);
2609                 if (!_drbd_send_page(mdev, page, 0, l,
2610                                 page_chain_next(page) ? MSG_MORE : 0))
2611                         return 0;
2612                 len -= l;
2613         }
2614         return 1;
2615 }
2616
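/* Translate bio rw flags (REQ_SYNC, REQ_FUA, REQ_FLUSH, REQ_DISCARD) into
 * the DP_* flags used on the wire; peers with protocol < 95 only understand
 * DP_RW_SYNC. */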
2617 static u32 bio_flags_to_wire(struct drbd_conf *mdev, unsigned long bi_rw)
2618 {
2619         if (mdev->agreed_pro_version >= 95)
2620                 return  (bi_rw & REQ_SYNC ? DP_RW_SYNC : 0) |
2621                         (bi_rw & REQ_FUA ? DP_FUA : 0) |
2622                         (bi_rw & REQ_FLUSH ? DP_FLUSH : 0) |
2623                         (bi_rw & REQ_DISCARD ? DP_DISCARD : 0);
2624         else
2625                 return bi_rw & REQ_SYNC ? DP_RW_SYNC : 0;
2626 }
2627
2628 /* Used to send write requests
2629  * R_PRIMARY -> Peer    (P_DATA)
2630  */
2631 int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
2632 {
2633         int ok = 1;
2634         struct p_data p;
2635         unsigned int dp_flags = 0;
2636         void *dgb;
2637         int dgs;
2638
2639         if (!drbd_get_data_sock(mdev))
2640                 return 0;
2641
2642         dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_w_tfm) ?
2643                 crypto_hash_digestsize(mdev->integrity_w_tfm) : 0;
2644
2645         if (req->size <= DRBD_MAX_SIZE_H80_PACKET) {
2646                 p.head.h80.magic   = BE_DRBD_MAGIC;
2647                 p.head.h80.command = cpu_to_be16(P_DATA);
2648                 p.head.h80.length  =
2649                         cpu_to_be16(sizeof(p) - sizeof(union p_header) + dgs + req->size);
2650         } else {
2651                 p.head.h95.magic   = BE_DRBD_MAGIC_BIG;
2652                 p.head.h95.command = cpu_to_be16(P_DATA);
2653                 p.head.h95.length  =
2654                         cpu_to_be32(sizeof(p) - sizeof(union p_header) + dgs + req->size);
2655         }
2656
2657         p.sector   = cpu_to_be64(req->sector);
2658         p.block_id = (unsigned long)req;
2659         p.seq_num  = cpu_to_be32(req->seq_num =
2660                                  atomic_add_return(1, &mdev->packet_seq));
2661
2662         dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
2663
2664         if (mdev->state.conn >= C_SYNC_SOURCE &&
2665             mdev->state.conn <= C_PAUSED_SYNC_T)
2666                 dp_flags |= DP_MAY_SET_IN_SYNC;
2667
2668         p.dp_flags = cpu_to_be32(dp_flags);
2669         set_bit(UNPLUG_REMOTE, &mdev->flags);
2670         ok = (sizeof(p) ==
2671                 drbd_send(mdev, mdev->data.socket, &p, sizeof(p), dgs ? MSG_MORE : 0));
2672         if (ok && dgs) {
2673                 dgb = mdev->int_dig_out;
2674                 drbd_csum_bio(mdev, mdev->integrity_w_tfm, req->master_bio, dgb);
2675                 ok = dgs == drbd_send(mdev, mdev->data.socket, dgb, dgs, 0);
2676         }
2677         if (ok) {
2678                 /* For protocol A, we have to memcpy the payload into
2679                  * socket buffers, as we may complete right away
2680                  * as soon as we handed it over to tcp, at which point the data
2681                  * pages may become invalid.
2682                  *
2683                  * For data-integrity enabled, we copy it as well, so we can be
2684                  * sure that even if the bio pages may still be modified, it
2685                  * won't change the data on the wire, thus if the digest checks
2686                  * out ok after sending on this side, but does not fit on the
2687                  * receiving side, we sure have detected corruption elsewhere.
2688                  */
2689                 if (mdev->net_conf->wire_protocol == DRBD_PROT_A || dgs)
2690                         ok = _drbd_send_bio(mdev, req->master_bio);
2691                 else
2692                         ok = _drbd_send_zc_bio(mdev, req->master_bio);
2693
2694                 /* double check digest, sometimes buffers have been modified in flight. */
2695                 if (dgs > 0 && dgs <= 64) {
2696                         /* 64 byte, 512 bit, is the largest digest size
2697                          * currently supported in kernel crypto. */
2698                         unsigned char digest[64];
2699                         drbd_csum_bio(mdev, mdev->integrity_w_tfm, req->master_bio, digest);
2700                         if (memcmp(mdev->int_dig_out, digest, dgs)) {
2701                                 dev_warn(DEV,
2702                                         "Digest mismatch, buffer modified by upper layers during write: %llus +%u\n",
2703                                         (unsigned long long)req->sector, req->size);
2704                         }
2705                 } /* else if (dgs > 64) {
2706                      ... Be noisy about digest too large ...
2707                 } */
2708         }
2709
2710         drbd_put_data_sock(mdev);
2711
2712         return ok;
2713 }
2714
2715 /* answer packet, used to send data back for read requests:
2716  *  Peer       -> (diskless) R_PRIMARY   (P_DATA_REPLY)
2717  *  C_SYNC_SOURCE -> C_SYNC_TARGET         (P_RS_DATA_REPLY)
2718  */
2719 int drbd_send_block(struct drbd_conf *mdev, enum drbd_packets cmd,
2720                     struct drbd_epoch_entry *e)
2721 {
2722         int ok;
2723         struct p_data p;
2724         void *dgb;
2725         int dgs;
2726
2727         dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_w_tfm) ?
2728                 crypto_hash_digestsize(mdev->integrity_w_tfm) : 0;
2729
2730         if (e->size <= DRBD_MAX_SIZE_H80_PACKET) {
2731                 p.head.h80.magic   = BE_DRBD_MAGIC;
2732                 p.head.h80.command = cpu_to_be16(cmd);
2733                 p.head.h80.length  =
2734                         cpu_to_be16(sizeof(p) - sizeof(struct p_header80) + dgs + e->size);
2735         } else {
2736                 p.head.h95.magic   = BE_DRBD_MAGIC_BIG;
2737                 p.head.h95.command = cpu_to_be16(cmd);
2738                 p.head.h95.length  =
2739                         cpu_to_be32(sizeof(p) - sizeof(struct p_header80) + dgs + e->size);
2740         }
2741
2742         p.sector   = cpu_to_be64(e->sector);
2743         p.block_id = e->block_id;
2744         /* p.seq_num  = 0;    No sequence numbers here.. */
2745
2746         /* Only called by our kernel thread.
2747          * This one may be interrupted by DRBD_SIG and/or DRBD_SIGKILL
2748          * in response to admin command or module unload.
2749          */
2750         if (!drbd_get_data_sock(mdev))
2751                 return 0;
2752
2753         ok = sizeof(p) == drbd_send(mdev, mdev->data.socket, &p, sizeof(p), dgs ? MSG_MORE : 0);
2754         if (ok && dgs) {
2755                 dgb = mdev->int_dig_out;
2756                 drbd_csum_ee(mdev, mdev->integrity_w_tfm, e, dgb);
2757                 ok = dgs == drbd_send(mdev, mdev->data.socket, dgb, dgs, 0);
2758         }
2759         if (ok)
2760                 ok = _drbd_send_zc_ee(mdev, e);
2761
2762         drbd_put_data_sock(mdev);
2763
2764         return ok;
2765 }
2766
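/* Tell the peer that the blocks covered by @req are out of sync
 * (P_OUT_OF_SYNC). */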
2767 int drbd_send_oos(struct drbd_conf *mdev, struct drbd_request *req)
2768 {
2769         struct p_block_desc p;
2770
2771         p.sector  = cpu_to_be64(req->sector);
2772         p.blksize = cpu_to_be32(req->size);
2773
2774         return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_OUT_OF_SYNC, &p.head, sizeof(p));
2775 }
2776
2777 /*
2778   drbd_send distinguishes two cases:
2779
2780   Packets sent via the data socket "sock"
2781   and packets sent via the meta data socket "msock"
2782
2783                     sock                      msock
2784   -----------------+-------------------------+------------------------------
2785   timeout           conf.timeout / 2          conf.timeout / 2
2786   timeout action    send a ping via msock     Abort communication
2787                                               and close all sockets
2788 */
2789
2790 /*
2791  * you must hold the appropriate [m]sock mutex (data.mutex/meta.mutex) elsewhere!
2792  */
2793 int drbd_send(struct drbd_conf *mdev, struct socket *sock,
2794               void *buf, size_t size, unsigned msg_flags)
2795 {
2796         struct kvec iov;
2797         struct msghdr msg;
2798         int rv, sent = 0;
2799
2800         if (!sock)
2801                 return -1000;
2802
2803         /* THINK  if (signal_pending) return ... ? */
2804
2805         iov.iov_base = buf;
2806         iov.iov_len  = size;
2807
2808         msg.msg_name       = NULL;
2809         msg.msg_namelen    = 0;
2810         msg.msg_control    = NULL;
2811         msg.msg_controllen = 0;
2812         msg.msg_flags      = msg_flags | MSG_NOSIGNAL;
2813
2814         if (sock == mdev->data.socket) {
2815                 mdev->ko_count = mdev->net_conf->ko_count;
2816                 drbd_update_congested(mdev);
2817         }
2818         do {
2819                 /* STRANGE
2820                  * tcp_sendmsg does _not_ use its size parameter at all ?
2821                  *
2822                  * -EAGAIN on timeout, -EINTR on signal.
2823                  */
2824 /* THINK
2825  * do we need to block DRBD_SIG if sock == &meta.socket ??
2826  * otherwise wake_asender() might interrupt some send_*Ack !
2827  */
2828                 rv = kernel_sendmsg(sock, &msg, &iov, 1, size);
2829                 if (rv == -EAGAIN) {
2830                         if (we_should_drop_the_connection(mdev, sock))
2831                                 break;
2832                         else
2833                                 continue;
2834                 }
2835                 D_ASSERT(rv != 0);
2836                 if (rv == -EINTR) {
2837                         flush_signals(current);
2838                         rv = 0;
2839                 }
2840                 if (rv < 0)
2841                         break;
2842                 sent += rv;
2843                 iov.iov_base += rv;
2844                 iov.iov_len  -= rv;
2845         } while (sent < size);
2846
2847         if (sock == mdev->data.socket)
2848                 clear_bit(NET_CONGESTED, &mdev->flags);
2849
2850         if (rv <= 0) {
2851                 if (rv != -EAGAIN) {
2852                         dev_err(DEV, "%s_sendmsg returned %d\n",
2853                             sock == mdev->meta.socket ? "msock" : "sock",
2854                             rv);
2855                         drbd_force_state(mdev, NS(conn, C_BROKEN_PIPE));
2856                 } else
2857                         drbd_force_state(mdev, NS(conn, C_TIMEOUT));
2858         }
2859
2860         return sent;
2861 }
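/*
 * Illustrative sketch only (drbd_send_example() is a hypothetical helper,
 * not part of the driver): callers are expected to serialize access to the
 * data socket before calling drbd_send() on it, the way drbd_send_block()
 * above does via drbd_get_data_sock()/drbd_put_data_sock() (which
 * presumably take and release mdev->data.mutex).
 */
#if 0
static int drbd_send_example(struct drbd_conf *mdev, void *buf, size_t len)
{
	int sent;

	if (!drbd_get_data_sock(mdev))
		return 0;
	sent = drbd_send(mdev, mdev->data.socket, buf, len, 0);
	drbd_put_data_sock(mdev);

	/* drbd_send() returns the number of bytes actually sent */
	return sent == (int)len;
}
#endif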
2862
2863 static int drbd_open(struct block_device *bdev, fmode_t mode)
2864 {
2865         struct drbd_conf *mdev = bdev->bd_disk->private_data;
2866         unsigned long flags;
2867         int rv = 0;
2868
2869         mutex_lock(&drbd_main_mutex);
2870         spin_lock_irqsave(&mdev->req_lock, flags);
2871         /* to have a stable mdev->state.role
2872          * and no race with updating open_cnt */
2873
2874         if (mdev->state.role != R_PRIMARY) {
2875                 if (mode & FMODE_WRITE)
2876                         rv = -EROFS;
2877                 else if (!allow_oos)
2878                         rv = -EMEDIUMTYPE;
2879         }
2880
2881         if (!rv)
2882                 mdev->open_cnt++;
2883         spin_unlock_irqrestore(&mdev->req_lock, flags);
2884         mutex_unlock(&drbd_main_mutex);
2885
2886         return rv;
2887 }
2888
2889 static int drbd_release(struct gendisk *gd, fmode_t mode)
2890 {
2891         struct drbd_conf *mdev = gd->private_data;
2892         mutex_lock(&drbd_main_mutex);
2893         mdev->open_cnt--;
2894         mutex_unlock(&drbd_main_mutex);
2895         return 0;
2896 }
2897
2898 static void drbd_set_defaults(struct drbd_conf *mdev)
2899 {
2900         /* This way we get a compile error when sync_conf grows,
2901            and we forget to initialize it here */
2902         mdev->sync_conf = (struct syncer_conf) {
2903                 /* .rate = */           DRBD_RATE_DEF,
2904                 /* .after = */          DRBD_AFTER_DEF,
2905                 /* .al_extents = */     DRBD_AL_EXTENTS_DEF,
2906                 /* .verify_alg = */     {}, 0,
2907                 /* .cpu_mask = */       {}, 0,
2908                 /* .csums_alg = */      {}, 0,
2909                 /* .use_rle = */        0,
2910                 /* .on_no_data = */     DRBD_ON_NO_DATA_DEF,
2911                 /* .c_plan_ahead = */   DRBD_C_PLAN_AHEAD_DEF,
2912                 /* .c_delay_target = */ DRBD_C_DELAY_TARGET_DEF,
2913                 /* .c_fill_target = */  DRBD_C_FILL_TARGET_DEF,
2914                 /* .c_max_rate = */     DRBD_C_MAX_RATE_DEF,
2915                 /* .c_min_rate = */     DRBD_C_MIN_RATE_DEF
2916         };
2917
2918         /* Have to do it this way, because the bitfield layout differs
2919            between big endian and little endian */
2920         mdev->state = (union drbd_state) {
2921                 { .role = R_SECONDARY,
2922                   .peer = R_UNKNOWN,
2923                   .conn = C_STANDALONE,
2924                   .disk = D_DISKLESS,
2925                   .pdsk = D_UNKNOWN,
2926                   .susp = 0,
2927                   .susp_nod = 0,
2928                   .susp_fen = 0
2929                 } };
2930 }
2931
2932 void drbd_init_set_defaults(struct drbd_conf *mdev)
2933 {
2934         /* the memset(,0,) did most of this.
2935          * note: only assignments, no allocation in here */
2936
2937         drbd_set_defaults(mdev);
2938
2939         atomic_set(&mdev->ap_bio_cnt, 0);
2940         atomic_set(&mdev->ap_pending_cnt, 0);
2941         atomic_set(&mdev->rs_pending_cnt, 0);
2942         atomic_set(&mdev->unacked_cnt, 0);
2943         atomic_set(&mdev->local_cnt, 0);
2944         atomic_set(&mdev->net_cnt, 0);
2945         atomic_set(&mdev->packet_seq, 0);
2946         atomic_set(&mdev->pp_in_use, 0);
2947         atomic_set(&mdev->pp_in_use_by_net, 0);
2948         atomic_set(&mdev->rs_sect_in, 0);
2949         atomic_set(&mdev->rs_sect_ev, 0);
2950         atomic_set(&mdev->ap_in_flight, 0);
2951
2952         mutex_init(&mdev->md_io_mutex);
2953         mutex_init(&mdev->data.mutex);
2954         mutex_init(&mdev->meta.mutex);
2955         sema_init(&mdev->data.work.s, 0);
2956         sema_init(&mdev->meta.work.s, 0);
2957         mutex_init(&mdev->state_mutex);
2958
2959         spin_lock_init(&mdev->data.work.q_lock);
2960         spin_lock_init(&mdev->meta.work.q_lock);
2961
2962         spin_lock_init(&mdev->al_lock);
2963         spin_lock_init(&mdev->req_lock);
2964         spin_lock_init(&mdev->peer_seq_lock);
2965         spin_lock_init(&mdev->epoch_lock);
2966
2967         INIT_LIST_HEAD(&mdev->active_ee);
2968         INIT_LIST_HEAD(&mdev->sync_ee);
2969         INIT_LIST_HEAD(&mdev->done_ee);
2970         INIT_LIST_HEAD(&mdev->read_ee);
2971         INIT_LIST_HEAD(&mdev->net_ee);
2972         INIT_LIST_HEAD(&mdev->resync_reads);
2973         INIT_LIST_HEAD(&mdev->data.work.q);
2974         INIT_LIST_HEAD(&mdev->meta.work.q);
2975         INIT_LIST_HEAD(&mdev->resync_work.list);
2976         INIT_LIST_HEAD(&mdev->unplug_work.list);
2977         INIT_LIST_HEAD(&mdev->go_diskless.list);
2978         INIT_LIST_HEAD(&mdev->md_sync_work.list);
2979         INIT_LIST_HEAD(&mdev->start_resync_work.list);
2980         INIT_LIST_HEAD(&mdev->bm_io_work.w.list);
2981
2982         mdev->resync_work.cb  = w_resync_timer;
2983         mdev->unplug_work.cb  = w_send_write_hint;
2984         mdev->go_diskless.cb  = w_go_diskless;
2985         mdev->md_sync_work.cb = w_md_sync;
2986         mdev->bm_io_work.w.cb = w_bitmap_io;
2987         init_timer(&mdev->resync_timer);
2988         init_timer(&mdev->md_sync_timer);
2989         mdev->resync_timer.function = resync_timer_fn;
2990         mdev->resync_timer.data = (unsigned long) mdev;
2991         mdev->md_sync_timer.function = md_sync_timer_fn;
2992         mdev->md_sync_timer.data = (unsigned long) mdev;
2993
2994         init_waitqueue_head(&mdev->misc_wait);
2995         init_waitqueue_head(&mdev->state_wait);
2996         init_waitqueue_head(&mdev->net_cnt_wait);
2997         init_waitqueue_head(&mdev->ee_wait);
2998         init_waitqueue_head(&mdev->al_wait);
2999         init_waitqueue_head(&mdev->seq_wait);
3000
3001         drbd_thread_init(mdev, &mdev->receiver, drbdd_init);
3002         drbd_thread_init(mdev, &mdev->worker, drbd_worker);
3003         drbd_thread_init(mdev, &mdev->asender, drbd_asender);
3004
3005         mdev->agreed_pro_version = PRO_VERSION_MAX;
3006         mdev->write_ordering = WO_bdev_flush;
3007         mdev->resync_wenr = LC_FREE;
3008 }
3009
3010 void drbd_mdev_cleanup(struct drbd_conf *mdev)
3011 {
3012         int i;
3013         if (mdev->receiver.t_state != None)
3014                 dev_err(DEV, "ASSERT FAILED: receiver t_state == %d expected 0.\n",
3015                                 mdev->receiver.t_state);
3016
3017         /* no need to lock it, I'm the only thread alive */
3018         if (atomic_read(&mdev->current_epoch->epoch_size) !=  0)
3019                 dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size));
3020         mdev->al_writ_cnt  =
3021         mdev->bm_writ_cnt  =
3022         mdev->read_cnt     =
3023         mdev->recv_cnt     =
3024         mdev->send_cnt     =
3025         mdev->writ_cnt     =
3026         mdev->p_size       =
3027         mdev->rs_start     =
3028         mdev->rs_total     =
3029         mdev->rs_failed    = 0;
3030         mdev->rs_last_events = 0;
3031         mdev->rs_last_sect_ev = 0;
3032         for (i = 0; i < DRBD_SYNC_MARKS; i++) {
3033                 mdev->rs_mark_left[i] = 0;
3034                 mdev->rs_mark_time[i] = 0;
3035         }
3036         D_ASSERT(mdev->net_conf == NULL);
3037
3038         drbd_set_my_capacity(mdev, 0);
3039         if (mdev->bitmap) {
3040                 /* maybe never allocated. */
3041                 drbd_bm_resize(mdev, 0, 1);
3042                 drbd_bm_cleanup(mdev);
3043         }
3044
3045         drbd_free_resources(mdev);
3046         clear_bit(AL_SUSPENDED, &mdev->flags);
3047
3048         /*
3049          * currently we call drbd_init_ee() only on module load, so
3050          * we may call drbd_release_ee() only on module unload!
3051          */
3052         D_ASSERT(list_empty(&mdev->active_ee));
3053         D_ASSERT(list_empty(&mdev->sync_ee));
3054         D_ASSERT(list_empty(&mdev->done_ee));
3055         D_ASSERT(list_empty(&mdev->read_ee));
3056         D_ASSERT(list_empty(&mdev->net_ee));
3057         D_ASSERT(list_empty(&mdev->resync_reads));
3058         D_ASSERT(list_empty(&mdev->data.work.q));
3059         D_ASSERT(list_empty(&mdev->meta.work.q));
3060         D_ASSERT(list_empty(&mdev->resync_work.list));
3061         D_ASSERT(list_empty(&mdev->unplug_work.list));
3062         D_ASSERT(list_empty(&mdev->go_diskless.list));
3063
3064         drbd_set_defaults(mdev);
3065 }
3066
3067
3068 static void drbd_destroy_mempools(void)
3069 {
3070         struct page *page;
3071
3072         while (drbd_pp_pool) {
3073                 page = drbd_pp_pool;
3074                 drbd_pp_pool = (struct page *)page_private(page);
3075                 __free_page(page);
3076                 drbd_pp_vacant--;
3077         }
3078
3079         /* D_ASSERT(atomic_read(&drbd_pp_vacant)==0); */
3080
3081         if (drbd_ee_mempool)
3082                 mempool_destroy(drbd_ee_mempool);
3083         if (drbd_request_mempool)
3084                 mempool_destroy(drbd_request_mempool);
3085         if (drbd_ee_cache)
3086                 kmem_cache_destroy(drbd_ee_cache);
3087         if (drbd_request_cache)
3088                 kmem_cache_destroy(drbd_request_cache);
3089         if (drbd_bm_ext_cache)
3090                 kmem_cache_destroy(drbd_bm_ext_cache);
3091         if (drbd_al_ext_cache)
3092                 kmem_cache_destroy(drbd_al_ext_cache);
3093
3094         drbd_ee_mempool      = NULL;
3095         drbd_request_mempool = NULL;
3096         drbd_ee_cache        = NULL;
3097         drbd_request_cache   = NULL;
3098         drbd_bm_ext_cache    = NULL;
3099         drbd_al_ext_cache    = NULL;
3100
3101         return;
3102 }
3103
3104 static int drbd_create_mempools(void)
3105 {
3106         struct page *page;
3107         const int number = (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * minor_count;
3108         int i;
3109
3110         /* prepare our caches and mempools */
3111         drbd_request_mempool = NULL;
3112         drbd_ee_cache        = NULL;
3113         drbd_request_cache   = NULL;
3114         drbd_bm_ext_cache    = NULL;
3115         drbd_al_ext_cache    = NULL;
3116         drbd_pp_pool         = NULL;
3117
3118         /* caches */
3119         drbd_request_cache = kmem_cache_create(
3120                 "drbd_req", sizeof(struct drbd_request), 0, 0, NULL);
3121         if (drbd_request_cache == NULL)
3122                 goto Enomem;
3123
3124         drbd_ee_cache = kmem_cache_create(
3125                 "drbd_ee", sizeof(struct drbd_epoch_entry), 0, 0, NULL);
3126         if (drbd_ee_cache == NULL)
3127                 goto Enomem;
3128
3129         drbd_bm_ext_cache = kmem_cache_create(
3130                 "drbd_bm", sizeof(struct bm_extent), 0, 0, NULL);
3131         if (drbd_bm_ext_cache == NULL)
3132                 goto Enomem;
3133
3134         drbd_al_ext_cache = kmem_cache_create(
3135                 "drbd_al", sizeof(struct lc_element), 0, 0, NULL);
3136         if (drbd_al_ext_cache == NULL)
3137                 goto Enomem;
3138
3139         /* mempools */
3140         drbd_request_mempool = mempool_create(number,
3141                 mempool_alloc_slab, mempool_free_slab, drbd_request_cache);
3142         if (drbd_request_mempool == NULL)
3143                 goto Enomem;
3144
3145         drbd_ee_mempool = mempool_create(number,
3146                 mempool_alloc_slab, mempool_free_slab, drbd_ee_cache);
3147         if (drbd_ee_mempool == NULL)
3148                 goto Enomem;
3149
3150         /* drbd's page pool */
3151         spin_lock_init(&drbd_pp_lock);
3152
3153         for (i = 0; i < number; i++) {
3154                 page = alloc_page(GFP_HIGHUSER);
3155                 if (!page)
3156                         goto Enomem;
3157                 set_page_private(page, (unsigned long)drbd_pp_pool);
3158                 drbd_pp_pool = page;
3159         }
3160         drbd_pp_vacant = number;
3161
3162         return 0;
3163
3164 Enomem:
3165         drbd_destroy_mempools(); /* in case we allocated some */
3166         return -ENOMEM;
3167 }
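/*
 * Sketch only (an assumption about the consumer, which lives elsewhere in
 * the driver): drbd_pp_pool built above is a simple LIFO of pages chained
 * through page_private(), so taking one page back out mirrors the loop in
 * drbd_destroy_mempools():
 *
 *	spin_lock(&drbd_pp_lock);
 *	page = drbd_pp_pool;
 *	if (page) {
 *		drbd_pp_pool = (struct page *)page_private(page);
 *		drbd_pp_vacant--;
 *	}
 *	spin_unlock(&drbd_pp_lock);
 */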
3168
3169 static int drbd_notify_sys(struct notifier_block *this, unsigned long code,
3170         void *unused)
3171 {
3172         /* just so we have it.  you never know what interesting things we
3173          * might want to do here some day...
3174          */
3175
3176         return NOTIFY_DONE;
3177 }
3178
3179 static struct notifier_block drbd_notifier = {
3180         .notifier_call = drbd_notify_sys,
3181 };
3182
3183 static void drbd_release_ee_lists(struct drbd_conf *mdev)
3184 {
3185         int rr;
3186
3187         rr = drbd_release_ee(mdev, &mdev->active_ee);
3188         if (rr)
3189                 dev_err(DEV, "%d EEs in active list found!\n", rr);
3190
3191         rr = drbd_release_ee(mdev, &mdev->sync_ee);
3192         if (rr)
3193                 dev_err(DEV, "%d EEs in sync list found!\n", rr);
3194
3195         rr = drbd_release_ee(mdev, &mdev->read_ee);
3196         if (rr)
3197                 dev_err(DEV, "%d EEs in read list found!\n", rr);
3198
3199         rr = drbd_release_ee(mdev, &mdev->done_ee);
3200         if (rr)
3201                 dev_err(DEV, "%d EEs in done list found!\n", rr);
3202
3203         rr = drbd_release_ee(mdev, &mdev->net_ee);
3204         if (rr)
3205                 dev_err(DEV, "%d EEs in net list found!\n", rr);
3206 }
3207
3208 /* caution. no locking.
3209  * currently only used from module cleanup code. */
3210 static void drbd_delete_device(unsigned int minor)
3211 {
3212         struct drbd_conf *mdev = minor_to_mdev(minor);
3213
3214         if (!mdev)
3215                 return;
3216
3217         /* paranoia asserts */
3218         if (mdev->open_cnt != 0)
3219                 dev_err(DEV, "open_cnt = %d in %s:%u\n", mdev->open_cnt,
3220                                 __FILE__ , __LINE__);
3221
3222         ERR_IF (!list_empty(&mdev->data.work.q)) {
3223                 struct list_head *lp;
3224                 list_for_each(lp, &mdev->data.work.q) {
3225                         dev_err(DEV, "lp = %p\n", lp);
3226                 }
3227         };
3228         /* end paranoia asserts */
3229
3230         del_gendisk(mdev->vdisk);
3231
3232         /* cleanup stuff that may have been allocated during
3233          * device (re-)configuration or state changes */
3234
3235         if (mdev->this_bdev)
3236                 bdput(mdev->this_bdev);
3237
3238         drbd_free_resources(mdev);
3239
3240         drbd_release_ee_lists(mdev);
3241
3242         /* should be free'd on disconnect? */
3243         kfree(mdev->ee_hash);
3244         /*
3245         mdev->ee_hash_s = 0;
3246         mdev->ee_hash = NULL;
3247         */
3248
3249         lc_destroy(mdev->act_log);
3250         lc_destroy(mdev->resync);
3251
3252         kfree(mdev->p_uuid);
3253         /* mdev->p_uuid = NULL; */
3254
3255         kfree(mdev->int_dig_out);
3256         kfree(mdev->int_dig_in);
3257         kfree(mdev->int_dig_vv);
3258
3259         /* cleanup the rest that has been
3260          * allocated from drbd_new_device
3261          * and actually free the mdev itself */
3262         drbd_free_mdev(mdev);
3263 }
3264
3265 static void drbd_cleanup(void)
3266 {
3267         unsigned int i;
3268
3269         unregister_reboot_notifier(&drbd_notifier);
3270
3271         /* first remove proc,
3272          * drbdsetup uses its presence to detect
3273          * whether DRBD is loaded.
3274          * If we got stuck in proc removal,
3275          * but had netlink already deregistered,
3276          * some drbdsetup commands may wait forever
3277          * for an answer.
3278          */
3279         if (drbd_proc)
3280                 remove_proc_entry("drbd", NULL);
3281
3282         drbd_nl_cleanup();
3283
3284         if (minor_table) {
3285                 i = minor_count;
3286                 while (i--)
3287                         drbd_delete_device(i);
3288                 drbd_destroy_mempools();
3289         }
3290
3291         kfree(minor_table);
3292
3293         unregister_blkdev(DRBD_MAJOR, "drbd");
3294
3295         printk(KERN_INFO "drbd: module cleanup done.\n");
3296 }
3297
3298 /**
3299  * drbd_congested() - Callback for pdflush
3300  * @congested_data:     User data
3301  * @bdi_bits:           Bits pdflush is currently interested in
3302  *
3303  * Returns 1<<BDI_async_congested and/or 1<<BDI_sync_congested if we are congested.
3304  */
3305 static int drbd_congested(void *congested_data, int bdi_bits)
3306 {
3307         struct drbd_conf *mdev = congested_data;
3308         struct request_queue *q;
3309         char reason = '-';
3310         int r = 0;
3311
3312         if (!may_inc_ap_bio(mdev)) {
3313                 /* DRBD has frozen IO */
3314                 r = bdi_bits;
3315                 reason = 'd';
3316                 goto out;
3317         }
3318
3319         if (get_ldev(mdev)) {
3320                 q = bdev_get_queue(mdev->ldev->backing_bdev);
3321                 r = bdi_congested(&q->backing_dev_info, bdi_bits);
3322                 put_ldev(mdev);
3323                 if (r)
3324                         reason = 'b';
3325         }
3326
3327         if (bdi_bits & (1 << BDI_async_congested) && test_bit(NET_CONGESTED, &mdev->flags)) {
3328                 r |= (1 << BDI_async_congested);
3329                 reason = reason == 'b' ? 'a' : 'n';
3330         }
3331
3332 out:
3333         mdev->congestion_reason = reason;
3334         return r;
3335 }
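/*
 * Note (derived from the code above): mdev->congestion_reason ends up as
 *	'-'  not congested
 *	'd'  DRBD itself has frozen application IO
 *	'b'  the local backing device is congested
 *	'n'  the data socket is congested (NET_CONGESTED)
 *	'a'  both the backing device and the network are congested
 */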
3336
3337 struct drbd_conf *drbd_new_device(unsigned int minor)
3338 {
3339         struct drbd_conf *mdev;
3340         struct gendisk *disk;
3341         struct request_queue *q;
3342
3343         /* GFP_KERNEL, we are outside of all write-out paths */
3344         mdev = kzalloc(sizeof(struct drbd_conf), GFP_KERNEL);
3345         if (!mdev)
3346                 return NULL;
3347         if (!zalloc_cpumask_var(&mdev->cpu_mask, GFP_KERNEL))
3348                 goto out_no_cpumask;
3349
3350         mdev->minor = minor;
3351
3352         drbd_init_set_defaults(mdev);
3353
3354         q = blk_alloc_queue(GFP_KERNEL);
3355         if (!q)
3356                 goto out_no_q;
3357         mdev->rq_queue = q;
3358         q->queuedata   = mdev;
3359
3360         disk = alloc_disk(1);
3361         if (!disk)
3362                 goto out_no_disk;
3363         mdev->vdisk = disk;
3364
3365         set_disk_ro(disk, true);
3366
3367         disk->queue = q;
3368         disk->major = DRBD_MAJOR;
3369         disk->first_minor = minor;
3370         disk->fops = &drbd_ops;
3371         sprintf(disk->disk_name, "drbd%d", minor);
3372         disk->private_data = mdev;
3373
3374         mdev->this_bdev = bdget(MKDEV(DRBD_MAJOR, minor));
3375         /* we have no partitions. we contain only ourselves. */
3376         mdev->this_bdev->bd_contains = mdev->this_bdev;
3377
3378         q->backing_dev_info.congested_fn = drbd_congested;
3379         q->backing_dev_info.congested_data = mdev;
3380
3381         blk_queue_make_request(q, drbd_make_request);
3382         blk_queue_max_hw_sectors(q, DRBD_MAX_BIO_SIZE >> 9);
3383         blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
3384         blk_queue_merge_bvec(q, drbd_merge_bvec);
3385         q->queue_lock = &mdev->req_lock;
3386
3387         mdev->md_io_page = alloc_page(GFP_KERNEL);
3388         if (!mdev->md_io_page)
3389                 goto out_no_io_page;
3390
3391         if (drbd_bm_init(mdev))
3392                 goto out_no_bitmap;
3393         /* no need to lock access, we are still initializing this minor device. */
3394         if (!tl_init(mdev))
3395                 goto out_no_tl;
3396
3397         mdev->app_reads_hash = kzalloc(APP_R_HSIZE*sizeof(void *), GFP_KERNEL);
3398         if (!mdev->app_reads_hash)
3399                 goto out_no_app_reads;
3400
3401         mdev->current_epoch = kzalloc(sizeof(struct drbd_epoch), GFP_KERNEL);
3402         if (!mdev->current_epoch)
3403                 goto out_no_epoch;
3404
3405         INIT_LIST_HEAD(&mdev->current_epoch->list);
3406         mdev->epochs = 1;
3407
3408         return mdev;
3409
3410 /* out_whatever_else:
3411         kfree(mdev->current_epoch); */
3412 out_no_epoch:
3413         kfree(mdev->app_reads_hash);
3414 out_no_app_reads:
3415         tl_cleanup(mdev);
3416 out_no_tl:
3417         drbd_bm_cleanup(mdev);
3418 out_no_bitmap:
3419         __free_page(mdev->md_io_page);
3420 out_no_io_page:
3421         put_disk(disk);
3422 out_no_disk:
3423         blk_cleanup_queue(q);
3424 out_no_q:
3425         free_cpumask_var(mdev->cpu_mask);
3426 out_no_cpumask:
3427         kfree(mdev);
3428         return NULL;
3429 }
3430
3431 /* counterpart of drbd_new_device.
3432  * last part of drbd_delete_device. */
3433 void drbd_free_mdev(struct drbd_conf *mdev)
3434 {
3435         kfree(mdev->current_epoch);
3436         kfree(mdev->app_reads_hash);
3437         tl_cleanup(mdev);
3438         if (mdev->bitmap) /* should no longer be there. */
3439                 drbd_bm_cleanup(mdev);
3440         __free_page(mdev->md_io_page);
3441         put_disk(mdev->vdisk);
3442         blk_cleanup_queue(mdev->rq_queue);
3443         free_cpumask_var(mdev->cpu_mask);
3444         drbd_free_tl_hash(mdev);
3445         kfree(mdev);
3446 }
3447
3448
3449 int __init drbd_init(void)
3450 {
3451         int err;
3452
3453         if (sizeof(struct p_handshake) != 80) {
3454                 printk(KERN_ERR
3455                        "drbd: never change the size or layout "
3456                        "of the HandShake packet.\n");
3457                 return -EINVAL;
3458         }
3459
3460         if (minor_count < DRBD_MINOR_COUNT_MIN || minor_count > DRBD_MINOR_COUNT_MAX) {
3461                 printk(KERN_ERR
3462                         "drbd: invalid minor_count (%d)\n", minor_count);
3463 #ifdef MODULE
3464                 return -EINVAL;
3465 #else
3466                 minor_count = 8;
3467 #endif
3468         }
3469
3470         err = drbd_nl_init();
3471         if (err)
3472                 return err;
3473
3474         err = register_blkdev(DRBD_MAJOR, "drbd");
3475         if (err) {
3476                 printk(KERN_ERR
3477                        "drbd: unable to register block device major %d\n",
3478                        DRBD_MAJOR);
3479                 return err;
3480         }
3481
3482         register_reboot_notifier(&drbd_notifier);
3483
3484         /*
3485          * allocate all necessary structs
3486          */
3487         err = -ENOMEM;
3488
3489         init_waitqueue_head(&drbd_pp_wait);
3490
3491         drbd_proc = NULL; /* play safe for drbd_cleanup */
3492         minor_table = kzalloc(sizeof(struct drbd_conf *)*minor_count,
3493                                 GFP_KERNEL);
3494         if (!minor_table)
3495                 goto Enomem;
3496
3497         err = drbd_create_mempools();
3498         if (err)
3499                 goto Enomem;
3500
3501         drbd_proc = proc_create_data("drbd", S_IFREG | S_IRUGO , NULL, &drbd_proc_fops, NULL);
3502         if (!drbd_proc) {
3503                 printk(KERN_ERR "drbd: unable to register proc file\n");
3504                 goto Enomem;
3505         }
3506
3507         rwlock_init(&global_state_lock);
3508
3509         printk(KERN_INFO "drbd: initialized. "
3510                "Version: " REL_VERSION " (api:%d/proto:%d-%d)\n",
3511                API_VERSION, PRO_VERSION_MIN, PRO_VERSION_MAX);
3512         printk(KERN_INFO "drbd: %s\n", drbd_buildtag());
3513         printk(KERN_INFO "drbd: registered as block device major %d\n",
3514                 DRBD_MAJOR);
3515         printk(KERN_INFO "drbd: minor_table @ 0x%p\n", minor_table);
3516
3517         return 0; /* Success! */
3518
3519 Enomem:
3520         drbd_cleanup();
3521         if (err == -ENOMEM)
3522                 /* currently always the case */
3523                 printk(KERN_ERR "drbd: ran out of memory\n");
3524         else
3525                 printk(KERN_ERR "drbd: initialization failure\n");
3526         return err;
3527 }
3528
3529 void drbd_free_bc(struct drbd_backing_dev *ldev)
3530 {
3531         if (ldev == NULL)
3532                 return;
3533
3534         blkdev_put(ldev->backing_bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
3535         blkdev_put(ldev->md_bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
3536
3537         kfree(ldev);
3538 }
3539
3540 void drbd_free_sock(struct drbd_conf *mdev)
3541 {
3542         if (mdev->data.socket) {
3543                 mutex_lock(&mdev->data.mutex);
3544                 kernel_sock_shutdown(mdev->data.socket, SHUT_RDWR);
3545                 sock_release(mdev->data.socket);
3546                 mdev->data.socket = NULL;
3547                 mutex_unlock(&mdev->data.mutex);
3548         }
3549         if (mdev->meta.socket) {
3550                 mutex_lock(&mdev->meta.mutex);
3551                 kernel_sock_shutdown(mdev->meta.socket, SHUT_RDWR);
3552                 sock_release(mdev->meta.socket);
3553                 mdev->meta.socket = NULL;
3554                 mutex_unlock(&mdev->meta.mutex);
3555         }
3556 }
3557
3558
3559 void drbd_free_resources(struct drbd_conf *mdev)
3560 {
3561         crypto_free_hash(mdev->csums_tfm);
3562         mdev->csums_tfm = NULL;
3563         crypto_free_hash(mdev->verify_tfm);
3564         mdev->verify_tfm = NULL;
3565         crypto_free_hash(mdev->cram_hmac_tfm);
3566         mdev->cram_hmac_tfm = NULL;
3567         crypto_free_hash(mdev->integrity_w_tfm);
3568         mdev->integrity_w_tfm = NULL;
3569         crypto_free_hash(mdev->integrity_r_tfm);
3570         mdev->integrity_r_tfm = NULL;
3571
3572         drbd_free_sock(mdev);
3573
3574         __no_warn(local,
3575                   drbd_free_bc(mdev->ldev);
3576                   mdev->ldev = NULL;);
3577 }
3578
3579 /* meta data management */
3580
3581 struct meta_data_on_disk {
3582         u64 la_size;           /* last agreed size. */
3583         u64 uuid[UI_SIZE];   /* UUIDs. */
3584         u64 device_uuid;
3585         u64 reserved_u64_1;
3586         u32 flags;             /* MDF */
3587         u32 magic;
3588         u32 md_size_sect;
3589         u32 al_offset;         /* offset to this block */
3590         u32 al_nr_extents;     /* important for restoring the AL */
3591               /* `-- act_log->nr_elements <-- sync_conf.al_extents */
3592         u32 bm_offset;         /* offset to the bitmap, from here */
3593         u32 bm_bytes_per_bit;  /* BM_BLOCK_SIZE */
3594         u32 reserved_u32[4];
3595
3596 } __packed;
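/*
 * Note (illustrative): all fields of struct meta_data_on_disk are stored
 * big-endian on disk.  drbd_md_sync() below converts with cpu_to_be32()/
 * cpu_to_be64() before writing, and drbd_md_read() converts back with
 * be32_to_cpu()/be64_to_cpu() and sanity-checks the offsets, e.g.:
 *
 *	buffer->magic = cpu_to_be32(DRBD_MD_MAGIC);
 *	...
 *	if (be32_to_cpu(buffer->magic) != DRBD_MD_MAGIC)
 *		rv = ERR_MD_INVALID;
 */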
3597
3598 /**
3599  * drbd_md_sync() - Writes the meta data super block if the MD_DIRTY flag bit is set
3600  * @mdev:       DRBD device.
3601  */
3602 void drbd_md_sync(struct drbd_conf *mdev)
3603 {
3604         struct meta_data_on_disk *buffer;
3605         sector_t sector;
3606         int i;
3607
3608         del_timer(&mdev->md_sync_timer);
3609         /* timer may be rearmed by drbd_md_mark_dirty() now. */
3610         if (!test_and_clear_bit(MD_DIRTY, &mdev->flags))
3611                 return;
3612
3613         /* We use D_FAILED here, and not D_ATTACHING, because we try to write
3614          * metadata even if we detach due to a disk failure! */
3615         if (!get_ldev_if_state(mdev, D_FAILED))
3616                 return;
3617
3618         mutex_lock(&mdev->md_io_mutex);
3619         buffer = (struct meta_data_on_disk *)page_address(mdev->md_io_page);
3620         memset(buffer, 0, 512);
3621
3622         buffer->la_size = cpu_to_be64(drbd_get_capacity(mdev->this_bdev));
3623         for (i = UI_CURRENT; i < UI_SIZE; i++)
3624                 buffer->uuid[i] = cpu_to_be64(mdev->ldev->md.uuid[i]);
3625         buffer->flags = cpu_to_be32(mdev->ldev->md.flags);
3626         buffer->magic = cpu_to_be32(DRBD_MD_MAGIC);
3627
3628         buffer->md_size_sect  = cpu_to_be32(mdev->ldev->md.md_size_sect);
3629         buffer->al_offset     = cpu_to_be32(mdev->ldev->md.al_offset);
3630         buffer->al_nr_extents = cpu_to_be32(mdev->act_log->nr_elements);
3631         buffer->bm_bytes_per_bit = cpu_to_be32(BM_BLOCK_SIZE);
3632         buffer->device_uuid = cpu_to_be64(mdev->ldev->md.device_uuid);
3633
3634         buffer->bm_offset = cpu_to_be32(mdev->ldev->md.bm_offset);
3635
3636         D_ASSERT(drbd_md_ss__(mdev, mdev->ldev) == mdev->ldev->md.md_offset);
3637         sector = mdev->ldev->md.md_offset;
3638
3639         if (!drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE)) {
3640                 /* this was only a try anyway ... */
3641                 dev_err(DEV, "meta data update failed!\n");
3642                 drbd_chk_io_error(mdev, 1, true);
3643         }
3644
3645         /* Update mdev->ldev->md.la_size_sect,
3646          * since we just updated it in the on-disk meta data. */
3647         mdev->ldev->md.la_size_sect = drbd_get_capacity(mdev->this_bdev);
3648
3649         mutex_unlock(&mdev->md_io_mutex);
3650         put_ldev(mdev);
3651 }
3652
3653 /**
3654  * drbd_md_read() - Reads in the meta data super block
3655  * @mdev:       DRBD device.
3656  * @bdev:       Device from which the meta data should be read in.
3657  *
3658  * Return 0 (NO_ERROR) on success, and an enum drbd_ret_code in case
3659  * something goes wrong.  Currently only: ERR_IO_MD_DISK, ERR_MD_INVALID.
3660  */
3661 int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
3662 {
3663         struct meta_data_on_disk *buffer;
3664         int i, rv = NO_ERROR;
3665
3666         if (!get_ldev_if_state(mdev, D_ATTACHING))
3667                 return ERR_IO_MD_DISK;
3668
3669         mutex_lock(&mdev->md_io_mutex);
3670         buffer = (struct meta_data_on_disk *)page_address(mdev->md_io_page);
3671
3672         if (!drbd_md_sync_page_io(mdev, bdev, bdev->md.md_offset, READ)) {
3673                 /* NOTE: can't do normal error processing here, as this is
3674                    called BEFORE the disk is attached */
3675                 dev_err(DEV, "Error while reading metadata.\n");
3676                 rv = ERR_IO_MD_DISK;
3677                 goto err;
3678         }
3679
3680         if (be32_to_cpu(buffer->magic) != DRBD_MD_MAGIC) {
3681                 dev_err(DEV, "Error while reading metadata, magic not found.\n");
3682                 rv = ERR_MD_INVALID;
3683                 goto err;
3684         }
3685         if (be32_to_cpu(buffer->al_offset) != bdev->md.al_offset) {
3686                 dev_err(DEV, "unexpected al_offset: %d (expected %d)\n",
3687                     be32_to_cpu(buffer->al_offset), bdev->md.al_offset);
3688                 rv = ERR_MD_INVALID;
3689                 goto err;
3690         }
3691         if (be32_to_cpu(buffer->bm_offset) != bdev->md.bm_offset) {
3692                 dev_err(DEV, "unexpected bm_offset: %d (expected %d)\n",
3693                     be32_to_cpu(buffer->bm_offset), bdev->md.bm_offset);
3694                 rv = ERR_MD_INVALID;
3695                 goto err;
3696         }
3697         if (be32_to_cpu(buffer->md_size_sect) != bdev->md.md_size_sect) {
3698                 dev_err(DEV, "unexpected md_size: %u (expected %u)\n",
3699                     be32_to_cpu(buffer->md_size_sect), bdev->md.md_size_sect);
3700                 rv = ERR_MD_INVALID;
3701                 goto err;
3702         }
3703
3704         if (be32_to_cpu(buffer->bm_bytes_per_bit) != BM_BLOCK_SIZE) {
3705                 dev_err(DEV, "unexpected bm_bytes_per_bit: %u (expected %u)\n",
3706                     be32_to_cpu(buffer->bm_bytes_per_bit), BM_BLOCK_SIZE);
3707                 rv = ERR_MD_INVALID;
3708                 goto err;
3709         }
3710
3711         bdev->md.la_size_sect = be64_to_cpu(buffer->la_size);
3712         for (i = UI_CURRENT; i < UI_SIZE; i++)
3713                 bdev->md.uuid[i] = be64_to_cpu(buffer->uuid[i]);
3714         bdev->md.flags = be32_to_cpu(buffer->flags);
3715         mdev->sync_conf.al_extents = be32_to_cpu(buffer->al_nr_extents);
3716         bdev->md.device_uuid = be64_to_cpu(buffer->device_uuid);
3717
3718         if (mdev->sync_conf.al_extents < 7)
3719                 mdev->sync_conf.al_extents = 127;
3720
3721  err:
3722         mutex_unlock(&mdev->md_io_mutex);
3723         put_ldev(mdev);
3724
3725         return rv;
3726 }
3727
3728 static void debug_drbd_uuid(struct drbd_conf *mdev, enum drbd_uuid_index index)
3729 {
3730         static char *uuid_str[UI_EXTENDED_SIZE] = {
3731                 [UI_CURRENT] = "CURRENT",
3732                 [UI_BITMAP] = "BITMAP",
3733                 [UI_HISTORY_START] = "HISTORY_START",
3734                 [UI_HISTORY_END] = "HISTORY_END",
3735                 [UI_SIZE] = "SIZE",
3736                 [UI_FLAGS] = "FLAGS",
3737         };
3738
3739         if (index >= UI_EXTENDED_SIZE) {
3740                 dev_warn(DEV, " uuid_index >= EXTENDED_SIZE\n");
3741                 return;
3742         }
3743
3744         dynamic_dev_dbg(DEV, " uuid[%s] now %016llX\n",
3745                  uuid_str[index],
3746                  (unsigned long long)mdev->ldev->md.uuid[index]);
3747 }
3748
3749
3750 /**
3751  * drbd_md_mark_dirty() - Mark meta data super block as dirty
3752  * @mdev:       DRBD device.
3753  *
3754  * Call this function if you change anything that should be written to
3755  * the meta-data super block. This function sets MD_DIRTY, and starts a
3756  * timer that ensures that drbd_md_sync() gets called within five seconds.
3757  */
3758 #ifdef DEBUG
3759 void drbd_md_mark_dirty_(struct drbd_conf *mdev, unsigned int line, const char *func)
3760 {
3761         if (!test_and_set_bit(MD_DIRTY, &mdev->flags)) {
3762                 mod_timer(&mdev->md_sync_timer, jiffies + HZ);
3763                 mdev->last_md_mark_dirty.line = line;
3764                 mdev->last_md_mark_dirty.func = func;
3765         }
3766 }
3767 #else
3768 void drbd_md_mark_dirty(struct drbd_conf *mdev)
3769 {
3770         if (!test_and_set_bit(MD_DIRTY, &mdev->flags))
3771                 mod_timer(&mdev->md_sync_timer, jiffies + 5*HZ);
3772 }
3773 #endif
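/*
 * Illustrative sketch (simplified; idx/val stand for whatever is being
 * changed): the usual pattern is to change the in-memory meta data, mark it
 * dirty, and either rely on the md_sync_timer armed above or force the
 * write out immediately, as _drbd_uuid_set() and drbd_uuid_new_current()
 * below do:
 *
 *	mdev->ldev->md.uuid[idx] = val;		(some meta data change)
 *	drbd_md_mark_dirty(mdev);		(arms md_sync_timer)
 *	drbd_md_sync(mdev);			(optional: write out right away)
 */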
3774
3775 static void drbd_uuid_move_history(struct drbd_conf *mdev) __must_hold(local)
3776 {
3777         int i;
3778
3779         for (i = UI_HISTORY_START; i < UI_HISTORY_END; i++) {
3780                 mdev->ldev->md.uuid[i+1] = mdev->ldev->md.uuid[i];
3781                 debug_drbd_uuid(mdev, i+1);
3782         }
3783 }
3784
3785 void _drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
3786 {
3787         if (idx == UI_CURRENT) {
3788                 if (mdev->state.role == R_PRIMARY)
3789                         val |= 1;
3790                 else
3791                         val &= ~((u64)1);
3792
3793                 drbd_set_ed_uuid(mdev, val);
3794         }
3795
3796         mdev->ldev->md.uuid[idx] = val;
3797         debug_drbd_uuid(mdev, idx);
3798         drbd_md_mark_dirty(mdev);
3799 }
3800
3801
3802 void drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
3803 {
3804         if (mdev->ldev->md.uuid[idx]) {
3805                 drbd_uuid_move_history(mdev);
3806                 mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[idx];
3807                 debug_drbd_uuid(mdev, UI_HISTORY_START);
3808         }
3809         _drbd_uuid_set(mdev, idx, val);
3810 }
3811
3812 /**
3813  * drbd_uuid_new_current() - Creates a new current UUID
3814  * @mdev:       DRBD device.
3815  *
3816  * Creates a new current UUID, and rotates the old current UUID into
3817  * the bitmap slot. Causes an incremental resync upon next connect.
3818  */
3819 void drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local)
3820 {
3821         u64 val;
3822
3823         dev_info(DEV, "Creating new current UUID\n");
3824         D_ASSERT(mdev->ldev->md.uuid[UI_BITMAP] == 0);
3825         mdev->ldev->md.uuid[UI_BITMAP] = mdev->ldev->md.uuid[UI_CURRENT];
3826         debug_drbd_uuid(mdev, UI_BITMAP);
3827
3828         get_random_bytes(&val, sizeof(u64));
3829         _drbd_uuid_set(mdev, UI_CURRENT, val);
3830         /* get it to stable storage _now_ */
3831         drbd_md_sync(mdev);
3832 }
3833
3834 void drbd_uuid_set_bm(struct drbd_conf *mdev, u64 val) __must_hold(local)
3835 {
3836         if (mdev->ldev->md.uuid[UI_BITMAP] == 0 && val == 0)
3837                 return;
3838
3839         if (val == 0) {
3840                 drbd_uuid_move_history(mdev);
3841                 mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[UI_BITMAP];
3842                 mdev->ldev->md.uuid[UI_BITMAP] = 0;
3843                 debug_drbd_uuid(mdev, UI_HISTORY_START);
3844                 debug_drbd_uuid(mdev, UI_BITMAP);
3845         } else {
3846                 if (mdev->ldev->md.uuid[UI_BITMAP])
3847                         dev_warn(DEV, "bm UUID already set\n");
3848
3849                 mdev->ldev->md.uuid[UI_BITMAP] = val;
3850                 mdev->ldev->md.uuid[UI_BITMAP] &= ~((u64)1);
3851
3852                 debug_drbd_uuid(mdev, UI_BITMAP);
3853         }
3854         drbd_md_mark_dirty(mdev);
3855 }
3856
3857 /**
3858  * drbd_bmio_set_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io()
3859  * @mdev:       DRBD device.
3860  *
3861  * Sets all bits in the bitmap and writes the whole bitmap to stable storage.
3862  */
3863 int drbd_bmio_set_n_write(struct drbd_conf *mdev)
3864 {
3865         int rv = -EIO;
3866
3867         if (get_ldev_if_state(mdev, D_ATTACHING)) {
3868                 drbd_md_set_flag(mdev, MDF_FULL_SYNC);
3869                 drbd_md_sync(mdev);
3870                 drbd_bm_set_all(mdev);
3871
3872                 rv = drbd_bm_write(mdev);
3873
3874                 if (!rv) {
3875                         drbd_md_clear_flag(mdev, MDF_FULL_SYNC);
3876                         drbd_md_sync(mdev);
3877                 }
3878
3879                 put_ldev(mdev);
3880         }
3881
3882         return rv;
3883 }
3884
3885 /**
3886  * drbd_bmio_clear_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io()
3887  * @mdev:       DRBD device.
3888  *
3889  * Clears all bits in the bitmap and writes the whole bitmap to stable storage.
3890  */
3891 int drbd_bmio_clear_n_write(struct drbd_conf *mdev)
3892 {
3893         int rv = -EIO;
3894
3895         drbd_resume_al(mdev);
3896         if (get_ldev_if_state(mdev, D_ATTACHING)) {
3897                 drbd_bm_clear_all(mdev);
3898                 rv = drbd_bm_write(mdev);
3899                 put_ldev(mdev);
3900         }
3901
3902         return rv;
3903 }
3904
3905 static int w_bitmap_io(struct drbd_conf *mdev, struct drbd_work *w, int unused)
3906 {
3907         struct bm_io_work *work = container_of(w, struct bm_io_work, w);
3908         int rv = -EIO;
3909
3910         D_ASSERT(atomic_read(&mdev->ap_bio_cnt) == 0);
3911
3912         if (get_ldev(mdev)) {
3913                 drbd_bm_lock(mdev, work->why);
3914                 rv = work->io_fn(mdev);
3915                 drbd_bm_unlock(mdev);
3916                 put_ldev(mdev);
3917         }
3918
3919         clear_bit(BITMAP_IO, &mdev->flags);
3920         smp_mb__after_clear_bit();
3921         wake_up(&mdev->misc_wait);
3922
3923         if (work->done)
3924                 work->done(mdev, rv);
3925
3926         clear_bit(BITMAP_IO_QUEUED, &mdev->flags);
3927         work->why = NULL;
3928
3929         return 1;
3930 }
3931
3932 void drbd_ldev_destroy(struct drbd_conf *mdev)
3933 {
3934         lc_destroy(mdev->resync);
3935         mdev->resync = NULL;
3936         lc_destroy(mdev->act_log);
3937         mdev->act_log = NULL;
3938         __no_warn(local,
3939                 drbd_free_bc(mdev->ldev);
3940                 mdev->ldev = NULL;);
3941
3942         if (mdev->md_io_tmpp) {
3943                 __free_page(mdev->md_io_tmpp);
3944                 mdev->md_io_tmpp = NULL;
3945         }
3946         clear_bit(GO_DISKLESS, &mdev->flags);
3947 }
3948
3949 static int w_go_diskless(struct drbd_conf *mdev, struct drbd_work *w, int unused)
3950 {
3951         D_ASSERT(mdev->state.disk == D_FAILED);
3952         /* we cannot assert local_cnt == 0 here, as get_ldev_if_state will
3953          * inc/dec it frequently. Once we are D_DISKLESS, no one will touch
3954          * the protected members anymore, though, so once put_ldev reaches zero
3955          * again, it will be safe to free them. */
3956         drbd_force_state(mdev, NS(disk, D_DISKLESS));
3957         return 1;
3958 }
3959
3960 void drbd_go_diskless(struct drbd_conf *mdev)
3961 {
3962         D_ASSERT(mdev->state.disk == D_FAILED);
3963         if (!test_and_set_bit(GO_DISKLESS, &mdev->flags))
3964                 drbd_queue_work(&mdev->data.work, &mdev->go_diskless);
3965 }
3966
3967 /**
3968  * drbd_queue_bitmap_io() - Queues an IO operation on the whole bitmap
3969  * @mdev:       DRBD device.
3970  * @io_fn:      IO callback to be called when bitmap IO is possible
3971  * @done:       callback to be called after the bitmap IO was performed
3972  * @why:        Descriptive text of the reason for doing the IO
3973  *
3974  * While IO on the bitmap happens we freeze application IO, which ensures
3975  * that drbd_set_out_of_sync() can not be called. This function MAY ONLY be
3976  * called from worker context. It MUST NOT be used while a previous such
3977  * work is still pending!
3978  */
3979 void drbd_queue_bitmap_io(struct drbd_conf *mdev,
3980                           int (*io_fn)(struct drbd_conf *),
3981                           void (*done)(struct drbd_conf *, int),
3982                           char *why)
3983 {
3984         D_ASSERT(current == mdev->worker.task);
3985
3986         D_ASSERT(!test_bit(BITMAP_IO_QUEUED, &mdev->flags));
3987         D_ASSERT(!test_bit(BITMAP_IO, &mdev->flags));
3988         D_ASSERT(list_empty(&mdev->bm_io_work.w.list));
3989         if (mdev->bm_io_work.why)
3990                 dev_err(DEV, "FIXME going to queue '%s' but '%s' still pending?\n",
3991                         why, mdev->bm_io_work.why);
3992
3993         mdev->bm_io_work.io_fn = io_fn;
3994         mdev->bm_io_work.done = done;
3995         mdev->bm_io_work.why = why;
3996
3997         spin_lock_irq(&mdev->req_lock);
3998         set_bit(BITMAP_IO, &mdev->flags);
3999         if (atomic_read(&mdev->ap_bio_cnt) == 0) {
4000                 if (!test_and_set_bit(BITMAP_IO_QUEUED, &mdev->flags))
4001                         drbd_queue_work(&mdev->data.work, &mdev->bm_io_work.w);
4002         }
4003         spin_unlock_irq(&mdev->req_lock);
4004 }
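/*
 * Illustrative usage sketch (the callback name is hypothetical; real callers
 * pass their own io/done functions): from worker context a full bitmap
 * write-out could be queued as
 *
 *	drbd_queue_bitmap_io(mdev, &drbd_bmio_set_n_write,
 *			     &my_bitmap_io_done, "set_n_write from example");
 *
 * where my_bitmap_io_done(struct drbd_conf *mdev, int rv) gets called by
 * w_bitmap_io() once the bitmap IO finished; passing NULL for @done is
 * handled there as well.
 */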
4005
4006 /**
4007  * drbd_bitmap_io() -  Does an IO operation on the whole bitmap
4008  * @mdev:       DRBD device.
4009  * @io_fn:      IO callback to be called when bitmap IO is possible
4010  * @why:        Descriptive text of the reason for doing the IO
4011  *
4012  * Freezes application IO while the actual bitmap IO operation runs. This
4013  * function MAY NOT be called from worker context.
4014  */
4015 int drbd_bitmap_io(struct drbd_conf *mdev, int (*io_fn)(struct drbd_conf *), char *why)
4016 {
4017         int rv;
4018
4019         D_ASSERT(current != mdev->worker.task);
4020
4021         drbd_suspend_io(mdev);
4022
4023         drbd_bm_lock(mdev, why);
4024         rv = io_fn(mdev);
4025         drbd_bm_unlock(mdev);
4026
4027         drbd_resume_io(mdev);
4028
4029         return rv;
4030 }
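/*
 * Illustrative usage sketch (the "why" string is free-form descriptive
 * text): from a context other than the worker, e.g.
 *
 *	rv = drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write,
 *			    "clear_n_write from example");
 *
 * which suspends application IO, runs the io_fn under the bitmap lock,
 * resumes IO again, and returns whatever the io_fn returned.
 */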
4031
4032 void drbd_md_set_flag(struct drbd_conf *mdev, int flag) __must_hold(local)
4033 {
4034         if ((mdev->ldev->md.flags & flag) != flag) {
4035                 drbd_md_mark_dirty(mdev);
4036                 mdev->ldev->md.flags |= flag;
4037         }
4038 }
4039
4040 void drbd_md_clear_flag(struct drbd_conf *mdev, int flag) __must_hold(local)
4041 {
4042         if ((mdev->ldev->md.flags & flag) != 0) {
4043                 drbd_md_mark_dirty(mdev);
4044                 mdev->ldev->md.flags &= ~flag;
4045         }
4046 }
4047 int drbd_md_test_flag(struct drbd_backing_dev *bdev, int flag)
4048 {
4049         return (bdev->md.flags & flag) != 0;
4050 }
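/*
 * Illustrative sketch: the MDF_* flag helpers are typically used in pairs
 * around operations that must be redone after a crash, as
 * drbd_bmio_set_n_write() above does:
 *
 *	drbd_md_set_flag(mdev, MDF_FULL_SYNC);
 *	drbd_md_sync(mdev);
 *	...					(do the risky work)
 *	drbd_md_clear_flag(mdev, MDF_FULL_SYNC);
 *	drbd_md_sync(mdev);
 *
 * while drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC) checks the persistent
 * flag later (presumably during attach).
 */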
4051
4052 static void md_sync_timer_fn(unsigned long data)
4053 {
4054         struct drbd_conf *mdev = (struct drbd_conf *) data;
4055
4056         drbd_queue_work_front(&mdev->data.work, &mdev->md_sync_work);
4057 }
4058
4059 static int w_md_sync(struct drbd_conf *mdev, struct drbd_work *w, int unused)
4060 {
4061         dev_warn(DEV, "md_sync_timer expired! Worker calls drbd_md_sync().\n");
4062 #ifdef DEBUG
4063         dev_warn(DEV, "last md_mark_dirty: %s:%u\n",
4064                 mdev->last_md_mark_dirty.func, mdev->last_md_mark_dirty.line);
4065 #endif
4066         drbd_md_sync(mdev);
4067         return 1;
4068 }
4069
4070 #ifdef CONFIG_DRBD_FAULT_INJECTION
4071 /* Fault insertion support including random number generator shamelessly
4072  * stolen from kernel/rcutorture.c */
4073 struct fault_random_state {
4074         unsigned long state;
4075         unsigned long count;
4076 };
4077
4078 #define FAULT_RANDOM_MULT 39916801  /* prime */
4079 #define FAULT_RANDOM_ADD        479001701 /* prime */
4080 #define FAULT_RANDOM_REFRESH 10000
4081
4082 /*
4083  * Crude but fast random-number generator.  Uses a linear congruential
4084  * generator, with occasional help from get_random_bytes().
4085  */
4086 static unsigned long
4087 _drbd_fault_random(struct fault_random_state *rsp)
4088 {
4089         long refresh;
4090
4091         if (!rsp->count--) {
4092                 get_random_bytes(&refresh, sizeof(refresh));
4093                 rsp->state += refresh;
4094                 rsp->count = FAULT_RANDOM_REFRESH;
4095         }
4096         rsp->state = rsp->state * FAULT_RANDOM_MULT + FAULT_RANDOM_ADD;
4097         return swahw32(rsp->state);
4098 }
4099
4100 static char *
4101 _drbd_fault_str(unsigned int type) {
4102         static char *_faults[] = {
4103                 [DRBD_FAULT_MD_WR] = "Meta-data write",
4104                 [DRBD_FAULT_MD_RD] = "Meta-data read",
4105                 [DRBD_FAULT_RS_WR] = "Resync write",
4106                 [DRBD_FAULT_RS_RD] = "Resync read",
4107                 [DRBD_FAULT_DT_WR] = "Data write",
4108                 [DRBD_FAULT_DT_RD] = "Data read",
4109                 [DRBD_FAULT_DT_RA] = "Data read ahead",
4110                 [DRBD_FAULT_BM_ALLOC] = "BM allocation",
4111                 [DRBD_FAULT_AL_EE] = "EE allocation",
4112                 [DRBD_FAULT_RECEIVE] = "receive data corruption",
4113         };
4114
4115         return (type < DRBD_FAULT_MAX) ? _faults[type] : "**Unknown**";
4116 }
4117
4118 unsigned int
4119 _drbd_insert_fault(struct drbd_conf *mdev, unsigned int type)
4120 {
4121         static struct fault_random_state rrs = {0, 0};
4122
4123         unsigned int ret = (
4124                 (fault_devs == 0 ||
4125                         ((1 << mdev_to_minor(mdev)) & fault_devs) != 0) &&
4126                 (((_drbd_fault_random(&rrs) % 100) + 1) <= fault_rate));
4127
4128         if (ret) {
4129                 fault_count++;
4130
4131                 if (__ratelimit(&drbd_ratelimit_state))
4132                         dev_warn(DEV, "***Simulating %s failure\n",
4133                                 _drbd_fault_str(type));
4134         }
4135
4136         return ret;
4137 }
4138 #endif
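/*
 * Note (assumption, not visible in this hunk): fault_rate, fault_devs and
 * fault_count used above are expected to be module parameters declared near
 * the top of this file, so fault injection would typically be driven via
 * something like
 *
 *	modprobe drbd fault_rate=10 fault_devs=1
 *
 * i.e. roughly 10% of the eligible IOs on minor 0 get a simulated failure.
 */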
4139
4140 const char *drbd_buildtag(void)
4141 {
4142         /* When DRBD is built from external sources, this holds a
4143            reference to the git hash of the source code. */
4144
4145         static char buildtag[38] = "\0uilt-in";
4146
4147         if (buildtag[0] == 0) {
4148 #ifdef CONFIG_MODULES
4149                 if (THIS_MODULE != NULL)
4150                         sprintf(buildtag, "srcversion: %-24s", THIS_MODULE->srcversion);
4151                 else
4152 #endif
4153                         buildtag[0] = 'b';
4154         }
4155
4156         return buildtag;
4157 }
4158
4159 module_init(drbd_init)
4160 module_exit(drbd_cleanup)
4161
4162 EXPORT_SYMBOL(drbd_conn_str);
4163 EXPORT_SYMBOL(drbd_role_str);
4164 EXPORT_SYMBOL(drbd_disk_str);
4165 EXPORT_SYMBOL(drbd_set_st_err_str);