/*
   drbd_receiver.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */


#include <linux/module.h>

#include <asm/uaccess.h>
#include <net/sock.h>

#include <linux/drbd.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/in.h>
#include <linux/mm.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/slab.h>
#include <linux/pkt_sched.h>
#define __KERNEL_SYSCALLS__
#include <linux/unistd.h>
#include <linux/vmalloc.h>
#include <linux/random.h>
#include <linux/string.h>
#include <linux/scatterlist.h>
#include "drbd_int.h"
#include "drbd_req.h"

#include "drbd_vli.h"

struct packet_info {
        enum drbd_packet cmd;
        unsigned int size;
        unsigned int vnr;
        void *data;
};

enum finish_epoch {
        FE_STILL_LIVE,
        FE_DESTROYED,
        FE_RECYCLED,
};

static int drbd_do_features(struct drbd_tconn *tconn);
static int drbd_do_auth(struct drbd_tconn *tconn);
static int drbd_disconnected(struct drbd_conf *mdev);

static enum finish_epoch drbd_may_finish_epoch(struct drbd_tconn *, struct drbd_epoch *, enum epoch_event);
static int e_end_block(struct drbd_work *, int);


#define GFP_TRY (__GFP_HIGHMEM | __GFP_NOWARN)

/*
 * some helper functions to deal with singly linked page lists,
 * page->private being our "next" pointer.
 */

/* If at least n pages are linked at head, get n pages off.
 * Otherwise, don't modify head, and return NULL.
 * Locking is the responsibility of the caller.
 */
static struct page *page_chain_del(struct page **head, int n)
{
        struct page *page;
        struct page *tmp;

        BUG_ON(!n);
        BUG_ON(!head);

        page = *head;

        if (!page)
                return NULL;

        while (page) {
                tmp = page_chain_next(page);
                if (--n == 0)
                        break; /* found sufficient pages */
                if (tmp == NULL)
                        /* insufficient pages, don't use any of them. */
                        return NULL;
                page = tmp;
        }

        /* add end of list marker for the returned list */
        set_page_private(page, 0);
        /* actual return value, and adjustment of head */
        page = *head;
        *head = tmp;
        return page;
}

/* may be used outside of locks to find the tail of a (usually short)
 * "private" page chain, before adding it back to a global chain head
 * with page_chain_add() under a spinlock. */
static struct page *page_chain_tail(struct page *page, int *len)
{
        struct page *tmp;
        int i = 1;
        while ((tmp = page_chain_next(page)))
                ++i, page = tmp;
        if (len)
                *len = i;
        return page;
}

static int page_chain_free(struct page *page)
{
        struct page *tmp;
        int i = 0;
        page_chain_for_each_safe(page, tmp) {
                put_page(page);
                ++i;
        }
        return i;
}

static void page_chain_add(struct page **head,
                struct page *chain_first, struct page *chain_last)
{
#if 1
        struct page *tmp;
        tmp = page_chain_tail(chain_first, NULL);
        BUG_ON(tmp != chain_last);
#endif

        /* add chain to head */
        set_page_private(chain_last, (unsigned long)*head);
        *head = chain_first;
}
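
/*
 * Illustrative sketch (not part of DRBD; do_something() is a placeholder):
 * how the page_chain_* helpers above compose.  A chain grows at the head
 * by pointing the new page's ->private at the old head, and is walked
 * front to back:
 *
 *      struct page *chain = NULL, *p;
 *
 *      p = alloc_page(GFP_TRY);
 *      set_page_private(p, (unsigned long)chain);      (link to old head)
 *      chain = p;                                      (p is the new head)
 *
 *      p = chain;
 *      page_chain_for_each(p)                          (head to tail)
 *              do_something(p);
 */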

static struct page *__drbd_alloc_pages(struct drbd_conf *mdev,
                                       unsigned int number)
{
        struct page *page = NULL;
        struct page *tmp = NULL;
        unsigned int i = 0;

        /* Yes, testing drbd_pp_vacant outside the lock is racy.
         * So what. It saves a spin_lock. */
        if (drbd_pp_vacant >= number) {
                spin_lock(&drbd_pp_lock);
                page = page_chain_del(&drbd_pp_pool, number);
                if (page)
                        drbd_pp_vacant -= number;
                spin_unlock(&drbd_pp_lock);
                if (page)
                        return page;
        }

        /* GFP_TRY, because we must not cause arbitrary write-out: in a DRBD
         * "criss-cross" setup, that might cause write-out on some other DRBD,
         * which in turn might block on the other node at this very place.  */
        for (i = 0; i < number; i++) {
                tmp = alloc_page(GFP_TRY);
                if (!tmp)
                        break;
                set_page_private(tmp, (unsigned long)page);
                page = tmp;
        }

        if (i == number)
                return page;

        /* Not enough pages immediately available this time.
         * No need to jump around here, drbd_alloc_pages will retry this
         * function "soon". */
        if (page) {
                tmp = page_chain_tail(page, NULL);
                spin_lock(&drbd_pp_lock);
                page_chain_add(&drbd_pp_pool, page, tmp);
                drbd_pp_vacant += i;
                spin_unlock(&drbd_pp_lock);
        }
        return NULL;
}

static void reclaim_finished_net_peer_reqs(struct drbd_conf *mdev,
                                           struct list_head *to_be_freed)
{
        struct drbd_peer_request *peer_req;
        struct list_head *le, *tle;

        /* The EEs are always appended to the end of the list. Since
           they are sent in order over the wire, they have to finish
           in order. As soon as we see the first unfinished one, we can
           stop examining the list... */

        list_for_each_safe(le, tle, &mdev->net_ee) {
                peer_req = list_entry(le, struct drbd_peer_request, w.list);
                if (drbd_peer_req_has_active_page(peer_req))
                        break;
                list_move(le, to_be_freed);
        }
}

static void drbd_kick_lo_and_reclaim_net(struct drbd_conf *mdev)
{
        LIST_HEAD(reclaimed);
        struct drbd_peer_request *peer_req, *t;

        spin_lock_irq(&mdev->tconn->req_lock);
        reclaim_finished_net_peer_reqs(mdev, &reclaimed);
        spin_unlock_irq(&mdev->tconn->req_lock);

        list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
                drbd_free_net_peer_req(mdev, peer_req);
}

/**
 * drbd_alloc_pages() - Returns @number pages, retries forever (or until signalled)
 * @mdev:       DRBD device.
 * @number:     number of pages requested
 * @retry:      whether to retry, if not enough pages are available right now
 *
 * Tries to allocate number pages, first from our own page pool, then from
 * the kernel, unless this allocation would exceed the max_buffers setting.
 * Possibly retry until DRBD frees sufficient pages somewhere else.
 *
 * Returns a page chain linked via page->private.
 */
struct page *drbd_alloc_pages(struct drbd_conf *mdev, unsigned int number,
                              bool retry)
{
        struct page *page = NULL;
        struct net_conf *nc;
        DEFINE_WAIT(wait);
        int mxb;

        /* Yes, we may run up to @number over max_buffers. If we
         * follow it strictly, the admin will get it wrong anyways. */
        rcu_read_lock();
        nc = rcu_dereference(mdev->tconn->net_conf);
        mxb = nc ? nc->max_buffers : 1000000;
        rcu_read_unlock();

        if (atomic_read(&mdev->pp_in_use) < mxb)
                page = __drbd_alloc_pages(mdev, number);

        while (page == NULL) {
                prepare_to_wait(&drbd_pp_wait, &wait, TASK_INTERRUPTIBLE);

                drbd_kick_lo_and_reclaim_net(mdev);

                if (atomic_read(&mdev->pp_in_use) < mxb) {
                        page = __drbd_alloc_pages(mdev, number);
                        if (page)
                                break;
                }

                if (!retry)
                        break;

                if (signal_pending(current)) {
                        dev_warn(DEV, "drbd_alloc_pages interrupted!\n");
                        break;
                }

                schedule();
        }
        finish_wait(&drbd_pp_wait, &wait);

        if (page)
                atomic_add(number, &mdev->pp_in_use);
        return page;
}
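
/*
 * Typical pairing (illustrative only, error handling elided): receiver
 * paths allocate a chain here and eventually hand it back through
 * drbd_free_pages() below, which re-links it into drbd_pp_pool or
 * returns it to the system:
 *
 *      struct page *page = drbd_alloc_pages(mdev, nr_pages, true);
 *      if (!page)                      (NULL only if !retry or signalled)
 *              return -ENOMEM;
 *      ...
 *      drbd_free_pages(mdev, page, 0);
 */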

/* Must not be used from irq, as that may deadlock: see drbd_alloc_pages.
 * Is also used from inside another spin_lock_irq(&mdev->tconn->req_lock);
 * Either links the page chain back to the global pool,
 * or returns all pages to the system. */
static void drbd_free_pages(struct drbd_conf *mdev, struct page *page, int is_net)
{
        atomic_t *a = is_net ? &mdev->pp_in_use_by_net : &mdev->pp_in_use;
        int i;

        if (page == NULL)
                return;

        if (drbd_pp_vacant > (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * minor_count)
                i = page_chain_free(page);
        else {
                struct page *tmp;
                tmp = page_chain_tail(page, &i);
                spin_lock(&drbd_pp_lock);
                page_chain_add(&drbd_pp_pool, page, tmp);
                drbd_pp_vacant += i;
                spin_unlock(&drbd_pp_lock);
        }
        i = atomic_sub_return(i, a);
        if (i < 0)
                dev_warn(DEV, "ASSERTION FAILED: %s: %d < 0\n",
                        is_net ? "pp_in_use_by_net" : "pp_in_use", i);
        wake_up(&drbd_pp_wait);
}

/*
You need to hold the req_lock:
 _drbd_wait_ee_list_empty()

You must not have the req_lock:
 drbd_free_peer_req()
 drbd_alloc_peer_req()
 drbd_free_peer_reqs()
 drbd_ee_fix_bhs()
 drbd_finish_peer_reqs()
 drbd_clear_done_ee()
 drbd_wait_ee_list_empty()
*/

struct drbd_peer_request *
drbd_alloc_peer_req(struct drbd_conf *mdev, u64 id, sector_t sector,
                    unsigned int data_size, gfp_t gfp_mask) __must_hold(local)
{
        struct drbd_peer_request *peer_req;
        struct page *page = NULL;
        unsigned nr_pages = (data_size + PAGE_SIZE - 1) >> PAGE_SHIFT;

        if (drbd_insert_fault(mdev, DRBD_FAULT_AL_EE))
                return NULL;

        peer_req = mempool_alloc(drbd_ee_mempool, gfp_mask & ~__GFP_HIGHMEM);
        if (!peer_req) {
                if (!(gfp_mask & __GFP_NOWARN))
                        dev_err(DEV, "%s: allocation failed\n", __func__);
                return NULL;
        }

        if (data_size) {
                page = drbd_alloc_pages(mdev, nr_pages, (gfp_mask & __GFP_WAIT));
                if (!page)
                        goto fail;
        }

        drbd_clear_interval(&peer_req->i);
        peer_req->i.size = data_size;
        peer_req->i.sector = sector;
        peer_req->i.local = false;
        peer_req->i.waiting = false;

        peer_req->epoch = NULL;
        peer_req->w.mdev = mdev;
        peer_req->pages = page;
        atomic_set(&peer_req->pending_bios, 0);
        peer_req->flags = 0;
        /*
         * The block_id is opaque to the receiver.  It is not endianness
         * converted, and sent back to the sender unchanged.
         */
        peer_req->block_id = id;

        return peer_req;

 fail:
        mempool_free(peer_req, drbd_ee_mempool);
        return NULL;
}
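
/*
 * Lifecycle sketch (illustrative, error handling elided): a peer request
 * allocated here is typically submitted to the local disk and released on
 * its completion path (see __drbd_free_peer_req() below):
 *
 *      peer_req = drbd_alloc_peer_req(mdev, id, sector, size, GFP_NOIO);
 *      drbd_submit_peer_request(mdev, peer_req, rw, fault_type);
 *      ...
 *      drbd_free_peer_req(mdev, peer_req);
 */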

void __drbd_free_peer_req(struct drbd_conf *mdev, struct drbd_peer_request *peer_req,
                       int is_net)
{
        if (peer_req->flags & EE_HAS_DIGEST)
                kfree(peer_req->digest);
        drbd_free_pages(mdev, peer_req->pages, is_net);
        D_ASSERT(atomic_read(&peer_req->pending_bios) == 0);
        D_ASSERT(drbd_interval_empty(&peer_req->i));
        mempool_free(peer_req, drbd_ee_mempool);
}

int drbd_free_peer_reqs(struct drbd_conf *mdev, struct list_head *list)
{
        LIST_HEAD(work_list);
        struct drbd_peer_request *peer_req, *t;
        int count = 0;
        int is_net = list == &mdev->net_ee;

        spin_lock_irq(&mdev->tconn->req_lock);
        list_splice_init(list, &work_list);
        spin_unlock_irq(&mdev->tconn->req_lock);

        list_for_each_entry_safe(peer_req, t, &work_list, w.list) {
                __drbd_free_peer_req(mdev, peer_req, is_net);
                count++;
        }
        return count;
}

/*
 * See also comments in _req_mod(,BARRIER_ACKED) and receive_Barrier.
 */
static int drbd_finish_peer_reqs(struct drbd_conf *mdev)
{
        LIST_HEAD(work_list);
        LIST_HEAD(reclaimed);
        struct drbd_peer_request *peer_req, *t;
        int err = 0;

        spin_lock_irq(&mdev->tconn->req_lock);
        reclaim_finished_net_peer_reqs(mdev, &reclaimed);
        list_splice_init(&mdev->done_ee, &work_list);
        spin_unlock_irq(&mdev->tconn->req_lock);

        list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
                drbd_free_net_peer_req(mdev, peer_req);

        /* possible callbacks here:
         * e_end_block, and e_end_resync_block, e_send_superseded.
         * all ignore the last argument.
         */
        list_for_each_entry_safe(peer_req, t, &work_list, w.list) {
                int err2;

                /* list_del not necessary, next/prev members not touched */
                err2 = peer_req->w.cb(&peer_req->w, !!err);
                if (!err)
                        err = err2;
                drbd_free_peer_req(mdev, peer_req);
        }
        wake_up(&mdev->ee_wait);

        return err;
}

static void _drbd_wait_ee_list_empty(struct drbd_conf *mdev,
                                     struct list_head *head)
{
        DEFINE_WAIT(wait);

        /* avoids spin_lock/unlock
         * and calling prepare_to_wait in the fast path */
        while (!list_empty(head)) {
                prepare_to_wait(&mdev->ee_wait, &wait, TASK_UNINTERRUPTIBLE);
                spin_unlock_irq(&mdev->tconn->req_lock);
                io_schedule();
                finish_wait(&mdev->ee_wait, &wait);
                spin_lock_irq(&mdev->tconn->req_lock);
        }
}

static void drbd_wait_ee_list_empty(struct drbd_conf *mdev,
                                    struct list_head *head)
{
        spin_lock_irq(&mdev->tconn->req_lock);
        _drbd_wait_ee_list_empty(mdev, head);
        spin_unlock_irq(&mdev->tconn->req_lock);
}

static int drbd_recv_short(struct socket *sock, void *buf, size_t size, int flags)
{
        mm_segment_t oldfs;
        struct kvec iov = {
                .iov_base = buf,
                .iov_len = size,
        };
        struct msghdr msg = {
                .msg_iovlen = 1,
                .msg_iov = (struct iovec *)&iov,
                .msg_flags = (flags ? flags : MSG_WAITALL | MSG_NOSIGNAL)
        };
        int rv;

        oldfs = get_fs();
        set_fs(KERNEL_DS);
        rv = sock_recvmsg(sock, &msg, size, msg.msg_flags);
        set_fs(oldfs);

        return rv;
}

static int drbd_recv(struct drbd_tconn *tconn, void *buf, size_t size)
{
        int rv;

        rv = drbd_recv_short(tconn->data.socket, buf, size, 0);

        if (rv < 0) {
                if (rv == -ECONNRESET)
                        conn_info(tconn, "sock was reset by peer\n");
                else if (rv != -ERESTARTSYS)
                        conn_err(tconn, "sock_recvmsg returned %d\n", rv);
        } else if (rv == 0) {
                if (test_bit(DISCONNECT_SENT, &tconn->flags)) {
                        long t;
                        rcu_read_lock();
                        t = rcu_dereference(tconn->net_conf)->ping_timeo * HZ/10;
                        rcu_read_unlock();

                        t = wait_event_timeout(tconn->ping_wait, tconn->cstate < C_WF_REPORT_PARAMS, t);

                        if (t)
                                goto out;
                }
                conn_info(tconn, "sock was shut down by peer\n");
        }

        if (rv != size)
                conn_request_state(tconn, NS(conn, C_BROKEN_PIPE), CS_HARD);

out:
        return rv;
}

static int drbd_recv_all(struct drbd_tconn *tconn, void *buf, size_t size)
{
        int err;

        err = drbd_recv(tconn, buf, size);
        if (err != size) {
                if (err >= 0)
                        err = -EIO;
        } else
                err = 0;
        return err;
}

static int drbd_recv_all_warn(struct drbd_tconn *tconn, void *buf, size_t size)
{
        int err;

        err = drbd_recv_all(tconn, buf, size);
        if (err && !signal_pending(current))
                conn_warn(tconn, "short read (expected size %d)\n", (int)size);
        return err;
}
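
/*
 * Summary of the receive-helper layering above:
 *      drbd_recv_short()       thin wrapper around sock_recvmsg()
 *      drbd_recv()             adds logging, goes C_BROKEN_PIPE on short reads
 *      drbd_recv_all()         normalizes the result to 0 or -EIO
 *      drbd_recv_all_warn()    also warns, unless a signal is pending
 */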

/* quoting tcp(7):
 *   On individual connections, the socket buffer size must be set prior to the
 *   listen(2) or connect(2) calls in order to have it take effect.
 * This is our wrapper to do so.
 */
static void drbd_setbufsize(struct socket *sock, unsigned int snd,
                unsigned int rcv)
{
        /* open coded SO_SNDBUF, SO_RCVBUF */
        if (snd) {
                sock->sk->sk_sndbuf = snd;
                sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
        }
        if (rcv) {
                sock->sk->sk_rcvbuf = rcv;
                sock->sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
        }
}
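
/*
 * For reference (illustrative userspace analogue, not used here): the
 * open coding above is roughly what
 *
 *      setsockopt(fd, SOL_SOCKET, SO_SNDBUF, &snd, sizeof(snd));
 *      setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &rcv, sizeof(rcv));
 *
 * would do before listen(2)/connect(2), per the tcp(7) quote above.
 */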

static struct socket *drbd_try_connect(struct drbd_tconn *tconn)
{
        const char *what;
        struct socket *sock;
        struct sockaddr_in6 src_in6;
        struct sockaddr_in6 peer_in6;
        struct net_conf *nc;
        int err, peer_addr_len, my_addr_len;
        int sndbuf_size, rcvbuf_size, connect_int;
        int disconnect_on_error = 1;

        rcu_read_lock();
        nc = rcu_dereference(tconn->net_conf);
        if (!nc) {
                rcu_read_unlock();
                return NULL;
        }
        sndbuf_size = nc->sndbuf_size;
        rcvbuf_size = nc->rcvbuf_size;
        connect_int = nc->connect_int;
        rcu_read_unlock();

        my_addr_len = min_t(int, tconn->my_addr_len, sizeof(src_in6));
        memcpy(&src_in6, &tconn->my_addr, my_addr_len);

        if (((struct sockaddr *)&tconn->my_addr)->sa_family == AF_INET6)
                src_in6.sin6_port = 0;
        else
                ((struct sockaddr_in *)&src_in6)->sin_port = 0; /* AF_INET & AF_SCI */

        peer_addr_len = min_t(int, tconn->peer_addr_len, sizeof(src_in6));
        memcpy(&peer_in6, &tconn->peer_addr, peer_addr_len);

        what = "sock_create_kern";
        err = sock_create_kern(((struct sockaddr *)&src_in6)->sa_family,
                               SOCK_STREAM, IPPROTO_TCP, &sock);
        if (err < 0) {
                sock = NULL;
                goto out;
        }

        sock->sk->sk_rcvtimeo =
        sock->sk->sk_sndtimeo = connect_int * HZ;
        drbd_setbufsize(sock, sndbuf_size, rcvbuf_size);

        /* explicitly bind to the configured IP as source IP
         * for the outgoing connections.
         * This is needed for multihomed hosts and to be
         * able to use lo: interfaces for drbd.
         * Make sure to use 0 as port number, so linux selects
         * a free one dynamically.
         */
        what = "bind before connect";
        err = sock->ops->bind(sock, (struct sockaddr *) &src_in6, my_addr_len);
        if (err < 0)
                goto out;

        /* connect may fail, peer not yet available.
         * stay C_WF_CONNECTION, don't go Disconnecting! */
        disconnect_on_error = 0;
        what = "connect";
        err = sock->ops->connect(sock, (struct sockaddr *) &peer_in6, peer_addr_len, 0);

out:
        if (err < 0) {
                if (sock) {
                        sock_release(sock);
                        sock = NULL;
                }
                switch (-err) {
                        /* timeout, busy, signal pending */
                case ETIMEDOUT: case EAGAIN: case EINPROGRESS:
                case EINTR: case ERESTARTSYS:
                        /* peer not (yet) available, network problem */
                case ECONNREFUSED: case ENETUNREACH:
                case EHOSTDOWN:    case EHOSTUNREACH:
                        disconnect_on_error = 0;
                        break;
                default:
                        conn_err(tconn, "%s failed, err = %d\n", what, err);
                }
                if (disconnect_on_error)
                        conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
        }

        return sock;
}

struct accept_wait_data {
        struct drbd_tconn *tconn;
        struct socket *s_listen;
        struct completion door_bell;
        void (*original_sk_state_change)(struct sock *sk);
};

static void drbd_incoming_connection(struct sock *sk)
{
        struct accept_wait_data *ad = sk->sk_user_data;
        void (*state_change)(struct sock *sk);

        state_change = ad->original_sk_state_change;
        if (sk->sk_state == TCP_ESTABLISHED)
                complete(&ad->door_bell);
        state_change(sk);
}

static int prepare_listen_socket(struct drbd_tconn *tconn, struct accept_wait_data *ad)
{
        int err, sndbuf_size, rcvbuf_size, my_addr_len;
        struct sockaddr_in6 my_addr;
        struct socket *s_listen;
        struct net_conf *nc;
        const char *what;

        rcu_read_lock();
        nc = rcu_dereference(tconn->net_conf);
        if (!nc) {
                rcu_read_unlock();
                return -EIO;
        }
        sndbuf_size = nc->sndbuf_size;
        rcvbuf_size = nc->rcvbuf_size;
        rcu_read_unlock();

        my_addr_len = min_t(int, tconn->my_addr_len, sizeof(struct sockaddr_in6));
        memcpy(&my_addr, &tconn->my_addr, my_addr_len);

        what = "sock_create_kern";
        err = sock_create_kern(((struct sockaddr *)&my_addr)->sa_family,
                               SOCK_STREAM, IPPROTO_TCP, &s_listen);
        if (err) {
                s_listen = NULL;
                goto out;
        }

        s_listen->sk->sk_reuse = SK_CAN_REUSE; /* SO_REUSEADDR */
        drbd_setbufsize(s_listen, sndbuf_size, rcvbuf_size);

        what = "bind before listen";
        err = s_listen->ops->bind(s_listen, (struct sockaddr *)&my_addr, my_addr_len);
        if (err < 0)
                goto out;

        ad->s_listen = s_listen;
        write_lock_bh(&s_listen->sk->sk_callback_lock);
        ad->original_sk_state_change = s_listen->sk->sk_state_change;
        s_listen->sk->sk_state_change = drbd_incoming_connection;
        s_listen->sk->sk_user_data = ad;
        write_unlock_bh(&s_listen->sk->sk_callback_lock);

        what = "listen";
        err = s_listen->ops->listen(s_listen, 5);
        if (err < 0)
                goto out;

        return 0;
out:
        if (s_listen)
                sock_release(s_listen);
        if (err < 0) {
                if (err != -EAGAIN && err != -EINTR && err != -ERESTARTSYS) {
                        conn_err(tconn, "%s failed, err = %d\n", what, err);
                        conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
                }
        }

        return -EIO;
}

static void unregister_state_change(struct sock *sk, struct accept_wait_data *ad)
{
        write_lock_bh(&sk->sk_callback_lock);
        sk->sk_state_change = ad->original_sk_state_change;
        sk->sk_user_data = NULL;
        write_unlock_bh(&sk->sk_callback_lock);
}

static struct socket *drbd_wait_for_connect(struct drbd_tconn *tconn, struct accept_wait_data *ad)
{
        int timeo, connect_int, err = 0;
        struct socket *s_estab = NULL;
        struct net_conf *nc;

        rcu_read_lock();
        nc = rcu_dereference(tconn->net_conf);
        if (!nc) {
                rcu_read_unlock();
                return NULL;
        }
        connect_int = nc->connect_int;
        rcu_read_unlock();

        timeo = connect_int * HZ;
        /* 28.5% random jitter */
        timeo += (prandom_u32() & 1) ? timeo / 7 : -timeo / 7;

        err = wait_for_completion_interruptible_timeout(&ad->door_bell, timeo);
        if (err <= 0)
                return NULL;

        err = kernel_accept(ad->s_listen, &s_estab, 0);
        if (err < 0) {
                if (err != -EAGAIN && err != -EINTR && err != -ERESTARTSYS) {
                        conn_err(tconn, "accept failed, err = %d\n", err);
                        conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
                }
        }

        if (s_estab)
                unregister_state_change(s_estab->sk, ad);

        return s_estab;
}

static int decode_header(struct drbd_tconn *, void *, struct packet_info *);

static int send_first_packet(struct drbd_tconn *tconn, struct drbd_socket *sock,
                             enum drbd_packet cmd)
{
        if (!conn_prepare_command(tconn, sock))
                return -EIO;
        return conn_send_command(tconn, sock, cmd, 0, NULL, 0);
}

static int receive_first_packet(struct drbd_tconn *tconn, struct socket *sock)
{
        unsigned int header_size = drbd_header_size(tconn);
        struct packet_info pi;
        int err;

        err = drbd_recv_short(sock, tconn->data.rbuf, header_size, 0);
        if (err != header_size) {
                if (err >= 0)
                        err = -EIO;
                return err;
        }
        err = decode_header(tconn, tconn->data.rbuf, &pi);
        if (err)
                return err;
        return pi.cmd;
}

/**
 * drbd_socket_okay() - Free the socket if its connection is not okay
 * @sock:       pointer to the pointer to the socket.
 */
static int drbd_socket_okay(struct socket **sock)
{
        int rr;
        char tb[4];

        if (!*sock)
                return false;

        rr = drbd_recv_short(*sock, tb, 4, MSG_DONTWAIT | MSG_PEEK);

        if (rr > 0 || rr == -EAGAIN) {
                return true;
        } else {
                sock_release(*sock);
                *sock = NULL;
                return false;
        }
}

/* Gets called if a connection is established, or if a new minor gets created
   in a connection */
int drbd_connected(struct drbd_conf *mdev)
{
        int err;

        atomic_set(&mdev->packet_seq, 0);
        mdev->peer_seq = 0;

        mdev->state_mutex = mdev->tconn->agreed_pro_version < 100 ?
                &mdev->tconn->cstate_mutex :
                &mdev->own_state_mutex;

        err = drbd_send_sync_param(mdev);
        if (!err)
                err = drbd_send_sizes(mdev, 0, 0);
        if (!err)
                err = drbd_send_uuids(mdev);
        if (!err)
                err = drbd_send_current_state(mdev);
        clear_bit(USE_DEGR_WFC_T, &mdev->flags);
        clear_bit(RESIZE_PENDING, &mdev->flags);
        atomic_set(&mdev->ap_in_flight, 0);
        mod_timer(&mdev->request_timer, jiffies + HZ); /* just start it here. */
        return err;
}

/*
 * return values:
 *   1 yes, we have a valid connection
 *   0 oops, did not work out, please try again
 *  -1 peer talks different language,
 *     no point in trying again, please go standalone.
 *  -2 We do not have a network config...
 */
static int conn_connect(struct drbd_tconn *tconn)
{
        struct drbd_socket sock, msock;
        struct drbd_conf *mdev;
        struct net_conf *nc;
        int vnr, timeout, h, ok;
        bool discard_my_data;
        enum drbd_state_rv rv;
        struct accept_wait_data ad = {
                .tconn = tconn,
                .door_bell = COMPLETION_INITIALIZER_ONSTACK(ad.door_bell),
        };

        clear_bit(DISCONNECT_SENT, &tconn->flags);
        if (conn_request_state(tconn, NS(conn, C_WF_CONNECTION), CS_VERBOSE) < SS_SUCCESS)
                return -2;

        mutex_init(&sock.mutex);
        sock.sbuf = tconn->data.sbuf;
        sock.rbuf = tconn->data.rbuf;
        sock.socket = NULL;
        mutex_init(&msock.mutex);
        msock.sbuf = tconn->meta.sbuf;
        msock.rbuf = tconn->meta.rbuf;
        msock.socket = NULL;

        /* Assume that the peer only understands protocol 80 until we know better.  */
        tconn->agreed_pro_version = 80;

        if (prepare_listen_socket(tconn, &ad))
                return 0;

        do {
                struct socket *s;

                s = drbd_try_connect(tconn);
                if (s) {
                        if (!sock.socket) {
                                sock.socket = s;
                                send_first_packet(tconn, &sock, P_INITIAL_DATA);
                        } else if (!msock.socket) {
                                clear_bit(RESOLVE_CONFLICTS, &tconn->flags);
                                msock.socket = s;
                                send_first_packet(tconn, &msock, P_INITIAL_META);
                        } else {
                                conn_err(tconn, "Logic error in conn_connect()\n");
                                goto out_release_sockets;
                        }
                }

                if (sock.socket && msock.socket) {
                        rcu_read_lock();
                        nc = rcu_dereference(tconn->net_conf);
                        timeout = nc->ping_timeo * HZ / 10;
                        rcu_read_unlock();
                        schedule_timeout_interruptible(timeout);
                        ok = drbd_socket_okay(&sock.socket);
                        ok = drbd_socket_okay(&msock.socket) && ok;
                        if (ok)
                                break;
                }

retry:
                s = drbd_wait_for_connect(tconn, &ad);
                if (s) {
                        int fp = receive_first_packet(tconn, s);
                        drbd_socket_okay(&sock.socket);
                        drbd_socket_okay(&msock.socket);
                        switch (fp) {
                        case P_INITIAL_DATA:
                                if (sock.socket) {
                                        conn_warn(tconn, "initial packet S crossed\n");
                                        sock_release(sock.socket);
                                        sock.socket = s;
                                        goto randomize;
                                }
                                sock.socket = s;
                                break;
                        case P_INITIAL_META:
                                set_bit(RESOLVE_CONFLICTS, &tconn->flags);
                                if (msock.socket) {
                                        conn_warn(tconn, "initial packet M crossed\n");
                                        sock_release(msock.socket);
                                        msock.socket = s;
                                        goto randomize;
                                }
                                msock.socket = s;
                                break;
                        default:
                                conn_warn(tconn, "Error receiving initial packet\n");
                                sock_release(s);
randomize:
                                if (prandom_u32() & 1)
                                        goto retry;
                        }
                }

                if (tconn->cstate <= C_DISCONNECTING)
                        goto out_release_sockets;
                if (signal_pending(current)) {
                        flush_signals(current);
                        smp_rmb();
                        if (get_t_state(&tconn->receiver) == EXITING)
                                goto out_release_sockets;
                }

                ok = drbd_socket_okay(&sock.socket);
                ok = drbd_socket_okay(&msock.socket) && ok;
        } while (!ok);

        if (ad.s_listen)
                sock_release(ad.s_listen);

        sock.socket->sk->sk_reuse = SK_CAN_REUSE; /* SO_REUSEADDR */
        msock.socket->sk->sk_reuse = SK_CAN_REUSE; /* SO_REUSEADDR */

        sock.socket->sk->sk_allocation = GFP_NOIO;
        msock.socket->sk->sk_allocation = GFP_NOIO;

        sock.socket->sk->sk_priority = TC_PRIO_INTERACTIVE_BULK;
        msock.socket->sk->sk_priority = TC_PRIO_INTERACTIVE;

        /* NOT YET ...
         * sock.socket->sk->sk_sndtimeo = tconn->net_conf->timeout*HZ/10;
         * sock.socket->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
         * first set it to the P_CONNECTION_FEATURES timeout,
         * which we set to 4x the configured ping_timeout. */
        rcu_read_lock();
        nc = rcu_dereference(tconn->net_conf);

        sock.socket->sk->sk_sndtimeo =
        sock.socket->sk->sk_rcvtimeo = nc->ping_timeo*4*HZ/10;

        msock.socket->sk->sk_rcvtimeo = nc->ping_int*HZ;
        timeout = nc->timeout * HZ / 10;
        discard_my_data = nc->discard_my_data;
        rcu_read_unlock();

        msock.socket->sk->sk_sndtimeo = timeout;

        /* we don't want delays.
         * we use TCP_CORK where appropriate, though */
        drbd_tcp_nodelay(sock.socket);
        drbd_tcp_nodelay(msock.socket);

        tconn->data.socket = sock.socket;
        tconn->meta.socket = msock.socket;
        tconn->last_received = jiffies;

        h = drbd_do_features(tconn);
        if (h <= 0)
                return h;

        if (tconn->cram_hmac_tfm) {
                /* drbd_request_state(mdev, NS(conn, WFAuth)); */
                switch (drbd_do_auth(tconn)) {
                case -1:
                        conn_err(tconn, "Authentication of peer failed\n");
                        return -1;
                case 0:
                        conn_err(tconn, "Authentication of peer failed, trying again.\n");
                        return 0;
                }
        }

        tconn->data.socket->sk->sk_sndtimeo = timeout;
        tconn->data.socket->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;

        if (drbd_send_protocol(tconn) == -EOPNOTSUPP)
                return -1;

        set_bit(STATE_SENT, &tconn->flags);

        rcu_read_lock();
        idr_for_each_entry(&tconn->volumes, mdev, vnr) {
                kref_get(&mdev->kref);
                rcu_read_unlock();

                /* Prevent a race between resync-handshake and
                 * being promoted to Primary.
                 *
                 * Grab and release the state mutex, so we know that any current
                 * drbd_set_role() is finished, and any incoming drbd_set_role
                 * will see the STATE_SENT flag, and wait for it to be cleared.
                 */
                mutex_lock(mdev->state_mutex);
                mutex_unlock(mdev->state_mutex);

                if (discard_my_data)
                        set_bit(DISCARD_MY_DATA, &mdev->flags);
                else
                        clear_bit(DISCARD_MY_DATA, &mdev->flags);

                drbd_connected(mdev);
                kref_put(&mdev->kref, &drbd_minor_destroy);
                rcu_read_lock();
        }
        rcu_read_unlock();

        rv = conn_request_state(tconn, NS(conn, C_WF_REPORT_PARAMS), CS_VERBOSE);
        if (rv < SS_SUCCESS || tconn->cstate != C_WF_REPORT_PARAMS) {
                clear_bit(STATE_SENT, &tconn->flags);
                return 0;
        }

        drbd_thread_start(&tconn->asender);

        mutex_lock(&tconn->conf_update);
        /* The discard_my_data flag is a single-shot modifier to the next
         * connection attempt, the handshake of which is now well underway.
         * No need for rcu style copying of the whole struct
         * just to clear a single value. */
        tconn->net_conf->discard_my_data = 0;
        mutex_unlock(&tconn->conf_update);

        return h;

out_release_sockets:
        if (ad.s_listen)
                sock_release(ad.s_listen);
        if (sock.socket)
                sock_release(sock.socket);
        if (msock.socket)
                sock_release(msock.socket);
        return -1;
}

static int decode_header(struct drbd_tconn *tconn, void *header, struct packet_info *pi)
{
        unsigned int header_size = drbd_header_size(tconn);

        if (header_size == sizeof(struct p_header100) &&
            *(__be32 *)header == cpu_to_be32(DRBD_MAGIC_100)) {
                struct p_header100 *h = header;
                if (h->pad != 0) {
                        conn_err(tconn, "Header padding is not zero\n");
                        return -EINVAL;
                }
                pi->vnr = be16_to_cpu(h->volume);
                pi->cmd = be16_to_cpu(h->command);
                pi->size = be32_to_cpu(h->length);
        } else if (header_size == sizeof(struct p_header95) &&
                   *(__be16 *)header == cpu_to_be16(DRBD_MAGIC_BIG)) {
                struct p_header95 *h = header;
                pi->cmd = be16_to_cpu(h->command);
                pi->size = be32_to_cpu(h->length);
                pi->vnr = 0;
        } else if (header_size == sizeof(struct p_header80) &&
                   *(__be32 *)header == cpu_to_be32(DRBD_MAGIC)) {
                struct p_header80 *h = header;
                pi->cmd = be16_to_cpu(h->command);
                pi->size = be16_to_cpu(h->length);
                pi->vnr = 0;
        } else {
                conn_err(tconn, "Wrong magic value 0x%08x in protocol version %d\n",
                         be32_to_cpu(*(__be32 *)header),
                         tconn->agreed_pro_version);
                return -EINVAL;
        }
        pi->data = header + header_size;
        return 0;
}
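
/*
 * Header variants dispatched above, selected by drbd_header_size() plus
 * the magic at the start of the buffer:
 *      p_header100: __be32 DRBD_MAGIC_100, zero pad, carries a volume nr
 *      p_header95:  __be16 DRBD_MAGIC_BIG, 32-bit length, vnr forced to 0
 *      p_header80:  __be32 DRBD_MAGIC,     16-bit length, vnr forced to 0
 * In all cases pi->data is left pointing just past the header.
 */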

static int drbd_recv_header(struct drbd_tconn *tconn, struct packet_info *pi)
{
        void *buffer = tconn->data.rbuf;
        int err;

        err = drbd_recv_all_warn(tconn, buffer, drbd_header_size(tconn));
        if (err)
                return err;

        err = decode_header(tconn, buffer, pi);
        tconn->last_received = jiffies;

        return err;
}

static void drbd_flush(struct drbd_tconn *tconn)
{
        int rv;
        struct drbd_conf *mdev;
        int vnr;

        if (tconn->write_ordering >= WO_bdev_flush) {
                rcu_read_lock();
                idr_for_each_entry(&tconn->volumes, mdev, vnr) {
                        if (!get_ldev(mdev))
                                continue;
                        kref_get(&mdev->kref);
                        rcu_read_unlock();

                        rv = blkdev_issue_flush(mdev->ldev->backing_bdev,
                                        GFP_NOIO, NULL);
                        if (rv) {
                                dev_info(DEV, "local disk flush failed with status %d\n", rv);
                                /* would rather check on EOPNOTSUPP, but that is not reliable.
                                 * don't try again for ANY return value != 0
                                 * if (rv == -EOPNOTSUPP) */
                                drbd_bump_write_ordering(tconn, WO_drain_io);
                        }
                        put_ldev(mdev);
                        kref_put(&mdev->kref, &drbd_minor_destroy);

                        rcu_read_lock();
                        if (rv)
                                break;
                }
                rcu_read_unlock();
        }
}

/**
 * drbd_may_finish_epoch() - Applies an epoch_event to the epoch's state, eventually finishes it.
 * @tconn:      DRBD connection.
 * @epoch:      Epoch object.
 * @ev:         Epoch event.
 */
static enum finish_epoch drbd_may_finish_epoch(struct drbd_tconn *tconn,
                                               struct drbd_epoch *epoch,
                                               enum epoch_event ev)
{
        int epoch_size;
        struct drbd_epoch *next_epoch;
        enum finish_epoch rv = FE_STILL_LIVE;

        spin_lock(&tconn->epoch_lock);
        do {
                next_epoch = NULL;

                epoch_size = atomic_read(&epoch->epoch_size);

                switch (ev & ~EV_CLEANUP) {
                case EV_PUT:
                        atomic_dec(&epoch->active);
                        break;
                case EV_GOT_BARRIER_NR:
                        set_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags);
                        break;
                case EV_BECAME_LAST:
                        /* nothing to do */
                        break;
                }

                if (epoch_size != 0 &&
                    atomic_read(&epoch->active) == 0 &&
                    (test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags) || ev & EV_CLEANUP)) {
                        if (!(ev & EV_CLEANUP)) {
                                spin_unlock(&tconn->epoch_lock);
                                drbd_send_b_ack(epoch->tconn, epoch->barrier_nr, epoch_size);
                                spin_lock(&tconn->epoch_lock);
                        }
#if 0
                        /* FIXME: dec unacked on connection, once we have
                         * something to count pending connection packets in. */
                        if (test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags))
                                dec_unacked(epoch->tconn);
#endif

                        if (tconn->current_epoch != epoch) {
                                next_epoch = list_entry(epoch->list.next, struct drbd_epoch, list);
                                list_del(&epoch->list);
                                ev = EV_BECAME_LAST | (ev & EV_CLEANUP);
                                tconn->epochs--;
                                kfree(epoch);

                                if (rv == FE_STILL_LIVE)
                                        rv = FE_DESTROYED;
                        } else {
                                epoch->flags = 0;
                                atomic_set(&epoch->epoch_size, 0);
                                /* atomic_set(&epoch->active, 0); is already zero */
                                if (rv == FE_STILL_LIVE)
                                        rv = FE_RECYCLED;
                        }
                }

                if (!next_epoch)
                        break;

                epoch = next_epoch;
        } while (1);

        spin_unlock(&tconn->epoch_lock);

        return rv;
}

/**
 * drbd_bump_write_ordering() - Fall back to another write ordering method
 * @tconn:      DRBD connection.
 * @wo:         Write ordering method to try.
 */
void drbd_bump_write_ordering(struct drbd_tconn *tconn, enum write_ordering_e wo)
{
        struct disk_conf *dc;
        struct drbd_conf *mdev;
        enum write_ordering_e pwo;
        int vnr;
        static char *write_ordering_str[] = {
                [WO_none] = "none",
                [WO_drain_io] = "drain",
                [WO_bdev_flush] = "flush",
        };

        pwo = tconn->write_ordering;
        wo = min(pwo, wo);
        rcu_read_lock();
        idr_for_each_entry(&tconn->volumes, mdev, vnr) {
                if (!get_ldev_if_state(mdev, D_ATTACHING))
                        continue;
                dc = rcu_dereference(mdev->ldev->disk_conf);

                if (wo == WO_bdev_flush && !dc->disk_flushes)
                        wo = WO_drain_io;
                if (wo == WO_drain_io && !dc->disk_drain)
                        wo = WO_none;
                put_ldev(mdev);
        }
        rcu_read_unlock();
        tconn->write_ordering = wo;
        if (pwo != tconn->write_ordering || wo == WO_bdev_flush)
                conn_info(tconn, "Method to ensure write ordering: %s\n", write_ordering_str[tconn->write_ordering]);
}
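
/*
 * Note (summary of the above): the ordering method can only be demoted,
 * never promoted, since wo = min(pwo, wo):
 *      WO_bdev_flush -> WO_drain_io -> WO_none
 * and every attached volume may demote it further through its disk_conf
 * (disk_flushes, disk_drain).
 */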

/**
 * drbd_submit_peer_request()
 * @mdev:       DRBD device.
 * @peer_req:   peer request
 * @rw:         flag field, see bio->bi_rw
 *
 * May spread the pages to multiple bios,
 * depending on bio_add_page restrictions.
 *
 * Returns 0 if all bios have been submitted,
 * -ENOMEM if we could not allocate enough bios,
 * -ENOSPC (any better suggestion?) if we have not been able to bio_add_page a
 *  single page to an empty bio (which should never happen and likely indicates
 *  that the lower level IO stack is in some way broken). This has been observed
 *  on certain Xen deployments.
 */
/* TODO allocate from our own bio_set. */
int drbd_submit_peer_request(struct drbd_conf *mdev,
                             struct drbd_peer_request *peer_req,
                             const unsigned rw, const int fault_type)
{
        struct bio *bios = NULL;
        struct bio *bio;
        struct page *page = peer_req->pages;
        sector_t sector = peer_req->i.sector;
        unsigned ds = peer_req->i.size;
        unsigned n_bios = 0;
        unsigned nr_pages = (ds + PAGE_SIZE - 1) >> PAGE_SHIFT;
        int err = -ENOMEM;

        /* In most cases, we will only need one bio.  But in case the lower
         * level restrictions happen to be different at this offset on this
         * side than those of the sending peer, we may need to submit the
         * request in more than one bio.
         *
         * Plain bio_alloc is good enough here, this is no DRBD internally
         * generated bio, but a bio allocated on behalf of the peer.
         */
next_bio:
        bio = bio_alloc(GFP_NOIO, nr_pages);
        if (!bio) {
                dev_err(DEV, "submit_ee: Allocation of a bio failed\n");
                goto fail;
        }
        /* > peer_req->i.sector, unless this is the first bio */
        bio->bi_iter.bi_sector = sector;
        bio->bi_bdev = mdev->ldev->backing_bdev;
        bio->bi_rw = rw;
        bio->bi_private = peer_req;
        bio->bi_end_io = drbd_peer_request_endio;

        bio->bi_next = bios;
        bios = bio;
        ++n_bios;

        page_chain_for_each(page) {
                unsigned len = min_t(unsigned, ds, PAGE_SIZE);
                if (!bio_add_page(bio, page, len, 0)) {
                        /* A single page must always be possible!
                         * But in case it fails anyways,
                         * we deal with it, and complain (below). */
                        if (bio->bi_vcnt == 0) {
                                dev_err(DEV,
                                        "bio_add_page failed for len=%u, "
                                        "bi_vcnt=0 (bi_sector=%llu)\n",
                                        len, (uint64_t)bio->bi_iter.bi_sector);
                                err = -ENOSPC;
                                goto fail;
                        }
                        goto next_bio;
                }
                ds -= len;
                sector += len >> 9;
                --nr_pages;
        }
        D_ASSERT(page == NULL);
        D_ASSERT(ds == 0);

        atomic_set(&peer_req->pending_bios, n_bios);
        do {
                bio = bios;
                bios = bios->bi_next;
                bio->bi_next = NULL;

                drbd_generic_make_request(mdev, fault_type, bio);
        } while (bios);
        return 0;

fail:
        while (bios) {
                bio = bios;
                bios = bios->bi_next;
                bio_put(bio);
        }
        return err;
}
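
/*
 * Illustrative example (assuming 4 KiB pages): if the lower-level limits
 * make bio_add_page() refuse the third page of a 12 KiB peer request,
 * the code above ends up with two bios chained through bi_next:
 *
 *      bio 0: bi_sector = S,      pages 0..1
 *      bio 1: bi_sector = S + 16, page  2      (16 = 8 KiB in 512 B sectors)
 *
 * both submitted one by one via drbd_generic_make_request().
 */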
1387
1388 static void drbd_remove_epoch_entry_interval(struct drbd_conf *mdev,
1389                                              struct drbd_peer_request *peer_req)
1390 {
1391         struct drbd_interval *i = &peer_req->i;
1392
1393         drbd_remove_interval(&mdev->write_requests, i);
1394         drbd_clear_interval(i);
1395
1396         /* Wake up any processes waiting for this peer request to complete.  */
1397         if (i->waiting)
1398                 wake_up(&mdev->misc_wait);
1399 }
1400
1401 void conn_wait_active_ee_empty(struct drbd_tconn *tconn)
1402 {
1403         struct drbd_conf *mdev;
1404         int vnr;
1405
1406         rcu_read_lock();
1407         idr_for_each_entry(&tconn->volumes, mdev, vnr) {
1408                 kref_get(&mdev->kref);
1409                 rcu_read_unlock();
1410                 drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
1411                 kref_put(&mdev->kref, &drbd_minor_destroy);
1412                 rcu_read_lock();
1413         }
1414         rcu_read_unlock();
1415 }
1416
1417 static int receive_Barrier(struct drbd_tconn *tconn, struct packet_info *pi)
1418 {
1419         int rv;
1420         struct p_barrier *p = pi->data;
1421         struct drbd_epoch *epoch;
1422
1423         /* FIXME these are unacked on connection,
1424          * not a specific (peer)device.
1425          */
1426         tconn->current_epoch->barrier_nr = p->barrier;
1427         tconn->current_epoch->tconn = tconn;
1428         rv = drbd_may_finish_epoch(tconn, tconn->current_epoch, EV_GOT_BARRIER_NR);
1429
1430         /* P_BARRIER_ACK may imply that the corresponding extent is dropped from
1431          * the activity log, which means it would not be resynced in case the
1432          * R_PRIMARY crashes now.
1433          * Therefore we must send the barrier_ack after the barrier request was
1434          * completed. */
1435         switch (tconn->write_ordering) {
1436         case WO_none:
1437                 if (rv == FE_RECYCLED)
1438                         return 0;
1439
1440                 /* receiver context, in the writeout path of the other node.
1441                  * avoid potential distributed deadlock */
1442                 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
1443                 if (epoch)
1444                         break;
1445                 else
1446                         conn_warn(tconn, "Allocation of an epoch failed, slowing down\n");
1447                         /* Fall through */
1448
1449         case WO_bdev_flush:
1450         case WO_drain_io:
1451                 conn_wait_active_ee_empty(tconn);
1452                 drbd_flush(tconn);
1453
1454                 if (atomic_read(&tconn->current_epoch->epoch_size)) {
1455                         epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
1456                         if (epoch)
1457                                 break;
1458                 }
1459
1460                 return 0;
1461         default:
1462                 conn_err(tconn, "Strangeness in tconn->write_ordering %d\n", tconn->write_ordering);
1463                 return -EIO;
1464         }
1465
1466         epoch->flags = 0;
1467         atomic_set(&epoch->epoch_size, 0);
1468         atomic_set(&epoch->active, 0);
1469
1470         spin_lock(&tconn->epoch_lock);
1471         if (atomic_read(&tconn->current_epoch->epoch_size)) {
1472                 list_add(&epoch->list, &tconn->current_epoch->list);
1473                 tconn->current_epoch = epoch;
1474                 tconn->epochs++;
1475         } else {
1476                 /* The current_epoch got recycled while we allocated this one... */
1477                 kfree(epoch);
1478         }
1479         spin_unlock(&tconn->epoch_lock);
1480
1481         return 0;
1482 }
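/*
 * If the allocation in the WO_none path above fails, we fall through to
 * the wait/flush path: after the active requests have drained, the
 * current epoch can usually be recycled, and a new epoch object is only
 * retried if the current epoch is still non-empty.
 */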
1483
1484 /* used from receive_RSDataReply (recv_resync_read)
1485  * and from receive_Data */
1486 static struct drbd_peer_request *
1487 read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector,
1488               int data_size) __must_hold(local)
1489 {
1490         const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
1491         struct drbd_peer_request *peer_req;
1492         struct page *page;
1493         int dgs, ds, err;
1494         void *dig_in = mdev->tconn->int_dig_in;
1495         void *dig_vv = mdev->tconn->int_dig_vv;
1496         unsigned long *data;
1497
1498         dgs = 0;
1499         if (mdev->tconn->peer_integrity_tfm) {
1500                 dgs = crypto_hash_digestsize(mdev->tconn->peer_integrity_tfm);
1501                 /*
1502                  * FIXME: Receive the incoming digest into the receive buffer
1503                  *        here, together with its struct p_data?
1504                  */
1505                 err = drbd_recv_all_warn(mdev->tconn, dig_in, dgs);
1506                 if (err)
1507                         return NULL;
1508                 data_size -= dgs;
1509         }
1510
1511         if (!expect(IS_ALIGNED(data_size, 512)))
1512                 return NULL;
1513         if (!expect(data_size <= DRBD_MAX_BIO_SIZE))
1514                 return NULL;
1515
1516         /* even though we trust our peer,
1517          * we sometimes have to double check. */
1518         if (sector + (data_size>>9) > capacity) {
1519                 dev_err(DEV, "request from peer beyond end of local disk: "
1520                         "capacity: %llus < sector: %llus + size: %u\n",
1521                         (unsigned long long)capacity,
1522                         (unsigned long long)sector, data_size);
1523                 return NULL;
1524         }
1525
1526         /* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
1527          * "criss-cross" setup, that might cause write-out on some other DRBD,
1528          * which in turn might block on the other node at this very place.  */
1529         peer_req = drbd_alloc_peer_req(mdev, id, sector, data_size, GFP_NOIO);
1530         if (!peer_req)
1531                 return NULL;
1532
1533         if (!data_size)
1534                 return peer_req;
1535
1536         ds = data_size;
1537         page = peer_req->pages;
1538         page_chain_for_each(page) {
1539                 unsigned len = min_t(int, ds, PAGE_SIZE);
1540                 data = kmap(page);
1541                 err = drbd_recv_all_warn(mdev->tconn, data, len);
1542                 if (drbd_insert_fault(mdev, DRBD_FAULT_RECEIVE)) {
1543                         dev_err(DEV, "Fault injection: Corrupting data on receive\n");
1544                         data[0] = data[0] ^ (unsigned long)-1;
1545                 }
1546                 kunmap(page);
1547                 if (err) {
1548                         drbd_free_peer_req(mdev, peer_req);
1549                         return NULL;
1550                 }
1551                 ds -= len;
1552         }
1553
1554         if (dgs) {
1555                 drbd_csum_ee(mdev, mdev->tconn->peer_integrity_tfm, peer_req, dig_vv);
1556                 if (memcmp(dig_in, dig_vv, dgs)) {
1557                         dev_err(DEV, "Digest integrity check FAILED: %llus +%u\n",
1558                                 (unsigned long long)sector, data_size);
1559                         drbd_free_peer_req(mdev, peer_req);
1560                         return NULL;
1561                 }
1562         }
1563         mdev->recv_cnt += data_size>>9;
1564         return peer_req;
1565 }
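/*
 * drbd_csum_ee() above computes a digest over the received payload.  As a
 * rough sketch, assuming the legacy crypto_hash API (the helper name is
 * made up for illustration; the real drbd_csum_ee() walks the whole page
 * chain of the peer request), hashing a single page looks like this:
 */
#if 0
static void example_page_digest(struct crypto_hash *tfm, struct page *page,
                                unsigned int len, void *digest)
{
        struct hash_desc desc = { .tfm = tfm, .flags = 0 };
        struct scatterlist sg;

        sg_init_table(&sg, 1);
        sg_set_page(&sg, page, len, 0);

        crypto_hash_init(&desc);
        crypto_hash_update(&desc, &sg, len);
        crypto_hash_final(&desc, digest);
}
#endif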
1566
1567 /* drbd_drain_block() just takes a data block
1568  * out of the socket input buffer, and discards it.
1569  */
1570 static int drbd_drain_block(struct drbd_conf *mdev, int data_size)
1571 {
1572         struct page *page;
1573         int err = 0;
1574         void *data;
1575
1576         if (!data_size)
1577                 return 0;
1578
1579         page = drbd_alloc_pages(mdev, 1, 1);
1580         if (!page) return -ENOMEM; /* interrupted while waiting for pages */
1581         data = kmap(page);
1582         while (data_size) {
1583                 unsigned int len = min_t(int, data_size, PAGE_SIZE);
1584
1585                 err = drbd_recv_all_warn(mdev->tconn, data, len);
1586                 if (err)
1587                         break;
1588                 data_size -= len;
1589         }
1590         kunmap(page);
1591         drbd_free_pages(mdev, page, 0);
1592         return err;
1593 }
1594
1595 static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req,
1596                            sector_t sector, int data_size)
1597 {
1598         struct bio_vec bvec;
1599         struct bvec_iter iter;
1600         struct bio *bio;
1601         int dgs, err, expect;
1602         void *dig_in = mdev->tconn->int_dig_in;
1603         void *dig_vv = mdev->tconn->int_dig_vv;
1604         dgs = 0;
1605         if (mdev->tconn->peer_integrity_tfm) {
1606                 dgs = crypto_hash_digestsize(mdev->tconn->peer_integrity_tfm);
1607                 err = drbd_recv_all_warn(mdev->tconn, dig_in, dgs);
1608                 if (err)
1609                         return err;
1610                 data_size -= dgs;
1611         }
1612
1613         /* optimistically update recv_cnt.  if receiving fails below,
1614          * we disconnect anyways, and counters will be reset. */
1615         mdev->recv_cnt += data_size>>9;
1616
1617         bio = req->master_bio;
1618         D_ASSERT(sector == bio->bi_iter.bi_sector);
1619
1620         bio_for_each_segment(bvec, bio, iter) {
1621                 void *mapped = kmap(bvec.bv_page) + bvec.bv_offset;
1622                 expect = min_t(int, data_size, bvec.bv_len);
1623                 err = drbd_recv_all_warn(mdev->tconn, mapped, expect);
1624                 kunmap(bvec.bv_page);
1625                 if (err)
1626                         return err;
1627                 data_size -= expect;
1628         }
1629
1630         if (dgs) {
1631                 drbd_csum_bio(mdev, mdev->tconn->peer_integrity_tfm, bio, dig_vv);
1632                 if (memcmp(dig_in, dig_vv, dgs)) {
1633                         dev_err(DEV, "Digest integrity check FAILED. Broken NICs?\n");
1634                         return -EINVAL;
1635                 }
1636         }
1637
1638         D_ASSERT(data_size == 0);
1639         return 0;
1640 }
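/*
 * Note on the loop above: with the bvec iterator abstraction,
 * bio_for_each_segment() yields each struct bio_vec by value and advances
 * a struct bvec_iter, instead of handing out pointers into bio->bi_io_vec
 * by integer index; hence the bvec.bv_page / bvec.bv_len accesses.
 */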
1641
1642 /*
1643  * e_end_resync_block() is called in asender context via
1644  * drbd_finish_peer_reqs().
1645  */
1646 static int e_end_resync_block(struct drbd_work *w, int unused)
1647 {
1648         struct drbd_peer_request *peer_req =
1649                 container_of(w, struct drbd_peer_request, w);
1650         struct drbd_conf *mdev = w->mdev;
1651         sector_t sector = peer_req->i.sector;
1652         int err;
1653
1654         D_ASSERT(drbd_interval_empty(&peer_req->i));
1655
1656         if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
1657                 drbd_set_in_sync(mdev, sector, peer_req->i.size);
1658                 err = drbd_send_ack(mdev, P_RS_WRITE_ACK, peer_req);
1659         } else {
1660                 /* Record failure to sync */
1661                 drbd_rs_failed_io(mdev, sector, peer_req->i.size);
1662
1663                 err  = drbd_send_ack(mdev, P_NEG_ACK, peer_req);
1664         }
1665         dec_unacked(mdev);
1666
1667         return err;
1668 }
1669
1670 static int recv_resync_read(struct drbd_conf *mdev, sector_t sector, int data_size) __releases(local)
1671 {
1672         struct drbd_peer_request *peer_req;
1673
1674         peer_req = read_in_block(mdev, ID_SYNCER, sector, data_size);
1675         if (!peer_req)
1676                 goto fail;
1677
1678         dec_rs_pending(mdev);
1679
1680         inc_unacked(mdev);
1681         /* corresponding dec_unacked() in e_end_resync_block()
1682          * respective _drbd_clear_done_ee */
1683
1684         peer_req->w.cb = e_end_resync_block;
1685
1686         spin_lock_irq(&mdev->tconn->req_lock);
1687         list_add(&peer_req->w.list, &mdev->sync_ee);
1688         spin_unlock_irq(&mdev->tconn->req_lock);
1689
1690         atomic_add(data_size >> 9, &mdev->rs_sect_ev);
1691         if (drbd_submit_peer_request(mdev, peer_req, WRITE, DRBD_FAULT_RS_WR) == 0)
1692                 return 0;
1693
1694         /* don't care for the reason here */
1695         dev_err(DEV, "submit failed, triggering re-connect\n");
1696         spin_lock_irq(&mdev->tconn->req_lock);
1697         list_del(&peer_req->w.list);
1698         spin_unlock_irq(&mdev->tconn->req_lock);
1699
1700         drbd_free_peer_req(mdev, peer_req);
1701 fail:
1702         put_ldev(mdev);
1703         return -EIO;
1704 }
1705
1706 static struct drbd_request *
1707 find_request(struct drbd_conf *mdev, struct rb_root *root, u64 id,
1708              sector_t sector, bool missing_ok, const char *func)
1709 {
1710         struct drbd_request *req;
1711
1712         /* Request object according to our peer */
1713         req = (struct drbd_request *)(unsigned long)id;
1714         if (drbd_contains_interval(root, sector, &req->i) && req->i.local)
1715                 return req;
1716         if (!missing_ok) {
1717                 dev_err(DEV, "%s: failed to find request 0x%lx, sector %llus\n", func,
1718                         (unsigned long)id, (unsigned long long)sector);
1719         }
1720         return NULL;
1721 }
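/*
 * The peer echoes our request pointer back verbatim as the block_id
 * cookie; before it is trusted, the interval lookup above verifies that
 * exactly this request is still registered in the tree at that sector.
 */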
1722
1723 static int receive_DataReply(struct drbd_tconn *tconn, struct packet_info *pi)
1724 {
1725         struct drbd_conf *mdev;
1726         struct drbd_request *req;
1727         sector_t sector;
1728         int err;
1729         struct p_data *p = pi->data;
1730
1731         mdev = vnr_to_mdev(tconn, pi->vnr);
1732         if (!mdev)
1733                 return -EIO;
1734
1735         sector = be64_to_cpu(p->sector);
1736
1737         spin_lock_irq(&mdev->tconn->req_lock);
1738         req = find_request(mdev, &mdev->read_requests, p->block_id, sector, false, __func__);
1739         spin_unlock_irq(&mdev->tconn->req_lock);
1740         if (unlikely(!req))
1741                 return -EIO;
1742
1743         /* hlist_del(&req->collision) is done in _req_may_be_done, to avoid
1744          * special casing it there for the various failure cases.
1745          * still no race with drbd_fail_pending_reads */
1746         err = recv_dless_read(mdev, req, sector, pi->size);
1747         if (!err)
1748                 req_mod(req, DATA_RECEIVED);
1749         /* else: nothing. handled from drbd_disconnect...
1750          * I don't think we may complete this just yet
1751          * in case we are "on-disconnect: freeze" */
1752
1753         return err;
1754 }
1755
1756 static int receive_RSDataReply(struct drbd_tconn *tconn, struct packet_info *pi)
1757 {
1758         struct drbd_conf *mdev;
1759         sector_t sector;
1760         int err;
1761         struct p_data *p = pi->data;
1762
1763         mdev = vnr_to_mdev(tconn, pi->vnr);
1764         if (!mdev)
1765                 return -EIO;
1766
1767         sector = be64_to_cpu(p->sector);
1768         D_ASSERT(p->block_id == ID_SYNCER);
1769
1770         if (get_ldev(mdev)) {
1771                 /* data is submitted to disk within recv_resync_read.
1772                  * corresponding put_ldev done below on error,
1773                  * or in drbd_peer_request_endio. */
1774                 err = recv_resync_read(mdev, sector, pi->size);
1775         } else {
1776                 if (__ratelimit(&drbd_ratelimit_state))
1777                         dev_err(DEV, "Can not write resync data to local disk.\n");
1778
1779                 err = drbd_drain_block(mdev, pi->size);
1780
1781                 drbd_send_ack_dp(mdev, P_NEG_ACK, p, pi->size);
1782         }
1783
1784         atomic_add(pi->size >> 9, &mdev->rs_sect_in);
1785
1786         return err;
1787 }
1788
1789 static void restart_conflicting_writes(struct drbd_conf *mdev,
1790                                        sector_t sector, int size)
1791 {
1792         struct drbd_interval *i;
1793         struct drbd_request *req;
1794
1795         drbd_for_each_overlap(i, &mdev->write_requests, sector, size) {
1796                 if (!i->local)
1797                         continue;
1798                 req = container_of(i, struct drbd_request, i);
1799                 if (req->rq_state & RQ_LOCAL_PENDING ||
1800                     !(req->rq_state & RQ_POSTPONED))
1801                         continue;
1802                 /* as it is RQ_POSTPONED, this will cause it to
1803                  * be queued on the retry workqueue. */
1804                 __req_mod(req, CONFLICT_RESOLVED, NULL);
1805         }
1806 }
1807
1808 /*
1809  * e_end_block() is called in asender context via drbd_finish_peer_reqs().
1810  */
1811 static int e_end_block(struct drbd_work *w, int cancel)
1812 {
1813         struct drbd_peer_request *peer_req =
1814                 container_of(w, struct drbd_peer_request, w);
1815         struct drbd_conf *mdev = w->mdev;
1816         sector_t sector = peer_req->i.sector;
1817         int err = 0, pcmd;
1818
1819         if (peer_req->flags & EE_SEND_WRITE_ACK) {
1820                 if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
1821                         pcmd = (mdev->state.conn >= C_SYNC_SOURCE &&
1822                                 mdev->state.conn <= C_PAUSED_SYNC_T &&
1823                                 peer_req->flags & EE_MAY_SET_IN_SYNC) ?
1824                                 P_RS_WRITE_ACK : P_WRITE_ACK;
1825                         err = drbd_send_ack(mdev, pcmd, peer_req);
1826                         if (pcmd == P_RS_WRITE_ACK)
1827                                 drbd_set_in_sync(mdev, sector, peer_req->i.size);
1828                 } else {
1829                         err = drbd_send_ack(mdev, P_NEG_ACK, peer_req);
1830                         /* we expect it to be marked out of sync anyways...
1831                          * maybe assert this?  */
1832                 }
1833                 dec_unacked(mdev);
1834         }
1835         /* we delete from the conflict detection hash _after_ we sent out the
1836          * P_WRITE_ACK / P_NEG_ACK, to get the sequence number right.  */
1837         if (peer_req->flags & EE_IN_INTERVAL_TREE) {
1838                 spin_lock_irq(&mdev->tconn->req_lock);
1839                 D_ASSERT(!drbd_interval_empty(&peer_req->i));
1840                 drbd_remove_epoch_entry_interval(mdev, peer_req);
1841                 if (peer_req->flags & EE_RESTART_REQUESTS)
1842                         restart_conflicting_writes(mdev, sector, peer_req->i.size);
1843                 spin_unlock_irq(&mdev->tconn->req_lock);
1844         } else
1845                 D_ASSERT(drbd_interval_empty(&peer_req->i));
1846
1847         drbd_may_finish_epoch(mdev->tconn, peer_req->epoch, EV_PUT + (cancel ? EV_CLEANUP : 0));
1848
1849         return err;
1850 }
1851
1852 static int e_send_ack(struct drbd_work *w, enum drbd_packet ack)
1853 {
1854         struct drbd_conf *mdev = w->mdev;
1855         struct drbd_peer_request *peer_req =
1856                 container_of(w, struct drbd_peer_request, w);
1857         int err;
1858
1859         err = drbd_send_ack(mdev, ack, peer_req);
1860         dec_unacked(mdev);
1861
1862         return err;
1863 }
1864
1865 static int e_send_superseded(struct drbd_work *w, int unused)
1866 {
1867         return e_send_ack(w, P_SUPERSEDED);
1868 }
1869
1870 static int e_send_retry_write(struct drbd_work *w, int unused)
1871 {
1872         struct drbd_tconn *tconn = w->mdev->tconn;
1873
1874         return e_send_ack(w, tconn->agreed_pro_version >= 100 ?
1875                              P_RETRY_WRITE : P_SUPERSEDED);
1876 }
1877
1878 static bool seq_greater(u32 a, u32 b)
1879 {
1880         /*
1881          * We assume 32-bit wrap-around here.
1882          * For 24-bit wrap-around, we would have to shift:
1883          *  a <<= 8; b <<= 8;
1884          */
1885         return (s32)a - (s32)b > 0;
1886 }
1887
1888 static u32 seq_max(u32 a, u32 b)
1889 {
1890         return seq_greater(a, b) ? a : b;
1891 }
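/*
 * Example: seq_greater(1, 0xffffffff) is true, since
 * (s32)1 - (s32)0xffffffff == 2 > 0; sequence number 1 is "newer" than
 * 0xffffffff once the 32-bit counter has wrapped.
 */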
1892
1893 static void update_peer_seq(struct drbd_conf *mdev, unsigned int peer_seq)
1894 {
1895         unsigned int newest_peer_seq;
1896
1897         if (test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags)) {
1898                 spin_lock(&mdev->peer_seq_lock);
1899                 newest_peer_seq = seq_max(mdev->peer_seq, peer_seq);
1900                 mdev->peer_seq = newest_peer_seq;
1901                 spin_unlock(&mdev->peer_seq_lock);
1902                 /* wake up only if we actually changed mdev->peer_seq */
1903                 if (peer_seq == newest_peer_seq)
1904                         wake_up(&mdev->seq_wait);
1905         }
1906 }
1907
1908 static inline int overlaps(sector_t s1, int l1, sector_t s2, int l2)
1909 {
1910         return !((s1 + (l1>>9) <= s2) || (s1 >= s2 + (l2>>9)));
1911 }
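/*
 * The lengths l1 and l2 are in bytes, the sectors are 512 bytes each,
 * hence the >>9.  Example: overlaps(0, 4096, 4, 4096) compares the sector
 * ranges [0,8) and [4,12), which intersect, so it returns true.
 */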
1912
1913 /* maybe change sync_ee into interval trees as well? */
1914 static bool overlapping_resync_write(struct drbd_conf *mdev, struct drbd_peer_request *peer_req)
1915 {
1916         struct drbd_peer_request *rs_req;
1917         bool rv = false;
1918
1919         spin_lock_irq(&mdev->tconn->req_lock);
1920         list_for_each_entry(rs_req, &mdev->sync_ee, w.list) {
1921                 if (overlaps(peer_req->i.sector, peer_req->i.size,
1922                              rs_req->i.sector, rs_req->i.size)) {
1923                         rv = true;
1924                         break;
1925                 }
1926         }
1927         spin_unlock_irq(&mdev->tconn->req_lock);
1928
1929         return rv;
1930 }
1931
1932 /* Called from receive_Data.
1933  * Synchronize packets on sock with packets on msock.
1934  *
1935  * This is here so even when a P_DATA packet traveling via sock overtook an Ack
1936  * packet traveling on msock, they are still processed in the order they have
1937  * been sent.
1938  *
1939  * Note: we don't care for Ack packets overtaking P_DATA packets.
1940  *
1941  * In case packet_seq is larger than mdev->peer_seq number, there are
1942  * outstanding packets on the msock. We wait for them to arrive.
1943  * In case we are the logically next packet, we update mdev->peer_seq
1944  * ourselves. Correctly handles 32bit wrap around.
1945  *
1946  * Assume we have a 10 GBit connection, that is about 1<<30 byte per second,
1947  * about 1<<21 sectors per second. So "worst" case, we have 1<<3 == 8 seconds
1948  * for the 24bit wrap (historical atomic_t guarantee on some archs), and we have
1949  * 1<<11 == 2048 seconds, aka ages, for the 32bit wrap around...
1950  *
1951  * returns 0 if we may process the packet,
1952  * -ERESTARTSYS if we were interrupted (by disconnect signal). */
1953 static int wait_for_and_update_peer_seq(struct drbd_conf *mdev, const u32 peer_seq)
1954 {
1955         DEFINE_WAIT(wait);
1956         long timeout;
1957         int ret = 0, tp;
1958
1959         if (!test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags))
1960                 return 0;
1961
1962         spin_lock(&mdev->peer_seq_lock);
1963         for (;;) {
1964                 if (!seq_greater(peer_seq - 1, mdev->peer_seq)) {
1965                         mdev->peer_seq = seq_max(mdev->peer_seq, peer_seq);
1966                         break;
1967                 }
1968
1969                 if (signal_pending(current)) {
1970                         ret = -ERESTARTSYS;
1971                         break;
1972                 }
1973
1974                 rcu_read_lock();
1975                 tp = rcu_dereference(mdev->tconn->net_conf)->two_primaries;
1976                 rcu_read_unlock();
1977
1978                 if (!tp)
1979                         break;
1980
1981                 /* Only need to wait if two_primaries is enabled */
1982                 prepare_to_wait(&mdev->seq_wait, &wait, TASK_INTERRUPTIBLE);
1983                 spin_unlock(&mdev->peer_seq_lock);
1984                 rcu_read_lock();
1985                 timeout = rcu_dereference(mdev->tconn->net_conf)->ping_timeo*HZ/10;
1986                 rcu_read_unlock();
1987                 timeout = schedule_timeout(timeout);
1988                 spin_lock(&mdev->peer_seq_lock);
1989                 if (!timeout) {
1990                         ret = -ETIMEDOUT;
1991                         dev_err(DEV, "Timed out waiting for missing ack packets; disconnecting\n");
1992                         break;
1993                 }
1994         }
1995         spin_unlock(&mdev->peer_seq_lock);
1996         finish_wait(&mdev->seq_wait, &wait);
1997         return ret;
1998 }
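/*
 * The admission test in the loop above, !seq_greater(peer_seq - 1,
 * mdev->peer_seq), accepts a packet iff peer_seq <= mdev->peer_seq + 1,
 * i.e. iff it is the logically next data packet (wrap-around safe).
 * Anything larger means acks are still in flight on the msock, so we
 * sleep and recheck.
 */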
1999
2000 /* see also bio_flags_to_wire(): the DP_* wire flags are mapped to bio
2001  * REQ_* flags semantically rather than bit for bit, since the REQ_*
2002  * values may differ between kernel versions we replicate with. */
2003 static unsigned long wire_flags_to_bio(struct drbd_conf *mdev, u32 dpf)
2004 {
2005         return  (dpf & DP_RW_SYNC ? REQ_SYNC : 0) |
2006                 (dpf & DP_FUA ? REQ_FUA : 0) |
2007                 (dpf & DP_FLUSH ? REQ_FLUSH : 0) |
2008                 (dpf & DP_DISCARD ? REQ_DISCARD : 0);
2009 }
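/*
 * The sending side performs the inverse mapping; see bio_flags_to_wire()
 * elsewhere in the driver.  A minimal sketch of that direction (the
 * helper is hypothetical, and the real function may also honor the
 * agreed protocol version):
 */
#if 0
static u32 example_bio_flags_to_wire(unsigned long bi_rw)
{
        return (bi_rw & REQ_SYNC ? DP_RW_SYNC : 0) |
               (bi_rw & REQ_FUA ? DP_FUA : 0) |
               (bi_rw & REQ_FLUSH ? DP_FLUSH : 0) |
               (bi_rw & REQ_DISCARD ? DP_DISCARD : 0);
}
#endif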
2010
2011 static void fail_postponed_requests(struct drbd_conf *mdev, sector_t sector,
2012                                     unsigned int size)
2013 {
2014         struct drbd_interval *i;
2015
2016     repeat:
2017         drbd_for_each_overlap(i, &mdev->write_requests, sector, size) {
2018                 struct drbd_request *req;
2019                 struct bio_and_error m;
2020
2021                 if (!i->local)
2022                         continue;
2023                 req = container_of(i, struct drbd_request, i);
2024                 if (!(req->rq_state & RQ_POSTPONED))
2025                         continue;
2026                 req->rq_state &= ~RQ_POSTPONED;
2027                 __req_mod(req, NEG_ACKED, &m);
2028                 spin_unlock_irq(&mdev->tconn->req_lock);
2029                 if (m.bio)
2030                         complete_master_bio(mdev, &m);
2031                 spin_lock_irq(&mdev->tconn->req_lock);
2032                 goto repeat;
2033         }
2034 }
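/*
 * req_lock is dropped above before completing the master bio; the tree
 * may change while it is released, so the overlap scan is restarted from
 * the top after each completion instead of continuing a stale iteration.
 */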
2035
2036 static int handle_write_conflicts(struct drbd_conf *mdev,
2037                                   struct drbd_peer_request *peer_req)
2038 {
2039         struct drbd_tconn *tconn = mdev->tconn;
2040         bool resolve_conflicts = test_bit(RESOLVE_CONFLICTS, &tconn->flags);
2041         sector_t sector = peer_req->i.sector;
2042         const unsigned int size = peer_req->i.size;
2043         struct drbd_interval *i;
2044         bool equal;
2045         int err;
2046
2047         /*
2048          * Inserting the peer request into the write_requests tree will prevent
2049          * new conflicting local requests from being added.
2050          */
2051         drbd_insert_interval(&mdev->write_requests, &peer_req->i);
2052
2053     repeat:
2054         drbd_for_each_overlap(i, &mdev->write_requests, sector, size) {
2055                 if (i == &peer_req->i)
2056                         continue;
2057
2058                 if (!i->local) {
2059                         /*
2060                          * Our peer has sent a conflicting remote request; this
2061                          * should not happen in a two-node setup.  Wait for the
2062                          * earlier peer request to complete.
2063                          */
2064                         err = drbd_wait_misc(mdev, i);
2065                         if (err)
2066                                 goto out;
2067                         goto repeat;
2068                 }
2069
2070                 equal = i->sector == sector && i->size == size;
2071                 if (resolve_conflicts) {
2072                         /*
2073                          * If the peer request is fully contained within the
2074                          * overlapping request, it can be considered overwritten
2075                          * and thus superseded; otherwise, it will be retried
2076                          * once all overlapping requests have completed.
2077                          */
2078                         bool superseded = i->sector <= sector && i->sector +
2079                                        (i->size >> 9) >= sector + (size >> 9);
2080
2081                         if (!equal)
2082                                 dev_alert(DEV, "Concurrent writes detected: "
2083                                                "local=%llus +%u, remote=%llus +%u, "
2084                                                "assuming %s came first\n",
2085                                           (unsigned long long)i->sector, i->size,
2086                                           (unsigned long long)sector, size,
2087                                           superseded ? "local" : "remote");
2088
2089                         inc_unacked(mdev);
2090                         peer_req->w.cb = superseded ? e_send_superseded :
2091                                                    e_send_retry_write;
2092                         list_add_tail(&peer_req->w.list, &mdev->done_ee);
2093                         wake_asender(mdev->tconn);
2094
2095                         err = -ENOENT;
2096                         goto out;
2097                 } else {
2098                         struct drbd_request *req =
2099                                 container_of(i, struct drbd_request, i);
2100
2101                         if (!equal)
2102                                 dev_alert(DEV, "Concurrent writes detected: "
2103                                                "local=%llus +%u, remote=%llus +%u\n",
2104                                           (unsigned long long)i->sector, i->size,
2105                                           (unsigned long long)sector, size);
2106
2107                         if (req->rq_state & RQ_LOCAL_PENDING ||
2108                             !(req->rq_state & RQ_POSTPONED)) {
2109                                 /*
2110                                  * Wait for the node with the discard flag to
2111                                  * decide if this request has been superseded
2112                                  * or needs to be retried.
2113                                  * Requests that have been superseded will
2114                                  * disappear from the write_requests tree.
2115                                  *
2116                                  * In addition, wait for the conflicting
2117                                  * request to finish locally before submitting
2118                                  * the conflicting peer request.
2119                                  */
2120                                 err = drbd_wait_misc(mdev, &req->i);
2121                                 if (err) {
2122                                         _conn_request_state(mdev->tconn,
2123                                                             NS(conn, C_TIMEOUT),
2124                                                             CS_HARD);
2125                                         fail_postponed_requests(mdev, sector, size);
2126                                         goto out;
2127                                 }
2128                                 goto repeat;
2129                         }
2130                         /*
2131                          * Remember to restart the conflicting requests after
2132                          * the new peer request has completed.
2133                          */
2134                         peer_req->flags |= EE_RESTART_REQUESTS;
2135                 }
2136         }
2137         err = 0;
2138
2139     out:
2140         if (err)
2141                 drbd_remove_epoch_entry_interval(mdev, peer_req);
2142         return err;
2143 }
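/*
 * Containment example for the "superseded" test above: a local request
 * covering sectors [0,16) fully contains a peer write of [4,8), so the
 * peer write is considered overwritten (superseded); a peer write of
 * [8,24) is only partially covered and would be retried instead.
 */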
2144
2145 /* mirrored write */
2146 static int receive_Data(struct drbd_tconn *tconn, struct packet_info *pi)
2147 {
2148         struct drbd_conf *mdev;
2149         sector_t sector;
2150         struct drbd_peer_request *peer_req;
2151         struct p_data *p = pi->data;
2152         u32 peer_seq = be32_to_cpu(p->seq_num);
2153         int rw = WRITE;
2154         u32 dp_flags;
2155         int err, tp;
2156
2157         mdev = vnr_to_mdev(tconn, pi->vnr);
2158         if (!mdev)
2159                 return -EIO;
2160
2161         if (!get_ldev(mdev)) {
2162                 int err2;
2163
2164                 err = wait_for_and_update_peer_seq(mdev, peer_seq);
2165                 drbd_send_ack_dp(mdev, P_NEG_ACK, p, pi->size);
2166                 atomic_inc(&tconn->current_epoch->epoch_size);
2167                 err2 = drbd_drain_block(mdev, pi->size);
2168                 if (!err)
2169                         err = err2;
2170                 return err;
2171         }
2172
2173         /*
2174          * Corresponding put_ldev done either below (on various errors), or in
2175          * drbd_peer_request_endio, if we successfully submit the data at the
2176          * end of this function.
2177          */
2178
2179         sector = be64_to_cpu(p->sector);
2180         peer_req = read_in_block(mdev, p->block_id, sector, pi->size);
2181         if (!peer_req) {
2182                 put_ldev(mdev);
2183                 return -EIO;
2184         }
2185
2186         peer_req->w.cb = e_end_block;
2187
2188         dp_flags = be32_to_cpu(p->dp_flags);
2189         rw |= wire_flags_to_bio(mdev, dp_flags);
2190         if (peer_req->pages == NULL) {
2191                 D_ASSERT(peer_req->i.size == 0);
2192                 D_ASSERT(dp_flags & DP_FLUSH);
2193         }
2194
2195         if (dp_flags & DP_MAY_SET_IN_SYNC)
2196                 peer_req->flags |= EE_MAY_SET_IN_SYNC;
2197
2198         spin_lock(&tconn->epoch_lock);
2199         peer_req->epoch = tconn->current_epoch;
2200         atomic_inc(&peer_req->epoch->epoch_size);
2201         atomic_inc(&peer_req->epoch->active);
2202         spin_unlock(&tconn->epoch_lock);
2203
2204         rcu_read_lock();
2205         tp = rcu_dereference(mdev->tconn->net_conf)->two_primaries;
2206         rcu_read_unlock();
2207         if (tp) {
2208                 peer_req->flags |= EE_IN_INTERVAL_TREE;
2209                 err = wait_for_and_update_peer_seq(mdev, peer_seq);
2210                 if (err)
2211                         goto out_interrupted;
2212                 spin_lock_irq(&mdev->tconn->req_lock);
2213                 err = handle_write_conflicts(mdev, peer_req);
2214                 if (err) {
2215                         spin_unlock_irq(&mdev->tconn->req_lock);
2216                         if (err == -ENOENT) {
2217                                 put_ldev(mdev);
2218                                 return 0;
2219                         }
2220                         goto out_interrupted;
2221                 }
2222         } else {
2223                 update_peer_seq(mdev, peer_seq);
2224                 spin_lock_irq(&mdev->tconn->req_lock);
2225         }
2226         list_add(&peer_req->w.list, &mdev->active_ee);
2227         spin_unlock_irq(&mdev->tconn->req_lock);
2228
2229         if (mdev->state.conn == C_SYNC_TARGET)
2230                 wait_event(mdev->ee_wait, !overlapping_resync_write(mdev, peer_req));
2231
2232         if (mdev->tconn->agreed_pro_version < 100) {
2233                 rcu_read_lock();
2234                 switch (rcu_dereference(mdev->tconn->net_conf)->wire_protocol) {
2235                 case DRBD_PROT_C:
2236                         dp_flags |= DP_SEND_WRITE_ACK;
2237                         break;
2238                 case DRBD_PROT_B:
2239                         dp_flags |= DP_SEND_RECEIVE_ACK;
2240                         break;
2241                 }
2242                 rcu_read_unlock();
2243         }
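        /*
         * Peers speaking protocol A set neither flag: the write is considered
         * complete without any acknowledgement packet.  Protocol B requests a
         * P_RECV_ACK (reached the peer's memory), protocol C a P_WRITE_ACK
         * (reached the peer's disk).
         */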
2244
2245         if (dp_flags & DP_SEND_WRITE_ACK) {
2246                 peer_req->flags |= EE_SEND_WRITE_ACK;
2247                 inc_unacked(mdev);
2248                 /* corresponding dec_unacked() in e_end_block()
2249                  * respective _drbd_clear_done_ee */
2250         }
2251
2252         if (dp_flags & DP_SEND_RECEIVE_ACK) {
2253                 /* I really don't like it that the receiver thread
2254                  * sends on the msock, but anyways */
2255                 drbd_send_ack(mdev, P_RECV_ACK, peer_req);
2256         }
2257
2258         if (mdev->state.pdsk < D_INCONSISTENT) {
2259                 /* In case we have the only disk of the cluster, mark the block out of sync. */
2260                 drbd_set_out_of_sync(mdev, peer_req->i.sector, peer_req->i.size);
2261                 peer_req->flags |= EE_CALL_AL_COMPLETE_IO;
2262                 peer_req->flags &= ~EE_MAY_SET_IN_SYNC;
2263                 drbd_al_begin_io(mdev, &peer_req->i, true);
2264         }
2265
2266         err = drbd_submit_peer_request(mdev, peer_req, rw, DRBD_FAULT_DT_WR);
2267         if (!err)
2268                 return 0;
2269
2270         /* don't care for the reason here */
2271         dev_err(DEV, "submit failed, triggering re-connect\n");
2272         spin_lock_irq(&mdev->tconn->req_lock);
2273         list_del(&peer_req->w.list);
2274         drbd_remove_epoch_entry_interval(mdev, peer_req);
2275         spin_unlock_irq(&mdev->tconn->req_lock);
2276         if (peer_req->flags & EE_CALL_AL_COMPLETE_IO)
2277                 drbd_al_complete_io(mdev, &peer_req->i);
2278
2279 out_interrupted:
2280         drbd_may_finish_epoch(tconn, peer_req->epoch, EV_PUT + EV_CLEANUP);
2281         put_ldev(mdev);
2282         drbd_free_peer_req(mdev, peer_req);
2283         return err;
2284 }
2285
2286 /* We may throttle resync, if the lower device seems to be busy,
2287  * and current sync rate is above c_min_rate.
2288  *
2289  * To decide whether or not the lower device is busy, we use a scheme similar
2290  * to MD RAID's is_mddev_idle(): if the partition stats reveal "significant"
2291  * (more than 64 sectors) of activity we cannot account for with our own resync
2292  * activity, it obviously is "busy".
2293  *
2294  * The sync rate used here is computed from the most recent two sync marks
2295  * only, giving a short-time average so we can react faster.
2296  */
2297 int drbd_rs_should_slow_down(struct drbd_conf *mdev, sector_t sector)
2298 {
2299         struct gendisk *disk = mdev->ldev->backing_bdev->bd_contains->bd_disk;
2300         unsigned long db, dt, dbdt;
2301         struct lc_element *tmp;
2302         int curr_events;
2303         int throttle = 0;
2304         unsigned int c_min_rate;
2305
2306         rcu_read_lock();
2307         c_min_rate = rcu_dereference(mdev->ldev->disk_conf)->c_min_rate;
2308         rcu_read_unlock();
2309
2310         /* feature disabled? */
2311         if (c_min_rate == 0)
2312                 return 0;
2313
2314         spin_lock_irq(&mdev->al_lock);
2315         tmp = lc_find(mdev->resync, BM_SECT_TO_EXT(sector));
2316         if (tmp) {
2317                 struct bm_extent *bm_ext = lc_entry(tmp, struct bm_extent, lce);
2318                 if (test_bit(BME_PRIORITY, &bm_ext->flags)) {
2319                         spin_unlock_irq(&mdev->al_lock);
2320                         return 0;
2321                 }
2322                 /* Do not slow down if app IO is already waiting for this extent */
2323         }
2324         spin_unlock_irq(&mdev->al_lock);
2325
2326         curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
2327                       (int)part_stat_read(&disk->part0, sectors[1]) -
2328                         atomic_read(&mdev->rs_sect_ev);
2329
2330         if (!mdev->rs_last_events || curr_events - mdev->rs_last_events > 64) {
2331                 unsigned long rs_left;
2332                 int i;
2333
2334                 mdev->rs_last_events = curr_events;
2335
2336                 /* sync speed average over the last 2*DRBD_SYNC_MARK_STEP,
2337                  * approx. */
2338                 i = (mdev->rs_last_mark + DRBD_SYNC_MARKS-1) % DRBD_SYNC_MARKS;
2339
2340                 if (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T)
2341                         rs_left = mdev->ov_left;
2342                 else
2343                         rs_left = drbd_bm_total_weight(mdev) - mdev->rs_failed;
2344
2345                 dt = ((long)jiffies - (long)mdev->rs_mark_time[i]) / HZ;
2346                 if (!dt)
2347                         dt++;
2348                 db = mdev->rs_mark_left[i] - rs_left;
2349                 dbdt = Bit2KB(db/dt);
2350
2351                 if (dbdt > c_min_rate)
2352                         throttle = 1;
2353         }
2354         return throttle;
2355 }
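/*
 * Worked example: with c_min_rate = 4096 KiB/s (an arbitrary example
 * value) and the last two sync marks showing db = 15360 bitmap bits
 * cleared over dt = 10 seconds, dbdt = Bit2KB(15360 / 10) = 6144 KiB/s
 * (4 KiB per bitmap bit), which exceeds 4096, so resync is throttled.
 */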
2356
2357
2358 static int receive_DataRequest(struct drbd_tconn *tconn, struct packet_info *pi)
2359 {
2360         struct drbd_conf *mdev;
2361         sector_t sector;
2362         sector_t capacity;
2363         struct drbd_peer_request *peer_req;
2364         struct digest_info *di = NULL;
2365         int size, verb;
2366         unsigned int fault_type;
2367         struct p_block_req *p = pi->data;
2368
2369         mdev = vnr_to_mdev(tconn, pi->vnr);
2370         if (!mdev)
2371                 return -EIO;
2372         capacity = drbd_get_capacity(mdev->this_bdev);
2373
2374         sector = be64_to_cpu(p->sector);
2375         size   = be32_to_cpu(p->blksize);
2376
2377         if (size <= 0 || !IS_ALIGNED(size, 512) || size > DRBD_MAX_BIO_SIZE) {
2378                 dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
2379                                 (unsigned long long)sector, size);
2380                 return -EINVAL;
2381         }
2382         if (sector + (size>>9) > capacity) {
2383                 dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
2384                                 (unsigned long long)sector, size);
2385                 return -EINVAL;
2386         }
2387
2388         if (!get_ldev_if_state(mdev, D_UP_TO_DATE)) {
2389                 verb = 1;
2390                 switch (pi->cmd) {
2391                 case P_DATA_REQUEST:
2392                         drbd_send_ack_rp(mdev, P_NEG_DREPLY, p);
2393                         break;
2394                 case P_RS_DATA_REQUEST:
2395                 case P_CSUM_RS_REQUEST:
2396                 case P_OV_REQUEST:
2397                         drbd_send_ack_rp(mdev, P_NEG_RS_DREPLY , p);
2398                         break;
2399                 case P_OV_REPLY:
2400                         verb = 0;
2401                         dec_rs_pending(mdev);
2402                         drbd_send_ack_ex(mdev, P_OV_RESULT, sector, size, ID_IN_SYNC);
2403                         break;
2404                 default:
2405                         BUG();
2406                 }
2407                 if (verb && __ratelimit(&drbd_ratelimit_state))
2408                         dev_err(DEV, "Can not satisfy peer's read request, "
2409                             "no local data.\n");
2410
2411                 /* drain the payload, if any */
2412                 return drbd_drain_block(mdev, pi->size);
2413         }
2414
2415         /* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
2416          * "criss-cross" setup, that might cause write-out on some other DRBD,
2417          * which in turn might block on the other node at this very place.  */
2418         peer_req = drbd_alloc_peer_req(mdev, p->block_id, sector, size, GFP_NOIO);
2419         if (!peer_req) {
2420                 put_ldev(mdev);
2421                 return -ENOMEM;
2422         }
2423
2424         switch (pi->cmd) {
2425         case P_DATA_REQUEST:
2426                 peer_req->w.cb = w_e_end_data_req;
2427                 fault_type = DRBD_FAULT_DT_RD;
2428                 /* application IO, don't drbd_rs_begin_io */
2429                 goto submit;
2430
2431         case P_RS_DATA_REQUEST:
2432                 peer_req->w.cb = w_e_end_rsdata_req;
2433                 fault_type = DRBD_FAULT_RS_RD;
2434                 /* used in the sector offset progress display */
2435                 mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
2436                 break;
2437
2438         case P_OV_REPLY:
2439         case P_CSUM_RS_REQUEST:
2440                 fault_type = DRBD_FAULT_RS_RD;
2441                 di = kmalloc(sizeof(*di) + pi->size, GFP_NOIO);
2442                 if (!di)
2443                         goto out_free_e;
2444
2445                 di->digest_size = pi->size;
2446                 di->digest = (((char *)di)+sizeof(struct digest_info));
2447
2448                 peer_req->digest = di;
2449                 peer_req->flags |= EE_HAS_DIGEST;
2450
2451                 if (drbd_recv_all(mdev->tconn, di->digest, pi->size))
2452                         goto out_free_e;
2453
2454                 if (pi->cmd == P_CSUM_RS_REQUEST) {
2455                         D_ASSERT(mdev->tconn->agreed_pro_version >= 89);
2456                         peer_req->w.cb = w_e_end_csum_rs_req;
2457                         /* used in the sector offset progress display */
2458                         mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
2459                 } else if (pi->cmd == P_OV_REPLY) {
2460                         /* track progress, we may need to throttle */
2461                         atomic_add(size >> 9, &mdev->rs_sect_in);
2462                         peer_req->w.cb = w_e_end_ov_reply;
2463                         dec_rs_pending(mdev);
2464                         /* drbd_rs_begin_io done when we sent this request,
2465                          * but accounting still needs to be done. */
2466                         goto submit_for_resync;
2467                 }
2468                 break;
2469
2470         case P_OV_REQUEST:
2471                 if (mdev->ov_start_sector == ~(sector_t)0 &&
2472                     mdev->tconn->agreed_pro_version >= 90) {
2473                         unsigned long now = jiffies;
2474                         int i;
2475                         mdev->ov_start_sector = sector;
2476                         mdev->ov_position = sector;
2477                         mdev->ov_left = drbd_bm_bits(mdev) - BM_SECT_TO_BIT(sector);
2478                         mdev->rs_total = mdev->ov_left;
2479                         for (i = 0; i < DRBD_SYNC_MARKS; i++) {
2480                                 mdev->rs_mark_left[i] = mdev->ov_left;
2481                                 mdev->rs_mark_time[i] = now;
2482                         }
2483                         dev_info(DEV, "Online Verify start sector: %llu\n",
2484                                         (unsigned long long)sector);
2485                 }
2486                 peer_req->w.cb = w_e_end_ov_req;
2487                 fault_type = DRBD_FAULT_RS_RD;
2488                 break;
2489
2490         default:
2491                 BUG();
2492         }
2493
2494         /* Throttle, drbd_rs_begin_io and submit should become asynchronous
2495          * wrt the receiver, but it is not as straightforward as it may seem.
2496          * Various places in the resync start and stop logic assume resync
2497          * requests are processed in order, requeuing this on the worker thread
2498          * introduces a bunch of new code for synchronization between threads.
2499          *
2500          * Unlimited throttling before drbd_rs_begin_io may stall the resync
2501          * "forever", throttling after drbd_rs_begin_io will lock that extent
2502          * for application writes for the same time.  For now, just throttle
2503          * here, where the rest of the code expects the receiver to sleep for
2504          * a while, anyways.
2505          */
2506
2507         /* Throttle before drbd_rs_begin_io, as that locks out application IO;
2508          * this defers syncer requests for some time, before letting at least
2509  * one request through.  The resync controller on the receiving side
2510          * will adapt to the incoming rate accordingly.
2511          *
2512          * We cannot throttle here if remote is Primary/SyncTarget:
2513          * we would also throttle its application reads.
2514          * In that case, throttling is done on the SyncTarget only.
2515          */
2516         if (mdev->state.peer != R_PRIMARY && drbd_rs_should_slow_down(mdev, sector))
2517                 schedule_timeout_uninterruptible(HZ/10);
2518         if (drbd_rs_begin_io(mdev, sector))
2519                 goto out_free_e;
2520
2521 submit_for_resync:
2522         atomic_add(size >> 9, &mdev->rs_sect_ev);
2523
2524 submit:
2525         inc_unacked(mdev);
2526         spin_lock_irq(&mdev->tconn->req_lock);
2527         list_add_tail(&peer_req->w.list, &mdev->read_ee);
2528         spin_unlock_irq(&mdev->tconn->req_lock);
2529
2530         if (drbd_submit_peer_request(mdev, peer_req, READ, fault_type) == 0)
2531                 return 0;
2532
2533         /* don't care for the reason here */
2534         dev_err(DEV, "submit failed, triggering re-connect\n");
2535         spin_lock_irq(&mdev->tconn->req_lock);
2536         list_del(&peer_req->w.list);
2537         spin_unlock_irq(&mdev->tconn->req_lock);
2538         /* no drbd_rs_complete_io(), we are dropping the connection anyways */
2539
2540 out_free_e:
2541         put_ldev(mdev);
2542         drbd_free_peer_req(mdev, peer_req);
2543         return -EIO;
2544 }
2545
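/*
 * Return convention shared by the after-split-brain recovery handlers
 * below: 1 means discard the peer's data (we become sync source), -1
 * means discard the local data (we become sync target), and -100 means
 * no automatic resolution was reached.
 */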
2546 static int drbd_asb_recover_0p(struct drbd_conf *mdev) __must_hold(local)
2547 {
2548         int self, peer, rv = -100;
2549         unsigned long ch_self, ch_peer;
2550         enum drbd_after_sb_p after_sb_0p;
2551
2552         self = mdev->ldev->md.uuid[UI_BITMAP] & 1;
2553         peer = mdev->p_uuid[UI_BITMAP] & 1;
2554
2555         ch_peer = mdev->p_uuid[UI_SIZE];
2556         ch_self = mdev->comm_bm_set;
2557
2558         rcu_read_lock();
2559         after_sb_0p = rcu_dereference(mdev->tconn->net_conf)->after_sb_0p;
2560         rcu_read_unlock();
2561         switch (after_sb_0p) {
2562         case ASB_CONSENSUS:
2563         case ASB_DISCARD_SECONDARY:
2564         case ASB_CALL_HELPER:
2565         case ASB_VIOLENTLY:
2566                 dev_err(DEV, "Configuration error.\n");
2567                 break;
2568         case ASB_DISCONNECT:
2569                 break;
2570         case ASB_DISCARD_YOUNGER_PRI:
2571                 if (self == 0 && peer == 1) {
2572                         rv = -1;
2573                         break;
2574                 }
2575                 if (self == 1 && peer == 0) {
2576                         rv =  1;
2577                         break;
2578                 }
2579                 /* Else fall through to one of the other strategies... */
2580         case ASB_DISCARD_OLDER_PRI:
2581                 if (self == 0 && peer == 1) {
2582                         rv = 1;
2583                         break;
2584                 }
2585                 if (self == 1 && peer == 0) {
2586                         rv = -1;
2587                         break;
2588                 }
2589                 /* Else fall through to one of the other strategies... */
2590                 dev_warn(DEV, "Discard younger/older primary did not find a decision\n"
2591                      "Using discard-least-changes instead\n");
2592         case ASB_DISCARD_ZERO_CHG:
2593                 if (ch_peer == 0 && ch_self == 0) {
2594                         rv = test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags)
2595                                 ? -1 : 1;
2596                         break;
2597                 } else {
2598                         if (ch_peer == 0) { rv =  1; break; }
2599                         if (ch_self == 0) { rv = -1; break; }
2600                 }
2601                 if (after_sb_0p == ASB_DISCARD_ZERO_CHG)
2602                         break;
2603         case ASB_DISCARD_LEAST_CHG:
2604                 if      (ch_self < ch_peer)
2605                         rv = -1;
2606                 else if (ch_self > ch_peer)
2607                         rv =  1;
2608                 else /* ( ch_self == ch_peer ) */
2609                      /* Well, then use something else. */
2610                         rv = test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags)
2611                                 ? -1 : 1;
2612                 break;
2613         case ASB_DISCARD_LOCAL:
2614                 rv = -1;
2615                 break;
2616         case ASB_DISCARD_REMOTE:
2617                 rv =  1;
2618         }
2619
2620         return rv;
2621 }
2622
2623 static int drbd_asb_recover_1p(struct drbd_conf *mdev) __must_hold(local)
2624 {
2625         int hg, rv = -100;
2626         enum drbd_after_sb_p after_sb_1p;
2627
2628         rcu_read_lock();
2629         after_sb_1p = rcu_dereference(mdev->tconn->net_conf)->after_sb_1p;
2630         rcu_read_unlock();
2631         switch (after_sb_1p) {
2632         case ASB_DISCARD_YOUNGER_PRI:
2633         case ASB_DISCARD_OLDER_PRI:
2634         case ASB_DISCARD_LEAST_CHG:
2635         case ASB_DISCARD_LOCAL:
2636         case ASB_DISCARD_REMOTE:
2637         case ASB_DISCARD_ZERO_CHG:
2638                 dev_err(DEV, "Configuration error.\n");
2639                 break;
2640         case ASB_DISCONNECT:
2641                 break;
2642         case ASB_CONSENSUS:
2643                 hg = drbd_asb_recover_0p(mdev);
2644                 if (hg == -1 && mdev->state.role == R_SECONDARY)
2645                         rv = hg;
2646                 if (hg == 1  && mdev->state.role == R_PRIMARY)
2647                         rv = hg;
2648                 break;
2649         case ASB_VIOLENTLY:
2650                 rv = drbd_asb_recover_0p(mdev);
2651                 break;
2652         case ASB_DISCARD_SECONDARY:
2653                 return mdev->state.role == R_PRIMARY ? 1 : -1;
2654         case ASB_CALL_HELPER:
2655                 hg = drbd_asb_recover_0p(mdev);
2656                 if (hg == -1 && mdev->state.role == R_PRIMARY) {
2657                         enum drbd_state_rv rv2;
2658
2659                          /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
2660                           * we might be here in C_WF_REPORT_PARAMS which is transient.
2661                           * we do not need to wait for the after state change work either. */
2662                         rv2 = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
2663                         if (rv2 != SS_SUCCESS) {
2664                                 drbd_khelper(mdev, "pri-lost-after-sb");
2665                         } else {
2666                                 dev_warn(DEV, "Successfully gave up primary role.\n");
2667                                 rv = hg;
2668                         }
2669                 } else
2670                         rv = hg;
2671         }
2672
2673         return rv;
2674 }
2675
2676 static int drbd_asb_recover_2p(struct drbd_conf *mdev) __must_hold(local)
2677 {
2678         int hg, rv = -100;
2679         enum drbd_after_sb_p after_sb_2p;
2680
2681         rcu_read_lock();
2682         after_sb_2p = rcu_dereference(mdev->tconn->net_conf)->after_sb_2p;
2683         rcu_read_unlock();
2684         switch (after_sb_2p) {
2685         case ASB_DISCARD_YOUNGER_PRI:
2686         case ASB_DISCARD_OLDER_PRI:
2687         case ASB_DISCARD_LEAST_CHG:
2688         case ASB_DISCARD_LOCAL:
2689         case ASB_DISCARD_REMOTE:
2690         case ASB_CONSENSUS:
2691         case ASB_DISCARD_SECONDARY:
2692         case ASB_DISCARD_ZERO_CHG:
2693                 dev_err(DEV, "Configuration error.\n");
2694                 break;
2695         case ASB_VIOLENTLY:
2696                 rv = drbd_asb_recover_0p(mdev);
2697                 break;
2698         case ASB_DISCONNECT:
2699                 break;
2700         case ASB_CALL_HELPER:
2701                 hg = drbd_asb_recover_0p(mdev);
2702                 if (hg == -1) {
2703                         enum drbd_state_rv rv2;
2704
2705                          /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
2706                           * we might be here in C_WF_REPORT_PARAMS which is transient.
2707                           * we do not need to wait for the after state change work either. */
2708                         rv2 = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
2709                         if (rv2 != SS_SUCCESS) {
2710                                 drbd_khelper(mdev, "pri-lost-after-sb");
2711                         } else {
2712                                 dev_warn(DEV, "Successfully gave up primary role.\n");
2713                                 rv = hg;
2714                         }
2715                 } else
2716                         rv = hg;
2717         }
2718
2719         return rv;
2720 }
2721
2722 static void drbd_uuid_dump(struct drbd_conf *mdev, char *text, u64 *uuid,
2723                            u64 bits, u64 flags)
2724 {
2725         if (!uuid) {
2726                 dev_info(DEV, "%s uuid info vanished while I was looking!\n", text);
2727                 return;
2728         }
2729         dev_info(DEV, "%s %016llX:%016llX:%016llX:%016llX bits:%llu flags:%llX\n",
2730              text,
2731              (unsigned long long)uuid[UI_CURRENT],
2732              (unsigned long long)uuid[UI_BITMAP],
2733              (unsigned long long)uuid[UI_HISTORY_START],
2734              (unsigned long long)uuid[UI_HISTORY_END],
2735              (unsigned long long)bits,
2736              (unsigned long long)flags);
2737 }
2738
2739 /*
2740   100   after split brain try auto recover
2741     2   C_SYNC_SOURCE set BitMap
2742     1   C_SYNC_SOURCE use BitMap
2743     0   no Sync
2744    -1   C_SYNC_TARGET use BitMap
2745    -2   C_SYNC_TARGET set BitMap
2746  -100   after split brain, disconnect
2747 -1000   unrelated data
2748 -1091   requires proto 91
2749 -1096   requires proto 96
2750  */
2751 static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(local)
2752 {
2753         u64 self, peer;
2754         int i, j;
2755
2756         self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
2757         peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
2758
2759         *rule_nr = 10;
2760         if (self == UUID_JUST_CREATED && peer == UUID_JUST_CREATED)
2761                 return 0;
2762
2763         *rule_nr = 20;
2764         if ((self == UUID_JUST_CREATED || self == (u64)0) &&
2765              peer != UUID_JUST_CREATED)
2766                 return -2;
2767
2768         *rule_nr = 30;
2769         if (self != UUID_JUST_CREATED &&
2770             (peer == UUID_JUST_CREATED || peer == (u64)0))
2771                 return 2;
2772
2773         if (self == peer) {
2774                 int rct, dc; /* roles at crash time */
2775
2776                 if (mdev->p_uuid[UI_BITMAP] == (u64)0 && mdev->ldev->md.uuid[UI_BITMAP] != (u64)0) {
2777
2778                         if (mdev->tconn->agreed_pro_version < 91)
2779                                 return -1091;
2780
2781                         if ((mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) &&
2782                             (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1))) {
2783                                 dev_info(DEV, "was SyncSource, missed the resync finished event, corrected myself:\n");
2784                                 drbd_uuid_move_history(mdev);
2785                                 mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[UI_BITMAP];
2786                                 mdev->ldev->md.uuid[UI_BITMAP] = 0;
2787
2788                                 drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
2789                                                mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);
2790                                 *rule_nr = 34;
2791                         } else {
2792                                 dev_info(DEV, "was SyncSource (peer failed to write sync_uuid)\n");
2793                                 *rule_nr = 36;
2794                         }
2795
2796                         return 1;
2797                 }
2798
2799                 if (mdev->ldev->md.uuid[UI_BITMAP] == (u64)0 && mdev->p_uuid[UI_BITMAP] != (u64)0) {
2800
2801                         if (mdev->tconn->agreed_pro_version < 91)
2802                                 return -1091;
2803
2804                         if ((mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_BITMAP] & ~((u64)1)) &&
2805                             (mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1))) {
2806                                 dev_info(DEV, "was SyncTarget, peer missed the resync finished event, corrected peer:\n");
2807
2808                                 mdev->p_uuid[UI_HISTORY_START + 1] = mdev->p_uuid[UI_HISTORY_START];
2809                                 mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_BITMAP];
2810                                 mdev->p_uuid[UI_BITMAP] = 0UL;
2811
2812                                 drbd_uuid_dump(mdev, "peer", mdev->p_uuid, mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
2813                                 *rule_nr = 35;
2814                         } else {
2815                                 dev_info(DEV, "was SyncTarget (failed to write sync_uuid)\n");
2816                                 *rule_nr = 37;
2817                         }
2818
2819                         return -1;
2820                 }
2821
2822                 /* Common power [off|failure] */
2823                 rct = (test_bit(CRASHED_PRIMARY, &mdev->flags) ? 1 : 0) +
2824                         (mdev->p_uuid[UI_FLAGS] & 2);
2825                 /* lowest bit is set when we were primary,
2826                  * next bit (weight 2) is set when peer was primary */
2827                 *rule_nr = 40;
2828
2829                 switch (rct) {
2830                 case 0: /* !self_pri && !peer_pri */ return 0;
2831                 case 1: /*  self_pri && !peer_pri */ return 1;
2832                 case 2: /* !self_pri &&  peer_pri */ return -1;
2833                 case 3: /*  self_pri &&  peer_pri */
2834                         dc = test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags);
2835                         return dc ? -1 : 1;
2836                 }
2837         }
2838
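             /* rule 50: our current UUID matches the peer's bitmap UUID; the peer
              * tracked all changes since our data in its bitmap, so become sync
              * target using that bitmap. */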
2839         *rule_nr = 50;
2840         peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1);
2841         if (self == peer)
2842                 return -1;
2843
2844         *rule_nr = 51;
2845         peer = mdev->p_uuid[UI_HISTORY_START] & ~((u64)1);
2846         if (self == peer) {
2847                 if (mdev->tconn->agreed_pro_version < 96 ?
2848                     (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) ==
2849                     (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1)) :
2850                     peer + UUID_NEW_BM_OFFSET == (mdev->p_uuid[UI_BITMAP] & ~((u64)1))) {
2851                         /* The last P_SYNC_UUID did not get through. Undo the modifications
2852                            of the peer's UUIDs made at the last resync start (peer was sync source). */
2853
2854                         if (mdev->tconn->agreed_pro_version < 91)
2855                                 return -1091;
2856
2857                         mdev->p_uuid[UI_BITMAP] = mdev->p_uuid[UI_HISTORY_START];
2858                         mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_HISTORY_START + 1];
2859
2860                         dev_info(DEV, "Lost last syncUUID packet, corrected:\n");
2861                         drbd_uuid_dump(mdev, "peer", mdev->p_uuid, mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
2862
2863                         return -1;
2864                 }
2865         }
2866
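             /* rule 60: our current UUID appears only in the peer's history; the
              * peer's data descends from ours but no bitmap covers the gap, so a
              * full sync with us as target is needed. */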
2867         *rule_nr = 60;
2868         self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
2869         for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2870                 peer = mdev->p_uuid[i] & ~((u64)1);
2871                 if (self == peer)
2872                         return -2;
2873         }
2874
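             /* rules 70/71 mirror rules 50/51 with the roles reversed: our
              * bitmap/history is now compared against the peer's current UUID. */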
2875         *rule_nr = 70;
2876         self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
2877         peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
2878         if (self == peer)
2879                 return 1;
2880
2881         *rule_nr = 71;
2882         self = mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1);
2883         if (self == peer) {
2884                 if (mdev->tconn->agreed_pro_version < 96 ?
2885                     (mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) ==
2886                     (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) :
2887                     self + UUID_NEW_BM_OFFSET == (mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1))) {
2888                         /* The last P_SYNC_UUID did not get through. Undo the modifications
2889                            of our UUIDs made at the last resync start (we were sync source). */
2890
2891                         if (mdev->tconn->agreed_pro_version < 91)
2892                                 return -1091;
2893
2894                         __drbd_uuid_set(mdev, UI_BITMAP, mdev->ldev->md.uuid[UI_HISTORY_START]);
2895                         __drbd_uuid_set(mdev, UI_HISTORY_START, mdev->ldev->md.uuid[UI_HISTORY_START + 1]);
2896
2897                         dev_info(DEV, "Last syncUUID did not get through, corrected:\n");
2898                         drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
2899                                        mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);
2900
2901                         return 1;
2902                 }
2903         }
2904
2905
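             /* rule 80 mirrors rule 60: the peer's current UUID appears in our
              * history, so a full sync with us as source is needed. */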
2906         *rule_nr = 80;
2907         peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
2908         for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2909                 self = mdev->ldev->md.uuid[i] & ~((u64)1);
2910                 if (self == peer)
2911                         return 2;
2912         }
2913
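             /* rule 90: both sides hold a bitmap against the same base UUID; both
              * diverged independently -> split brain, try automatic recovery. */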
2914         *rule_nr = 90;
2915         self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
2916         peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1);
2917         if (self == peer && self != ((u64)0))
2918                 return 100;
2919
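             /* rule 100: the histories share a UUID but nothing newer matches;
              * related data that diverged long ago -> split brain, disconnect. */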
2920         *rule_nr = 100;
2921         for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2922                 self = mdev->ldev->md.uuid[i] & ~((u64)1);
2923                 for (j = UI_HISTORY_START; j <= UI_HISTORY_END; j++) {
2924                         peer = mdev->p_uuid[j] & ~((u64)1);
2925                         if (self == peer)
2926                                 return -100;
2927                 }
2928         }
2929
2930         return -1000;
2931 }
2932
2933 /* drbd_sync_handshake() returns the new conn state on success, or
2934    C_MASK on failure.
2935  */
2936 static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_role peer_role,
2937                                            enum drbd_disk_state peer_disk) __must_hold(local)
2938 {
2939         enum drbd_conns rv = C_MASK;
2940         enum drbd_disk_state mydisk;
2941         struct net_conf *nc;
2942         int hg, rule_nr, rr_conflict, tentative;
2943
2944         mydisk = mdev->state.disk;
2945         if (mydisk == D_NEGOTIATING)
2946                 mydisk = mdev->new_state_tmp.disk;
2947
2948         dev_info(DEV, "drbd_sync_handshake:\n");
2949
2950         spin_lock_irq(&mdev->ldev->md.uuid_lock);
2951         drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid, mdev->comm_bm_set, 0);
2952         drbd_uuid_dump(mdev, "peer", mdev->p_uuid,
2953                        mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
2954
2955         hg = drbd_uuid_compare(mdev, &rule_nr);
2956         spin_unlock_irq(&mdev->ldev->md.uuid_lock);
2957
2958         dev_info(DEV, "uuid_compare()=%d by rule %d\n", hg, rule_nr);
2959
2960         if (hg == -1000) {
2961                 dev_alert(DEV, "Unrelated data, aborting!\n");
2962                 return C_MASK;
2963         }
2964         if (hg < -1000) {
2965                 dev_alert(DEV, "To resolve this both sides have to support at least protocol %d\n", -hg - 1000);
2966                 return C_MASK;
2967         }
2968
2969         if    ((mydisk == D_INCONSISTENT && peer_disk > D_INCONSISTENT) ||
2970             (peer_disk == D_INCONSISTENT && mydisk    > D_INCONSISTENT)) {
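                     /* keep a full sync if one was already indicated, i.e. on
                      * split brain (hg == -100) or "set bitmap" (|hg| == 2) */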
2971                 int f = (hg == -100) || abs(hg) == 2;
2972                 hg = mydisk > D_INCONSISTENT ? 1 : -1;
2973                 if (f)
2974                         hg = hg*2;
2975                 dev_info(DEV, "Becoming sync %s due to disk states.\n",
2976                      hg > 0 ? "source" : "target");
2977         }
2978
2979         if (abs(hg) == 100)
2980                 drbd_khelper(mdev, "initial-split-brain");
2981
2982         rcu_read_lock();
2983         nc = rcu_dereference(mdev->tconn->net_conf);
2984
2985         if (hg == 100 || (hg == -100 && nc->always_asbp)) {
2986                 int pcount = (mdev->state.role == R_PRIMARY)
2987                            + (peer_role == R_PRIMARY);
2988                 int forced = (hg == -100);
2989
2990                 switch (pcount) {
2991                 case 0:
2992                         hg = drbd_asb_recover_0p(mdev);
2993                         break;
2994                 case 1:
2995                         hg = drbd_asb_recover_1p(mdev);
2996                         break;
2997                 case 2:
2998                         hg = drbd_asb_recover_2p(mdev);
2999                         break;
3000                 }
3001                 if (abs(hg) < 100) {
3002                         dev_warn(DEV, "Split-Brain detected, %d primaries, "
3003                              "automatically solved. Sync from %s node\n",
3004                              pcount, (hg < 0) ? "peer" : "this");
3005                         if (forced) {
3006                                 dev_warn(DEV, "Doing a full sync, since"
3007                                      " UUIDs were ambiguous.\n");
3008                                 hg = hg*2;
3009                         }
3010                 }
3011         }
3012
3013         if (hg == -100) {
3014                 if (test_bit(DISCARD_MY_DATA, &mdev->flags) && !(mdev->p_uuid[UI_FLAGS]&1))
3015                         hg = -1;
3016                 if (!test_bit(DISCARD_MY_DATA, &mdev->flags) && (mdev->p_uuid[UI_FLAGS]&1))
3017                         hg = 1;
3018
3019                 if (abs(hg) < 100)
3020                         dev_warn(DEV, "Split-Brain detected, manually solved. "
3021                              "Sync from %s node\n",
3022                              (hg < 0) ? "peer" : "this");
3023         }
3024         rr_conflict = nc->rr_conflict;
3025         tentative = nc->tentative;
3026         rcu_read_unlock();
3027
3028         if (hg == -100) {
3029                 /* FIXME this log message is not correct if we end up here
3030                  * after an attempted attach on a diskless node.
3031                  * We just refuse to attach -- well, we drop the "connection"
3032                  * to that disk, in a way... */
3033                 dev_alert(DEV, "Split-Brain detected but unresolved, dropping connection!\n");
3034                 drbd_khelper(mdev, "split-brain");
3035                 return C_MASK;
3036         }
3037
3038         if (hg > 0 && mydisk <= D_INCONSISTENT) {
3039                 dev_err(DEV, "I shall become SyncSource, but I am inconsistent!\n");
3040                 return C_MASK;
3041         }
3042
3043         if (hg < 0 && /* by intention we do not use mydisk here. */
3044             mdev->state.role == R_PRIMARY && mdev->state.disk >= D_CONSISTENT) {
3045                 switch (rr_conflict) {
3046                 case ASB_CALL_HELPER:
3047                         drbd_khelper(mdev, "pri-lost");
3048                         /* fall through */
3049                 case ASB_DISCONNECT:
3050                         dev_err(DEV, "I shall become SyncTarget, but I am primary!\n");
3051                         return C_MASK;
3052                 case ASB_VIOLENTLY:
3053                         dev_warn(DEV, "Becoming SyncTarget, violating the stable-data"
3054                              " assumption\n");
3055                 }
3056         }
3057
3058         if (tentative || test_bit(CONN_DRY_RUN, &mdev->tconn->flags)) {
3059                 if (hg == 0)
3060                         dev_info(DEV, "dry-run connect: No resync, would become Connected immediately.\n");
3061                 else
3062                         dev_info(DEV, "dry-run connect: Would become %s, doing a %s resync.\n",
3063                                  drbd_conn_str(hg > 0 ? C_SYNC_SOURCE : C_SYNC_TARGET),
3064                                  abs(hg) >= 2 ? "full" : "bit-map based");
3065                 return C_MASK;
3066         }
3067
3068         if (abs(hg) >= 2) {
3069                 dev_info(DEV, "Writing the whole bitmap, full sync required after drbd_sync_handshake.\n");
3070                 if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write, "set_n_write from sync_handshake",
3071                                         BM_LOCKED_SET_ALLOWED))
3072                         return C_MASK;
3073         }
3074
3075         if (hg > 0) { /* become sync source. */
3076                 rv = C_WF_BITMAP_S;
3077         } else if (hg < 0) { /* become sync target */
3078                 rv = C_WF_BITMAP_T;
3079         } else {
3080                 rv = C_CONNECTED;
3081                 if (drbd_bm_total_weight(mdev)) {
3082                         dev_info(DEV, "No resync, but %lu bits in bitmap!\n",
3083                              drbd_bm_total_weight(mdev));
3084                 }
3085         }
3086
3087         return rv;
3088 }
3089
3090 static enum drbd_after_sb_p convert_after_sb(enum drbd_after_sb_p peer)
3091 {
3092         /* ASB_DISCARD_REMOTE - ASB_DISCARD_LOCAL is valid */
3093         if (peer == ASB_DISCARD_REMOTE)
3094                 return ASB_DISCARD_LOCAL;
3095
3096         /* any other things with ASB_DISCARD_REMOTE or ASB_DISCARD_LOCAL are invalid */
3097         if (peer == ASB_DISCARD_LOCAL)
3098                 return ASB_DISCARD_REMOTE;
3099
3100         /* everything else is valid if they are equal on both sides. */
3101         return peer;
3102 }
3103
3104 static int receive_protocol(struct drbd_tconn *tconn, struct packet_info *pi)
3105 {
3106         struct p_protocol *p = pi->data;
3107         enum drbd_after_sb_p p_after_sb_0p, p_after_sb_1p, p_after_sb_2p;
3108         int p_proto, p_discard_my_data, p_two_primaries, cf;
3109         struct net_conf *nc, *old_net_conf, *new_net_conf = NULL;
3110         char integrity_alg[SHARED_SECRET_MAX] = "";
3111         struct crypto_hash *peer_integrity_tfm = NULL;
3112         void *int_dig_in = NULL, *int_dig_vv = NULL;
3113
3114         p_proto         = be32_to_cpu(p->protocol);
3115         p_after_sb_0p   = be32_to_cpu(p->after_sb_0p);
3116         p_after_sb_1p   = be32_to_cpu(p->after_sb_1p);
3117         p_after_sb_2p   = be32_to_cpu(p->after_sb_2p);
3118         p_two_primaries = be32_to_cpu(p->two_primaries);
3119         cf              = be32_to_cpu(p->conn_flags);
3120         p_discard_my_data = cf & CF_DISCARD_MY_DATA;
3121
3122         if (tconn->agreed_pro_version >= 87) {
3123                 int err;
3124
3125                 if (pi->size > sizeof(integrity_alg))
3126                         return -EIO;
3127                 err = drbd_recv_all(tconn, integrity_alg, pi->size);
3128                 if (err)
3129                         return err;
3130                 integrity_alg[SHARED_SECRET_MAX - 1] = 0;
3131         }
3132
3133         if (pi->cmd != P_PROTOCOL_UPDATE) {
3134                 clear_bit(CONN_DRY_RUN, &tconn->flags);
3135
3136                 if (cf & CF_DRY_RUN)
3137                         set_bit(CONN_DRY_RUN, &tconn->flags);
3138
3139                 rcu_read_lock();
3140                 nc = rcu_dereference(tconn->net_conf);
3141
3142                 if (p_proto != nc->wire_protocol) {
3143                         conn_err(tconn, "incompatible %s settings\n", "protocol");
3144                         goto disconnect_rcu_unlock;
3145                 }
3146
3147                 if (convert_after_sb(p_after_sb_0p) != nc->after_sb_0p) {
3148                         conn_err(tconn, "incompatible %s settings\n", "after-sb-0pri");
3149                         goto disconnect_rcu_unlock;
3150                 }
3151
3152                 if (convert_after_sb(p_after_sb_1p) != nc->after_sb_1p) {
3153                         conn_err(tconn, "incompatible %s settings\n", "after-sb-1pri");
3154                         goto disconnect_rcu_unlock;
3155                 }
3156
3157                 if (convert_after_sb(p_after_sb_2p) != nc->after_sb_2p) {
3158                         conn_err(tconn, "incompatible %s settings\n", "after-sb-2pri");
3159                         goto disconnect_rcu_unlock;
3160                 }
3161
3162                 if (p_discard_my_data && nc->discard_my_data) {
3163                         conn_err(tconn, "incompatible %s settings\n", "discard-my-data");
3164                         goto disconnect_rcu_unlock;
3165                 }
3166
3167                 if (p_two_primaries != nc->two_primaries) {
3168                         conn_err(tconn, "incompatible %s settings\n", "allow-two-primaries");
3169                         goto disconnect_rcu_unlock;
3170                 }
3171
3172                 if (strcmp(integrity_alg, nc->integrity_alg)) {
3173                         conn_err(tconn, "incompatible %s settings\n", "data-integrity-alg");
3174                         goto disconnect_rcu_unlock;
3175                 }
3176
3177                 rcu_read_unlock();
3178         }
3179
3180         if (integrity_alg[0]) {
3181                 int hash_size;
3182
3183                 /*
3184                  * We can only change the peer data integrity algorithm
3185                  * here.  Changing our own data integrity algorithm
3186                  * requires that we send a P_PROTOCOL_UPDATE packet at
3187                  * the same time; otherwise, the peer has no way to
3188                  * know at which packet boundary the algorithm
3189                  * changes.
3190                  */
3191
3192                 peer_integrity_tfm = crypto_alloc_hash(integrity_alg, 0, CRYPTO_ALG_ASYNC);
3193                 if (!peer_integrity_tfm) {
3194                         conn_err(tconn, "peer data-integrity-alg %s not supported\n",
3195                                  integrity_alg);
3196                         goto disconnect;
3197                 }
3198
3199                 hash_size = crypto_hash_digestsize(peer_integrity_tfm);
3200                 int_dig_in = kmalloc(hash_size, GFP_KERNEL);
3201                 int_dig_vv = kmalloc(hash_size, GFP_KERNEL);
3202                 if (!(int_dig_in && int_dig_vv)) {
3203                         conn_err(tconn, "Allocation of buffers for data integrity checking failed\n");
3204                         goto disconnect;
3205                 }
3206         }
3207
3208         new_net_conf = kmalloc(sizeof(struct net_conf), GFP_KERNEL);
3209         if (!new_net_conf) {
3210                 conn_err(tconn, "Allocation of new net_conf failed\n");
3211                 goto disconnect;
3212         }
3213
3214         mutex_lock(&tconn->data.mutex);
3215         mutex_lock(&tconn->conf_update);
3216         old_net_conf = tconn->net_conf;
3217         *new_net_conf = *old_net_conf;
3218
3219         new_net_conf->wire_protocol = p_proto;
3220         new_net_conf->after_sb_0p = convert_after_sb(p_after_sb_0p);
3221         new_net_conf->after_sb_1p = convert_after_sb(p_after_sb_1p);
3222         new_net_conf->after_sb_2p = convert_after_sb(p_after_sb_2p);
3223         new_net_conf->two_primaries = p_two_primaries;
3224
3225         rcu_assign_pointer(tconn->net_conf, new_net_conf);
3226         mutex_unlock(&tconn->conf_update);
3227         mutex_unlock(&tconn->data.mutex);
3228
3229         crypto_free_hash(tconn->peer_integrity_tfm);
3230         kfree(tconn->int_dig_in);
3231         kfree(tconn->int_dig_vv);
3232         tconn->peer_integrity_tfm = peer_integrity_tfm;
3233         tconn->int_dig_in = int_dig_in;
3234         tconn->int_dig_vv = int_dig_vv;
3235
3236         if (strcmp(old_net_conf->integrity_alg, integrity_alg))
3237                 conn_info(tconn, "peer data-integrity-alg: %s\n",
3238                           integrity_alg[0] ? integrity_alg : "(none)");
3239
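             /* wait for all RCU readers that may still see old_net_conf
              * before freeing it */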
3240         synchronize_rcu();
3241         kfree(old_net_conf);
3242         return 0;
3243
3244 disconnect_rcu_unlock:
3245         rcu_read_unlock();
3246 disconnect:
3247         crypto_free_hash(peer_integrity_tfm);
3248         kfree(int_dig_in);
3249         kfree(int_dig_vv);
3250         conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
3251         return -EIO;
3252 }
3253
3254 /* helper function
3255  * input: alg name, feature name
3256  * return: NULL (alg name was "")
3257  *         ERR_PTR(error) if something goes wrong
3258  *         or the crypto hash ptr, if it worked out ok. */
3259 struct crypto_hash *drbd_crypto_alloc_digest_safe(const struct drbd_conf *mdev,
3260                 const char *alg, const char *name)
3261 {
3262         struct crypto_hash *tfm;
3263
3264         if (!alg[0])
3265                 return NULL;
3266
3267         tfm = crypto_alloc_hash(alg, 0, CRYPTO_ALG_ASYNC);
3268         if (IS_ERR(tfm)) {
3269                 dev_err(DEV, "Can not allocate \"%s\" as %s (reason: %ld)\n",
3270                         alg, name, PTR_ERR(tfm));
3271                 return tfm;
3272         }
3273         return tfm;
3274 }
3275
3276 static int ignore_remaining_packet(struct drbd_tconn *tconn, struct packet_info *pi)
3277 {
3278         void *buffer = tconn->data.rbuf;
3279         int size = pi->size;
3280
3281         while (size) {
3282                 int s = min_t(int, size, DRBD_SOCKET_BUFFER_SIZE);
3283                 s = drbd_recv(tconn, buffer, s);
3284                 if (s <= 0) {
3285                         if (s < 0)
3286                                 return s;
3287                         break;
3288                 }
3289                 size -= s;
3290         }
3291         if (size)
3292                 return -EIO;
3293         return 0;
3294 }
3295
3296 /*
3297  * config_unknown_volume  -  device configuration command for unknown volume
3298  *
3299  * When a device is added to an existing connection, the node on which the
3300  * device is added first will send configuration commands to its peer but the
3301  * peer will not know about the device yet.  It will warn and ignore these
3302  * commands.  Once the device is added on the second node, the second node will
3303  * send the same device configuration commands, but in the other direction.
3304  *
3305  * (We can also end up here if drbd is misconfigured.)
3306  */
3307 static int config_unknown_volume(struct drbd_tconn *tconn, struct packet_info *pi)
3308 {
3309         conn_warn(tconn, "%s packet received for volume %u, which is not configured locally\n",
3310                   cmdname(pi->cmd), pi->vnr);
3311         return ignore_remaining_packet(tconn, pi);
3312 }
3313
3314 static int receive_SyncParam(struct drbd_tconn *tconn, struct packet_info *pi)
3315 {
3316         struct drbd_conf *mdev;
3317         struct p_rs_param_95 *p;
3318         unsigned int header_size, data_size, exp_max_sz;
3319         struct crypto_hash *verify_tfm = NULL;
3320         struct crypto_hash *csums_tfm = NULL;
3321         struct net_conf *old_net_conf, *new_net_conf = NULL;
3322         struct disk_conf *old_disk_conf = NULL, *new_disk_conf = NULL;
3323         const int apv = tconn->agreed_pro_version;
3324         struct fifo_buffer *old_plan = NULL, *new_plan = NULL;
3325         int fifo_size = 0;
3326         int err;
3327
3328         mdev = vnr_to_mdev(tconn, pi->vnr);
3329         if (!mdev)
3330                 return config_unknown_volume(tconn, pi);
3331
3332         exp_max_sz  = apv <= 87 ? sizeof(struct p_rs_param)
3333                     : apv == 88 ? sizeof(struct p_rs_param)
3334                                         + SHARED_SECRET_MAX
3335                     : apv <= 94 ? sizeof(struct p_rs_param_89)
3336                     : /* apv >= 95 */ sizeof(struct p_rs_param_95);
3337
3338         if (pi->size > exp_max_sz) {
3339                 dev_err(DEV, "SyncParam packet too long: received %u, expected <= %u bytes\n",
3340                     pi->size, exp_max_sz);
3341                 return -EIO;
3342         }
3343
3344         if (apv <= 88) {
3345                 header_size = sizeof(struct p_rs_param);
3346                 data_size = pi->size - header_size;
3347         } else if (apv <= 94) {
3348                 header_size = sizeof(struct p_rs_param_89);
3349                 data_size = pi->size - header_size;
3350                 D_ASSERT(data_size == 0);
3351         } else {
3352                 header_size = sizeof(struct p_rs_param_95);
3353                 data_size = pi->size - header_size;
3354                 D_ASSERT(data_size == 0);
3355         }
3356
3357         /* initialize verify_alg and csums_alg */
3358         p = pi->data;
3359         memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);
3360
3361         err = drbd_recv_all(mdev->tconn, p, header_size);
3362         if (err)
3363                 return err;
3364
3365         mutex_lock(&mdev->tconn->conf_update);
3366         old_net_conf = mdev->tconn->net_conf;
3367         if (get_ldev(mdev)) {
3368                 new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL);
3369                 if (!new_disk_conf) {
3370                         put_ldev(mdev);
3371                         mutex_unlock(&mdev->tconn->conf_update);
3372                         dev_err(DEV, "Allocation of new disk_conf failed\n");
3373                         return -ENOMEM;
3374                 }
3375
3376                 old_disk_conf = mdev->ldev->disk_conf;
3377                 *new_disk_conf = *old_disk_conf;
3378
3379                 new_disk_conf->resync_rate = be32_to_cpu(p->resync_rate);
3380         }
3381
3382         if (apv >= 88) {
3383                 if (apv == 88) {
3384                         if (data_size > SHARED_SECRET_MAX || data_size == 0) {
3385                                 dev_err(DEV, "verify-alg of wrong size, "
3386                                         "peer wants %u, accepting only up to %u bytes\n",
3387                                         data_size, SHARED_SECRET_MAX);
3388                                 err = -EIO;
3389                                 goto reconnect;
3390                         }
3391
3392                         err = drbd_recv_all(mdev->tconn, p->verify_alg, data_size);
3393                         if (err)
3394                                 goto reconnect;
3395                         /* we expect NUL terminated string */
3396                         /* but just in case someone tries to be evil */
3397                         D_ASSERT(p->verify_alg[data_size-1] == 0);
3398                         p->verify_alg[data_size-1] = 0;
3399
3400                 } else /* apv >= 89 */ {
3401                         /* we still expect NUL terminated strings */
3402                         /* but just in case someone tries to be evil */
3403                         D_ASSERT(p->verify_alg[SHARED_SECRET_MAX-1] == 0);
3404                         D_ASSERT(p->csums_alg[SHARED_SECRET_MAX-1] == 0);
3405                         p->verify_alg[SHARED_SECRET_MAX-1] = 0;
3406                         p->csums_alg[SHARED_SECRET_MAX-1] = 0;
3407                 }
3408
3409                 if (strcmp(old_net_conf->verify_alg, p->verify_alg)) {
3410                         if (mdev->state.conn == C_WF_REPORT_PARAMS) {
3411                                 dev_err(DEV, "Different verify-alg settings. me=\"%s\" peer=\"%s\"\n",
3412                                     old_net_conf->verify_alg, p->verify_alg);
3413                                 goto disconnect;
3414                         }
3415                         verify_tfm = drbd_crypto_alloc_digest_safe(mdev,
3416                                         p->verify_alg, "verify-alg");
3417                         if (IS_ERR(verify_tfm)) {
3418                                 verify_tfm = NULL;
3419                                 goto disconnect;
3420                         }
3421                 }
3422
3423                 if (apv >= 89 && strcmp(old_net_conf->csums_alg, p->csums_alg)) {
3424                         if (mdev->state.conn == C_WF_REPORT_PARAMS) {
3425                                 dev_err(DEV, "Different csums-alg settings. me=\"%s\" peer=\"%s\"\n",
3426                                     old_net_conf->csums_alg, p->csums_alg);
3427                                 goto disconnect;
3428                         }
3429                         csums_tfm = drbd_crypto_alloc_digest_safe(mdev,
3430                                         p->csums_alg, "csums-alg");
3431                         if (IS_ERR(csums_tfm)) {
3432                                 csums_tfm = NULL;
3433                                 goto disconnect;
3434                         }
3435                 }
3436
3437                 if (apv > 94 && new_disk_conf) {
3438                         new_disk_conf->c_plan_ahead = be32_to_cpu(p->c_plan_ahead);
3439                         new_disk_conf->c_delay_target = be32_to_cpu(p->c_delay_target);
3440                         new_disk_conf->c_fill_target = be32_to_cpu(p->c_fill_target);
3441                         new_disk_conf->c_max_rate = be32_to_cpu(p->c_max_rate);
3442
3443                         fifo_size = (new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ;
3444                         if (fifo_size != mdev->rs_plan_s->size) {
3445                                 new_plan = fifo_alloc(fifo_size);
3446                                 if (!new_plan) {
3447                                         dev_err(DEV, "kmalloc of fifo_buffer failed\n");
3448                                         put_ldev(mdev);
3449                                         goto disconnect;
3450                                 }
3451                         }
3452                 }
3453
3454                 if (verify_tfm || csums_tfm) {
3455                         new_net_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL);
3456                         if (!new_net_conf) {
3457                                 dev_err(DEV, "Allocation of new net_conf failed\n");
3458                                 goto disconnect;
3459                         }
3460
3461                         *new_net_conf = *old_net_conf;
3462
3463                         if (verify_tfm) {
3464                                 strcpy(new_net_conf->verify_alg, p->verify_alg);
3465                                 new_net_conf->verify_alg_len = strlen(p->verify_alg) + 1;
3466                                 crypto_free_hash(mdev->tconn->verify_tfm);
3467                                 mdev->tconn->verify_tfm = verify_tfm;
3468                                 dev_info(DEV, "using verify-alg: \"%s\"\n", p->verify_alg);
3469                         }
3470                         if (csums_tfm) {
3471                                 strcpy(new_net_conf->csums_alg, p->csums_alg);
3472                                 new_net_conf->csums_alg_len = strlen(p->csums_alg) + 1;
3473                                 crypto_free_hash(mdev->tconn->csums_tfm);
3474                                 mdev->tconn->csums_tfm = csums_tfm;
3475                                 dev_info(DEV, "using csums-alg: \"%s\"\n", p->csums_alg);
3476                         }
3477                         rcu_assign_pointer(tconn->net_conf, new_net_conf);
3478                 }
3479         }
3480
3481         if (new_disk_conf) {
3482                 rcu_assign_pointer(mdev->ldev->disk_conf, new_disk_conf);
3483                 put_ldev(mdev);
3484         }
3485
3486         if (new_plan) {
3487                 old_plan = mdev->rs_plan_s;
3488                 rcu_assign_pointer(mdev->rs_plan_s, new_plan);
3489         }
3490
3491         mutex_unlock(&mdev->tconn->conf_update);
3492         synchronize_rcu();
3493         if (new_net_conf)
3494                 kfree(old_net_conf);
3495         kfree(old_disk_conf);
3496         kfree(old_plan);
3497
3498         return 0;
3499
3500 reconnect:
3501         if (new_disk_conf) {
3502                 put_ldev(mdev);
3503                 kfree(new_disk_conf);
3504         }
3505         mutex_unlock(&mdev->tconn->conf_update);
3506         return -EIO;
3507
3508 disconnect:
3509         kfree(new_plan);
3510         if (new_disk_conf) {
3511                 put_ldev(mdev);
3512                 kfree(new_disk_conf);
3513         }
3514         mutex_unlock(&mdev->tconn->conf_update);
3515         /* just for completeness: actually not needed,
3516          * as this is not reached if csums_tfm was ok. */
3517         crypto_free_hash(csums_tfm);
3518         /* but free the verify_tfm again, if csums_tfm did not work out */
3519         crypto_free_hash(verify_tfm);
3520         conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
3521         return -EIO;
3522 }
3523
3524 /* warn if the arguments differ by more than 12.5% */
3525 static void warn_if_differ_considerably(struct drbd_conf *mdev,
3526         const char *s, sector_t a, sector_t b)
3527 {
3528         sector_t d;
3529         if (a == 0 || b == 0)
3530                 return;
3531         d = (a > b) ? (a - b) : (b - a);
3532         if (d > (a>>3) || d > (b>>3))
3533                 dev_warn(DEV, "Considerable difference in %s: %llus vs. %llus\n", s,
3534                      (unsigned long long)a, (unsigned long long)b);
3535 }
3536
3537 static int receive_sizes(struct drbd_tconn *tconn, struct packet_info *pi)
3538 {
3539         struct drbd_conf *mdev;
3540         struct p_sizes *p = pi->data;
3541         enum determine_dev_size dd = DS_UNCHANGED;
3542         sector_t p_size, p_usize, my_usize;
3543         int ldsc = 0; /* local disk size changed */
3544         enum dds_flags ddsf;
3545
3546         mdev = vnr_to_mdev(tconn, pi->vnr);
3547         if (!mdev)
3548                 return config_unknown_volume(tconn, pi);
3549
3550         p_size = be64_to_cpu(p->d_size);
3551         p_usize = be64_to_cpu(p->u_size);
3552
3553         /* just store the peer's disk size for now.
3554          * we still need to figure out whether we accept that. */
3555         mdev->p_size = p_size;
3556
3557         if (get_ldev(mdev)) {
3558                 rcu_read_lock();
3559                 my_usize = rcu_dereference(mdev->ldev->disk_conf)->disk_size;
3560                 rcu_read_unlock();
3561
3562                 warn_if_differ_considerably(mdev, "lower level device sizes",
3563                            p_size, drbd_get_max_capacity(mdev->ldev));
3564                 warn_if_differ_considerably(mdev, "user requested size",
3565                                             p_usize, my_usize);
3566
3567                 /* if this is the first connect, or an otherwise expected
3568                  * param exchange, choose the minimum */
3569                 if (mdev->state.conn == C_WF_REPORT_PARAMS)
3570                         p_usize = min_not_zero(my_usize, p_usize);
3571
3572                 /* Never shrink a device with usable data during connect.
3573                    But allow online shrinking if we are connected. */
3574                 if (drbd_new_dev_size(mdev, mdev->ldev, p_usize, 0) <
3575                     drbd_get_capacity(mdev->this_bdev) &&
3576                     mdev->state.disk >= D_OUTDATED &&
3577                     mdev->state.conn < C_CONNECTED) {
3578                         dev_err(DEV, "The peer's disk size is too small!\n");
3579                         conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
3580                         put_ldev(mdev);
3581                         return -EIO;
3582                 }
3583
3584                 if (my_usize != p_usize) {
3585                         struct disk_conf *old_disk_conf, *new_disk_conf = NULL;
3586
3587                         new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL);
3588                         if (!new_disk_conf) {
3589                                 dev_err(DEV, "Allocation of new disk_conf failed\n");
3590                                 put_ldev(mdev);
3591                                 return -ENOMEM;
3592                         }
3593
3594                         mutex_lock(&mdev->tconn->conf_update);
3595                         old_disk_conf = mdev->ldev->disk_conf;
3596                         *new_disk_conf = *old_disk_conf;
3597                         new_disk_conf->disk_size = p_usize;
3598
3599                         rcu_assign_pointer(mdev->ldev->disk_conf, new_disk_conf);
3600                         mutex_unlock(&mdev->tconn->conf_update);
3601                         synchronize_rcu();
3602                         kfree(old_disk_conf);
3603
3604                         dev_info(DEV, "Peer sets u_size to %lu sectors\n",
3605                                  (unsigned long)p_usize);
3606                 }
3607
3608                 put_ldev(mdev);
3609         }
3610
3611         ddsf = be16_to_cpu(p->dds_flags);
3612         if (get_ldev(mdev)) {
3613                 dd = drbd_determine_dev_size(mdev, ddsf, NULL);
3614                 put_ldev(mdev);
3615                 if (dd == DS_ERROR)
3616                         return -EIO;
3617                 drbd_md_sync(mdev);
3618         } else {
3619                 /* I am diskless, need to accept the peer's size. */
3620                 drbd_set_my_capacity(mdev, p_size);
3621         }
3622
3623         mdev->peer_max_bio_size = be32_to_cpu(p->max_bio_size);
3624         drbd_reconsider_max_bio_size(mdev);
3625
3626         if (get_ldev(mdev)) {
3627                 if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev)) {
3628                         mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);
3629                         ldsc = 1;
3630                 }
3631
3632                 put_ldev(mdev);
3633         }
3634
3635         if (mdev->state.conn > C_WF_REPORT_PARAMS) {
3636                 if (be64_to_cpu(p->c_size) !=
3637                     drbd_get_capacity(mdev->this_bdev) || ldsc) {
3638                         /* we have different sizes, probably peer
3639                          * needs to know my new size... */
3640                         drbd_send_sizes(mdev, 0, ddsf);
3641                 }
3642                 if (test_and_clear_bit(RESIZE_PENDING, &mdev->flags) ||
3643                     (dd == DS_GREW && mdev->state.conn == C_CONNECTED)) {
3644                         if (mdev->state.pdsk >= D_INCONSISTENT &&
3645                             mdev->state.disk >= D_INCONSISTENT) {
3646                                 if (ddsf & DDSF_NO_RESYNC)
3647                                         dev_info(DEV, "Resync of new storage suppressed with --assume-clean\n");
3648                                 else
3649                                         resync_after_online_grow(mdev);
3650                         } else
3651                                 set_bit(RESYNC_AFTER_NEG, &mdev->flags);
3652                 }
3653         }
3654
3655         return 0;
3656 }
3657
3658 static int receive_uuids(struct drbd_tconn *tconn, struct packet_info *pi)
3659 {
3660         struct drbd_conf *mdev;
3661         struct p_uuids *p = pi->data;
3662         u64 *p_uuid;
3663         int i, updated_uuids = 0;
3664
3665         mdev = vnr_to_mdev(tconn, pi->vnr);
3666         if (!mdev)
3667                 return config_unknown_volume(tconn, pi);
3668
3669         p_uuid = kmalloc(sizeof(u64)*UI_EXTENDED_SIZE, GFP_NOIO);
3670         if (!p_uuid) {
3671                 dev_err(DEV, "kmalloc of p_uuid failed\n");
3672                 return -ENOMEM;
3673         }
3674
3675         for (i = UI_CURRENT; i < UI_EXTENDED_SIZE; i++)
3676                 p_uuid[i] = be64_to_cpu(p->uuid[i]);
3677
3678         kfree(mdev->p_uuid);
3679         mdev->p_uuid = p_uuid;
3680
3681         if (mdev->state.conn < C_CONNECTED &&
3682             mdev->state.disk < D_INCONSISTENT &&
3683             mdev->state.role == R_PRIMARY &&
3684             (mdev->ed_uuid & ~((u64)1)) != (p_uuid[UI_CURRENT] & ~((u64)1))) {
3685                 dev_err(DEV, "Can only connect to data with current UUID=%016llX\n",
3686                     (unsigned long long)mdev->ed_uuid);
3687                 conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
3688                 return -EIO;
3689         }
3690
3691         if (get_ldev(mdev)) {
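                     /* flag 8 in UI_FLAGS: the peer asks to skip the
                      * initial full sync */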
3692                 int skip_initial_sync =
3693                         mdev->state.conn == C_CONNECTED &&
3694                         mdev->tconn->agreed_pro_version >= 90 &&
3695                         mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED &&
3696                         (p_uuid[UI_FLAGS] & 8);
3697                 if (skip_initial_sync) {
3698                         dev_info(DEV, "Accepted new current UUID, preparing to skip initial sync\n");
3699                         drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write,
3700                                         "clear_n_write from receive_uuids",
3701                                         BM_LOCKED_TEST_ALLOWED);
3702                         _drbd_uuid_set(mdev, UI_CURRENT, p_uuid[UI_CURRENT]);
3703                         _drbd_uuid_set(mdev, UI_BITMAP, 0);
3704                         _drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
3705                                         CS_VERBOSE, NULL);
3706                         drbd_md_sync(mdev);
3707                         updated_uuids = 1;
3708                 }
3709                 put_ldev(mdev);
3710         } else if (mdev->state.disk < D_INCONSISTENT &&
3711                    mdev->state.role == R_PRIMARY) {
3712                 /* I am a diskless primary, the peer just created a new current UUID
3713                    for me. */
3714                 updated_uuids = drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);
3715         }
3716
3717         /* Before we test the disk state, wait until a possibly ongoing
3718            cluster-wide state change has finished. That is important if
3719            we are primary and are detaching from our disk. We need to see the
3720            new disk state... */
3721         mutex_lock(mdev->state_mutex);
3722         mutex_unlock(mdev->state_mutex);
3723         if (mdev->state.conn >= C_CONNECTED && mdev->state.disk < D_INCONSISTENT)
3724                 updated_uuids |= drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);
3725
3726         if (updated_uuids)
3727                 drbd_print_uuids(mdev, "receiver updated UUIDs to");
3728
3729         return 0;
3730 }
3731
3732 /**
3733  * convert_state() - Converts the peer's view of the cluster state to our point of view
3734  * @ps:         The state as seen by the peer.
3735  */
3736 static union drbd_state convert_state(union drbd_state ps)
3737 {
3738         union drbd_state ms;
3739
3740         static enum drbd_conns c_tab[] = {
3741                 [C_WF_REPORT_PARAMS] = C_WF_REPORT_PARAMS,
3742                 [C_CONNECTED] = C_CONNECTED,
3743
3744                 [C_STARTING_SYNC_S] = C_STARTING_SYNC_T,
3745                 [C_STARTING_SYNC_T] = C_STARTING_SYNC_S,
3746                 [C_DISCONNECTING] = C_TEAR_DOWN, /* C_NETWORK_FAILURE, */
3747                 [C_VERIFY_S]       = C_VERIFY_T,
3748                 [C_MASK]   = C_MASK,
3749         };
3750
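             /* swap the point of view: what the peer reports as its own role
              * and disk becomes, seen from here, the peer's role and disk
              * (and vice versa) */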
3751         ms.i = ps.i;
3752
3753         ms.conn = c_tab[ps.conn];
3754         ms.peer = ps.role;
3755         ms.role = ps.peer;
3756         ms.pdsk = ps.disk;
3757         ms.disk = ps.pdsk;
3758         ms.peer_isp = (ps.aftr_isp | ps.user_isp);
3759
3760         return ms;
3761 }
3762
3763 static int receive_req_state(struct drbd_tconn *tconn, struct packet_info *pi)
3764 {
3765         struct drbd_conf *mdev;
3766         struct p_req_state *p = pi->data;
3767         union drbd_state mask, val;
3768         enum drbd_state_rv rv;
3769
3770         mdev = vnr_to_mdev(tconn, pi->vnr);
3771         if (!mdev)
3772                 return -EIO;
3773
3774         mask.i = be32_to_cpu(p->mask);
3775         val.i = be32_to_cpu(p->val);
3776
3777         if (test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags) &&
3778             mutex_is_locked(mdev->state_mutex)) {
3779                 drbd_send_sr_reply(mdev, SS_CONCURRENT_ST_CHG);
3780                 return 0;
3781         }
3782
3783         mask = convert_state(mask);
3784         val = convert_state(val);
3785
3786         rv = drbd_change_state(mdev, CS_VERBOSE, mask, val);
3787         drbd_send_sr_reply(mdev, rv);
3788
3789         drbd_md_sync(mdev);
3790
3791         return 0;
3792 }
3793
3794 static int receive_req_conn_state(struct drbd_tconn *tconn, struct packet_info *pi)
3795 {
3796         struct p_req_state *p = pi->data;
3797         union drbd_state mask, val;
3798         enum drbd_state_rv rv;
3799
3800         mask.i = be32_to_cpu(p->mask);
3801         val.i = be32_to_cpu(p->val);
3802
3803         if (test_bit(RESOLVE_CONFLICTS, &tconn->flags) &&
3804             mutex_is_locked(&tconn->cstate_mutex)) {
3805                 conn_send_sr_reply(tconn, SS_CONCURRENT_ST_CHG);
3806                 return 0;
3807         }
3808
3809         mask = convert_state(mask);
3810         val = convert_state(val);
3811
3812         rv = conn_request_state(tconn, mask, val, CS_VERBOSE | CS_LOCAL_ONLY | CS_IGN_OUTD_FAIL);
3813         conn_send_sr_reply(tconn, rv);
3814
3815         return 0;
3816 }
3817
3818 static int receive_state(struct drbd_tconn *tconn, struct packet_info *pi)
3819 {
3820         struct drbd_conf *mdev;
3821         struct p_state *p = pi->data;
3822         union drbd_state os, ns, peer_state;
3823         enum drbd_disk_state real_peer_disk;
3824         enum chg_state_flags cs_flags;
3825         int rv;
3826
3827         mdev = vnr_to_mdev(tconn, pi->vnr);
3828         if (!mdev)
3829                 return config_unknown_volume(tconn, pi);
3830
3831         peer_state.i = be32_to_cpu(p->state);
3832
3833         real_peer_disk = peer_state.disk;
3834         if (peer_state.disk == D_NEGOTIATING) {
3835                 real_peer_disk = mdev->p_uuid[UI_FLAGS] & 4 ? D_INCONSISTENT : D_CONSISTENT;
3836                 dev_info(DEV, "real peer disk state = %s\n", drbd_disk_str(real_peer_disk));
3837         }
3838
3839         spin_lock_irq(&mdev->tconn->req_lock);
3840  retry:
3841         os = ns = drbd_read_state(mdev);
3842         spin_unlock_irq(&mdev->tconn->req_lock);
3843
3844         /* If some other part of the code (asender thread, timeout)
3845          * already decided to close the connection again,
3846          * we must not "re-establish" it here. */
3847         if (os.conn <= C_TEAR_DOWN)
3848                 return -ECONNRESET;
3849
3850         /* If this is the "end of sync" confirmation, usually the peer disk
3851          * transitions from D_INCONSISTENT to D_UP_TO_DATE. For an empty resync
3852          * (0 bits set) started in PausedSyncT, or if the timing of pause-/
3853          * unpause-sync events has been "just right", the peer disk may
3854          * transition from D_CONSISTENT to D_UP_TO_DATE as well.
3855          */
3856         if ((os.pdsk == D_INCONSISTENT || os.pdsk == D_CONSISTENT) &&
3857             real_peer_disk == D_UP_TO_DATE &&
3858             os.conn > C_CONNECTED && os.disk == D_UP_TO_DATE) {
3859                 /* If we are (becoming) SyncSource, but peer is still in sync
3860                  * preparation, ignore its uptodate-ness to avoid flapping, it
3861                  * will change to inconsistent once the peer reaches active
3862                  * syncing states.
3863                  * It may have changed syncer-paused flags, however, so we
3864                  * cannot ignore this completely. */
3865                 if (peer_state.conn > C_CONNECTED &&
3866                     peer_state.conn < C_SYNC_SOURCE)
3867                         real_peer_disk = D_INCONSISTENT;
3868
3869                 /* if peer_state changes to connected at the same time,
3870                  * it explicitly notifies us that it finished resync.
3871                  * Maybe we should finish it up, too? */
3872                 else if (os.conn >= C_SYNC_SOURCE &&
3873                          peer_state.conn == C_CONNECTED) {
3874                         if (drbd_bm_total_weight(mdev) <= mdev->rs_failed)
3875                                 drbd_resync_finished(mdev);
3876                         return 0;
3877                 }
3878         }
3879
3880         /* explicit verify finished notification, stop sector reached. */
3881         if (os.conn == C_VERIFY_T && os.disk == D_UP_TO_DATE &&
3882             peer_state.conn == C_CONNECTED && real_peer_disk == D_UP_TO_DATE) {
3883                 ov_out_of_sync_print(mdev);
3884                 drbd_resync_finished(mdev);
3885                 return 0;
3886         }
3887
3888         /* peer says his disk is inconsistent, while we think it is uptodate,
3889          * and this happens while the peer still thinks we have a sync going on,
3890          * but we think we are already done with the sync.
3891          * We ignore this to avoid flapping pdsk.
3892          * This should not happen, if the peer is a recent version of drbd. */
3893         if (os.pdsk == D_UP_TO_DATE && real_peer_disk == D_INCONSISTENT &&
3894             os.conn == C_CONNECTED && peer_state.conn > C_SYNC_SOURCE)
3895                 real_peer_disk = D_UP_TO_DATE;
3896
3897         if (ns.conn == C_WF_REPORT_PARAMS)
3898                 ns.conn = C_CONNECTED;
3899
3900         if (peer_state.conn == C_AHEAD)
3901                 ns.conn = C_BEHIND;
3902
3903         if (mdev->p_uuid && peer_state.disk >= D_NEGOTIATING &&
3904             get_ldev_if_state(mdev, D_NEGOTIATING)) {
3905                 int cr; /* consider resync */
3906
3907                 /* if we established a new connection */
3908                 cr  = (os.conn < C_CONNECTED);
3909                 /* if we had an established connection
3910                  * and one of the nodes newly attaches a disk */
3911                 cr |= (os.conn == C_CONNECTED &&
3912                        (peer_state.disk == D_NEGOTIATING ||
3913                         os.disk == D_NEGOTIATING));
3914                 /* if we have both been inconsistent, and the peer has been
3915                  * forced to be UpToDate with --overwrite-data */
3916                 cr |= test_bit(CONSIDER_RESYNC, &mdev->flags);
3917                 /* if we had been plain connected, and the admin requested to
3918                  * start a sync by "invalidate" or "invalidate-remote" */
3919                 cr |= (os.conn == C_CONNECTED &&
3920                                 (peer_state.conn >= C_STARTING_SYNC_S &&
3921                                  peer_state.conn <= C_WF_BITMAP_T));
3922
3923                 if (cr)
3924                         ns.conn = drbd_sync_handshake(mdev, peer_state.role, real_peer_disk);
3925
3926                 put_ldev(mdev);
3927                 if (ns.conn == C_MASK) {
3928                         ns.conn = C_CONNECTED;
3929                         if (mdev->state.disk == D_NEGOTIATING) {
3930                                 drbd_force_state(mdev, NS(disk, D_FAILED));
3931                         } else if (peer_state.disk == D_NEGOTIATING) {
3932                                 dev_err(DEV, "Disk attach process on the peer node was aborted.\n");
3933                                 peer_state.disk = D_DISKLESS;
3934                                 real_peer_disk = D_DISKLESS;
3935                         } else {
3936                                 if (test_and_clear_bit(CONN_DRY_RUN, &mdev->tconn->flags))
3937                                         return -EIO;
3938                                 D_ASSERT(os.conn == C_WF_REPORT_PARAMS);
3939                                 conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
3940                                 return -EIO;
3941                         }
3942                 }
3943         }
3944
3945         spin_lock_irq(&mdev->tconn->req_lock);
3946         if (os.i != drbd_read_state(mdev).i)
3947                 goto retry;
3948         clear_bit(CONSIDER_RESYNC, &mdev->flags);
3949         ns.peer = peer_state.role;
3950         ns.pdsk = real_peer_disk;
3951         ns.peer_isp = (peer_state.aftr_isp | peer_state.user_isp);
3952         if ((ns.conn == C_CONNECTED || ns.conn == C_WF_BITMAP_S) && ns.disk == D_NEGOTIATING)
3953                 ns.disk = mdev->new_state_tmp.disk;
3954         cs_flags = CS_VERBOSE + (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED ? 0 : CS_HARD);
3955         if (ns.pdsk == D_CONSISTENT && drbd_suspended(mdev) && ns.conn == C_CONNECTED && os.conn < C_CONNECTED &&
3956             test_bit(NEW_CUR_UUID, &mdev->flags)) {
3957                 /* Do not allow tl_restart(RESEND) for a rebooted peer. We can only allow this
3958                    for temporary network outages! */
3959                 spin_unlock_irq(&mdev->tconn->req_lock);
3960                 dev_err(DEV, "Aborting Connect, cannot thaw IO with a peer that is only Consistent\n");
3961                 tl_clear(mdev->tconn);
3962                 drbd_uuid_new_current(mdev);
3963                 clear_bit(NEW_CUR_UUID, &mdev->flags);
3964                 conn_request_state(mdev->tconn, NS2(conn, C_PROTOCOL_ERROR, susp, 0), CS_HARD);
3965                 return -EIO;
3966         }
3967         rv = _drbd_set_state(mdev, ns, cs_flags, NULL);
3968         ns = drbd_read_state(mdev);
3969         spin_unlock_irq(&mdev->tconn->req_lock);
3970
3971         if (rv < SS_SUCCESS) {
3972                 conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
3973                 return -EIO;
3974         }
3975
3976         if (os.conn > C_WF_REPORT_PARAMS) {
3977                 if (ns.conn > C_CONNECTED && peer_state.conn <= C_CONNECTED &&
3978                     peer_state.disk != D_NEGOTIATING ) {
3979                         /* we want resync, peer has not yet decided to sync... */
3980                         /* Nowadays only used when forcing a node into primary role and
3981                            setting its disk to UpToDate with that */
3982                         drbd_send_uuids(mdev);
3983                         drbd_send_current_state(mdev);
3984                 }
3985         }
3986
3987         clear_bit(DISCARD_MY_DATA, &mdev->flags);
3988
3989         drbd_md_sync(mdev); /* update connected indicator, la_size_sect, ... */
3990
3991         return 0;
3992 }
3993
3994 static int receive_sync_uuid(struct drbd_tconn *tconn, struct packet_info *pi)
3995 {
3996         struct drbd_conf *mdev;
3997         struct p_rs_uuid *p = pi->data;
3998
3999         mdev = vnr_to_mdev(tconn, pi->vnr);
4000         if (!mdev)
4001                 return -EIO;
4002
4003         wait_event(mdev->misc_wait,
4004                    mdev->state.conn == C_WF_SYNC_UUID ||
4005                    mdev->state.conn == C_BEHIND ||
4006                    mdev->state.conn < C_CONNECTED ||
4007                    mdev->state.disk < D_NEGOTIATING);
4008
4009         /* D_ASSERT( mdev->state.conn == C_WF_SYNC_UUID ); */
4010
4011         /* Here the _drbd_uuid_ functions are right, current should
4012            _not_ be rotated into the history */
4013         if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
4014                 _drbd_uuid_set(mdev, UI_CURRENT, be64_to_cpu(p->uuid));
4015                 _drbd_uuid_set(mdev, UI_BITMAP, 0UL);
4016
4017                 drbd_print_uuids(mdev, "updated sync uuid");
4018                 drbd_start_resync(mdev, C_SYNC_TARGET);
4019
4020                 put_ldev(mdev);
4021         } else
4022                 dev_err(DEV, "Ignoring SyncUUID packet!\n");
4023
4024         return 0;
4025 }
4026
4027 /**
4028  * receive_bitmap_plain
4029  *
4030  * Return 0 when done, 1 when another iteration is needed, and a negative error
4031  * code upon failure.
4032  */
4033 static int
4034 receive_bitmap_plain(struct drbd_conf *mdev, unsigned int size,
4035                      unsigned long *p, struct bm_xfer_ctx *c)
4036 {
4037         unsigned int data_size = DRBD_SOCKET_BUFFER_SIZE -
4038                                  drbd_header_size(mdev->tconn);
4039         unsigned int num_words = min_t(size_t, data_size / sizeof(*p),
4040                                        c->bm_words - c->word_offset);
4041         unsigned int want = num_words * sizeof(*p);
4042         int err;
4043
4044         if (want != size) {
4045                 dev_err(DEV, "%s:want (%u) != size (%u)\n", __func__, want, size);
4046                 return -EIO;
4047         }
4048         if (want == 0)
4049                 return 0;
4050         err = drbd_recv_all(mdev->tconn, p, want);
4051         if (err)
4052                 return err;
4053
4054         drbd_bm_merge_lel(mdev, c->word_offset, num_words, p);
4055
4056         c->word_offset += num_words;
4057         c->bit_offset = c->word_offset * BITS_PER_LONG;
4058         if (c->bit_offset > c->bm_bits)
4059                 c->bit_offset = c->bm_bits;
4060
4061         return 1;
4062 }
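
/*
 * Editorial sketch, compiled out below: the chunking arithmetic in
 * receive_bitmap_plain() above caps each chunk at the socket buffer minus
 * the header and clamps the last chunk to the words that remain.  A minimal
 * userspace model; BUF_SIZE and HDR_SIZE are hypothetical stand-ins for
 * DRBD_SOCKET_BUFFER_SIZE and drbd_header_size().
 */
#if 0
#include <stdio.h>

#define BUF_SIZE 4096
#define HDR_SIZE 8

int main(void)
{
        unsigned long bm_words = 1500;  /* total words in the bitmap */
        unsigned long word_offset = 0;
        unsigned int data_size = BUF_SIZE - HDR_SIZE;

        while (word_offset < bm_words) {
                unsigned long num_words = data_size / sizeof(unsigned long);

                if (num_words > bm_words - word_offset)
                        num_words = bm_words - word_offset;
                printf("chunk: %lu words at offset %lu\n",
                       num_words, word_offset);
                word_offset += num_words;
        }
        return 0;
}
#endif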
4063
4064 static enum drbd_bitmap_code dcbp_get_code(struct p_compressed_bm *p)
4065 {
4066         return (enum drbd_bitmap_code)(p->encoding & 0x0f);
4067 }
4068
4069 static int dcbp_get_start(struct p_compressed_bm *p)
4070 {
4071         return (p->encoding & 0x80) != 0;
4072 }
4073
4074 static int dcbp_get_pad_bits(struct p_compressed_bm *p)
4075 {
4076         return (p->encoding >> 4) & 0x7;
4077 }
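
/*
 * Editorial note: the three helpers above unpack a single "encoding" byte:
 * bits 0-3 carry the bitmap code, bits 4-6 the pad bit count, and bit 7 the
 * initial toggle.  A sketch of the matching pack operation follows;
 * dcbp_pack() is a hypothetical name, the real sender side lives elsewhere.
 */
#if 0
static unsigned char dcbp_pack(unsigned int code, unsigned int pad_bits,
                               int start)
{
        return (code & 0x0f) |
               ((pad_bits & 0x7) << 4) |
               (start ? 0x80 : 0);
}
#endif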
4078
4079 /**
4080  * recv_bm_rle_bits - decode one RLE-compressed chunk of the bitmap
4081  *
4082  * Return 0 when done, 1 when another iteration is needed, and a negative error
4083  * code upon failure.
4084  */
4085 static int
4086 recv_bm_rle_bits(struct drbd_conf *mdev,
4087                  struct p_compressed_bm *p,
4088                  struct bm_xfer_ctx *c,
4089                  unsigned int len)
4090 {
4091         struct bitstream bs;
4092         u64 look_ahead;
4093         u64 rl;
4094         u64 tmp;
4095         unsigned long s = c->bit_offset;
4096         unsigned long e;
4097         int toggle = dcbp_get_start(p);
4098         int have;
4099         int bits;
4100
4101         bitstream_init(&bs, p->code, len, dcbp_get_pad_bits(p));
4102
4103         bits = bitstream_get_bits(&bs, &look_ahead, 64);
4104         if (bits < 0)
4105                 return -EIO;
4106
4107         for (have = bits; have > 0; s += rl, toggle = !toggle) {
4108                 bits = vli_decode_bits(&rl, look_ahead);
4109                 if (bits <= 0)
4110                         return -EIO;
4111
4112                 if (toggle) {
4113                         e = s + rl - 1;
4114                         if (e >= c->bm_bits) {
4115                                 dev_err(DEV, "bitmap overflow (e:%lu) while decoding bm RLE packet\n", e);
4116                                 return -EIO;
4117                         }
4118                         _drbd_bm_set_bits(mdev, s, e);
4119                 }
4120
4121                 if (have < bits) {
4122                         dev_err(DEV, "bitmap decoding error: h:%d b:%d la:0x%08llx l:%u/%u\n",
4123                                 have, bits, look_ahead,
4124                                 (unsigned int)(bs.cur.b - p->code),
4125                                 (unsigned int)bs.buf_len);
4126                         return -EIO;
4127                 }
4128                 /* if we consumed all 64 bits, assign 0; >> 64 is "undefined"; */
4129                 if (likely(bits < 64))
4130                         look_ahead >>= bits;
4131                 else
4132                         look_ahead = 0;
4133                 have -= bits;
4134
4135                 bits = bitstream_get_bits(&bs, &tmp, 64 - have);
4136                 if (bits < 0)
4137                         return -EIO;
4138                 look_ahead |= tmp << have;
4139                 have += bits;
4140         }
4141
4142         c->bit_offset = s;
4143         bm_xfer_ctx_bit_to_word_offset(c);
4144
4145         return (s != c->bm_bits);
4146 }
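
/*
 * Editorial sketch, compiled out below: recv_bm_rle_bits() alternates
 * between runs of clear and set bits, and only the "set" runs touch the
 * bitmap.  This userspace model walks precomputed run lengths; the VLI
 * bitstream decoding is omitted and all names are illustrative.
 */
#if 0
#include <stdio.h>

int main(void)
{
        /* alternating runs: clear, set, clear, set */
        unsigned long runs[] = { 5, 3, 10, 2 };
        unsigned long s = 0;
        int toggle = 0; /* cf. dcbp_get_start(): first run is "clear" */
        unsigned int i;

        for (i = 0; i < sizeof(runs) / sizeof(runs[0]); i++) {
                unsigned long rl = runs[i];

                if (toggle)
                        printf("set bits %lu..%lu\n", s, s + rl - 1);
                s += rl;
                toggle = !toggle;
        }
        printf("decoded %lu bits total\n", s);
        return 0;
}
#endif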
4147
4148 /**
4149  * decode_bitmap_c - dispatch decoding of one compressed bitmap chunk
4150  *
4151  * Return 0 when done, 1 when another iteration is needed, and a negative error
4152  * code upon failure.
4153  */
4154 static int
4155 decode_bitmap_c(struct drbd_conf *mdev,
4156                 struct p_compressed_bm *p,
4157                 struct bm_xfer_ctx *c,
4158                 unsigned int len)
4159 {
4160         if (dcbp_get_code(p) == RLE_VLI_Bits)
4161                 return recv_bm_rle_bits(mdev, p, c, len - sizeof(*p));
4162
4163         /* other variants had been implemented for evaluation,
4164          * but have been dropped as this one turned out to be "best"
4165          * during all our tests. */
4166
4167         dev_err(DEV, "receive_bitmap_c: unknown encoding %u\n", p->encoding);
4168         conn_request_state(mdev->tconn, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
4169         return -EIO;
4170 }
4171
4172 void INFO_bm_xfer_stats(struct drbd_conf *mdev,
4173                 const char *direction, struct bm_xfer_ctx *c)
4174 {
4175         /* what would it take to transfer it "plaintext" */
4176         unsigned int header_size = drbd_header_size(mdev->tconn);
4177         unsigned int data_size = DRBD_SOCKET_BUFFER_SIZE - header_size;
4178         unsigned int plain =
4179                 header_size * (DIV_ROUND_UP(c->bm_words, data_size) + 1) +
4180                 c->bm_words * sizeof(unsigned long);
4181         unsigned int total = c->bytes[0] + c->bytes[1];
4182         unsigned int r;
4183
4184         /* total cannot be zero, but just in case: */
4185         if (total == 0)
4186                 return;
4187
4188         /* don't report if not compressed */
4189         if (total >= plain)
4190                 return;
4191
4192         /* total < plain; still guard against 32 bit overflow */
4193         r = (total > UINT_MAX/1000) ? (total / (plain/1000))
4194                                     : (1000 * total / plain);
4195
4196         if (r > 1000)
4197                 r = 1000;
4198
4199         r = 1000 - r;
4200         dev_info(DEV, "%s bitmap stats [Bytes(packets)]: plain %u(%u), RLE %u(%u), "
4201              "total %u; compression: %u.%u%%\n",
4202                         direction,
4203                         c->bytes[1], c->packets[1],
4204                         c->bytes[0], c->packets[0],
4205                         total, r/10, r % 10);
4206 }
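
/*
 * Editorial sketch, compiled out below: the ratio above is computed in
 * tenths of a percent, r = 1000 - 1000 * total / plain; the ternary only
 * avoids 32 bit overflow of 1000 * total.  With the example numbers here,
 * plain = 100000 and total = 12345 give r = 877, printed as 87.7%.
 */
#if 0
#include <stdio.h>
#include <limits.h>

int main(void)
{
        unsigned int plain = 100000, total = 12345, r;

        r = (total > UINT_MAX / 1000) ? (total / (plain / 1000))
                                      : (1000 * total / plain);
        if (r > 1000)
                r = 1000;
        r = 1000 - r;
        printf("compression: %u.%u%%\n", r / 10, r % 10);
        return 0;
}
#endif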
4207
4208 /* Since we are processing the bitfield from lower addresses to higher,
4209    it does not matter whether we process it in 32 bit or 64 bit chunks,
4210    as long as it is little endian. (Understand it as a byte stream,
4211    beginning with the lowest byte...) If we used big endian, we would
4212    need to process it from the highest address to the lowest in order
4213    to be agnostic to the 32 vs 64 bit issue.
4214
4215    Returns 0 on success, a negative error code otherwise. */
4216 static int receive_bitmap(struct drbd_tconn *tconn, struct packet_info *pi)
4217 {
4218         struct drbd_conf *mdev;
4219         struct bm_xfer_ctx c;
4220         int err;
4221
4222         mdev = vnr_to_mdev(tconn, pi->vnr);
4223         if (!mdev)
4224                 return -EIO;
4225
4226         drbd_bm_lock(mdev, "receive bitmap", BM_LOCKED_SET_ALLOWED);
4227         /* you are supposed to send additional out-of-sync information
4228          * if you actually set bits during this phase */
4229
4230         c = (struct bm_xfer_ctx) {
4231                 .bm_bits = drbd_bm_bits(mdev),
4232                 .bm_words = drbd_bm_words(mdev),
4233         };
4234
4235         for (;;) {
4236                 if (pi->cmd == P_BITMAP)
4237                         err = receive_bitmap_plain(mdev, pi->size, pi->data, &c);
4238                 else if (pi->cmd == P_COMPRESSED_BITMAP) {
4239                         /* MAYBE: sanity check that we speak proto >= 90,
4240                          * and the feature is enabled! */
4241                         struct p_compressed_bm *p = pi->data;
4242
4243                         if (pi->size > DRBD_SOCKET_BUFFER_SIZE - drbd_header_size(tconn)) {
4244                                 dev_err(DEV, "ReportCBitmap packet too large\n");
4245                                 err = -EIO;
4246                                 goto out;
4247                         }
4248                         if (pi->size <= sizeof(*p)) {
4249                                 dev_err(DEV, "ReportCBitmap packet too small (l:%u)\n", pi->size);
4250                                 err = -EIO;
4251                                 goto out;
4252                         }
4253                         err = drbd_recv_all(mdev->tconn, p, pi->size);
4254                         if (err)
4255                                 goto out;
4256                         err = decode_bitmap_c(mdev, p, &c, pi->size);
4257                 } else {
4258                         dev_warn(DEV, "receive_bitmap: cmd neither ReportBitMap nor ReportCBitMap (is 0x%x)\n", pi->cmd);
4259                         err = -EIO;
4260                         goto out;
4261                 }
4262
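                /* index 1 counts plain bitmap packets, index 0 compressed ones */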
4263                 c.packets[pi->cmd == P_BITMAP]++;
4264                 c.bytes[pi->cmd == P_BITMAP] += drbd_header_size(tconn) + pi->size;
4265
4266                 if (err <= 0) {
4267                         if (err < 0)
4268                                 goto out;
4269                         break;
4270                 }
4271                 err = drbd_recv_header(mdev->tconn, pi);
4272                 if (err)
4273                         goto out;
4274         }
4275
4276         INFO_bm_xfer_stats(mdev, "receive", &c);
4277
4278         if (mdev->state.conn == C_WF_BITMAP_T) {
4279                 enum drbd_state_rv rv;
4280
4281                 err = drbd_send_bitmap(mdev);
4282                 if (err)
4283                         goto out;
4284                 /* Omit CS_ORDERED with this state transition to avoid deadlocks. */
4285                 rv = _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
4286                 D_ASSERT(rv == SS_SUCCESS);
4287         } else if (mdev->state.conn != C_WF_BITMAP_S) {
4288                 /* admin may have requested C_DISCONNECTING,
4289                  * other threads may have noticed network errors */
4290                 dev_info(DEV, "unexpected cstate (%s) in receive_bitmap\n",
4291                     drbd_conn_str(mdev->state.conn));
4292         }
4293         err = 0;
4294
4295  out:
4296         drbd_bm_unlock(mdev);
4297         if (!err && mdev->state.conn == C_WF_BITMAP_S)
4298                 drbd_start_resync(mdev, C_SYNC_SOURCE);
4299         return err;
4300 }
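
/*
 * Editorial sketch, compiled out below: the endianness note above
 * receive_bitmap() can be checked directly.  On a little-endian host,
 * viewing the same byte stream as 32 bit or as 64 bit words yields the
 * same bit at every bit offset; the check fails on big endian.
 */
#if 0
#include <stdint.h>
#include <string.h>
#include <stdio.h>

int main(void)
{
        unsigned char stream[8] = { 0x01, 0x80, 0, 0, 0, 0, 0x10, 0 };
        uint32_t w32[2];
        uint64_t w64;
        unsigned int bit;
        int same = 1;

        memcpy(w32, stream, 8);
        memcpy(&w64, stream, 8);
        for (bit = 0; bit < 64; bit++) {
                int b32 = (w32[bit / 32] >> (bit % 32)) & 1;
                int b64 = (int)((w64 >> bit) & 1);

                if (b32 != b64)
                        same = 0;
        }
        printf("32 and 64 bit views agree: %s\n", same ? "yes" : "no");
        return 0;
}
#endif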
4301
4302 static int receive_skip(struct drbd_tconn *tconn, struct packet_info *pi)
4303 {
4304         conn_warn(tconn, "skipping unknown optional packet type %d, l: %d!\n",
4305                  pi->cmd, pi->size);
4306
4307         return ignore_remaining_packet(tconn, pi);
4308 }
4309
4310 static int receive_UnplugRemote(struct drbd_tconn *tconn, struct packet_info *pi)
4311 {
4312         /* Make sure we've acked all the TCP data associated
4313          * with the data requests being unplugged */
4314         drbd_tcp_quickack(tconn->data.socket);
4315
4316         return 0;
4317 }
4318
4319 static int receive_out_of_sync(struct drbd_tconn *tconn, struct packet_info *pi)
4320 {
4321         struct drbd_conf *mdev;
4322         struct p_block_desc *p = pi->data;
4323
4324         mdev = vnr_to_mdev(tconn, pi->vnr);
4325         if (!mdev)
4326                 return -EIO;
4327
4328         switch (mdev->state.conn) {
4329         case C_WF_SYNC_UUID:
4330         case C_WF_BITMAP_T:
4331         case C_BEHIND:
4332                 break;
4333         default:
4334                 dev_err(DEV, "ASSERT FAILED cstate = %s, expected: WFSyncUUID|WFBitMapT|Behind\n",
4335                                 drbd_conn_str(mdev->state.conn));
4336         }
4337
4338         drbd_set_out_of_sync(mdev, be64_to_cpu(p->sector), be32_to_cpu(p->blksize));
4339
4340         return 0;
4341 }
4342
4343 struct data_cmd {
4344         int expect_payload;
4345         size_t pkt_size;
4346         int (*fn)(struct drbd_tconn *, struct packet_info *);
4347 };
4348
4349 static struct data_cmd drbd_cmd_handler[] = {
4350         [P_DATA]            = { 1, sizeof(struct p_data), receive_Data },
4351         [P_DATA_REPLY]      = { 1, sizeof(struct p_data), receive_DataReply },
4352         [P_RS_DATA_REPLY]   = { 1, sizeof(struct p_data), receive_RSDataReply },
4353         [P_BARRIER]         = { 0, sizeof(struct p_barrier), receive_Barrier },
4354         [P_BITMAP]          = { 1, 0, receive_bitmap },
4355         [P_COMPRESSED_BITMAP] = { 1, 0, receive_bitmap },
4356         [P_UNPLUG_REMOTE]   = { 0, 0, receive_UnplugRemote },
4357         [P_DATA_REQUEST]    = { 0, sizeof(struct p_block_req), receive_DataRequest },
4358         [P_RS_DATA_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
4359         [P_SYNC_PARAM]      = { 1, 0, receive_SyncParam },
4360         [P_SYNC_PARAM89]    = { 1, 0, receive_SyncParam },
4361         [P_PROTOCOL]        = { 1, sizeof(struct p_protocol), receive_protocol },
4362         [P_UUIDS]           = { 0, sizeof(struct p_uuids), receive_uuids },
4363         [P_SIZES]           = { 0, sizeof(struct p_sizes), receive_sizes },
4364         [P_STATE]           = { 0, sizeof(struct p_state), receive_state },
4365         [P_STATE_CHG_REQ]   = { 0, sizeof(struct p_req_state), receive_req_state },
4366         [P_SYNC_UUID]       = { 0, sizeof(struct p_rs_uuid), receive_sync_uuid },
4367         [P_OV_REQUEST]      = { 0, sizeof(struct p_block_req), receive_DataRequest },
4368         [P_OV_REPLY]        = { 1, sizeof(struct p_block_req), receive_DataRequest },
4369         [P_CSUM_RS_REQUEST] = { 1, sizeof(struct p_block_req), receive_DataRequest },
4370         [P_DELAY_PROBE]     = { 0, sizeof(struct p_delay_probe93), receive_skip },
4371         [P_OUT_OF_SYNC]     = { 0, sizeof(struct p_block_desc), receive_out_of_sync },
4372         [P_CONN_ST_CHG_REQ] = { 0, sizeof(struct p_req_state), receive_req_conn_state },
4373         [P_PROTOCOL_UPDATE] = { 1, sizeof(struct p_protocol), receive_protocol },
4374 };
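
/*
 * Editorial sketch, compiled out below: the table above is a designated
 * initializer dispatch table indexed by packet type, with holes left NULL.
 * A minimal self-contained version of the lookup drbdd() performs; command
 * names and handlers here are hypothetical.
 */
#if 0
#include <stdio.h>

enum cmd { CMD_HELLO, CMD_BYE, CMD_MAX_ };

static int do_hello(void)
{
        return printf("hello\n");
}

static int (*const handlers[CMD_MAX_])(void) = {
        [CMD_HELLO] = do_hello, /* CMD_BYE left NULL: unexpected here */
};

static int dispatch(unsigned int cmd)
{
        if (cmd >= CMD_MAX_ || !handlers[cmd])
                return -1;      /* unknown or unexpected packet */
        return handlers[cmd]();
}

int main(void)
{
        printf("%d %d\n", dispatch(CMD_HELLO), dispatch(CMD_BYE));
        return 0;
}
#endif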
4375
4376 static void drbdd(struct drbd_tconn *tconn)
4377 {
4378         struct packet_info pi;
4379         size_t shs; /* sub header size */
4380         int err;
4381
4382         while (get_t_state(&tconn->receiver) == RUNNING) {
4383                 struct data_cmd *cmd;
4384
4385                 drbd_thread_current_set_cpu(&tconn->receiver);
4386                 if (drbd_recv_header(tconn, &pi))
4387                         goto err_out;
4388
4389                 if (unlikely(pi.cmd >= ARRAY_SIZE(drbd_cmd_handler) ||
4390                              !drbd_cmd_handler[pi.cmd].fn)) {
4391                         conn_err(tconn, "Unexpected data packet %s (0x%04x)\n",
4392                                  cmdname(pi.cmd), pi.cmd);
4393                         goto err_out;
4394                 }
4395                 cmd = &drbd_cmd_handler[pi.cmd];
4395
4396                 shs = cmd->pkt_size;
4397                 if (pi.size > shs && !cmd->expect_payload) {
4398                         conn_err(tconn, "No payload expected %s l:%d\n",
4399                                  cmdname(pi.cmd), pi.size);
4400                         goto err_out;
4401                 }
4402
4403                 if (shs) {
4404                         err = drbd_recv_all_warn(tconn, pi.data, shs);
4405                         if (err)
4406                                 goto err_out;
4407                         pi.size -= shs;
4408                 }
4409
4410                 err = cmd->fn(tconn, &pi);
4411                 if (err) {
4412                         conn_err(tconn, "error receiving %s, e: %d l: %d!\n",
4413                                  cmdname(pi.cmd), err, pi.size);
4414                         goto err_out;
4415                 }
4416         }
4417         return;
4418
4419     err_out:
4420         conn_request_state(tconn, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
4421 }
4422
4423 void conn_flush_workqueue(struct drbd_tconn *tconn)
4424 {
4425         struct drbd_wq_barrier barr;
4426
4427         barr.w.cb = w_prev_work_done;
4428         barr.w.tconn = tconn;
4429         init_completion(&barr.done);
4430         drbd_queue_work(&tconn->sender_work, &barr.w);
4431         wait_for_completion(&barr.done);
4432 }
4433
4434 static void conn_disconnect(struct drbd_tconn *tconn)
4435 {
4436         struct drbd_conf *mdev;
4437         enum drbd_conns oc;
4438         int vnr;
4439
4440         if (tconn->cstate == C_STANDALONE)
4441                 return;
4442
4443         /* We are about to start the cleanup after connection loss.
4444          * Make sure drbd_make_request knows about that.
4445          * Usually we should be in some network failure state already,
4446          * but just in case we are not, we fix it up here.
4447          */
4448         conn_request_state(tconn, NS(conn, C_NETWORK_FAILURE), CS_HARD);
4449
4450         /* asender does not clean up anything. it must not interfere, either */
4451         drbd_thread_stop(&tconn->asender);
4452         drbd_free_sock(tconn);
4453
4454         rcu_read_lock();
4455         idr_for_each_entry(&tconn->volumes, mdev, vnr) {
4456                 kref_get(&mdev->kref);
4457                 rcu_read_unlock();
4458                 drbd_disconnected(mdev);
4459                 kref_put(&mdev->kref, &drbd_minor_destroy);
4460                 rcu_read_lock();
4461         }
4462         rcu_read_unlock();
4463
4464         if (!list_empty(&tconn->current_epoch->list))
4465                 conn_err(tconn, "ASSERTION FAILED: tconn->current_epoch->list not empty\n");
4466         /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
4467         atomic_set(&tconn->current_epoch->epoch_size, 0);
4468         tconn->send.seen_any_write_yet = false;
4469
4470         conn_info(tconn, "Connection closed\n");
4471
4472         if (conn_highest_role(tconn) == R_PRIMARY && conn_highest_pdsk(tconn) >= D_UNKNOWN)
4473                 conn_try_outdate_peer_async(tconn);
4474
4475         spin_lock_irq(&tconn->req_lock);
4476         oc = tconn->cstate;
4477         if (oc >= C_UNCONNECTED)
4478                 _conn_request_state(tconn, NS(conn, C_UNCONNECTED), CS_VERBOSE);
4479
4480         spin_unlock_irq(&tconn->req_lock);
4481
4482         if (oc == C_DISCONNECTING)
4483                 conn_request_state(tconn, NS(conn, C_STANDALONE), CS_VERBOSE | CS_HARD);
4484 }
4485
4486 static int drbd_disconnected(struct drbd_conf *mdev)
4487 {
4488         unsigned int i;
4489
4490         /* wait for current activity to cease. */
4491         spin_lock_irq(&mdev->tconn->req_lock);
4492         _drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
4493         _drbd_wait_ee_list_empty(mdev, &mdev->sync_ee);
4494         _drbd_wait_ee_list_empty(mdev, &mdev->read_ee);
4495         spin_unlock_irq(&mdev->tconn->req_lock);
4496
4497         /* We do not have data structures that would allow us to
4498          * get the rs_pending_cnt down to 0 again.
4499          *  * On C_SYNC_TARGET we do not have any data structures describing
4500          *    the pending RSDataRequest's we have sent.
4501          *  * On C_SYNC_SOURCE there is no data structure that tracks
4502          *    the P_RS_DATA_REPLY blocks that we sent to the SyncTarget.
4503          *  And no, it is not the sum of the reference counts in the
4504          *  resync_LRU. The resync_LRU tracks the whole operation including
4505          *  the disk-IO, while the rs_pending_cnt only tracks the blocks
4506          *  on the fly. */
4507         drbd_rs_cancel_all(mdev);
4508         mdev->rs_total = 0;
4509         mdev->rs_failed = 0;
4510         atomic_set(&mdev->rs_pending_cnt, 0);
4511         wake_up(&mdev->misc_wait);
4512
4513         del_timer_sync(&mdev->resync_timer);
4514         resync_timer_fn((unsigned long)mdev);
4515
4516         /* wait for all w_e_end_data_req, w_e_end_rsdata_req, w_send_barrier,
4517          * w_make_resync_request etc. which may still be on the worker queue
4518          * to be "canceled" */
4519         drbd_flush_workqueue(mdev);
4520
4521         drbd_finish_peer_reqs(mdev);
4522
4523         /* This second workqueue flush is necessary, since drbd_finish_peer_reqs()
4524            might have queued work again. The flush before drbd_finish_peer_reqs() is
4525            necessary to reclaim net_ee in drbd_finish_peer_reqs(). */
4526         drbd_flush_workqueue(mdev);
4527
4528         /* need to do it again, drbd_finish_peer_reqs() may have populated it
4529          * again via drbd_try_clear_on_disk_bm(). */
4530         drbd_rs_cancel_all(mdev);
4531
4532         kfree(mdev->p_uuid);
4533         mdev->p_uuid = NULL;
4534
4535         if (!drbd_suspended(mdev))
4536                 tl_clear(mdev->tconn);
4537
4538         drbd_md_sync(mdev);
4539
4540         /* serialize with bitmap writeout triggered by the state change,
4541          * if any. */
4542         wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
4543
4544         /* tcp_close and release of sendpage pages can be deferred.  I don't
4545          * want to use SO_LINGER, because apparently it can be deferred for
4546          * more than 20 seconds (longest time I checked).
4547          *
4548          * Actually we don't care for exactly when the network stack does its
4549          * put_page(), but release our reference on these pages right here.
4550          */
4551         i = drbd_free_peer_reqs(mdev, &mdev->net_ee);
4552         if (i)
4553                 dev_info(DEV, "net_ee not empty, killed %u entries\n", i);
4554         i = atomic_read(&mdev->pp_in_use_by_net);
4555         if (i)
4556                 dev_info(DEV, "pp_in_use_by_net = %d, expected 0\n", i);
4557         i = atomic_read(&mdev->pp_in_use);
4558         if (i)
4559                 dev_info(DEV, "pp_in_use = %d, expected 0\n", i);
4560
4561         D_ASSERT(list_empty(&mdev->read_ee));
4562         D_ASSERT(list_empty(&mdev->active_ee));
4563         D_ASSERT(list_empty(&mdev->sync_ee));
4564         D_ASSERT(list_empty(&mdev->done_ee));
4565
4566         return 0;
4567 }
4568
4569 /*
4570  * We support PRO_VERSION_MIN to PRO_VERSION_MAX. The protocol version
4571  * we can agree on is stored in agreed_pro_version.
4572  *
4573  * The feature flags and the reserved array should leave enough room for
4574  * future enhancements of the handshake protocol and possible plugins...
4575  *
4576  * For now they are expected to be zero, but are ignored.
4577  */
4578 static int drbd_send_features(struct drbd_tconn *tconn)
4579 {
4580         struct drbd_socket *sock;
4581         struct p_connection_features *p;
4582
4583         sock = &tconn->data;
4584         p = conn_prepare_command(tconn, sock);
4585         if (!p)
4586                 return -EIO;
4587         memset(p, 0, sizeof(*p));
4588         p->protocol_min = cpu_to_be32(PRO_VERSION_MIN);
4589         p->protocol_max = cpu_to_be32(PRO_VERSION_MAX);
4590         return conn_send_command(tconn, sock, P_CONNECTION_FEATURES, sizeof(*p), NULL, 0);
4591 }
4592
4593 /*
4594  * return values:
4595  *   1 yes, we have a valid connection
4596  *   0 oops, did not work out, please try again
4597  *  -1 peer talks different language,
4598  *     no point in trying again, please go standalone.
4599  */
4600 static int drbd_do_features(struct drbd_tconn *tconn)
4601 {
4602         /* ASSERT current == tconn->receiver ... */
4603         struct p_connection_features *p;
4604         const int expect = sizeof(struct p_connection_features);
4605         struct packet_info pi;
4606         int err;
4607
4608         err = drbd_send_features(tconn);
4609         if (err)
4610                 return 0;
4611
4612         err = drbd_recv_header(tconn, &pi);
4613         if (err)
4614                 return 0;
4615
4616         if (pi.cmd != P_CONNECTION_FEATURES) {
4617                 conn_err(tconn, "expected ConnectionFeatures packet, received: %s (0x%04x)\n",
4618                          cmdname(pi.cmd), pi.cmd);
4619                 return -1;
4620         }
4621
4622         if (pi.size != expect) {
4623                 conn_err(tconn, "expected ConnectionFeatures length: %u, received: %u\n",
4624                      expect, pi.size);
4625                 return -1;
4626         }
4627
4628         p = pi.data;
4629         err = drbd_recv_all_warn(tconn, p, expect);
4630         if (err)
4631                 return 0;
4632
4633         p->protocol_min = be32_to_cpu(p->protocol_min);
4634         p->protocol_max = be32_to_cpu(p->protocol_max);
4635         if (p->protocol_max == 0)
4636                 p->protocol_max = p->protocol_min;
4637
4638         if (PRO_VERSION_MAX < p->protocol_min ||
4639             PRO_VERSION_MIN > p->protocol_max)
4640                 goto incompat;
4641
4642         tconn->agreed_pro_version = min_t(int, PRO_VERSION_MAX, p->protocol_max);
4643
4644         conn_info(tconn, "Handshake successful: "
4645              "Agreed network protocol version %d\n", tconn->agreed_pro_version);
4646
4647         return 1;
4648
4649  incompat:
4650         conn_err(tconn, "incompatible DRBD dialects: "
4651             "I support %d-%d, peer supports %d-%d\n",
4652             PRO_VERSION_MIN, PRO_VERSION_MAX,
4653             p->protocol_min, p->protocol_max);
4654         return -1;
4655 }
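
/*
 * Editorial sketch, compiled out below: the negotiation above verifies
 * that the two version ranges overlap and then agrees on the minimum of
 * the two maxima.  MY_MIN/MY_MAX and the peer values are illustrative.
 */
#if 0
#include <stdio.h>

#define MY_MIN 86
#define MY_MAX 101

static int agree(int peer_min, int peer_max)
{
        if (MY_MAX < peer_min || MY_MIN > peer_max)
                return -1;                              /* no overlap */
        return MY_MAX < peer_max ? MY_MAX : peer_max;   /* min of maxima */
}

int main(void)
{
        printf("%d\n", agree(86, 96));          /* -> 96 */
        printf("%d\n", agree(102, 110));        /* -> -1, incompatible */
        return 0;
}
#endif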
4656
4657 #if !defined(CONFIG_CRYPTO_HMAC) && !defined(CONFIG_CRYPTO_HMAC_MODULE)
4658 static int drbd_do_auth(struct drbd_tconn *tconn)
4659 {
4660         conn_err(tconn, "This kernel was built without CONFIG_CRYPTO_HMAC.\n");
4661         conn_err(tconn, "You need to disable 'cram-hmac-alg' in drbd.conf.\n");
4662         return -1;
4663 }
4664 #else
4665 #define CHALLENGE_LEN 64
4666
4667 /* Return value:
4668         1 - auth succeeded,
4669         0 - failed, try again (network error),
4670         -1 - auth failed, don't try again.
4671 */
4672
4673 static int drbd_do_auth(struct drbd_tconn *tconn)
4674 {
4675         struct drbd_socket *sock;
4676         char my_challenge[CHALLENGE_LEN];  /* 64 Bytes... */
4677         struct scatterlist sg;
4678         char *response = NULL;
4679         char *right_response = NULL;
4680         char *peers_ch = NULL;
4681         unsigned int key_len;
4682         char secret[SHARED_SECRET_MAX]; /* 64 byte */
4683         unsigned int resp_size;
4684         struct hash_desc desc;
4685         struct packet_info pi;
4686         struct net_conf *nc;
4687         int err, rv;
4688
4689         /* FIXME: Put the challenge/response into the preallocated socket buffer.  */
4690
4691         rcu_read_lock();
4692         nc = rcu_dereference(tconn->net_conf);
4693         key_len = strlen(nc->shared_secret);
4694         memcpy(secret, nc->shared_secret, key_len);
4695         rcu_read_unlock();
4696
4697         desc.tfm = tconn->cram_hmac_tfm;
4698         desc.flags = 0;
4699
4700         rv = crypto_hash_setkey(tconn->cram_hmac_tfm, (u8 *)secret, key_len);
4701         if (rv) {
4702                 conn_err(tconn, "crypto_hash_setkey() failed with %d\n", rv);
4703                 rv = -1;
4704                 goto fail;
4705         }
4706
4707         get_random_bytes(my_challenge, CHALLENGE_LEN);
4708
4709         sock = &tconn->data;
4710         if (!conn_prepare_command(tconn, sock)) {
4711                 rv = 0;
4712                 goto fail;
4713         }
4714         rv = !conn_send_command(tconn, sock, P_AUTH_CHALLENGE, 0,
4715                                 my_challenge, CHALLENGE_LEN);
4716         if (!rv)
4717                 goto fail;
4718
4719         err = drbd_recv_header(tconn, &pi);
4720         if (err) {
4721                 rv = 0;
4722                 goto fail;
4723         }
4724
4725         if (pi.cmd != P_AUTH_CHALLENGE) {
4726                 conn_err(tconn, "expected AuthChallenge packet, received: %s (0x%04x)\n",
4727                          cmdname(pi.cmd), pi.cmd);
4728                 rv = 0;
4729                 goto fail;
4730         }
4731
4732         if (pi.size > CHALLENGE_LEN * 2) {
4733                 conn_err(tconn, "AuthChallenge payload too big.\n");
4734                 rv = -1;
4735                 goto fail;
4736         }
4737
4738         peers_ch = kmalloc(pi.size, GFP_NOIO);
4739         if (peers_ch == NULL) {
4740                 conn_err(tconn, "kmalloc of peers_ch failed\n");
4741                 rv = -1;
4742                 goto fail;
4743         }
4744
4745         err = drbd_recv_all_warn(tconn, peers_ch, pi.size);
4746         if (err) {
4747                 rv = 0;
4748                 goto fail;
4749         }
4750
4751         resp_size = crypto_hash_digestsize(tconn->cram_hmac_tfm);
4752         response = kmalloc(resp_size, GFP_NOIO);
4753         if (response == NULL) {
4754                 conn_err(tconn, "kmalloc of response failed\n");
4755                 rv = -1;
4756                 goto fail;
4757         }
4758
4759         sg_init_table(&sg, 1);
4760         sg_set_buf(&sg, peers_ch, pi.size);
4761
4762         rv = crypto_hash_digest(&desc, &sg, sg.length, response);
4763         if (rv) {
4764                 conn_err(tconn, "crypto_hash_digest() failed with %d\n", rv);
4765                 rv = -1;
4766                 goto fail;
4767         }
4768
4769         if (!conn_prepare_command(tconn, sock)) {
4770                 rv = 0;
4771                 goto fail;
4772         }
4773         rv = !conn_send_command(tconn, sock, P_AUTH_RESPONSE, 0,
4774                                 response, resp_size);
4775         if (!rv)
4776                 goto fail;
4777
4778         err = drbd_recv_header(tconn, &pi);
4779         if (err) {
4780                 rv = 0;
4781                 goto fail;
4782         }
4783
4784         if (pi.cmd != P_AUTH_RESPONSE) {
4785                 conn_err(tconn, "expected AuthResponse packet, received: %s (0x%04x)\n",
4786                          cmdname(pi.cmd), pi.cmd);
4787                 rv = 0;
4788                 goto fail;
4789         }
4790
4791         if (pi.size != resp_size) {
4792                 conn_err(tconn, "AuthResponse payload of wrong size\n");
4793                 rv = 0;
4794                 goto fail;
4795         }
4796
4797         err = drbd_recv_all_warn(tconn, response, resp_size);
4798         if (err) {
4799                 rv = 0;
4800                 goto fail;
4801         }
4802
4803         right_response = kmalloc(resp_size, GFP_NOIO);
4804         if (right_response == NULL) {
4805                 conn_err(tconn, "kmalloc of right_response failed\n");
4806                 rv = -1;
4807                 goto fail;
4808         }
4809
4810         sg_set_buf(&sg, my_challenge, CHALLENGE_LEN);
4811
4812         rv = crypto_hash_digest(&desc, &sg, sg.length, right_response);
4813         if (rv) {
4814                 conn_err(tconn, "crypto_hash_digest() failed with %d\n", rv);
4815                 rv = -1;
4816                 goto fail;
4817         }
4818
4819         rv = !memcmp(response, right_response, resp_size);
4820
4821         if (rv)
4822                 conn_info(tconn, "Peer authenticated using %d bytes of HMAC\n",
4823                           resp_size);
4824         else
4825                 rv = -1;
4826
4827  fail:
4828         kfree(peers_ch);
4829         kfree(response);
4830         kfree(right_response);
4831
4832         return rv;
4833 }
4834 #endif
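
/*
 * Editorial sketch, compiled out below: drbd_do_auth() above is a
 * symmetric challenge-response, where each side must return
 * HMAC(secret, peer's challenge).  This userspace model uses OpenSSL's
 * HMAC() (an assumption of the sketch; the kernel code uses the
 * crypto_hash API) and folds both roles into one process.  Link with
 * -lcrypto.
 */
#if 0
#include <stdio.h>
#include <string.h>
#include <openssl/hmac.h>
#include <openssl/rand.h>

#define CHALLENGE_LEN 64

int main(void)
{
        const unsigned char secret[] = "shared-secret";
        unsigned char challenge[CHALLENGE_LEN];
        unsigned char response[EVP_MAX_MD_SIZE], expected[EVP_MAX_MD_SIZE];
        unsigned int resp_len, exp_len;

        RAND_bytes(challenge, sizeof(challenge));

        /* the "peer" computes the response to our challenge ... */
        HMAC(EVP_sha1(), secret, sizeof(secret) - 1,
             challenge, sizeof(challenge), response, &resp_len);

        /* ... and we verify it against our own computation */
        HMAC(EVP_sha1(), secret, sizeof(secret) - 1,
             challenge, sizeof(challenge), expected, &exp_len);

        printf("auth %s\n", resp_len == exp_len &&
               memcmp(response, expected, resp_len) == 0 ? "ok" : "failed");
        return 0;
}
#endif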
4835
4836 int drbdd_init(struct drbd_thread *thi)
4837 {
4838         struct drbd_tconn *tconn = thi->tconn;
4839         int h;
4840
4841         conn_info(tconn, "receiver (re)started\n");
4842
4843         do {
4844                 h = conn_connect(tconn);
4845                 if (h == 0) {
4846                         conn_disconnect(tconn);
4847                         schedule_timeout_interruptible(HZ);
4848                 }
4849                 if (h == -1) {
4850                         conn_warn(tconn, "Discarding network configuration.\n");
4851                         conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
4852                 }
4853         } while (h == 0);
4854
4855         if (h > 0)
4856                 drbdd(tconn);
4857
4858         conn_disconnect(tconn);
4859
4860         conn_info(tconn, "receiver terminated\n");
4861         return 0;
4862 }
4863
4864 /* ********* acknowledge sender ******** */
4865
4866 static int got_conn_RqSReply(struct drbd_tconn *tconn, struct packet_info *pi)
4867 {
4868         struct p_req_state_reply *p = pi->data;
4869         int retcode = be32_to_cpu(p->retcode);
4870
4871         if (retcode >= SS_SUCCESS) {
4872                 set_bit(CONN_WD_ST_CHG_OKAY, &tconn->flags);
4873         } else {
4874                 set_bit(CONN_WD_ST_CHG_FAIL, &tconn->flags);
4875                 conn_err(tconn, "Requested state change failed by peer: %s (%d)\n",
4876                          drbd_set_st_err_str(retcode), retcode);
4877         }
4878         wake_up(&tconn->ping_wait);
4879
4880         return 0;
4881 }
4882
4883 static int got_RqSReply(struct drbd_tconn *tconn, struct packet_info *pi)
4884 {
4885         struct drbd_conf *mdev;
4886         struct p_req_state_reply *p = pi->data;
4887         int retcode = be32_to_cpu(p->retcode);
4888
4889         mdev = vnr_to_mdev(tconn, pi->vnr);
4890         if (!mdev)
4891                 return -EIO;
4892
4893         if (test_bit(CONN_WD_ST_CHG_REQ, &tconn->flags)) {
4894                 D_ASSERT(tconn->agreed_pro_version < 100);
4895                 return got_conn_RqSReply(tconn, pi);
4896         }
4897
4898         if (retcode >= SS_SUCCESS) {
4899                 set_bit(CL_ST_CHG_SUCCESS, &mdev->flags);
4900         } else {
4901                 set_bit(CL_ST_CHG_FAIL, &mdev->flags);
4902                 dev_err(DEV, "Requested state change failed by peer: %s (%d)\n",
4903                         drbd_set_st_err_str(retcode), retcode);
4904         }
4905         wake_up(&mdev->state_wait);
4906
4907         return 0;
4908 }
4909
4910 static int got_Ping(struct drbd_tconn *tconn, struct packet_info *pi)
4911 {
4912         return drbd_send_ping_ack(tconn);
4913 }
4915
4916 static int got_PingAck(struct drbd_tconn *tconn, struct packet_info *pi)
4917 {
4918         /* restore idle timeout */
4919         tconn->meta.socket->sk->sk_rcvtimeo = tconn->net_conf->ping_int * HZ;
4920         if (!test_and_set_bit(GOT_PING_ACK, &tconn->flags))
4921                 wake_up(&tconn->ping_wait);
4922
4923         return 0;
4924 }
4925
4926 static int got_IsInSync(struct drbd_tconn *tconn, struct packet_info *pi)
4927 {
4928         struct drbd_conf *mdev;
4929         struct p_block_ack *p = pi->data;
4930         sector_t sector = be64_to_cpu(p->sector);
4931         int blksize = be32_to_cpu(p->blksize);
4932
4933         mdev = vnr_to_mdev(tconn, pi->vnr);
4934         if (!mdev)
4935                 return -EIO;
4936
4937         D_ASSERT(mdev->tconn->agreed_pro_version >= 89);
4938
4939         update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4940
4941         if (get_ldev(mdev)) {
4942                 drbd_rs_complete_io(mdev, sector);
4943                 drbd_set_in_sync(mdev, sector, blksize);
4944                 /* rs_same_csums is supposed to count in units of BM_BLOCK_SIZE */
4945                 mdev->rs_same_csum += (blksize >> BM_BLOCK_SHIFT);
4946                 put_ldev(mdev);
4947         }
4948         dec_rs_pending(mdev);
4949         atomic_add(blksize >> 9, &mdev->rs_sect_in);
4950
4951         return 0;
4952 }
4953
4954 static int
4955 validate_req_change_req_state(struct drbd_conf *mdev, u64 id, sector_t sector,
4956                               struct rb_root *root, const char *func,
4957                               enum drbd_req_event what, bool missing_ok)
4958 {
4959         struct drbd_request *req;
4960         struct bio_and_error m;
4961
4962         spin_lock_irq(&mdev->tconn->req_lock);
4963         req = find_request(mdev, root, id, sector, missing_ok, func);
4964         if (unlikely(!req)) {
4965                 spin_unlock_irq(&mdev->tconn->req_lock);
4966                 return -EIO;
4967         }
4968         __req_mod(req, what, &m);
4969         spin_unlock_irq(&mdev->tconn->req_lock);
4970
4971         if (m.bio)
4972                 complete_master_bio(mdev, &m);
4973         return 0;
4974 }
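
/*
 * Editorial sketch, compiled out below: validate_req_change_req_state()
 * above follows a common shape: look the request up and apply the state
 * transition under the spinlock, but defer the potentially heavy
 * completion until the lock is dropped.  A generic pthread model; all
 * names are illustrative.
 */
#if 0
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int req_state;

static int advance_and_complete(int what)
{
        int need_completion;

        pthread_mutex_lock(&lock);
        req_state = what;               /* cheap transition under the lock */
        need_completion = (what == 2);
        pthread_mutex_unlock(&lock);

        if (need_completion)            /* heavy work with the lock dropped */
                printf("completing request\n");
        return 0;
}

int main(void)
{
        return advance_and_complete(2);
}
#endif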
4975
4976 static int got_BlockAck(struct drbd_tconn *tconn, struct packet_info *pi)
4977 {
4978         struct drbd_conf *mdev;
4979         struct p_block_ack *p = pi->data;
4980         sector_t sector = be64_to_cpu(p->sector);
4981         int blksize = be32_to_cpu(p->blksize);
4982         enum drbd_req_event what;
4983
4984         mdev = vnr_to_mdev(tconn, pi->vnr);
4985         if (!mdev)
4986                 return -EIO;
4987
4988         update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4989
4990         if (p->block_id == ID_SYNCER) {
4991                 drbd_set_in_sync(mdev, sector, blksize);
4992                 dec_rs_pending(mdev);
4993                 return 0;
4994         }
4995         switch (pi->cmd) {
4996         case P_RS_WRITE_ACK:
4997                 what = WRITE_ACKED_BY_PEER_AND_SIS;
4998                 break;
4999         case P_WRITE_ACK:
5000                 what = WRITE_ACKED_BY_PEER;
5001                 break;
5002         case P_RECV_ACK:
5003                 what = RECV_ACKED_BY_PEER;
5004                 break;
5005         case P_SUPERSEDED:
5006                 what = CONFLICT_RESOLVED;
5007                 break;
5008         case P_RETRY_WRITE:
5009                 what = POSTPONE_WRITE;
5010                 break;
5011         default:
5012                 BUG();
5013         }
5014
5015         return validate_req_change_req_state(mdev, p->block_id, sector,
5016                                              &mdev->write_requests, __func__,
5017                                              what, false);
5018 }
5019
5020 static int got_NegAck(struct drbd_tconn *tconn, struct packet_info *pi)
5021 {
5022         struct drbd_conf *mdev;
5023         struct p_block_ack *p = pi->data;
5024         sector_t sector = be64_to_cpu(p->sector);
5025         int size = be32_to_cpu(p->blksize);
5026         int err;
5027
5028         mdev = vnr_to_mdev(tconn, pi->vnr);
5029         if (!mdev)
5030                 return -EIO;
5031
5032         update_peer_seq(mdev, be32_to_cpu(p->seq_num));
5033
5034         if (p->block_id == ID_SYNCER) {
5035                 dec_rs_pending(mdev);
5036                 drbd_rs_failed_io(mdev, sector, size);
5037                 return 0;
5038         }
5039
5040         err = validate_req_change_req_state(mdev, p->block_id, sector,
5041                                             &mdev->write_requests, __func__,
5042                                             NEG_ACKED, true);
5043         if (err) {
5044                 /* Protocol A has no P_WRITE_ACKs, but has P_NEG_ACKs.
5045                    The master bio might already be completed, therefore the
5046                    request is no longer in the collision hash. */
5047                 /* In Protocol B we might already have got a P_RECV_ACK
5048                    but then get a P_NEG_ACK afterwards. */
5049                 drbd_set_out_of_sync(mdev, sector, size);
5050         }
5051         return 0;
5052 }
5053
5054 static int got_NegDReply(struct drbd_tconn *tconn, struct packet_info *pi)
5055 {
5056         struct drbd_conf *mdev;
5057         struct p_block_ack *p = pi->data;
5058         sector_t sector = be64_to_cpu(p->sector);
5059
5060         mdev = vnr_to_mdev(tconn, pi->vnr);
5061         if (!mdev)
5062                 return -EIO;
5063
5064         update_peer_seq(mdev, be32_to_cpu(p->seq_num));
5065
5066         dev_err(DEV, "Got NegDReply; Sector %llus, len %u.\n",
5067             (unsigned long long)sector, be32_to_cpu(p->blksize));
5068
5069         return validate_req_change_req_state(mdev, p->block_id, sector,
5070                                              &mdev->read_requests, __func__,
5071                                              NEG_ACKED, false);
5072 }
5073
5074 static int got_NegRSDReply(struct drbd_tconn *tconn, struct packet_info *pi)
5075 {
5076         struct drbd_conf *mdev;
5077         sector_t sector;
5078         int size;
5079         struct p_block_ack *p = pi->data;
5080
5081         mdev = vnr_to_mdev(tconn, pi->vnr);
5082         if (!mdev)
5083                 return -EIO;
5084
5085         sector = be64_to_cpu(p->sector);
5086         size = be32_to_cpu(p->blksize);
5087
5088         update_peer_seq(mdev, be32_to_cpu(p->seq_num));
5089
5090         dec_rs_pending(mdev);
5091
5092         if (get_ldev_if_state(mdev, D_FAILED)) {
5093                 drbd_rs_complete_io(mdev, sector);
5094                 switch (pi->cmd) {
5095                 case P_NEG_RS_DREPLY:
5096                         drbd_rs_failed_io(mdev, sector, size);
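                        /* fall through */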
5097                 case P_RS_CANCEL:
5098                         break;
5099                 default:
5100                         BUG();
5101                 }
5102                 put_ldev(mdev);
5103         }
5104
5105         return 0;
5106 }
5107
5108 static int got_BarrierAck(struct drbd_tconn *tconn, struct packet_info *pi)
5109 {
5110         struct p_barrier_ack *p = pi->data;
5111         struct drbd_conf *mdev;
5112         int vnr;
5113
5114         tl_release(tconn, p->barrier, be32_to_cpu(p->set_size));
5115
5116         rcu_read_lock();
5117         idr_for_each_entry(&tconn->volumes, mdev, vnr) {
5118                 if (mdev->state.conn == C_AHEAD &&
5119                     atomic_read(&mdev->ap_in_flight) == 0 &&
5120                     !test_and_set_bit(AHEAD_TO_SYNC_SOURCE, &mdev->flags)) {
5121                         mdev->start_resync_timer.expires = jiffies + HZ;
5122                         add_timer(&mdev->start_resync_timer);
5123                 }
5124         }
5125         rcu_read_unlock();
5126
5127         return 0;
5128 }
5129
5130 static int got_OVResult(struct drbd_tconn *tconn, struct packet_info *pi)
5131 {
5132         struct drbd_conf *mdev;
5133         struct p_block_ack *p = pi->data;
5134         struct drbd_work *w;
5135         sector_t sector;
5136         int size;
5137
5138         mdev = vnr_to_mdev(tconn, pi->vnr);
5139         if (!mdev)
5140                 return -EIO;
5141
5142         sector = be64_to_cpu(p->sector);
5143         size = be32_to_cpu(p->blksize);
5144
5145         update_peer_seq(mdev, be32_to_cpu(p->seq_num));
5146
5147         if (be64_to_cpu(p->block_id) == ID_OUT_OF_SYNC)
5148                 drbd_ov_out_of_sync_found(mdev, sector, size);
5149         else
5150                 ov_out_of_sync_print(mdev);
5151
5152         if (!get_ldev(mdev))
5153                 return 0;
5154
5155         drbd_rs_complete_io(mdev, sector);
5156         dec_rs_pending(mdev);
5157
5158         --mdev->ov_left;
5159
5160         /* let's advance progress step marks only for every other megabyte */
5161         if ((mdev->ov_left & 0x200) == 0x200)
5162                 drbd_advance_rs_marks(mdev, mdev->ov_left);
5163
5164         if (mdev->ov_left == 0) {
5165                 w = kmalloc(sizeof(*w), GFP_NOIO);
5166                 if (w) {
5167                         w->cb = w_ov_finished;
5168                         w->mdev = mdev;
5169                         drbd_queue_work(&mdev->tconn->sender_work, w);
5170                 } else {
5171                         dev_err(DEV, "kmalloc(w) failed.\n");
5172                         ov_out_of_sync_print(mdev);
5173                         drbd_resync_finished(mdev);
5174                 }
5175         }
5176         put_ldev(mdev);
5177         return 0;
5178 }
5179
5180 static int got_skip(struct drbd_tconn *tconn, struct packet_info *pi)
5181 {
5182         return 0;
5183 }
5184
5185 static int tconn_finish_peer_reqs(struct drbd_tconn *tconn)
5186 {
5187         struct drbd_conf *mdev;
5188         int vnr, not_empty = 0;
5189
5190         do {
5191                 clear_bit(SIGNAL_ASENDER, &tconn->flags);
5192                 flush_signals(current);
5193
5194                 rcu_read_lock();
5195                 idr_for_each_entry(&tconn->volumes, mdev, vnr) {
5196                         kref_get(&mdev->kref);
5197                         rcu_read_unlock();
5198                         if (drbd_finish_peer_reqs(mdev)) {
5199                                 kref_put(&mdev->kref, &drbd_minor_destroy);
5200                                 return 1;
5201                         }
5202                         kref_put(&mdev->kref, &drbd_minor_destroy);
5203                         rcu_read_lock();
5204                 }
5205                 set_bit(SIGNAL_ASENDER, &tconn->flags);
5206
5207                 spin_lock_irq(&tconn->req_lock);
5208                 idr_for_each_entry(&tconn->volumes, mdev, vnr) {
5209                         not_empty = !list_empty(&mdev->done_ee);
5210                         if (not_empty)
5211                                 break;
5212                 }
5213                 spin_unlock_irq(&tconn->req_lock);
5214                 rcu_read_unlock();
5215         } while (not_empty);
5216
5217         return 0;
5218 }
5219
5220 struct asender_cmd {
5221         size_t pkt_size;
5222         int (*fn)(struct drbd_tconn *tconn, struct packet_info *);
5223 };
5224
5225 static struct asender_cmd asender_tbl[] = {
5226         [P_PING]            = { 0, got_Ping },
5227         [P_PING_ACK]        = { 0, got_PingAck },
5228         [P_RECV_ACK]        = { sizeof(struct p_block_ack), got_BlockAck },
5229         [P_WRITE_ACK]       = { sizeof(struct p_block_ack), got_BlockAck },
5230         [P_RS_WRITE_ACK]    = { sizeof(struct p_block_ack), got_BlockAck },
5231         [P_SUPERSEDED]      = { sizeof(struct p_block_ack), got_BlockAck },
5232         [P_NEG_ACK]         = { sizeof(struct p_block_ack), got_NegAck },
5233         [P_NEG_DREPLY]      = { sizeof(struct p_block_ack), got_NegDReply },
5234         [P_NEG_RS_DREPLY]   = { sizeof(struct p_block_ack), got_NegRSDReply },
5235         [P_OV_RESULT]       = { sizeof(struct p_block_ack), got_OVResult },
5236         [P_BARRIER_ACK]     = { sizeof(struct p_barrier_ack), got_BarrierAck },
5237         [P_STATE_CHG_REPLY] = { sizeof(struct p_req_state_reply), got_RqSReply },
5238         [P_RS_IS_IN_SYNC]   = { sizeof(struct p_block_ack), got_IsInSync },
5239         [P_DELAY_PROBE]     = { sizeof(struct p_delay_probe93), got_skip },
5240         [P_RS_CANCEL]       = { sizeof(struct p_block_ack), got_NegRSDReply },
5241         [P_CONN_ST_CHG_REPLY] = { sizeof(struct p_req_state_reply), got_conn_RqSReply },
5242         [P_RETRY_WRITE]     = { sizeof(struct p_block_ack), got_BlockAck },
5243 };
5244
5245 int drbd_asender(struct drbd_thread *thi)
5246 {
5247         struct drbd_tconn *tconn = thi->tconn;
5248         struct asender_cmd *cmd = NULL;
5249         struct packet_info pi;
5250         int rv;
5251         void *buf    = tconn->meta.rbuf;
5252         int received = 0;
5253         unsigned int header_size = drbd_header_size(tconn);
5254         int expect   = header_size;
5255         bool ping_timeout_active = false;
5256         struct net_conf *nc;
5257         int ping_timeo, tcp_cork, ping_int;
5258         struct sched_param param = { .sched_priority = 2 };
5259
5260         rv = sched_setscheduler(current, SCHED_RR, &param);
5261         if (rv < 0)
5262                 conn_err(tconn, "drbd_asender: ERROR set priority, ret=%d\n", rv);
5263
5264         while (get_t_state(thi) == RUNNING) {
5265                 drbd_thread_current_set_cpu(thi);
5266
5267                 rcu_read_lock();
5268                 nc = rcu_dereference(tconn->net_conf);
5269                 ping_timeo = nc->ping_timeo;
5270                 tcp_cork = nc->tcp_cork;
5271                 ping_int = nc->ping_int;
5272                 rcu_read_unlock();
5273
5274                 if (test_and_clear_bit(SEND_PING, &tconn->flags)) {
5275                         if (drbd_send_ping(tconn)) {
5276                                 conn_err(tconn, "drbd_send_ping has failed\n");
5277                                 goto reconnect;
5278                         }
5279                         tconn->meta.socket->sk->sk_rcvtimeo = ping_timeo * HZ / 10;
5280                         ping_timeout_active = true;
5281                 }
5282
5283                 /* TODO: conditionally cork; it may hurt latency if we cork without
5284                    much to send */
5285                 if (tcp_cork)
5286                         drbd_tcp_cork(tconn->meta.socket);
5287                 if (tconn_finish_peer_reqs(tconn)) {
5288                         conn_err(tconn, "tconn_finish_peer_reqs() failed\n");
5289                         goto reconnect;
5290                 }
5291                 /* but unconditionally uncork unless disabled */
5292                 if (tcp_cork)
5293                         drbd_tcp_uncork(tconn->meta.socket);
5294
5295                 /* short circuit, recv_msg would return EINTR anyways. */
5296                 if (signal_pending(current))
5297                         continue;
5298
5299                 rv = drbd_recv_short(tconn->meta.socket, buf, expect - received, 0);
5300                 clear_bit(SIGNAL_ASENDER, &tconn->flags);
5301
5302                 flush_signals(current);
5303
5304                 /* Note:
5305                  * -EINTR        (on meta) we got a signal
5306                  * -EAGAIN       (on meta) rcvtimeo expired
5307                  * -ECONNRESET   other side closed the connection
5308                  * -ERESTARTSYS  (on data) we got a signal
5309                  * rv <  0       other than above: unexpected error!
5310                  * rv == expected: full header or command
5311                  * rv <  expected: "woken" by signal during receive
5312                  * rv == 0       : "connection shut down by peer"
5313                  */
5314                 if (likely(rv > 0)) {
5315                         received += rv;
5316                         buf      += rv;
5317                 } else if (rv == 0) {
5318                         if (test_bit(DISCONNECT_SENT, &tconn->flags)) {
5319                                 long t;
5320                                 rcu_read_lock();
5321                                 t = rcu_dereference(tconn->net_conf)->ping_timeo * HZ / 10;
5322                                 rcu_read_unlock();
5323
5324                                 t = wait_event_timeout(tconn->ping_wait,
5325                                                        tconn->cstate < C_WF_REPORT_PARAMS,
5326                                                        t);
5327                                 if (t)
5328                                         break;
5329                         }
5330                         conn_err(tconn, "meta connection shut down by peer.\n");
5331                         goto reconnect;
5332                 } else if (rv == -EAGAIN) {
5333                         /* If the data socket received something meanwhile,
5334                          * that is good enough: peer is still alive. */
5335                         if (time_after(tconn->last_received,
5336                                 jiffies - tconn->meta.socket->sk->sk_rcvtimeo))
5337                                 continue;
5338                         if (ping_timeout_active) {
5339                                 conn_err(tconn, "PingAck did not arrive in time.\n");
5340                                 goto reconnect;
5341                         }
5342                         set_bit(SEND_PING, &tconn->flags);
5343                         continue;
5344                 } else if (rv == -EINTR) {
5345                         continue;
5346                 } else {
5347                         conn_err(tconn, "sock_recvmsg returned %d\n", rv);
5348                         goto reconnect;
5349                 }
5350
5351                 if (received == expect && cmd == NULL) {
5352                         if (decode_header(tconn, tconn->meta.rbuf, &pi))
5353                                 goto reconnect;
5354                         if (pi.cmd >= ARRAY_SIZE(asender_tbl) ||
5355                             !asender_tbl[pi.cmd].fn) {
5356                                 conn_err(tconn, "Unexpected meta packet %s (0x%04x)\n",
5357                                          cmdname(pi.cmd), pi.cmd);
5358                                 goto disconnect;
5359                         }
5360                         cmd = &asender_tbl[pi.cmd];
5360                         expect = header_size + cmd->pkt_size;
5361                         if (pi.size != expect - header_size) {
5362                                 conn_err(tconn, "Wrong packet size on meta (c: %d, l: %d)\n",
5363                                         pi.cmd, pi.size);
5364                                 goto reconnect;
5365                         }
5366                 }
5367                 if (received == expect) {
5368                         int err;
5369
5370                         err = cmd->fn(tconn, &pi);
5371                         if (err) {
5372                                 conn_err(tconn, "%pf failed\n", cmd->fn);
5373                                 goto reconnect;
5374                         }
5375
5376                         tconn->last_received = jiffies;
5377
5378                         if (cmd == &asender_tbl[P_PING_ACK]) {
5379                                 /* restore idle timeout */
5380                                 tconn->meta.socket->sk->sk_rcvtimeo = ping_int * HZ;
5381                                 ping_timeout_active = false;
5382                         }
5383
5384                         buf      = tconn->meta.rbuf;
5385                         received = 0;
5386                         expect   = header_size;
5387                         cmd      = NULL;
5388                 }
5389         }
5390
5391         if (0) {
5392 reconnect:
5393                 conn_request_state(tconn, NS(conn, C_NETWORK_FAILURE), CS_HARD);
5394                 conn_md_sync(tconn);
5395         }
5396         if (0) {
5397 disconnect:
5398                 conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
5399         }
5400         clear_bit(SIGNAL_ASENDER, &tconn->flags);
5401
5402         conn_info(tconn, "asender terminated\n");
5403
5404         return 0;
5405 }
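
/*
 * Editorial sketch, compiled out below: drbd_asender() above accumulates
 * bytes until "received == expect", first for the header, then, once the
 * header is decoded, for header plus payload, and resets the little
 * received/expect state machine after each complete packet.  Here the
 * recv fragments are fed in by hand and the header format is a made-up
 * stand-in.
 */
#if 0
#include <stdio.h>
#include <string.h>

#define HDR 8

static int payload_size_for(const unsigned char *hdr)
{
        return hdr[0];  /* pretend the first header byte is the length */
}

static void feed(const unsigned char *data, int len)
{
        static unsigned char buf[4096];
        static int received, have_hdr;
        static int expect = HDR;

        while (len > 0) {
                int take = expect - received < len ? expect - received : len;

                memcpy(buf + received, data, take);
                received += take;
                data += take;
                len -= take;

                if (received == expect && !have_hdr) {
                        expect = HDR + payload_size_for(buf);
                        have_hdr = 1;
                }
                if (received == expect) {
                        printf("packet: %d payload bytes\n", received - HDR);
                        received = 0;
                        expect = HDR;
                        have_hdr = 0;
                }
        }
}

int main(void)
{
        unsigned char pkt[] = { 3, 0, 0, 0, 0, 0, 0, 0, 'a', 'b', 'c' };

        feed(pkt, 5);           /* the packet arrives in two fragments */
        feed(pkt + 5, sizeof(pkt) - 5);
        return 0;
}
#endif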