1 /*
2  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
3  *
4  * Copyright (c) 2011, 2012, Intel Corporation.
5  *
6  *   This file is part of Portals
7  *   http://sourceforge.net/projects/sandiaportals/
8  *
9  *   Portals is free software; you can redistribute it and/or
10  *   modify it under the terms of version 2 of the GNU General Public
11  *   License as published by the Free Software Foundation.
12  *
13  *   Portals is distributed in the hope that it will be useful,
14  *   but WITHOUT ANY WARRANTY; without even the implied warranty of
15  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16  *   GNU General Public License for more details.
17  *
18  *   You should have received a copy of the GNU General Public License
19  *   along with Portals; if not, write to the Free Software
20  *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21  *
22  */
23
24 #define DEBUG_SUBSYSTEM S_LNET
25 #include "../../include/linux/lnet/lib-lnet.h"
26
27 #define LNET_NRB_TINY_MIN       512     /* min value for each CPT */
28 #define LNET_NRB_TINY           (LNET_NRB_TINY_MIN * 4)
29 #define LNET_NRB_SMALL_MIN      4096    /* min value for each CPT */
30 #define LNET_NRB_SMALL          (LNET_NRB_SMALL_MIN * 4)
31 #define LNET_NRB_LARGE_MIN      256     /* min value for each CPT */
32 #define LNET_NRB_LARGE          (LNET_NRB_LARGE_MIN * 4)
33
34 static char *forwarding = "";
35 module_param(forwarding, charp, 0444);
36 MODULE_PARM_DESC(forwarding, "Explicitly enable/disable forwarding between networks");
37
38 static int tiny_router_buffers;
39 module_param(tiny_router_buffers, int, 0444);
40 MODULE_PARM_DESC(tiny_router_buffers, "# of 0 payload messages to buffer in the router");
41 static int small_router_buffers;
42 module_param(small_router_buffers, int, 0444);
43 MODULE_PARM_DESC(small_router_buffers, "# of small (1 page) messages to buffer in the router");
44 static int large_router_buffers;
45 module_param(large_router_buffers, int, 0444);
46 MODULE_PARM_DESC(large_router_buffers, "# of large messages to buffer in the router");
47 static int peer_buffer_credits;
48 module_param(peer_buffer_credits, int, 0444);
49 MODULE_PARM_DESC(peer_buffer_credits, "# router buffer credits per peer");
50
51 static int auto_down = 1;
52 module_param(auto_down, int, 0444);
53 MODULE_PARM_DESC(auto_down, "Automatically mark peers down on comms error");
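/*
 * These parameters are set on the lnet module at load time.  As a sketch
 * (hypothetical values), forwarding could be enabled and the router buffer
 * pools sized from a modprobe configuration file:
 *
 *   options lnet forwarding=enabled tiny_router_buffers=2048 small_router_buffers=8192
 */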
54
55 int
56 lnet_peer_buffer_credits(lnet_ni_t *ni)
57 {
58         /* NI option overrides LNet default */
59         if (ni->ni_peerrtrcredits > 0)
60                 return ni->ni_peerrtrcredits;
61         if (peer_buffer_credits > 0)
62                 return peer_buffer_credits;
63
64         /* As an approximation, allow this peer the same number of router
65          * buffers as it is allowed outstanding sends */
66         return ni->ni_peertxcredits;
67 }
68
69 /* forward references */
70 static int lnet_router_checker(void *);
71
72 static int check_routers_before_use;
73 module_param(check_routers_before_use, int, 0444);
74 MODULE_PARM_DESC(check_routers_before_use, "Assume routers are down and ping them before use");
75
76 int avoid_asym_router_failure = 1;
77 module_param(avoid_asym_router_failure, int, 0644);
78 MODULE_PARM_DESC(avoid_asym_router_failure, "Avoid asymmetrical router failures (0 to disable)");
79
80 static int dead_router_check_interval = 60;
81 module_param(dead_router_check_interval, int, 0644);
82 MODULE_PARM_DESC(dead_router_check_interval, "Seconds between dead router health checks (<= 0 to disable)");
83
84 static int live_router_check_interval = 60;
85 module_param(live_router_check_interval, int, 0644);
86 MODULE_PARM_DESC(live_router_check_interval, "Seconds between live router health checks (<= 0 to disable)");
87
88 static int router_ping_timeout = 50;
89 module_param(router_ping_timeout, int, 0644);
90 MODULE_PARM_DESC(router_ping_timeout, "Seconds to wait for the reply to a router health query");
91
92 int
93 lnet_peers_start_down(void)
94 {
95         return check_routers_before_use;
96 }
97
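/*
 * Record a liveness report for peer @lp.  Reports older than the last one
 * seen, or that repeat already-known state, are ignored; otherwise lp_alive
 * is updated and a notification is flagged for lnet_ni_notify_locked() to
 * deliver.  Called with the net lock held.
 */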
98 void
99 lnet_notify_locked(lnet_peer_t *lp, int notifylnd, int alive,
100                    unsigned long when)
101 {
102         if (time_before(when, lp->lp_timestamp)) { /* out of date information */
103                 CDEBUG(D_NET, "Out of date\n");
104                 return;
105         }
106
107         lp->lp_timestamp = when;                /* update timestamp */
108         lp->lp_ping_deadline = 0;              /* disable ping timeout */
109
110         if (lp->lp_alive_count != 0 &&    /* got old news */
111             (!lp->lp_alive) == (!alive)) {      /* new date for old news */
112                 CDEBUG(D_NET, "Old news\n");
113                 return;
114         }
115
116         /* Flag that notification is outstanding */
117
118         lp->lp_alive_count++;
119         lp->lp_alive = !(!alive);              /* 1 bit! */
120         lp->lp_notify = 1;
121         lp->lp_notifylnd |= notifylnd;
122         if (lp->lp_alive)
123                 lp->lp_ping_feats = LNET_PING_FEAT_INVAL; /* reset */
124
125         CDEBUG(D_NET, "set %s %d\n", libcfs_nid2str(lp->lp_nid), alive);
126 }
127
128 static void
129 lnet_ni_notify_locked(lnet_ni_t *ni, lnet_peer_t *lp)
130 {
131         int alive;
132         int notifylnd;
133
134         /* Notify only in 1 thread at any time to ensure ordered notification.
135          * NB individual events can be missed; the only guarantee is that you
136          * always get the most recent news */
137
138         if (lp->lp_notifying || ni == NULL)
139                 return;
140
141         lp->lp_notifying = 1;
142
143         while (lp->lp_notify) {
144                 alive = lp->lp_alive;
145                 notifylnd = lp->lp_notifylnd;
146
147                 lp->lp_notifylnd = 0;
148                 lp->lp_notify    = 0;
149
150                 if (notifylnd && ni->ni_lnd->lnd_notify != NULL) {
151                         lnet_net_unlock(lp->lp_cpt);
152
153                         /* A new notification could happen now; I'll handle it
154                          * when control returns to me */
155
156                         (ni->ni_lnd->lnd_notify)(ni, lp->lp_nid, alive);
157
158                         lnet_net_lock(lp->lp_cpt);
159                 }
160         }
161
162         lp->lp_notifying = 0;
163 }
164
165
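/*
 * Take a router reference on @lp.  On the first reference the peer is
 * inserted into the_lnet.ln_routers (kept sorted by NID with a simple
 * insertion sort) and the routers version is bumped.  The net lock must be
 * held exclusively.
 */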
166 static void
167 lnet_rtr_addref_locked(lnet_peer_t *lp)
168 {
169         LASSERT(lp->lp_refcount > 0);
170         LASSERT(lp->lp_rtr_refcount >= 0);
171
172         /* lnet_net_lock must be exclusively locked */
173         lp->lp_rtr_refcount++;
174         if (lp->lp_rtr_refcount == 1) {
175                 struct list_head *pos;
176
177                 /* a simple insertion sort */
178                 list_for_each_prev(pos, &the_lnet.ln_routers) {
179                         lnet_peer_t *rtr = list_entry(pos, lnet_peer_t,
180                                                           lp_rtr_list);
181
182                         if (rtr->lp_nid < lp->lp_nid)
183                                 break;
184                 }
185
186                 list_add(&lp->lp_rtr_list, pos);
187                 /* addref for the_lnet.ln_routers */
188                 lnet_peer_addref_locked(lp);
189                 the_lnet.ln_routers_version++;
190         }
191 }
192
193 static void
194 lnet_rtr_decref_locked(lnet_peer_t *lp)
195 {
196         LASSERT(lp->lp_refcount > 0);
197         LASSERT(lp->lp_rtr_refcount > 0);
198
199         /* lnet_net_lock must be exclusively locked */
200         lp->lp_rtr_refcount--;
201         if (lp->lp_rtr_refcount == 0) {
202                 LASSERT(list_empty(&lp->lp_routes));
203
204                 if (lp->lp_rcd != NULL) {
205                         list_add(&lp->lp_rcd->rcd_list,
206                                      &the_lnet.ln_rcd_deathrow);
207                         lp->lp_rcd = NULL;
208                 }
209
210                 list_del(&lp->lp_rtr_list);
211                 /* decref for the_lnet.ln_routers */
212                 lnet_peer_decref_locked(lp);
213                 the_lnet.ln_routers_version++;
214         }
215 }
216
217 lnet_remotenet_t *
218 lnet_find_net_locked(__u32 net)
219 {
220         lnet_remotenet_t *rnet;
221         struct list_head *tmp;
222         struct list_head *rn_list;
223
224         LASSERT(!the_lnet.ln_shutdown);
225
226         rn_list = lnet_net2rnethash(net);
227         list_for_each(tmp, rn_list) {
228                 rnet = list_entry(tmp, lnet_remotenet_t, lrn_list);
229
230                 if (rnet->lrn_net == net)
231                         return rnet;
232         }
233         return NULL;
234 }
235
236 static void lnet_shuffle_seed(void)
237 {
238         static int seeded;
239         int lnd_type, seed[2];
240         struct timeval tv;
241         lnet_ni_t *ni;
242         struct list_head *tmp;
243
244         if (seeded)
245                 return;
246
247         cfs_get_random_bytes(seed, sizeof(seed));
248
249         /* Nodes with small feet have little entropy;
250          * the NID for this node gives the most entropy in the low bits */
251         list_for_each(tmp, &the_lnet.ln_nis) {
252                 ni = list_entry(tmp, lnet_ni_t, ni_list);
253                 lnd_type = LNET_NETTYP(LNET_NIDNET(ni->ni_nid));
254
255                 if (lnd_type != LOLND)
256                         seed[0] ^= (LNET_NIDADDR(ni->ni_nid) | lnd_type);
257         }
258
259         do_gettimeofday(&tv);
260         cfs_srand(tv.tv_sec ^ seed[0], tv.tv_usec ^ seed[1]);
261         seeded = 1;
262 }
263
264 /* NB expects LNET_LOCK held */
265 static void
266 lnet_add_route_to_rnet(lnet_remotenet_t *rnet, lnet_route_t *route)
267 {
268         unsigned int len = 0;
269         unsigned int offset = 0;
270         struct list_head *e;
271
272         lnet_shuffle_seed();
273
274         list_for_each(e, &rnet->lrn_routes) {
275                 len++;
276         }
277
278         /* len+1 positions to add a new entry, also prevents division by 0 */
279         offset = cfs_rand() % (len + 1);
280         list_for_each(e, &rnet->lrn_routes) {
281                 if (offset == 0)
282                         break;
283                 offset--;
284         }
285         list_add(&route->lr_list, e);
286         list_add(&route->lr_gwlist, &route->lr_gateway->lp_routes);
287
288         the_lnet.ln_remote_nets_version++;
289         lnet_rtr_addref_locked(route->lr_gateway);
290 }
291
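/*
 * Add a route to remote network @net via @gateway.  Obviously bad requests
 * (loopback nets, zero or excessive hop counts, a gateway inside the target
 * net) fail with -EINVAL; routes to local nets or via unreachable gateways
 * are silently ignored, and a duplicate of an existing gateway for the net
 * is dropped.  The gateway's LND is told the new peer is (assumed) alive.
 */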
292 int
293 lnet_add_route(__u32 net, unsigned int hops, lnet_nid_t gateway,
294                unsigned int priority)
295 {
296         struct list_head *e;
297         lnet_remotenet_t *rnet;
298         lnet_remotenet_t *rnet2;
299         lnet_route_t *route;
300         lnet_ni_t *ni;
301         int add_route;
302         int rc;
303
304         CDEBUG(D_NET, "Add route: net %s hops %u priority %u gw %s\n",
305                libcfs_net2str(net), hops, priority, libcfs_nid2str(gateway));
306
307         if (gateway == LNET_NID_ANY ||
308             LNET_NETTYP(LNET_NIDNET(gateway)) == LOLND ||
309             net == LNET_NIDNET(LNET_NID_ANY) ||
310             LNET_NETTYP(net) == LOLND ||
311             LNET_NIDNET(gateway) == net ||
312             hops < 1 || hops > 255)
313                 return -EINVAL;
314
315         if (lnet_islocalnet(net))              /* it's a local network */
316                 return 0;                      /* ignore the route entry */
317
318         /* Assume net, route, all new */
319         LIBCFS_ALLOC(route, sizeof(*route));
320         LIBCFS_ALLOC(rnet, sizeof(*rnet));
321         if (route == NULL || rnet == NULL) {
322                 CERROR("Out of memory creating route %s %d %s\n",
323                        libcfs_net2str(net), hops, libcfs_nid2str(gateway));
324                 if (route != NULL)
325                         LIBCFS_FREE(route, sizeof(*route));
326                 if (rnet != NULL)
327                         LIBCFS_FREE(rnet, sizeof(*rnet));
328                 return -ENOMEM;
329         }
330
331         INIT_LIST_HEAD(&rnet->lrn_routes);
332         rnet->lrn_net = net;
333         route->lr_hops = hops;
334         route->lr_net = net;
335         route->lr_priority = priority;
336
337         lnet_net_lock(LNET_LOCK_EX);
338
339         rc = lnet_nid2peer_locked(&route->lr_gateway, gateway, LNET_LOCK_EX);
340         if (rc != 0) {
341                 lnet_net_unlock(LNET_LOCK_EX);
342
343                 LIBCFS_FREE(route, sizeof(*route));
344                 LIBCFS_FREE(rnet, sizeof(*rnet));
345
346                 if (rc == -EHOSTUNREACH) /* gateway is not on a local net */
347                         return 0;       /* ignore the route entry */
348                 CERROR("Error %d creating route %s %d %s\n", rc,
349                        libcfs_net2str(net), hops,
350                        libcfs_nid2str(gateway));
351
352                 return rc;
353         }
354
355         LASSERT(!the_lnet.ln_shutdown);
356
357         rnet2 = lnet_find_net_locked(net);
358         if (rnet2 == NULL) {
359                 /* new network */
360                 list_add_tail(&rnet->lrn_list, lnet_net2rnethash(net));
361                 rnet2 = rnet;
362         }
363
364         /* Search for a duplicate route (adding a duplicate is a no-op) */
365         add_route = 1;
366         list_for_each(e, &rnet2->lrn_routes) {
367                 lnet_route_t *route2 = list_entry(e, lnet_route_t, lr_list);
368
369                 if (route2->lr_gateway == route->lr_gateway) {
370                         add_route = 0;
371                         break;
372                 }
373
374                 /* our lookups must be true */
375                 LASSERT(route2->lr_gateway->lp_nid != gateway);
376         }
377
378         if (add_route) {
379                 lnet_peer_addref_locked(route->lr_gateway); /* +1 for notify */
380                 lnet_add_route_to_rnet(rnet2, route);
381
382                 ni = route->lr_gateway->lp_ni;
383                 lnet_net_unlock(LNET_LOCK_EX);
384
385                 /* XXX Assume alive */
386                 if (ni->ni_lnd->lnd_notify != NULL)
387                         (ni->ni_lnd->lnd_notify)(ni, gateway, 1);
388
389                 lnet_net_lock(LNET_LOCK_EX);
390         }
391
392         /* -1 for notify or !add_route */
393         lnet_peer_decref_locked(route->lr_gateway);
394         lnet_net_unlock(LNET_LOCK_EX);
395
396         if (!add_route)
397                 LIBCFS_FREE(route, sizeof(*route));
398
399         if (rnet != rnet2)
400                 LIBCFS_FREE(rnet, sizeof(*rnet));
401
402         return 0;
403 }
404
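/*
 * Sanity check the route table: every route to a given remote net must use
 * a gateway reached through the same local NI.  Routes to one net via
 * different local interfaces are not supported and fail with -EINVAL.
 */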
405 int
406 lnet_check_routes(void)
407 {
408         lnet_remotenet_t *rnet;
409         lnet_route_t *route;
410         lnet_route_t *route2;
411         struct list_head *e1;
412         struct list_head *e2;
413         int cpt;
414         struct list_head *rn_list;
415         int i;
416
417         cpt = lnet_net_lock_current();
418
419         for (i = 0; i < LNET_REMOTE_NETS_HASH_SIZE; i++) {
420                 rn_list = &the_lnet.ln_remote_nets_hash[i];
421                 list_for_each(e1, rn_list) {
422                         rnet = list_entry(e1, lnet_remotenet_t, lrn_list);
423
424                         route2 = NULL;
425                         list_for_each(e2, &rnet->lrn_routes) {
426                                 lnet_nid_t nid1;
427                                 lnet_nid_t nid2;
428                                 int net;
429
430                                 route = list_entry(e2, lnet_route_t,
431                                                        lr_list);
432
433                                 if (route2 == NULL) {
434                                         route2 = route;
435                                         continue;
436                                 }
437
438                                 if (route->lr_gateway->lp_ni ==
439                                     route2->lr_gateway->lp_ni)
440                                         continue;
441
442                                 nid1 = route->lr_gateway->lp_nid;
443                                 nid2 = route2->lr_gateway->lp_nid;
444                                 net = rnet->lrn_net;
445
446                                 lnet_net_unlock(cpt);
447
448                                 CERROR("Routes to %s via %s and %s not supported\n",
449                                        libcfs_net2str(net),
450                                        libcfs_nid2str(nid1),
451                                        libcfs_nid2str(nid2));
452                                 return -EINVAL;
453                         }
454                 }
455         }
456
457         lnet_net_unlock(cpt);
458         return 0;
459 }
460
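/*
 * Delete routes matching @net and @gw_nid.  Either (or both) may be
 * wildcarded with LNET_NIDNET(LNET_NID_ANY) / LNET_NID_ANY to remove all
 * routes through a gateway or to a network.  Returns -ENOENT if nothing
 * matched.
 */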
461 int
462 lnet_del_route(__u32 net, lnet_nid_t gw_nid)
463 {
464         struct lnet_peer *gateway;
465         lnet_remotenet_t *rnet;
466         lnet_route_t *route;
467         struct list_head *e1;
468         struct list_head *e2;
469         int rc = -ENOENT;
470         struct list_head *rn_list;
471         int idx = 0;
472
473         CDEBUG(D_NET, "Del route: net %s : gw %s\n",
474                libcfs_net2str(net), libcfs_nid2str(gw_nid));
475
476         /* NB Caller may specify either all routes via the given gateway
477          * or a specific route entry (actual NIDs) */
478
479         lnet_net_lock(LNET_LOCK_EX);
480         if (net == LNET_NIDNET(LNET_NID_ANY))
481                 rn_list = &the_lnet.ln_remote_nets_hash[0];
482         else
483                 rn_list = lnet_net2rnethash(net);
484
485  again:
486         list_for_each(e1, rn_list) {
487                 rnet = list_entry(e1, lnet_remotenet_t, lrn_list);
488
489                 if (!(net == LNET_NIDNET(LNET_NID_ANY) ||
490                         net == rnet->lrn_net))
491                         continue;
492
493                 list_for_each(e2, &rnet->lrn_routes) {
494                         route = list_entry(e2, lnet_route_t, lr_list);
495
496                         gateway = route->lr_gateway;
497                         if (!(gw_nid == LNET_NID_ANY ||
498                               gw_nid == gateway->lp_nid))
499                                 continue;
500
501                         list_del(&route->lr_list);
502                         list_del(&route->lr_gwlist);
503                         the_lnet.ln_remote_nets_version++;
504
505                         if (list_empty(&rnet->lrn_routes))
506                                 list_del(&rnet->lrn_list);
507                         else
508                                 rnet = NULL;
509
510                         lnet_rtr_decref_locked(gateway);
511                         lnet_peer_decref_locked(gateway);
512
513                         lnet_net_unlock(LNET_LOCK_EX);
514
515                         LIBCFS_FREE(route, sizeof(*route));
516
517                         if (rnet != NULL)
518                                 LIBCFS_FREE(rnet, sizeof(*rnet));
519
520                         rc = 0;
521                         lnet_net_lock(LNET_LOCK_EX);
522                         goto again;
523                 }
524         }
525
526         if (net == LNET_NIDNET(LNET_NID_ANY) &&
527             ++idx < LNET_REMOTE_NETS_HASH_SIZE) {
528                 rn_list = &the_lnet.ln_remote_nets_hash[idx];
529                 goto again;
530         }
531         lnet_net_unlock(LNET_LOCK_EX);
532
533         return rc;
534 }
535
536 void
537 lnet_destroy_routes(void)
538 {
539         lnet_del_route(LNET_NIDNET(LNET_NID_ANY), LNET_NID_ANY);
540 }
541
542 int
543 lnet_get_route(int idx, __u32 *net, __u32 *hops,
544                lnet_nid_t *gateway, __u32 *alive, __u32 *priority)
545 {
546         struct list_head *e1;
547         struct list_head *e2;
548         lnet_remotenet_t *rnet;
549         lnet_route_t *route;
550         int cpt;
551         int i;
552         struct list_head *rn_list;
553
554         cpt = lnet_net_lock_current();
555
556         for (i = 0; i < LNET_REMOTE_NETS_HASH_SIZE; i++) {
557                 rn_list = &the_lnet.ln_remote_nets_hash[i];
558                 list_for_each(e1, rn_list) {
559                         rnet = list_entry(e1, lnet_remotenet_t, lrn_list);
560
561                         list_for_each(e2, &rnet->lrn_routes) {
562                                 route = list_entry(e2, lnet_route_t,
563                                                        lr_list);
564
565                                 if (idx-- == 0) {
566                                         *net      = rnet->lrn_net;
567                                         *hops     = route->lr_hops;
568                                         *priority = route->lr_priority;
569                                         *gateway  = route->lr_gateway->lp_nid;
570                                         *alive    = route->lr_gateway->lp_alive;
571                                         lnet_net_unlock(cpt);
572                                         return 0;
573                                 }
574                         }
575                 }
576         }
577
578         lnet_net_unlock(cpt);
579         return -ENOENT;
580 }
581
582 void
583 lnet_swap_pinginfo(lnet_ping_info_t *info)
584 {
585         int i;
586         lnet_ni_status_t *stat;
587
588         __swab32s(&info->pi_magic);
589         __swab32s(&info->pi_features);
590         __swab32s(&info->pi_pid);
591         __swab32s(&info->pi_nnis);
592         for (i = 0; i < info->pi_nnis && i < LNET_MAX_RTR_NIS; i++) {
593                 stat = &info->pi_ni[i];
594                 __swab64s(&stat->ns_nid);
595                 __swab32s(&stat->ns_status);
596         }
597 }
598
599 /**
600  * Parse router-checker pinginfo and record the number of down NIs for
601  * remote networks on that router.
602  */
603 static void
604 lnet_parse_rc_info(lnet_rc_data_t *rcd)
605 {
606         lnet_ping_info_t *info = rcd->rcd_pinginfo;
607         struct lnet_peer *gw = rcd->rcd_gateway;
608         lnet_route_t *rtr;
609
610         if (!gw->lp_alive)
611                 return;
612
613         if (info->pi_magic == __swab32(LNET_PROTO_PING_MAGIC))
614                 lnet_swap_pinginfo(info);
615
616         /* NB always racing with network! */
617         if (info->pi_magic != LNET_PROTO_PING_MAGIC) {
618                 CDEBUG(D_NET, "%s: Unexpected magic %08x\n",
619                        libcfs_nid2str(gw->lp_nid), info->pi_magic);
620                 gw->lp_ping_feats = LNET_PING_FEAT_INVAL;
621                 return;
622         }
623
624         gw->lp_ping_feats = info->pi_features;
625         if ((gw->lp_ping_feats & LNET_PING_FEAT_MASK) == 0) {
626                 CDEBUG(D_NET, "%s: Unexpected features 0x%x\n",
627                        libcfs_nid2str(gw->lp_nid), gw->lp_ping_feats);
628                 return; /* nothing I can understand */
629         }
630
631         if ((gw->lp_ping_feats & LNET_PING_FEAT_NI_STATUS) == 0)
632                 return; /* can't carry NI status info */
633
634         list_for_each_entry(rtr, &gw->lp_routes, lr_gwlist) {
635                 int ptl_status = LNET_NI_STATUS_INVALID;
636                 int down = 0;
637                 int up = 0;
638                 int i;
639
640                 for (i = 0; i < info->pi_nnis && i < LNET_MAX_RTR_NIS; i++) {
641                         lnet_ni_status_t *stat = &info->pi_ni[i];
642                         lnet_nid_t nid = stat->ns_nid;
643
644                         if (nid == LNET_NID_ANY) {
645                                 CDEBUG(D_NET, "%s: unexpected LNET_NID_ANY\n",
646                                        libcfs_nid2str(gw->lp_nid));
647                                 gw->lp_ping_feats = LNET_PING_FEAT_INVAL;
648                                 return;
649                         }
650
651                         if (LNET_NETTYP(LNET_NIDNET(nid)) == LOLND)
652                                 continue;
653
654                         if (stat->ns_status == LNET_NI_STATUS_DOWN) {
655                                 if (LNET_NETTYP(LNET_NIDNET(nid)) != PTLLND)
656                                         down++;
657                                 else if (ptl_status != LNET_NI_STATUS_UP)
658                                         ptl_status = LNET_NI_STATUS_DOWN;
659                                 continue;
660                         }
661
662                         if (stat->ns_status == LNET_NI_STATUS_UP) {
663                                 if (LNET_NIDNET(nid) == rtr->lr_net) {
664                                         up = 1;
665                                         break;
666                                 }
667                                 /* ptl NIs are considered down only when
668                                  * they're all down */
669                                 if (LNET_NETTYP(LNET_NIDNET(nid)) == PTLLND)
670                                         ptl_status = LNET_NI_STATUS_UP;
671                                 continue;
672                         }
673
674                         CDEBUG(D_NET, "%s: Unexpected status 0x%x\n",
675                                libcfs_nid2str(gw->lp_nid), stat->ns_status);
676                         gw->lp_ping_feats = LNET_PING_FEAT_INVAL;
677                         return;
678                 }
679
680                 if (up) { /* ignore downed NIs if NI for dest network is up */
681                         rtr->lr_downis = 0;
682                         continue;
683                 }
684                 rtr->lr_downis = down + (ptl_status == LNET_NI_STATUS_DOWN);
685         }
686 }
687
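/*
 * Event callback for router-checker pings.  SEND and REPLY events update
 * the gateway's aliveness via lnet_notify_locked(); a successful REPLY may
 * also carry ping info that is parsed to track down NIs on the router.
 */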
688 static void
689 lnet_router_checker_event(lnet_event_t *event)
690 {
691         lnet_rc_data_t *rcd = event->md.user_ptr;
692         struct lnet_peer *lp;
693
694         LASSERT(rcd != NULL);
695
696         if (event->unlinked) {
697                 LNetInvalidateHandle(&rcd->rcd_mdh);
698                 return;
699         }
700
701         LASSERT(event->type == LNET_EVENT_SEND ||
702                 event->type == LNET_EVENT_REPLY);
703
704         lp = rcd->rcd_gateway;
705         LASSERT(lp != NULL);
706
707          /* NB: this is called while holding lnet_res_lock; a few places
708           * need to hold both locks at the same time, so take care of
709           * lock ordering */
710         lnet_net_lock(lp->lp_cpt);
711         if (!lnet_isrouter(lp) || lp->lp_rcd != rcd) {
712                 /* ignore if no longer a router or rcd is replaced */
713                 goto out;
714         }
715
716         if (event->type == LNET_EVENT_SEND) {
717                 lp->lp_ping_notsent = 0;
718                 if (event->status == 0)
719                         goto out;
720         }
721
722         /* LNET_EVENT_REPLY */
723         /* A successful REPLY means the router is up.  If _any_ comms
724          * to the router fail I assume it's down (this will happen if
725          * we ping alive routers to try to detect router death before
726          * apps get burned). */
727
728         lnet_notify_locked(lp, 1, (event->status == 0), cfs_time_current());
729         /* The router checker will wake up very shortly and do the
730          * actual notification.
731          * XXX If 'lp' stops being a router before then, it will still
732          * have the notification pending!!! */
733
734         if (avoid_asym_router_failure && event->status == 0)
735                 lnet_parse_rc_info(rcd);
736
737  out:
738         lnet_net_unlock(lp->lp_cpt);
739 }
740
741 static void
742 lnet_wait_known_routerstate(void)
743 {
744         lnet_peer_t *rtr;
745         struct list_head *entry;
746         int all_known;
747
748         LASSERT(the_lnet.ln_rc_state == LNET_RC_STATE_RUNNING);
749
750         for (;;) {
751                 int cpt = lnet_net_lock_current();
752
753                 all_known = 1;
754                 list_for_each(entry, &the_lnet.ln_routers) {
755                         rtr = list_entry(entry, lnet_peer_t, lp_rtr_list);
756
757                         if (rtr->lp_alive_count == 0) {
758                                 all_known = 0;
759                                 break;
760                         }
761                 }
762
763                 lnet_net_unlock(cpt);
764
765                 if (all_known)
766                         return;
767
768                 set_current_state(TASK_UNINTERRUPTIBLE);
769                 schedule_timeout(cfs_time_seconds(1));
770         }
771 }
772
773 void
774 lnet_router_ni_update_locked(lnet_peer_t *gw, __u32 net)
775 {
776         lnet_route_t *rte;
777
778         if ((gw->lp_ping_feats & LNET_PING_FEAT_NI_STATUS) != 0) {
779                 list_for_each_entry(rte, &gw->lp_routes, lr_gwlist) {
780                         if (rte->lr_net == net) {
781                                 rte->lr_downis = 0;
782                                 break;
783                         }
784                 }
785         }
786 }
787
788 static void
789 lnet_update_ni_status_locked(void)
790 {
791         lnet_ni_t *ni;
792         long now;
793         int timeout;
794
795         LASSERT(the_lnet.ln_routing);
796
797         timeout = router_ping_timeout +
798                   max(live_router_check_interval, dead_router_check_interval);
799
800         now = get_seconds();
801         list_for_each_entry(ni, &the_lnet.ln_nis, ni_list) {
802                 if (ni->ni_lnd->lnd_type == LOLND)
803                         continue;
804
805                 if (now < ni->ni_last_alive + timeout)
806                         continue;
807
808                 lnet_ni_lock(ni);
809                 /* re-check with lock */
810                 if (now < ni->ni_last_alive + timeout) {
811                         lnet_ni_unlock(ni);
812                         continue;
813                 }
814
815                 LASSERT(ni->ni_status != NULL);
816
817                 if (ni->ni_status->ns_status != LNET_NI_STATUS_DOWN) {
818                         CDEBUG(D_NET, "NI(%s:%d) status changed to down\n",
819                                libcfs_nid2str(ni->ni_nid), timeout);
820                         /* NB: so far, this is the only place to set
821                          * NI status to "down" */
822                         ni->ni_status->ns_status = LNET_NI_STATUS_DOWN;
823                 }
824                 lnet_ni_unlock(ni);
825         }
826 }
827
828 static void
829 lnet_destroy_rc_data(lnet_rc_data_t *rcd)
830 {
831         LASSERT(list_empty(&rcd->rcd_list));
832         /* detached from network */
833         LASSERT(LNetHandleIsInvalid(rcd->rcd_mdh));
834
835         if (rcd->rcd_gateway != NULL) {
836                 int cpt = rcd->rcd_gateway->lp_cpt;
837
838                 lnet_net_lock(cpt);
839                 lnet_peer_decref_locked(rcd->rcd_gateway);
840                 lnet_net_unlock(cpt);
841         }
842
843         if (rcd->rcd_pinginfo != NULL)
844                 LIBCFS_FREE(rcd->rcd_pinginfo, LNET_PINGINFO_SIZE);
845
846         LIBCFS_FREE(rcd, sizeof(*rcd));
847 }
848
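/*
 * Allocate router-checker data (a ping buffer plus a bound MD) for @gateway.
 * The net lock is dropped for the allocations and retaken; if the peer
 * stopped being a router or someone else attached an rcd in the meantime,
 * the new one is discarded and the existing rcd (if any) is returned.
 */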
849 static lnet_rc_data_t *
850 lnet_create_rc_data_locked(lnet_peer_t *gateway)
851 {
852         lnet_rc_data_t *rcd = NULL;
853         lnet_ping_info_t *pi;
854         int rc;
855         int i;
856
857         lnet_net_unlock(gateway->lp_cpt);
858
859         LIBCFS_ALLOC(rcd, sizeof(*rcd));
860         if (rcd == NULL)
861                 goto out;
862
863         LNetInvalidateHandle(&rcd->rcd_mdh);
864         INIT_LIST_HEAD(&rcd->rcd_list);
865
866         LIBCFS_ALLOC(pi, LNET_PINGINFO_SIZE);
867         if (pi == NULL)
868                 goto out;
869
870         for (i = 0; i < LNET_MAX_RTR_NIS; i++) {
871                 pi->pi_ni[i].ns_nid = LNET_NID_ANY;
872                 pi->pi_ni[i].ns_status = LNET_NI_STATUS_INVALID;
873         }
874         rcd->rcd_pinginfo = pi;
875
876         LASSERT(!LNetHandleIsInvalid(the_lnet.ln_rc_eqh));
877         rc = LNetMDBind((lnet_md_t){.start     = pi,
878                                     .user_ptr  = rcd,
879                                     .length    = LNET_PINGINFO_SIZE,
880                                     .threshold = LNET_MD_THRESH_INF,
881                                     .options   = LNET_MD_TRUNCATE,
882                                     .eq_handle = the_lnet.ln_rc_eqh},
883                         LNET_UNLINK,
884                         &rcd->rcd_mdh);
885         if (rc < 0) {
886                 CERROR("Can't bind MD: %d\n", rc);
887                 goto out;
888         }
889         LASSERT(rc == 0);
890
891         lnet_net_lock(gateway->lp_cpt);
892         /* router table changed or someone has created rcd for this gateway */
893         if (!lnet_isrouter(gateway) || gateway->lp_rcd != NULL) {
894                 lnet_net_unlock(gateway->lp_cpt);
895                 goto out;
896         }
897
898         lnet_peer_addref_locked(gateway);
899         rcd->rcd_gateway = gateway;
900         gateway->lp_rcd = rcd;
901         gateway->lp_ping_notsent = 0;
902
903         return rcd;
904
905  out:
906         if (rcd != NULL) {
907                 if (!LNetHandleIsInvalid(rcd->rcd_mdh)) {
908                         rc = LNetMDUnlink(rcd->rcd_mdh);
909                         LASSERT(rc == 0);
910                 }
911                 lnet_destroy_rc_data(rcd);
912         }
913
914         lnet_net_lock(gateway->lp_cpt);
915         return gateway->lp_rcd;
916 }
917
918 static int
919 lnet_router_check_interval(lnet_peer_t *rtr)
920 {
921         int secs;
922
923         secs = rtr->lp_alive ? live_router_check_interval :
924                                dead_router_check_interval;
925         if (secs < 0)
926                 secs = 0;
927
928         return secs;
929 }
930
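/*
 * Check one router: time out an outstanding ping if its deadline has passed,
 * deliver any pending notifications, and, if the check interval for this
 * router has elapsed, issue a new ping (an LNetGet on the reserved ping
 * portal) to query its health.
 */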
931 static void
932 lnet_ping_router_locked(lnet_peer_t *rtr)
933 {
934         lnet_rc_data_t *rcd = NULL;
935         unsigned long now = cfs_time_current();
936         int secs;
937
938         lnet_peer_addref_locked(rtr);
939
940         if (rtr->lp_ping_deadline != 0 && /* ping timed out? */
941             cfs_time_after(now, rtr->lp_ping_deadline))
942                 lnet_notify_locked(rtr, 1, 0, now);
943
944         /* Run any outstanding notifications */
945         lnet_ni_notify_locked(rtr->lp_ni, rtr);
946
947         if (!lnet_isrouter(rtr) ||
948             the_lnet.ln_rc_state != LNET_RC_STATE_RUNNING) {
949                 /* router table changed or router checker is shutting down */
950                 lnet_peer_decref_locked(rtr);
951                 return;
952         }
953
954         rcd = rtr->lp_rcd != NULL ?
955               rtr->lp_rcd : lnet_create_rc_data_locked(rtr);
956
957         if (rcd == NULL)
958                 return;
959
960         secs = lnet_router_check_interval(rtr);
961
962         CDEBUG(D_NET,
963                "rtr %s %d: deadline %lu ping_notsent %d alive %d alive_count %d lp_ping_timestamp %lu\n",
964                libcfs_nid2str(rtr->lp_nid), secs,
965                rtr->lp_ping_deadline, rtr->lp_ping_notsent,
966                rtr->lp_alive, rtr->lp_alive_count, rtr->lp_ping_timestamp);
967
968         if (secs != 0 && !rtr->lp_ping_notsent &&
969             cfs_time_after(now, cfs_time_add(rtr->lp_ping_timestamp,
970                                              cfs_time_seconds(secs)))) {
971                 int rc;
972                 lnet_process_id_t id;
973                 lnet_handle_md_t mdh;
974
975                 id.nid = rtr->lp_nid;
976                 id.pid = LUSTRE_SRV_LNET_PID;
977                 CDEBUG(D_NET, "Check: %s\n", libcfs_id2str(id));
978
979                 rtr->lp_ping_notsent   = 1;
980                 rtr->lp_ping_timestamp = now;
981
982                 mdh = rcd->rcd_mdh;
983
984                 if (rtr->lp_ping_deadline == 0) {
985                         rtr->lp_ping_deadline =
986                                 cfs_time_shift(router_ping_timeout);
987                 }
988
989                 lnet_net_unlock(rtr->lp_cpt);
990
991                 rc = LNetGet(LNET_NID_ANY, mdh, id, LNET_RESERVED_PORTAL,
992                              LNET_PROTO_PING_MATCHBITS, 0);
993
994                 lnet_net_lock(rtr->lp_cpt);
995                 if (rc != 0)
996                         rtr->lp_ping_notsent = 0; /* no event pending */
997         }
998
999         lnet_peer_decref_locked(rtr);
1000 }
1001
1002 int
1003 lnet_router_checker_start(void)
1004 {
1005         int rc;
1006         int eqsz;
1007
1008         LASSERT(the_lnet.ln_rc_state == LNET_RC_STATE_SHUTDOWN);
1009
1010         if (check_routers_before_use &&
1011             dead_router_check_interval <= 0) {
1012                 LCONSOLE_ERROR_MSG(0x10a, "'dead_router_check_interval' must be set if 'check_routers_before_use' is set\n");
1013                 return -EINVAL;
1014         }
1015
1016         if (!the_lnet.ln_routing &&
1017             live_router_check_interval <= 0 &&
1018             dead_router_check_interval <= 0)
1019                 return 0;
1020
1021         sema_init(&the_lnet.ln_rc_signal, 0);
1022         /* EQ size doesn't matter; the callback is guaranteed to get every
1023          * event */
1024         eqsz = 0;
1025         rc = LNetEQAlloc(eqsz, lnet_router_checker_event,
1026                          &the_lnet.ln_rc_eqh);
1027         if (rc != 0) {
1028                 CERROR("Can't allocate EQ(%d): %d\n", eqsz, rc);
1029                 return -ENOMEM;
1030         }
1031
1032         the_lnet.ln_rc_state = LNET_RC_STATE_RUNNING;
1033         rc = PTR_ERR(kthread_run(lnet_router_checker,
1034                                  NULL, "router_checker"));
1035         if (IS_ERR_VALUE(rc)) {
1036                 CERROR("Can't start router checker thread: %d\n", rc);
1037                 /* block until event callback signals exit */
1038                 down(&the_lnet.ln_rc_signal);
1039                 rc = LNetEQFree(the_lnet.ln_rc_eqh);
1040                 LASSERT(rc == 0);
1041                 the_lnet.ln_rc_state = LNET_RC_STATE_SHUTDOWN;
1042                 return -ENOMEM;
1043         }
1044
1045         if (check_routers_before_use) {
1046                 /* Note that a helpful side-effect of pinging all known routers
1047                  * at startup is that it makes them drop stale connections they
1048                  * may have to a previous instance of me. */
1049                 lnet_wait_known_routerstate();
1050         }
1051
1052         return 0;
1053 }
1054
1055 void
1056 lnet_router_checker_stop(void)
1057 {
1058         int rc;
1059
1060         if (the_lnet.ln_rc_state == LNET_RC_STATE_SHUTDOWN)
1061                 return;
1062
1063         LASSERT(the_lnet.ln_rc_state == LNET_RC_STATE_RUNNING);
1064         the_lnet.ln_rc_state = LNET_RC_STATE_STOPPING;
1065
1066         /* block until event callback signals exit */
1067         down(&the_lnet.ln_rc_signal);
1068         LASSERT(the_lnet.ln_rc_state == LNET_RC_STATE_SHUTDOWN);
1069
1070         rc = LNetEQFree(the_lnet.ln_rc_eqh);
1071         LASSERT(rc == 0);
1072 }
1073
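/*
 * Unlink and free router-checker data.  RCDs queued on the deathrow list
 * (and, when the checker is stopping, those still attached to routers) are
 * unlinked and moved to the zombie list; zombies are freed once their MD
 * unlink has completed.  With @wait_unlink set, wait for that completion.
 */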
1074 static void
1075 lnet_prune_rc_data(int wait_unlink)
1076 {
1077         lnet_rc_data_t *rcd;
1078         lnet_rc_data_t *tmp;
1079         lnet_peer_t *lp;
1080         struct list_head head;
1081         int i = 2;
1082
1083         if (likely(the_lnet.ln_rc_state == LNET_RC_STATE_RUNNING &&
1084                    list_empty(&the_lnet.ln_rcd_deathrow) &&
1085                    list_empty(&the_lnet.ln_rcd_zombie)))
1086                 return;
1087
1088         INIT_LIST_HEAD(&head);
1089
1090         lnet_net_lock(LNET_LOCK_EX);
1091
1092         if (the_lnet.ln_rc_state != LNET_RC_STATE_RUNNING) {
1093                 /* router checker is stopping, prune all */
1094                 list_for_each_entry(lp, &the_lnet.ln_routers,
1095                                         lp_rtr_list) {
1096                         if (lp->lp_rcd == NULL)
1097                                 continue;
1098
1099                         LASSERT(list_empty(&lp->lp_rcd->rcd_list));
1100                         list_add(&lp->lp_rcd->rcd_list,
1101                                      &the_lnet.ln_rcd_deathrow);
1102                         lp->lp_rcd = NULL;
1103                 }
1104         }
1105
1106         /* unlink all RCDs on deathrow list */
1107         list_splice_init(&the_lnet.ln_rcd_deathrow, &head);
1108
1109         if (!list_empty(&head)) {
1110                 lnet_net_unlock(LNET_LOCK_EX);
1111
1112                 list_for_each_entry(rcd, &head, rcd_list)
1113                         LNetMDUnlink(rcd->rcd_mdh);
1114
1115                 lnet_net_lock(LNET_LOCK_EX);
1116         }
1117
1118         list_splice_init(&head, &the_lnet.ln_rcd_zombie);
1119
1120         /* release all zombie RCDs */
1121         while (!list_empty(&the_lnet.ln_rcd_zombie)) {
1122                 list_for_each_entry_safe(rcd, tmp, &the_lnet.ln_rcd_zombie,
1123                                              rcd_list) {
1124                         if (LNetHandleIsInvalid(rcd->rcd_mdh))
1125                                 list_move(&rcd->rcd_list, &head);
1126                 }
1127
1128                 wait_unlink = wait_unlink &&
1129                               !list_empty(&the_lnet.ln_rcd_zombie);
1130
1131                 lnet_net_unlock(LNET_LOCK_EX);
1132
1133                 while (!list_empty(&head)) {
1134                         rcd = list_entry(head.next,
1135                                              lnet_rc_data_t, rcd_list);
1136                         list_del_init(&rcd->rcd_list);
1137                         lnet_destroy_rc_data(rcd);
1138                 }
1139
1140                 if (!wait_unlink)
1141                         return;
1142
1143                 i++;
1144                 CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
1145                        "Waiting for rc buffers to unlink\n");
1146                 set_current_state(TASK_UNINTERRUPTIBLE);
1147                 schedule_timeout(cfs_time_seconds(1) / 4);
1148
1149                 lnet_net_lock(LNET_LOCK_EX);
1150         }
1151
1152         lnet_net_unlock(LNET_LOCK_EX);
1153 }
1154
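/*
 * Main loop of the router checker thread: once a second ping every known
 * router, update the status of local NIs when routing is enabled, and prune
 * retired router-checker data.  On shutdown, drain all RCDs and signal
 * ln_rc_signal.
 */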
1155 static int
1156 lnet_router_checker(void *arg)
1157 {
1158         lnet_peer_t *rtr;
1159         struct list_head *entry;
1160
1161         cfs_block_allsigs();
1162
1163         LASSERT(the_lnet.ln_rc_state == LNET_RC_STATE_RUNNING);
1164
1165         while (the_lnet.ln_rc_state == LNET_RC_STATE_RUNNING) {
1166                 __u64 version;
1167                 int cpt;
1168                 int cpt2;
1169
1170                 cpt = lnet_net_lock_current();
1171 rescan:
1172                 version = the_lnet.ln_routers_version;
1173
1174                 list_for_each(entry, &the_lnet.ln_routers) {
1175                         rtr = list_entry(entry, lnet_peer_t, lp_rtr_list);
1176
1177                         cpt2 = lnet_cpt_of_nid_locked(rtr->lp_nid);
1178                         if (cpt != cpt2) {
1179                                 lnet_net_unlock(cpt);
1180                                 cpt = cpt2;
1181                                 lnet_net_lock(cpt);
1182                                 /* the routers list has changed */
1183                                 if (version != the_lnet.ln_routers_version)
1184                                         goto rescan;
1185                         }
1186
1187                         lnet_ping_router_locked(rtr);
1188
1189                         /* NB dropped lock */
1190                         if (version != the_lnet.ln_routers_version) {
1191                                 /* the routers list has changed */
1192                                 goto rescan;
1193                         }
1194                 }
1195
1196                 if (the_lnet.ln_routing)
1197                         lnet_update_ni_status_locked();
1198
1199                 lnet_net_unlock(cpt);
1200
1201                 lnet_prune_rc_data(0); /* don't wait for UNLINK */
1202
1203                 /* Calling schedule_timeout() here always adds 1 to the load
1204                  * average because the kernel counts # active tasks as
1205                  * nr_running + nr_uninterruptible. */
1206                 set_current_state(TASK_INTERRUPTIBLE);
1207                 schedule_timeout(cfs_time_seconds(1));
1208         }
1209
1210         LASSERT(the_lnet.ln_rc_state == LNET_RC_STATE_STOPPING);
1211
1212         lnet_prune_rc_data(1); /* wait for UNLINK */
1213
1214         the_lnet.ln_rc_state = LNET_RC_STATE_SHUTDOWN;
1215         up(&the_lnet.ln_rc_signal);
1216         /* The unlink event callback will signal final completion */
1217         return 0;
1218 }
1219
1220 static void
1221 lnet_destroy_rtrbuf(lnet_rtrbuf_t *rb, int npages)
1222 {
1223         int sz = offsetof(lnet_rtrbuf_t, rb_kiov[npages]);
1224
1225         while (--npages >= 0)
1226                 __free_page(rb->rb_kiov[npages].kiov_page);
1227
1228         LIBCFS_FREE(rb, sz);
1229 }
1230
1231 static lnet_rtrbuf_t *
1232 lnet_new_rtrbuf(lnet_rtrbufpool_t *rbp, int cpt)
1233 {
1234         int npages = rbp->rbp_npages;
1235         int sz = offsetof(lnet_rtrbuf_t, rb_kiov[npages]);
1236         struct page *page;
1237         lnet_rtrbuf_t *rb;
1238         int i;
1239
1240         LIBCFS_CPT_ALLOC(rb, lnet_cpt_table(), cpt, sz);
1241         if (rb == NULL)
1242                 return NULL;
1243
1244         rb->rb_pool = rbp;
1245
1246         for (i = 0; i < npages; i++) {
1247                 page = alloc_pages_node(
1248                                 cfs_cpt_spread_node(lnet_cpt_table(), cpt),
1249                                 __GFP_ZERO | GFP_IOFS, 0);
1250                 if (page == NULL) {
1251                         while (--i >= 0)
1252                                 __free_page(rb->rb_kiov[i].kiov_page);
1253
1254                         LIBCFS_FREE(rb, sz);
1255                         return NULL;
1256                 }
1257
1258                 rb->rb_kiov[i].kiov_len = PAGE_CACHE_SIZE;
1259                 rb->rb_kiov[i].kiov_offset = 0;
1260                 rb->rb_kiov[i].kiov_page = page;
1261         }
1262
1263         return rb;
1264 }
1265
1266 static void
1267 lnet_rtrpool_free_bufs(lnet_rtrbufpool_t *rbp)
1268 {
1269         int npages = rbp->rbp_npages;
1270         int nbuffers = 0;
1271         lnet_rtrbuf_t *rb;
1272
1273         if (rbp->rbp_nbuffers == 0) /* not initialized or already freed */
1274                 return;
1275
1276         LASSERT(list_empty(&rbp->rbp_msgs));
1277         LASSERT(rbp->rbp_credits == rbp->rbp_nbuffers);
1278
1279         while (!list_empty(&rbp->rbp_bufs)) {
1280                 LASSERT(rbp->rbp_credits > 0);
1281
1282                 rb = list_entry(rbp->rbp_bufs.next,
1283                                     lnet_rtrbuf_t, rb_list);
1284                 list_del(&rb->rb_list);
1285                 lnet_destroy_rtrbuf(rb, npages);
1286                 nbuffers++;
1287         }
1288
1289         LASSERT(rbp->rbp_nbuffers == nbuffers);
1290         LASSERT(rbp->rbp_credits == nbuffers);
1291
1292         rbp->rbp_nbuffers = rbp->rbp_credits = 0;
1293 }
1294
1295 static int
1296 lnet_rtrpool_alloc_bufs(lnet_rtrbufpool_t *rbp, int nbufs, int cpt)
1297 {
1298         lnet_rtrbuf_t *rb;
1299         int i;
1300
1301         if (rbp->rbp_nbuffers != 0) {
1302                 LASSERT(rbp->rbp_nbuffers == nbufs);
1303                 return 0;
1304         }
1305
1306         for (i = 0; i < nbufs; i++) {
1307                 rb = lnet_new_rtrbuf(rbp, cpt);
1308
1309                 if (rb == NULL) {
1310                         CERROR("Failed to allocate %d router bufs of %d pages\n",
1311                                nbufs, rbp->rbp_npages);
1312                         return -ENOMEM;
1313                 }
1314
1315                 rbp->rbp_nbuffers++;
1316                 rbp->rbp_credits++;
1317                 rbp->rbp_mincredits++;
1318                 list_add(&rb->rb_list, &rbp->rbp_bufs);
1319
1320                 /* No allocation "under fire" */
1321                 /* Otherwise we'd need code to schedule blocked msgs etc */
1322                 LASSERT(!the_lnet.ln_routing);
1323         }
1324
1325         LASSERT(rbp->rbp_credits == nbufs);
1326         return 0;
1327 }
1328
1329 static void
1330 lnet_rtrpool_init(lnet_rtrbufpool_t *rbp, int npages)
1331 {
1332         INIT_LIST_HEAD(&rbp->rbp_msgs);
1333         INIT_LIST_HEAD(&rbp->rbp_bufs);
1334
1335         rbp->rbp_npages = npages;
1336         rbp->rbp_credits = 0;
1337         rbp->rbp_mincredits = 0;
1338 }
1339
1340 void
1341 lnet_rtrpools_free(void)
1342 {
1343         lnet_rtrbufpool_t *rtrp;
1344         int i;
1345
1346         if (the_lnet.ln_rtrpools == NULL) /* uninitialized or freed */
1347                 return;
1348
1349         cfs_percpt_for_each(rtrp, i, the_lnet.ln_rtrpools) {
1350                 lnet_rtrpool_free_bufs(&rtrp[0]);
1351                 lnet_rtrpool_free_bufs(&rtrp[1]);
1352                 lnet_rtrpool_free_bufs(&rtrp[2]);
1353         }
1354
1355         cfs_percpt_free(the_lnet.ln_rtrpools);
1356         the_lnet.ln_rtrpools = NULL;
1357 }
1358
1359 static int
1360 lnet_nrb_tiny_calculate(int npages)
1361 {
1362         int nrbs = LNET_NRB_TINY;
1363
1364         if (tiny_router_buffers < 0) {
1365                 LCONSOLE_ERROR_MSG(0x10c,
1366                                    "tiny_router_buffers=%d invalid when routing enabled\n",
1367                                    tiny_router_buffers);
1368                 return -1;
1369         }
1370
1371         if (tiny_router_buffers > 0)
1372                 nrbs = tiny_router_buffers;
1373
1374         nrbs /= LNET_CPT_NUMBER;
1375         return max(nrbs, LNET_NRB_TINY_MIN);
1376 }
1377
1378 static int
1379 lnet_nrb_small_calculate(int npages)
1380 {
1381         int nrbs = LNET_NRB_SMALL;
1382
1383         if (small_router_buffers < 0) {
1384                 LCONSOLE_ERROR_MSG(0x10c,
1385                                    "small_router_buffers=%d invalid when routing enabled\n",
1386                                    small_router_buffers);
1387                 return -1;
1388         }
1389
1390         if (small_router_buffers > 0)
1391                 nrbs = small_router_buffers;
1392
1393         nrbs /= LNET_CPT_NUMBER;
1394         return max(nrbs, LNET_NRB_SMALL_MIN);
1395 }
1396
1397 static int
1398 lnet_nrb_large_calculate(int npages)
1399 {
1400         int nrbs = LNET_NRB_LARGE;
1401
1402         if (large_router_buffers < 0) {
1403                 LCONSOLE_ERROR_MSG(0x10c,
1404                                    "large_router_buffers=%d invalid when routing enabled\n",
1405                                    large_router_buffers);
1406                 return -1;
1407         }
1408
1409         if (large_router_buffers > 0)
1410                 nrbs = large_router_buffers;
1411
1412         nrbs /= LNET_CPT_NUMBER;
1413         return max(nrbs, LNET_NRB_LARGE_MIN);
1414 }
1415
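/*
 * Create the per-CPT router buffer pools (tiny, small, large) and enable
 * routing.  The "forwarding" module parameter overrides @im_a_router; pool
 * sizes come from the *_router_buffers parameters or the built-in defaults.
 */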
1416 int
1417 lnet_rtrpools_alloc(int im_a_router)
1418 {
1419         lnet_rtrbufpool_t *rtrp;
1420         int large_pages;
1421         int small_pages = 1;
1422         int nrb_tiny;
1423         int nrb_small;
1424         int nrb_large;
1425         int rc;
1426         int i;
1427
1428         large_pages = (LNET_MTU + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
1429
1430         if (!strcmp(forwarding, "")) {
1431                 /* not set either way */
1432                 if (!im_a_router)
1433                         return 0;
1434         } else if (!strcmp(forwarding, "disabled")) {
1435                 /* explicitly disabled */
1436                 return 0;
1437         } else if (!strcmp(forwarding, "enabled")) {
1438                 /* explicitly enabled */
1439         } else {
1440                 LCONSOLE_ERROR_MSG(0x10b, "'forwarding' not set to either 'enabled' or 'disabled'\n");
1441                 return -EINVAL;
1442         }
1443
1444         nrb_tiny = lnet_nrb_tiny_calculate(0);
1445         if (nrb_tiny < 0)
1446                 return -EINVAL;
1447
1448         nrb_small = lnet_nrb_small_calculate(small_pages);
1449         if (nrb_small < 0)
1450                 return -EINVAL;
1451
1452         nrb_large = lnet_nrb_large_calculate(large_pages);
1453         if (nrb_large < 0)
1454                 return -EINVAL;
1455
1456         the_lnet.ln_rtrpools = cfs_percpt_alloc(lnet_cpt_table(),
1457                                                 LNET_NRBPOOLS *
1458                                                 sizeof(lnet_rtrbufpool_t));
1459         if (the_lnet.ln_rtrpools == NULL) {
1460                 LCONSOLE_ERROR_MSG(0x10c,
1461                                    "Failed to initialize router buffer pool\n");
1462                 return -ENOMEM;
1463         }
1464
1465         cfs_percpt_for_each(rtrp, i, the_lnet.ln_rtrpools) {
1466                 lnet_rtrpool_init(&rtrp[0], 0);
1467                 rc = lnet_rtrpool_alloc_bufs(&rtrp[0], nrb_tiny, i);
1468                 if (rc != 0)
1469                         goto failed;
1470
1471                 lnet_rtrpool_init(&rtrp[1], small_pages);
1472                 rc = lnet_rtrpool_alloc_bufs(&rtrp[1], nrb_small, i);
1473                 if (rc != 0)
1474                         goto failed;
1475
1476                 lnet_rtrpool_init(&rtrp[2], large_pages);
1477                 rc = lnet_rtrpool_alloc_bufs(&rtrp[2], nrb_large, i);
1478                 if (rc != 0)
1479                         goto failed;
1480         }
1481
1482         lnet_net_lock(LNET_LOCK_EX);
1483         the_lnet.ln_routing = 1;
1484         lnet_net_unlock(LNET_LOCK_EX);
1485
1486         return 0;
1487
1488  failed:
1489         lnet_rtrpools_free();
1490         return rc;
1491 }
1492
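/*
 * Entry point for LNDs and userspace to report that peer @nid was seen
 * alive or dead at time @when.  Reports from the wrong net, predictions of
 * the future, and LND death reports with auto_down disabled are ignored;
 * otherwise the peer state is updated and any LND notification delivered.
 */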
1493 int
1494 lnet_notify(lnet_ni_t *ni, lnet_nid_t nid, int alive, unsigned long when)
1495 {
1496         struct lnet_peer *lp = NULL;
1497         unsigned long now = cfs_time_current();
1498         int cpt = lnet_cpt_of_nid(nid);
1499
1500         LASSERT(!in_interrupt());
1501
1502         CDEBUG(D_NET, "%s notifying %s: %s\n",
1503                 (ni == NULL) ? "userspace" : libcfs_nid2str(ni->ni_nid),
1504                 libcfs_nid2str(nid),
1505                 alive ? "up" : "down");
1506
1507         if (ni != NULL &&
1508             LNET_NIDNET(ni->ni_nid) != LNET_NIDNET(nid)) {
1509                 CWARN("Ignoring notification of %s %s by %s (different net)\n",
1510                         libcfs_nid2str(nid), alive ? "birth" : "death",
1511                         libcfs_nid2str(ni->ni_nid));
1512                 return -EINVAL;
1513         }
1514
1515         /* can't do predictions... */
1516         if (cfs_time_after(when, now)) {
1517                 CWARN("Ignoring prediction from %s of %s %s %ld seconds in the future\n",
1518                       (ni == NULL) ? "userspace" : libcfs_nid2str(ni->ni_nid),
1519                       libcfs_nid2str(nid), alive ? "up" : "down",
1520                       cfs_duration_sec(cfs_time_sub(when, now)));
1521                 return -EINVAL;
1522         }
1523
1524         if (ni != NULL && !alive &&          /* LND telling me she's down */
1525             !auto_down) {                      /* auto-down disabled */
1526                 CDEBUG(D_NET, "Auto-down disabled\n");
1527                 return 0;
1528         }
1529
1530         lnet_net_lock(cpt);
1531
1532         if (the_lnet.ln_shutdown) {
1533                 lnet_net_unlock(cpt);
1534                 return -ESHUTDOWN;
1535         }
1536
1537         lp = lnet_find_peer_locked(the_lnet.ln_peer_tables[cpt], nid);
1538         if (lp == NULL) {
1539                 /* nid not found */
1540                 lnet_net_unlock(cpt);
1541                 CDEBUG(D_NET, "%s not found\n", libcfs_nid2str(nid));
1542                 return 0;
1543         }
1544
1545         /* We can't fully trust the LND to report an exact peer last_alive
1546          * when it notifies us about a dead peer. For example, ksocklnd can
1547          * call us with when == _time_when_the_node_was_booted_ if
1548          * no connections were successfully established */
1549         if (ni != NULL && !alive && when < lp->lp_last_alive)
1550                 when = lp->lp_last_alive;
1551
1552         lnet_notify_locked(lp, ni == NULL, alive, when);
1553
1554         lnet_ni_notify_locked(ni, lp);
1555
1556         lnet_peer_decref_locked(lp);
1557
1558         lnet_net_unlock(cpt);
1559         return 0;
1560 }
1561 EXPORT_SYMBOL(lnet_notify);