/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <net/inet_hashtables.h>

#include "rds.h"
#include "loop.h"

#define RDS_CONNECTION_HASH_BITS 12
#define RDS_CONNECTION_HASH_ENTRIES (1 << RDS_CONNECTION_HASH_BITS)
#define RDS_CONNECTION_HASH_MASK (RDS_CONNECTION_HASH_ENTRIES - 1)

/* converting this to RCU is a chore for another day.. */
static DEFINE_SPINLOCK(rds_conn_lock);
static unsigned long rds_conn_count;
static struct hlist_head rds_conn_hash[RDS_CONNECTION_HASH_ENTRIES];
static struct kmem_cache *rds_conn_slab;

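/* Hash the local/foreign address pair into one of the
 * RDS_CONNECTION_HASH_ENTRIES buckets.  The port arguments to
 * __inet_ehashfn() are zero since RDS conns are keyed on addresses
 * alone; rds_hash_secret seeds the hash, initialized once on first
 * use via net_get_random_once().
 */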
static struct hlist_head *rds_conn_bucket(__be32 laddr, __be32 faddr)
{
        static u32 rds_hash_secret __read_mostly;

        unsigned long hash;

        net_get_random_once(&rds_hash_secret, sizeof(rds_hash_secret));

        /* Pass NULL, don't need struct net for hash */
        hash = __inet_ehashfn(be32_to_cpu(laddr), 0,
                              be32_to_cpu(faddr), 0,
                              rds_hash_secret);
        return &rds_conn_hash[hash & RDS_CONNECTION_HASH_MASK];
}

#define rds_conn_info_set(var, test, suffix) do {               \
        if (test)                                               \
                var |= RDS_INFO_CONNECTION_FLAG_##suffix;       \
} while (0)
/* rcu read lock must be held or the connection spinlock */
static struct rds_connection *rds_conn_lookup(struct net *net,
                                              struct hlist_head *head,
                                              __be32 laddr, __be32 faddr,
                                              struct rds_transport *trans)
{
        struct rds_connection *conn, *ret = NULL;

        hlist_for_each_entry_rcu(conn, head, c_hash_node) {
                if (conn->c_faddr == faddr && conn->c_laddr == laddr &&
                    conn->c_trans == trans && net == rds_conn_net(conn)) {
                        ret = conn;
                        break;
                }
        }
        rdsdebug("returning conn %p for %pI4 -> %pI4\n", ret,
                 &laddr, &faddr);
        return ret;
}

/*
 * This is called by transports as they're bringing down a connection.
 * It clears partial message state so that the transport can start sending
 * and receiving over this connection again in the future.  It is up to
 * the transport to have serialized this call with its send and recv.
 */
static void rds_conn_path_reset(struct rds_conn_path *cp)
{
        struct rds_connection *conn = cp->cp_conn;

        rdsdebug("connection %pI4 to %pI4 reset\n",
                 &conn->c_laddr, &conn->c_faddr);

        rds_stats_inc(s_conn_reset);
        rds_send_path_reset(cp);
        cp->cp_flags = 0;

        /* Do not clear next_rx_seq here, else we cannot distinguish
         * retransmitted packets from new packets, and will hand all
         * of them to the application. That is not consistent with the
         * reliability guarantees of RDS. */
}

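/* Initialize one rds_conn_path: its lock, queues, work structs and
 * the initial RDS_CONN_DOWN state.  Called for every path when the
 * parent rds_connection is created.
 */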
static void __rds_conn_path_init(struct rds_connection *conn,
                                 struct rds_conn_path *cp, bool is_outgoing)
{
        spin_lock_init(&cp->cp_lock);
        cp->cp_next_tx_seq = 1;
        init_waitqueue_head(&cp->cp_waitq);
        INIT_LIST_HEAD(&cp->cp_send_queue);
        INIT_LIST_HEAD(&cp->cp_retrans);

        cp->cp_conn = conn;
        atomic_set(&cp->cp_state, RDS_CONN_DOWN);
        cp->cp_send_gen = 0;
        cp->cp_reconnect_jiffies = 0;
        INIT_DELAYED_WORK(&cp->cp_send_w, rds_send_worker);
        INIT_DELAYED_WORK(&cp->cp_recv_w, rds_recv_worker);
        INIT_DELAYED_WORK(&cp->cp_conn_w, rds_connect_worker);
        INIT_WORK(&cp->cp_down_w, rds_shutdown_worker);
        mutex_init(&cp->cp_cm_lock);
        cp->cp_flags = 0;
}

/*
 * There is only ever one 'conn' for a given pair of addresses in the
 * system at a time.  They contain messages to be retransmitted and so
 * span the lifetime of the actual underlying transport connections.
 *
 * For now they are not garbage collected once they're created.  They
 * are torn down as the module is removed, if ever.
 */
static struct rds_connection *__rds_conn_create(struct net *net,
                                                __be32 laddr, __be32 faddr,
                                                struct rds_transport *trans,
                                                gfp_t gfp, int is_outgoing)
{
        struct rds_connection *conn, *parent = NULL;
        struct hlist_head *head = rds_conn_bucket(laddr, faddr);
        struct rds_transport *loop_trans;
        unsigned long flags;
        int ret, i;

        rcu_read_lock();
        conn = rds_conn_lookup(net, head, laddr, faddr, trans);
        if (conn && conn->c_loopback && conn->c_trans != &rds_loop_transport &&
            laddr == faddr && !is_outgoing) {
                /* This is a looped back IB connection, and we're
                 * called by the code handling the incoming connect.
                 * We need a second connection object into which we
                 * can stick the other QP. */
                parent = conn;
                conn = parent->c_passive;
        }
        rcu_read_unlock();
        if (conn)
                goto out;

        conn = kmem_cache_zalloc(rds_conn_slab, gfp);
        if (!conn) {
                conn = ERR_PTR(-ENOMEM);
                goto out;
        }

        INIT_HLIST_NODE(&conn->c_hash_node);
        conn->c_laddr = laddr;
        conn->c_faddr = faddr;

        rds_conn_net_set(conn, net);

        ret = rds_cong_get_maps(conn);
        if (ret) {
                kmem_cache_free(rds_conn_slab, conn);
                conn = ERR_PTR(ret);
                goto out;
        }

        /*
         * This is where a connection becomes loopback.  If *any* RDS sockets
         * can bind to the destination address then we'd rather have the
         * messages flow through loopback than through either transport.
         */
        loop_trans = rds_trans_get_preferred(net, faddr);
        if (loop_trans) {
                rds_trans_put(loop_trans);
                conn->c_loopback = 1;
                if (is_outgoing && trans->t_prefer_loopback) {
                        /* "outgoing" connection - and the transport
                         * says it wants the connection handled by the
                         * loopback transport. This is what TCP does.
                         */
                        trans = &rds_loop_transport;
                }
        }

        conn->c_trans = trans;

        init_waitqueue_head(&conn->c_hs_waitq);
        for (i = 0; i < RDS_MPATH_WORKERS; i++) {
                __rds_conn_path_init(conn, &conn->c_path[i],
                                     is_outgoing);
                conn->c_path[i].cp_index = i;
        }
        ret = trans->conn_alloc(conn, gfp);
        if (ret) {
                kmem_cache_free(rds_conn_slab, conn);
                conn = ERR_PTR(ret);
                goto out;
        }

        rdsdebug("allocated conn %p for %pI4 -> %pI4 over %s %s\n",
                 conn, &laddr, &faddr,
                 trans->t_name ? trans->t_name : "[unknown]",
                 is_outgoing ? "(outgoing)" : "");

        /*
         * Since we ran without holding the conn lock, someone could
         * have created the same conn (either normal or passive) in the
         * interim. We check while holding the lock. If we won, we complete
         * init and return our conn. If we lost, we roll back and return the
         * other one.
         */
        spin_lock_irqsave(&rds_conn_lock, flags);
        if (parent) {
                /* Creating passive conn */
                if (parent->c_passive) {
                        trans->conn_free(conn->c_path[0].cp_transport_data);
                        kmem_cache_free(rds_conn_slab, conn);
                        conn = parent->c_passive;
                } else {
                        parent->c_passive = conn;
                        rds_cong_add_conn(conn);
                        rds_conn_count++;
                }
        } else {
                /* Creating normal conn */
                struct rds_connection *found;

                found = rds_conn_lookup(net, head, laddr, faddr, trans);
                if (found) {
                        struct rds_conn_path *cp;
                        int i;

                        for (i = 0; i < RDS_MPATH_WORKERS; i++) {
                                cp = &conn->c_path[i];
                                /* The ->conn_alloc invocation may have
                                 * allocated resource for all paths, so all
                                 * of them may have to be freed here.
                                 */
                                if (cp->cp_transport_data)
                                        trans->conn_free(cp->cp_transport_data);
                        }
                        kmem_cache_free(rds_conn_slab, conn);
                        conn = found;
                } else {
                        conn->c_my_gen_num = rds_gen_num;
                        conn->c_peer_gen_num = 0;
                        hlist_add_head_rcu(&conn->c_hash_node, head);
                        rds_cong_add_conn(conn);
                        rds_conn_count++;
                }
        }
        spin_unlock_irqrestore(&rds_conn_lock, flags);

out:
        return conn;
}

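/* Find, or create if needed, the single conn for this address pair.
 * The _outgoing variant marks us as the active (connecting) side,
 * which decides whether a loopback-preferring transport is swapped
 * for rds_loop_transport and which end of an IB loopback pair we get.
 */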
struct rds_connection *rds_conn_create(struct net *net,
                                       __be32 laddr, __be32 faddr,
                                       struct rds_transport *trans, gfp_t gfp)
{
        return __rds_conn_create(net, laddr, faddr, trans, gfp, 0);
}
EXPORT_SYMBOL_GPL(rds_conn_create);

struct rds_connection *rds_conn_create_outgoing(struct net *net,
                                                __be32 laddr, __be32 faddr,
                                                struct rds_transport *trans,
                                                gfp_t gfp)
{
        return __rds_conn_create(net, laddr, faddr, trans, gfp, 1);
}
EXPORT_SYMBOL_GPL(rds_conn_create_outgoing);

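/* Take a path from UP (or ERROR) through DISCONNECTING back to DOWN,
 * letting the transport tear down its state, then queue a reconnect
 * if the conn is still in the hash.
 */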
void rds_conn_shutdown(struct rds_conn_path *cp)
{
        struct rds_connection *conn = cp->cp_conn;

        /* shut it down unless it's down already */
        if (!rds_conn_path_transition(cp, RDS_CONN_DOWN, RDS_CONN_DOWN)) {
                /*
                 * Quiesce the connection mgmt handlers before we start tearing
                 * things down. We don't hold the mutex for the entire
                 * duration of the shutdown operation, else we may deadlock
                 * with the CM handler. Instead, the CM event handler is
                 * supposed to check for state DISCONNECTING.
                 */
                mutex_lock(&cp->cp_cm_lock);
                if (!rds_conn_path_transition(cp, RDS_CONN_UP,
                                              RDS_CONN_DISCONNECTING) &&
                    !rds_conn_path_transition(cp, RDS_CONN_ERROR,
                                              RDS_CONN_DISCONNECTING)) {
                        rds_conn_path_error(cp,
                                            "shutdown called in state %d\n",
                                            atomic_read(&cp->cp_state));
                        mutex_unlock(&cp->cp_cm_lock);
                        return;
                }
                mutex_unlock(&cp->cp_cm_lock);

                wait_event(cp->cp_waitq,
                           !test_bit(RDS_IN_XMIT, &cp->cp_flags));
                wait_event(cp->cp_waitq,
                           !test_bit(RDS_RECV_REFILL, &cp->cp_flags));

                conn->c_trans->conn_path_shutdown(cp);
                rds_conn_path_reset(cp);

                if (!rds_conn_path_transition(cp, RDS_CONN_DISCONNECTING,
                                              RDS_CONN_DOWN) &&
                    !rds_conn_path_transition(cp, RDS_CONN_ERROR,
                                              RDS_CONN_DOWN)) {
                        /* This can happen - e.g. when we're in the middle of
                         * tearing down the connection, and someone unloads
                         * the rds module. Quite reproducible with loopback
                         * connections. Mostly harmless.
                         *
                         * Note that this also happens with rds-tcp because
                         * we could have triggered rds_conn_path_drop in irq
                         * mode from rds_tcp_state change on the receipt of
                         * a FIN, thus we need to recheck for RDS_CONN_ERROR
                         * here.
                         */
                        rds_conn_path_error(cp,
                                            "%s: failed to transition to state DOWN, current state is %d\n",
                                            __func__,
                                            atomic_read(&cp->cp_state));
                        return;
                }
        }

        /* Then reconnect if it's still live.
         * The passive side of an IB loopback connection is never added
         * to the conn hash, so we never trigger a reconnect on this
         * conn - the reconnect is always triggered by the active peer. */
        cancel_delayed_work_sync(&cp->cp_conn_w);
        rcu_read_lock();
        if (!hlist_unhashed(&conn->c_hash_node)) {
                rcu_read_unlock();
                rds_queue_reconnect(cp);
        } else {
                rcu_read_unlock();
        }
}

/* destroy a single rds_conn_path. rds_conn_destroy() iterates over
 * all paths using rds_conn_path_destroy()
 */
static void rds_conn_path_destroy(struct rds_conn_path *cp)
{
        struct rds_message *rm, *rtmp;

        if (!cp->cp_transport_data)
                return;

        rds_conn_path_drop(cp);
        flush_work(&cp->cp_down_w);

        /* make sure lingering queued work won't try to ref the conn */
        cancel_delayed_work_sync(&cp->cp_send_w);
        cancel_delayed_work_sync(&cp->cp_recv_w);

        /* tear down queued messages */
        list_for_each_entry_safe(rm, rtmp,
                                 &cp->cp_send_queue,
                                 m_conn_item) {
                list_del_init(&rm->m_conn_item);
                BUG_ON(!list_empty(&rm->m_sock_item));
                rds_message_put(rm);
        }
        if (cp->cp_xmit_rm)
                rds_message_put(cp->cp_xmit_rm);

        cp->cp_conn->c_trans->conn_free(cp->cp_transport_data);
}

/*
 * Stop and free a connection.
 *
 * This can only be used in very limited circumstances.  It assumes that once
 * the conn has been shutdown that no one else is referencing the connection.
 * We can only ensure this in the rmmod path in the current code.
 */
void rds_conn_destroy(struct rds_connection *conn)
{
        unsigned long flags;
        int i;
        struct rds_conn_path *cp;

        rdsdebug("freeing conn %p for %pI4 -> %pI4\n",
                 conn, &conn->c_laddr, &conn->c_faddr);

        conn->c_destroy_in_prog = 1;
        /* Ensure conn will not be scheduled for reconnect */
        spin_lock_irq(&rds_conn_lock);
        hlist_del_init_rcu(&conn->c_hash_node);
        spin_unlock_irq(&rds_conn_lock);
        synchronize_rcu();

        /* shut the connection down */
        for (i = 0; i < RDS_MPATH_WORKERS; i++) {
                cp = &conn->c_path[i];
                rds_conn_path_destroy(cp);
                BUG_ON(!list_empty(&cp->cp_retrans));
        }

        /*
         * The congestion maps aren't freed up here.  They're
         * freed by rds_cong_exit() after all the connections
         * have been freed.
         */
        rds_cong_remove_conn(conn);

        put_net(conn->c_net);
        kmem_cache_free(rds_conn_slab, conn);

        spin_lock_irqsave(&rds_conn_lock, flags);
        rds_conn_count--;
        spin_unlock_irqrestore(&rds_conn_lock, flags);
}
EXPORT_SYMBOL_GPL(rds_conn_destroy);

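/* Copy out the messages sitting on either the send queues (want_send)
 * or the retransmit queues of every conn path.  All messages are
 * counted in lens->nr even when only some fit in the caller's buffer.
 */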
static void rds_conn_message_info(struct socket *sock, unsigned int len,
                                  struct rds_info_iterator *iter,
                                  struct rds_info_lengths *lens,
                                  int want_send)
{
        struct hlist_head *head;
        struct list_head *list;
        struct rds_connection *conn;
        struct rds_message *rm;
        unsigned int total = 0;
        unsigned long flags;
        size_t i;
        int j;

        len /= sizeof(struct rds_info_message);

        rcu_read_lock();

        for (i = 0, head = rds_conn_hash; i < ARRAY_SIZE(rds_conn_hash);
             i++, head++) {
                hlist_for_each_entry_rcu(conn, head, c_hash_node) {
                        struct rds_conn_path *cp;

                        for (j = 0; j < RDS_MPATH_WORKERS; j++) {
                                cp = &conn->c_path[j];
                                if (want_send)
                                        list = &cp->cp_send_queue;
                                else
                                        list = &cp->cp_retrans;

                                spin_lock_irqsave(&cp->cp_lock, flags);

                                /* XXX too lazy to maintain counts.. */
                                list_for_each_entry(rm, list, m_conn_item) {
                                        total++;
                                        if (total <= len)
                                                rds_inc_info_copy(&rm->m_inc,
                                                                  iter,
                                                                  conn->c_laddr,
                                                                  conn->c_faddr,
                                                                  0);
                                }

                                spin_unlock_irqrestore(&cp->cp_lock, flags);
                                if (!conn->c_trans->t_mp_capable)
                                        break;
                        }
                }
        }
        rcu_read_unlock();

        lens->nr = total;
        lens->each = sizeof(struct rds_info_message);
}

static void rds_conn_message_info_send(struct socket *sock, unsigned int len,
                                       struct rds_info_iterator *iter,
                                       struct rds_info_lengths *lens)
{
        rds_conn_message_info(sock, len, iter, lens, 1);
}

static void rds_conn_message_info_retrans(struct socket *sock,
                                          unsigned int len,
                                          struct rds_info_iterator *iter,
                                          struct rds_info_lengths *lens)
{
        rds_conn_message_info(sock, len, iter, lens, 0);
}

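/* Walk every hashed connection and let @visitor fill one item per
 * conn; items are copied out while they fit, but lens->nr counts them
 * all so the caller can come back with a bigger buffer.
 */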
void rds_for_each_conn_info(struct socket *sock, unsigned int len,
                            struct rds_info_iterator *iter,
                            struct rds_info_lengths *lens,
                            int (*visitor)(struct rds_connection *, void *),
                            size_t item_len)
{
        uint64_t buffer[(item_len + 7) / 8];
        struct hlist_head *head;
        struct rds_connection *conn;
        size_t i;

        rcu_read_lock();

        lens->nr = 0;
        lens->each = item_len;

        for (i = 0, head = rds_conn_hash; i < ARRAY_SIZE(rds_conn_hash);
             i++, head++) {
                hlist_for_each_entry_rcu(conn, head, c_hash_node) {

                        /* XXX no c_lock usage.. */
                        if (!visitor(conn, buffer))
                                continue;

                        /* We copy as much as we can fit in the buffer,
                         * but we count all items so that the caller
                         * can resize the buffer. */
                        if (len >= item_len) {
                                rds_info_copy(iter, buffer, item_len);
                                len -= item_len;
                        }
                        lens->nr++;
                }
        }
        rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rds_for_each_conn_info);

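/* Like rds_for_each_conn_info(), but the visitor is called for each
 * rds_conn_path of a conn (only path 0 when the transport is not
 * mp_capable); one item is still copied out per connection.
 */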
static void rds_walk_conn_path_info(struct socket *sock, unsigned int len,
                                    struct rds_info_iterator *iter,
                                    struct rds_info_lengths *lens,
                                    int (*visitor)(struct rds_conn_path *, void *),
                                    size_t item_len)
{
        u64  buffer[(item_len + 7) / 8];
        struct hlist_head *head;
        struct rds_connection *conn;
        size_t i;
        int j;

        rcu_read_lock();

        lens->nr = 0;
        lens->each = item_len;

        for (i = 0, head = rds_conn_hash; i < ARRAY_SIZE(rds_conn_hash);
             i++, head++) {
                hlist_for_each_entry_rcu(conn, head, c_hash_node) {
                        struct rds_conn_path *cp;

                        for (j = 0; j < RDS_MPATH_WORKERS; j++) {
                                cp = &conn->c_path[j];

                                /* XXX no cp_lock usage.. */
                                if (!visitor(cp, buffer))
                                        continue;
                                if (!conn->c_trans->t_mp_capable)
                                        break;
                        }

                        /* We copy as much as we can fit in the buffer,
                         * but we count all items so that the caller
                         * can resize the buffer.
                         */
                        if (len >= item_len) {
                                rds_info_copy(iter, buffer, item_len);
                                len -= item_len;
                        }
                        lens->nr++;
                }
        }
        rcu_read_unlock();
}

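/* Fill one struct rds_info_connection from a path's current state. */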
static int rds_conn_info_visitor(struct rds_conn_path *cp, void *buffer)
{
        struct rds_info_connection *cinfo = buffer;

        cinfo->next_tx_seq = cp->cp_next_tx_seq;
        cinfo->next_rx_seq = cp->cp_next_rx_seq;
        cinfo->laddr = cp->cp_conn->c_laddr;
        cinfo->faddr = cp->cp_conn->c_faddr;
        strncpy(cinfo->transport, cp->cp_conn->c_trans->t_name,
                sizeof(cinfo->transport));
        cinfo->flags = 0;

        rds_conn_info_set(cinfo->flags, test_bit(RDS_IN_XMIT, &cp->cp_flags),
                          SENDING);
        /* XXX Future: return the state rather than these funky bits */
        rds_conn_info_set(cinfo->flags,
                          atomic_read(&cp->cp_state) == RDS_CONN_CONNECTING,
                          CONNECTING);
        rds_conn_info_set(cinfo->flags,
                          atomic_read(&cp->cp_state) == RDS_CONN_UP,
                          CONNECTED);
        return 1;
}

static void rds_conn_info(struct socket *sock, unsigned int len,
                          struct rds_info_iterator *iter,
                          struct rds_info_lengths *lens)
{
        rds_walk_conn_path_info(sock, len, iter, lens,
                                rds_conn_info_visitor,
                                sizeof(struct rds_info_connection));
}

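/* Create the connection slab and register the RDS_INFO_* handlers
 * defined above; rds_conn_exit() undoes both at module unload.
 */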
int rds_conn_init(void)
{
        rds_conn_slab = kmem_cache_create("rds_connection",
                                          sizeof(struct rds_connection),
                                          0, 0, NULL);
        if (!rds_conn_slab)
                return -ENOMEM;

        rds_info_register_func(RDS_INFO_CONNECTIONS, rds_conn_info);
        rds_info_register_func(RDS_INFO_SEND_MESSAGES,
                               rds_conn_message_info_send);
        rds_info_register_func(RDS_INFO_RETRANS_MESSAGES,
                               rds_conn_message_info_retrans);

        return 0;
}

void rds_conn_exit(void)
{
        rds_loop_exit();

        WARN_ON(!hlist_empty(rds_conn_hash));

        kmem_cache_destroy(rds_conn_slab);

        rds_info_deregister_func(RDS_INFO_CONNECTIONS, rds_conn_info);
        rds_info_deregister_func(RDS_INFO_SEND_MESSAGES,
                                 rds_conn_message_info_send);
        rds_info_deregister_func(RDS_INFO_RETRANS_MESSAGES,
                                 rds_conn_message_info_retrans);
}

/*
 * Force a disconnect
 */
void rds_conn_path_drop(struct rds_conn_path *cp)
{
        atomic_set(&cp->cp_state, RDS_CONN_ERROR);
        queue_work(rds_wq, &cp->cp_down_w);
}
EXPORT_SYMBOL_GPL(rds_conn_path_drop);

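/* Single-path wrapper: mp_capable transports must drop individual
 * paths with rds_conn_path_drop() instead.
 */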
void rds_conn_drop(struct rds_connection *conn)
{
        WARN_ON(conn->c_trans->t_mp_capable);
        rds_conn_path_drop(&conn->c_path[0]);
}
EXPORT_SYMBOL_GPL(rds_conn_drop);

/*
 * If the connection is down, trigger a connect. We may have scheduled a
 * delayed reconnect however - in this case we should not interfere.
 */
void rds_conn_path_connect_if_down(struct rds_conn_path *cp)
{
        if (rds_conn_path_state(cp) == RDS_CONN_DOWN &&
            !test_and_set_bit(RDS_RECONNECT_PENDING, &cp->cp_flags))
                queue_delayed_work(rds_wq, &cp->cp_conn_w, 0);
}
EXPORT_SYMBOL_GPL(rds_conn_path_connect_if_down);

void rds_conn_connect_if_down(struct rds_connection *conn)
{
        WARN_ON(conn->c_trans->t_mp_capable);
        rds_conn_path_connect_if_down(&conn->c_path[0]);
}
EXPORT_SYMBOL_GPL(rds_conn_connect_if_down);

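/* printk the caller's message, then force the path down.  Reached via
 * the rds_conn_path_error() calls above.
 */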
void
__rds_conn_path_error(struct rds_conn_path *cp, const char *fmt, ...)
{
        va_list ap;

        va_start(ap, fmt);
        vprintk(fmt, ap);
        va_end(ap);

        rds_conn_path_drop(cp);
}