/*
 * Copyright (c) 2004, 2005 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004, 2005 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: cm.c 4311 2005-12-05 18:42:01Z sean.hefty $
 */

#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

#include <rdma/ib_cache.h>
#include <rdma/ib_cm.h>
#include "cm_msgs.h"

MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("InfiniBand CM");
MODULE_LICENSE("Dual BSD/GPL");

static void cm_add_one(struct ib_device *device);
static void cm_remove_one(struct ib_device *device);

static struct ib_client cm_client = {
        .name   = "cm",
        .add    = cm_add_one,
        .remove = cm_remove_one
};

static struct ib_cm {
        spinlock_t lock;
        struct list_head device_list;
        rwlock_t device_lock;
        struct rb_root listen_service_table;
        u64 listen_service_id;
        /* struct rb_root peer_service_table; todo: fix peer to peer */
        struct rb_root remote_qp_table;
        struct rb_root remote_id_table;
        struct rb_root remote_sidr_table;
        struct idr local_id_table;
        struct workqueue_struct *wq;
} cm;

struct cm_port {
        struct cm_device *cm_dev;
        struct ib_mad_agent *mad_agent;
        u8 port_num;
};

struct cm_device {
        struct list_head list;
        struct ib_device *device;
        __be64 ca_guid;
        struct cm_port port[0];
};

struct cm_av {
        struct cm_port *port;
        union ib_gid dgid;
        struct ib_ah_attr ah_attr;
        u16 pkey_index;
        u8 packet_life_time;
};

struct cm_work {
        struct work_struct work;
        struct list_head list;
        struct cm_port *port;
        struct ib_mad_recv_wc *mad_recv_wc;     /* Received MADs */
        __be32 local_id;                        /* Established / timewait */
        __be32 remote_id;
        struct ib_cm_event cm_event;
        struct ib_sa_path_rec path[0];
};

struct cm_timewait_info {
        struct cm_work work;                    /* Must be first. */
        struct rb_node remote_qp_node;
        struct rb_node remote_id_node;
        __be64 remote_ca_guid;
        __be32 remote_qpn;
        u8 inserted_remote_qp;
        u8 inserted_remote_id;
};

struct cm_id_private {
        struct ib_cm_id id;

        struct rb_node service_node;
        struct rb_node sidr_id_node;
        spinlock_t lock;        /* Do not acquire inside cm.lock */
        struct completion comp;
        atomic_t refcount;

        struct ib_mad_send_buf *msg;
        struct cm_timewait_info *timewait_info;
        /* todo: use alternate port on send failure */
        struct cm_av av;
        struct cm_av alt_av;
        struct ib_cm_compare_data *compare_data;

        void *private_data;
        __be64 tid;
        __be32 local_qpn;
        __be32 remote_qpn;
        enum ib_qp_type qp_type;
        __be32 sq_psn;
        __be32 rq_psn;
        int timeout_ms;
        enum ib_mtu path_mtu;
        u8 private_data_len;
        u8 max_cm_retries;
        u8 peer_to_peer;
        u8 responder_resources;
        u8 initiator_depth;
        u8 local_ack_timeout;
        u8 retry_count;
        u8 rnr_retry_count;
        u8 service_timeout;

        struct list_head work_list;
        atomic_t work_count;
};

static void cm_work_handler(void *data);

static inline void cm_deref_id(struct cm_id_private *cm_id_priv)
{
        if (atomic_dec_and_test(&cm_id_priv->refcount))
                complete(&cm_id_priv->comp);
}

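/*
 * Allocate a MAD send buffer and address handle for an outgoing CM
 * message.  Takes a reference on the cm_id (stored in context[0]) that
 * is dropped by cm_free_msg() when the message is released.
 */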
static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
                        struct ib_mad_send_buf **msg)
{
        struct ib_mad_agent *mad_agent;
        struct ib_mad_send_buf *m;
        struct ib_ah *ah;

        mad_agent = cm_id_priv->av.port->mad_agent;
        ah = ib_create_ah(mad_agent->qp->pd, &cm_id_priv->av.ah_attr);
        if (IS_ERR(ah))
                return PTR_ERR(ah);

        m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn,
                               cm_id_priv->av.pkey_index,
                               0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
                               GFP_ATOMIC);
        if (IS_ERR(m)) {
                ib_destroy_ah(ah);
                return PTR_ERR(m);
        }

        /* Timeout set by caller if response is expected. */
        m->ah = ah;
        m->retries = cm_id_priv->max_cm_retries;

        atomic_inc(&cm_id_priv->refcount);
        m->context[0] = cm_id_priv;
        *msg = m;
        return 0;
}

static int cm_alloc_response_msg(struct cm_port *port,
                                 struct ib_mad_recv_wc *mad_recv_wc,
                                 struct ib_mad_send_buf **msg)
{
        struct ib_mad_send_buf *m;
        struct ib_ah *ah;

        ah = ib_create_ah_from_wc(port->mad_agent->qp->pd, mad_recv_wc->wc,
                                  mad_recv_wc->recv_buf.grh, port->port_num);
        if (IS_ERR(ah))
                return PTR_ERR(ah);

        m = ib_create_send_mad(port->mad_agent, 1, mad_recv_wc->wc->pkey_index,
                               0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
                               GFP_ATOMIC);
        if (IS_ERR(m)) {
                ib_destroy_ah(ah);
                return PTR_ERR(m);
        }
        m->ah = ah;
        *msg = m;
        return 0;
}

static void cm_free_msg(struct ib_mad_send_buf *msg)
{
        ib_destroy_ah(msg->ah);
        if (msg->context[0])
                cm_deref_id(msg->context[0]);
        ib_free_send_mad(msg);
}

static void * cm_copy_private_data(const void *private_data,
                                   u8 private_data_len)
{
        void *data;

        if (!private_data || !private_data_len)
                return NULL;

        data = kmalloc(private_data_len, GFP_KERNEL);
        if (!data)
                return ERR_PTR(-ENOMEM);

        memcpy(data, private_data, private_data_len);
        return data;
}

static void cm_set_private_data(struct cm_id_private *cm_id_priv,
                                 void *private_data, u8 private_data_len)
{
        if (cm_id_priv->private_data && cm_id_priv->private_data_len)
                kfree(cm_id_priv->private_data);

        cm_id_priv->private_data = private_data;
        cm_id_priv->private_data_len = private_data_len;
}

static void cm_init_av_for_response(struct cm_port *port, struct ib_wc *wc,
                                    struct ib_grh *grh, struct cm_av *av)
{
        av->port = port;
        av->pkey_index = wc->pkey_index;
        ib_init_ah_from_wc(port->cm_dev->device, port->port_num, wc,
                           grh, &av->ah_attr);
}

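/*
 * Resolve a path record to a local port: find the device and port that
 * own the source GID, look up the pkey index, and initialize the
 * address vector used to reach the remote node.
 */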
static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av)
{
        struct cm_device *cm_dev;
        struct cm_port *port = NULL;
        unsigned long flags;
        int ret;
        u8 p;

        read_lock_irqsave(&cm.device_lock, flags);
        list_for_each_entry(cm_dev, &cm.device_list, list) {
                if (!ib_find_cached_gid(cm_dev->device, &path->sgid,
                                        &p, NULL)) {
                        port = &cm_dev->port[p-1];
                        break;
                }
        }
        read_unlock_irqrestore(&cm.device_lock, flags);

        if (!port)
                return -EINVAL;

        ret = ib_find_cached_pkey(cm_dev->device, port->port_num,
                                  be16_to_cpu(path->pkey), &av->pkey_index);
        if (ret)
                return ret;

        av->port = port;
        ib_init_ah_from_path(cm_dev->device, port->port_num, path,
                             &av->ah_attr);
        av->packet_life_time = path->packet_life_time;
        return 0;
}

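/*
 * Allocate a locally unique communication ID.  idr_get_new_above()
 * returns -EAGAIN when the idr needs more memory, so preload with
 * idr_pre_get() outside the lock and retry until the allocation sticks.
 */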
static int cm_alloc_id(struct cm_id_private *cm_id_priv)
{
        unsigned long flags;
        int ret;
        static int next_id;

        do {
                spin_lock_irqsave(&cm.lock, flags);
                ret = idr_get_new_above(&cm.local_id_table, cm_id_priv, next_id++,
                                        (__force int *) &cm_id_priv->id.local_id);
                spin_unlock_irqrestore(&cm.lock, flags);
        } while ((ret == -EAGAIN) && idr_pre_get(&cm.local_id_table, GFP_KERNEL));
        return ret;
}

static void cm_free_id(__be32 local_id)
{
        unsigned long flags;

        spin_lock_irqsave(&cm.lock, flags);
        idr_remove(&cm.local_id_table, (__force int) local_id);
        spin_unlock_irqrestore(&cm.lock, flags);
}

static struct cm_id_private * cm_get_id(__be32 local_id, __be32 remote_id)
{
        struct cm_id_private *cm_id_priv;

        cm_id_priv = idr_find(&cm.local_id_table, (__force int) local_id);
        if (cm_id_priv) {
                if (cm_id_priv->id.remote_id == remote_id)
                        atomic_inc(&cm_id_priv->refcount);
                else
                        cm_id_priv = NULL;
        }

        return cm_id_priv;
}

static struct cm_id_private * cm_acquire_id(__be32 local_id, __be32 remote_id)
{
        struct cm_id_private *cm_id_priv;
        unsigned long flags;

        spin_lock_irqsave(&cm.lock, flags);
        cm_id_priv = cm_get_id(local_id, remote_id);
        spin_unlock_irqrestore(&cm.lock, flags);

        return cm_id_priv;
}

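/*
 * Listens may match on private data.  All comparisons are done on
 * masked copies, so only the bits a listener declared significant in
 * its mask take part in the match.
 */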
static void cm_mask_copy(u8 *dst, u8 *src, u8 *mask)
{
        int i;

        for (i = 0; i < IB_CM_COMPARE_SIZE / sizeof(unsigned long); i++)
                ((unsigned long *) dst)[i] = ((unsigned long *) src)[i] &
                                             ((unsigned long *) mask)[i];
}

static int cm_compare_data(struct ib_cm_compare_data *src_data,
                           struct ib_cm_compare_data *dst_data)
{
        u8 src[IB_CM_COMPARE_SIZE];
        u8 dst[IB_CM_COMPARE_SIZE];

        if (!src_data || !dst_data)
                return 0;

        cm_mask_copy(src, src_data->data, dst_data->mask);
        cm_mask_copy(dst, dst_data->data, src_data->mask);
        return memcmp(src, dst, IB_CM_COMPARE_SIZE);
}

static int cm_compare_private_data(u8 *private_data,
                                   struct ib_cm_compare_data *dst_data)
{
        u8 src[IB_CM_COMPARE_SIZE];

        if (!dst_data)
                return 0;

        cm_mask_copy(src, private_data, dst_data->mask);
        return memcmp(src, dst_data->data, IB_CM_COMPARE_SIZE);
}

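/*
 * Insert a listening cm_id into the global rb-tree, ordered by device,
 * then service ID, then compare data.  Returns the existing entry if an
 * equivalent listen (same device, overlapping service ID under the
 * masks, equal masked data) is already present.
 */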
static struct cm_id_private * cm_insert_listen(struct cm_id_private *cm_id_priv)
{
        struct rb_node **link = &cm.listen_service_table.rb_node;
        struct rb_node *parent = NULL;
        struct cm_id_private *cur_cm_id_priv;
        __be64 service_id = cm_id_priv->id.service_id;
        __be64 service_mask = cm_id_priv->id.service_mask;
        int data_cmp;

        while (*link) {
                parent = *link;
                cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
                                          service_node);
                data_cmp = cm_compare_data(cm_id_priv->compare_data,
                                           cur_cm_id_priv->compare_data);
                if ((cur_cm_id_priv->id.service_mask & service_id) ==
                    (service_mask & cur_cm_id_priv->id.service_id) &&
                    (cm_id_priv->id.device == cur_cm_id_priv->id.device) &&
                    !data_cmp)
                        return cur_cm_id_priv;

                if (cm_id_priv->id.device < cur_cm_id_priv->id.device)
                        link = &(*link)->rb_left;
                else if (cm_id_priv->id.device > cur_cm_id_priv->id.device)
                        link = &(*link)->rb_right;
                else if (service_id < cur_cm_id_priv->id.service_id)
                        link = &(*link)->rb_left;
                else if (service_id > cur_cm_id_priv->id.service_id)
                        link = &(*link)->rb_right;
                else if (data_cmp < 0)
                        link = &(*link)->rb_left;
                else
                        link = &(*link)->rb_right;
        }
        rb_link_node(&cm_id_priv->service_node, parent, link);
        rb_insert_color(&cm_id_priv->service_node, &cm.listen_service_table);
        return NULL;
}

static struct cm_id_private * cm_find_listen(struct ib_device *device,
                                             __be64 service_id,
                                             u8 *private_data)
{
        struct rb_node *node = cm.listen_service_table.rb_node;
        struct cm_id_private *cm_id_priv;
        int data_cmp;

        while (node) {
                cm_id_priv = rb_entry(node, struct cm_id_private, service_node);
                data_cmp = cm_compare_private_data(private_data,
                                                   cm_id_priv->compare_data);
                if ((cm_id_priv->id.service_mask & service_id) ==
                     cm_id_priv->id.service_id &&
                    (cm_id_priv->id.device == device) && !data_cmp)
                        return cm_id_priv;

                if (device < cm_id_priv->id.device)
                        node = node->rb_left;
                else if (device > cm_id_priv->id.device)
                        node = node->rb_right;
                else if (service_id < cm_id_priv->id.service_id)
                        node = node->rb_left;
                else if (service_id > cm_id_priv->id.service_id)
                        node = node->rb_right;
                else if (data_cmp < 0)
                        node = node->rb_left;
                else
                        node = node->rb_right;
        }
        return NULL;
}

static struct cm_timewait_info * cm_insert_remote_id(struct cm_timewait_info
                                                     *timewait_info)
{
        struct rb_node **link = &cm.remote_id_table.rb_node;
        struct rb_node *parent = NULL;
        struct cm_timewait_info *cur_timewait_info;
        __be64 remote_ca_guid = timewait_info->remote_ca_guid;
        __be32 remote_id = timewait_info->work.remote_id;

        while (*link) {
                parent = *link;
                cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
                                             remote_id_node);
                if (remote_id < cur_timewait_info->work.remote_id)
                        link = &(*link)->rb_left;
                else if (remote_id > cur_timewait_info->work.remote_id)
                        link = &(*link)->rb_right;
                else if (remote_ca_guid < cur_timewait_info->remote_ca_guid)
                        link = &(*link)->rb_left;
                else if (remote_ca_guid > cur_timewait_info->remote_ca_guid)
                        link = &(*link)->rb_right;
                else
                        return cur_timewait_info;
        }
        timewait_info->inserted_remote_id = 1;
        rb_link_node(&timewait_info->remote_id_node, parent, link);
        rb_insert_color(&timewait_info->remote_id_node, &cm.remote_id_table);
        return NULL;
}

static struct cm_timewait_info * cm_find_remote_id(__be64 remote_ca_guid,
                                                   __be32 remote_id)
{
        struct rb_node *node = cm.remote_id_table.rb_node;
        struct cm_timewait_info *timewait_info;

        while (node) {
                timewait_info = rb_entry(node, struct cm_timewait_info,
                                         remote_id_node);
                if (remote_id < timewait_info->work.remote_id)
                        node = node->rb_left;
                else if (remote_id > timewait_info->work.remote_id)
                        node = node->rb_right;
                else if (remote_ca_guid < timewait_info->remote_ca_guid)
                        node = node->rb_left;
                else if (remote_ca_guid > timewait_info->remote_ca_guid)
                        node = node->rb_right;
                else
                        return timewait_info;
        }
        return NULL;
}

static struct cm_timewait_info * cm_insert_remote_qpn(struct cm_timewait_info
                                                      *timewait_info)
{
        struct rb_node **link = &cm.remote_qp_table.rb_node;
        struct rb_node *parent = NULL;
        struct cm_timewait_info *cur_timewait_info;
        __be64 remote_ca_guid = timewait_info->remote_ca_guid;
        __be32 remote_qpn = timewait_info->remote_qpn;

        while (*link) {
                parent = *link;
                cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
                                             remote_qp_node);
                if (remote_qpn < cur_timewait_info->remote_qpn)
                        link = &(*link)->rb_left;
                else if (remote_qpn > cur_timewait_info->remote_qpn)
                        link = &(*link)->rb_right;
                else if (remote_ca_guid < cur_timewait_info->remote_ca_guid)
                        link = &(*link)->rb_left;
                else if (remote_ca_guid > cur_timewait_info->remote_ca_guid)
                        link = &(*link)->rb_right;
                else
                        return cur_timewait_info;
        }
        timewait_info->inserted_remote_qp = 1;
        rb_link_node(&timewait_info->remote_qp_node, parent, link);
        rb_insert_color(&timewait_info->remote_qp_node, &cm.remote_qp_table);
        return NULL;
}

static struct cm_id_private * cm_insert_remote_sidr(struct cm_id_private
                                                    *cm_id_priv)
{
        struct rb_node **link = &cm.remote_sidr_table.rb_node;
        struct rb_node *parent = NULL;
        struct cm_id_private *cur_cm_id_priv;
        union ib_gid *port_gid = &cm_id_priv->av.dgid;
        __be32 remote_id = cm_id_priv->id.remote_id;

        while (*link) {
                parent = *link;
                cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
                                          sidr_id_node);
                if (remote_id < cur_cm_id_priv->id.remote_id)
                        link = &(*link)->rb_left;
                else if (remote_id > cur_cm_id_priv->id.remote_id)
                        link = &(*link)->rb_right;
                else {
                        int cmp;
                        cmp = memcmp(port_gid, &cur_cm_id_priv->av.dgid,
                                     sizeof *port_gid);
                        if (cmp < 0)
                                link = &(*link)->rb_left;
                        else if (cmp > 0)
                                link = &(*link)->rb_right;
                        else
                                return cur_cm_id_priv;
                }
        }
        rb_link_node(&cm_id_priv->sidr_id_node, parent, link);
        rb_insert_color(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
        return NULL;
}

static void cm_reject_sidr_req(struct cm_id_private *cm_id_priv,
                               enum ib_cm_sidr_status status)
{
        struct ib_cm_sidr_rep_param param;

        memset(&param, 0, sizeof param);
        param.status = status;
        ib_send_cm_sidr_rep(&cm_id_priv->id, &param);
}

struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
                                 ib_cm_handler cm_handler,
                                 void *context)
{
        struct cm_id_private *cm_id_priv;
        int ret;

        cm_id_priv = kzalloc(sizeof *cm_id_priv, GFP_KERNEL);
        if (!cm_id_priv)
                return ERR_PTR(-ENOMEM);

        cm_id_priv->id.state = IB_CM_IDLE;
        cm_id_priv->id.device = device;
        cm_id_priv->id.cm_handler = cm_handler;
        cm_id_priv->id.context = context;
        cm_id_priv->id.remote_cm_qpn = 1;
        ret = cm_alloc_id(cm_id_priv);
        if (ret)
                goto error;

        spin_lock_init(&cm_id_priv->lock);
        init_completion(&cm_id_priv->comp);
        INIT_LIST_HEAD(&cm_id_priv->work_list);
        atomic_set(&cm_id_priv->work_count, -1);
        atomic_set(&cm_id_priv->refcount, 1);
        return &cm_id_priv->id;

error:
        kfree(cm_id_priv);
        /* Propagate the actual error from cm_alloc_id(), not a fixed -ENOMEM. */
        return ERR_PTR(ret);
}
EXPORT_SYMBOL(ib_create_cm_id);

static struct cm_work * cm_dequeue_work(struct cm_id_private *cm_id_priv)
{
        struct cm_work *work;

        if (list_empty(&cm_id_priv->work_list))
                return NULL;

        work = list_entry(cm_id_priv->work_list.next, struct cm_work, list);
        list_del(&work->list);
        return work;
}

static void cm_free_work(struct cm_work *work)
{
        if (work->mad_recv_wc)
                ib_free_recv_mad(work->mad_recv_wc);
        kfree(work);
}

static inline int cm_convert_to_ms(int iba_time)
{
        /* approximate conversion to ms from 4.096us x 2^iba_time */
        return 1 << max(iba_time - 8, 0);
}

static void cm_cleanup_timewait(struct cm_timewait_info *timewait_info)
{
        unsigned long flags;

        if (!timewait_info->inserted_remote_id &&
            !timewait_info->inserted_remote_qp)
                return;

        spin_lock_irqsave(&cm.lock, flags);
        if (timewait_info->inserted_remote_id) {
                rb_erase(&timewait_info->remote_id_node, &cm.remote_id_table);
                timewait_info->inserted_remote_id = 0;
        }

        if (timewait_info->inserted_remote_qp) {
                rb_erase(&timewait_info->remote_qp_node, &cm.remote_qp_table);
                timewait_info->inserted_remote_qp = 0;
        }
        spin_unlock_irqrestore(&cm.lock, flags);
}

static struct cm_timewait_info * cm_create_timewait_info(__be32 local_id)
{
        struct cm_timewait_info *timewait_info;

        timewait_info = kzalloc(sizeof *timewait_info, GFP_KERNEL);
        if (!timewait_info)
                return ERR_PTR(-ENOMEM);

        timewait_info->work.local_id = local_id;
        INIT_WORK(&timewait_info->work.work, cm_work_handler,
                  &timewait_info->work);
        timewait_info->work.cm_event.event = IB_CM_TIMEWAIT_EXIT;
        return timewait_info;
}

static void cm_enter_timewait(struct cm_id_private *cm_id_priv)
{
        int wait_time;

        /*
         * The cm_id could be destroyed by the user before we exit timewait.
         * To protect against this, we search for the cm_id after exiting
         * timewait before notifying the user that we've exited timewait.
         */
        cm_id_priv->id.state = IB_CM_TIMEWAIT;
        wait_time = cm_convert_to_ms(cm_id_priv->local_ack_timeout);
        queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work,
                           msecs_to_jiffies(wait_time));
        cm_id_priv->timewait_info = NULL;
}

static void cm_reset_to_idle(struct cm_id_private *cm_id_priv)
{
        cm_id_priv->id.state = IB_CM_IDLE;
        if (cm_id_priv->timewait_info) {
                cm_cleanup_timewait(cm_id_priv->timewait_info);
                kfree(cm_id_priv->timewait_info);
                cm_id_priv->timewait_info = NULL;
        }
}

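/*
 * Tear down a cm_id.  The work required depends on the connection
 * state: cancel outstanding MADs, send a REJ/DREQ/DREP where the
 * protocol expects one, then wait for all references to drop before
 * freeing the structure.
 */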
static void cm_destroy_id(struct ib_cm_id *cm_id, int err)
{
        struct cm_id_private *cm_id_priv;
        struct cm_work *work;
        unsigned long flags;

        cm_id_priv = container_of(cm_id, struct cm_id_private, id);
retest:
        spin_lock_irqsave(&cm_id_priv->lock, flags);
        switch (cm_id->state) {
        case IB_CM_LISTEN:
                cm_id->state = IB_CM_IDLE;
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                spin_lock_irqsave(&cm.lock, flags);
                rb_erase(&cm_id_priv->service_node, &cm.listen_service_table);
                spin_unlock_irqrestore(&cm.lock, flags);
                break;
        case IB_CM_SIDR_REQ_SENT:
                cm_id->state = IB_CM_IDLE;
                ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                break;
        case IB_CM_SIDR_REQ_RCVD:
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                cm_reject_sidr_req(cm_id_priv, IB_SIDR_REJECT);
                break;
        case IB_CM_REQ_SENT:
                ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                ib_send_cm_rej(cm_id, IB_CM_REJ_TIMEOUT,
                               &cm_id_priv->av.port->cm_dev->ca_guid,
                               sizeof cm_id_priv->av.port->cm_dev->ca_guid,
                               NULL, 0);
                break;
        case IB_CM_REQ_RCVD:
                if (err == -ENOMEM) {
                        /* Do not reject to allow future retries. */
                        cm_reset_to_idle(cm_id_priv);
                        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                } else {
                        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                        ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
                                       NULL, 0, NULL, 0);
                }
                break;
        case IB_CM_MRA_REQ_RCVD:
        case IB_CM_REP_SENT:
        case IB_CM_MRA_REP_RCVD:
                ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
                /* Fall through */
        case IB_CM_MRA_REQ_SENT:
        case IB_CM_REP_RCVD:
        case IB_CM_MRA_REP_SENT:
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
                               NULL, 0, NULL, 0);
                break;
        case IB_CM_ESTABLISHED:
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                ib_send_cm_dreq(cm_id, NULL, 0);
                goto retest;
        case IB_CM_DREQ_SENT:
                ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
                cm_enter_timewait(cm_id_priv);
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                break;
        case IB_CM_DREQ_RCVD:
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                ib_send_cm_drep(cm_id, NULL, 0);
                break;
        default:
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                break;
        }

        cm_free_id(cm_id->local_id);
        cm_deref_id(cm_id_priv);
        wait_for_completion(&cm_id_priv->comp);
        while ((work = cm_dequeue_work(cm_id_priv)) != NULL)
                cm_free_work(work);
        kfree(cm_id_priv->compare_data);
        kfree(cm_id_priv->private_data);
        kfree(cm_id_priv);
}

void ib_destroy_cm_id(struct ib_cm_id *cm_id)
{
        cm_destroy_id(cm_id, 0);
}
EXPORT_SYMBOL(ib_destroy_cm_id);

int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id, __be64 service_mask,
                 struct ib_cm_compare_data *compare_data)
{
        struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
        unsigned long flags;
        int ret = 0;

        service_mask = service_mask ? service_mask :
                       __constant_cpu_to_be64(~0ULL);
        service_id &= service_mask;
        if ((service_id & IB_SERVICE_ID_AGN_MASK) == IB_CM_ASSIGN_SERVICE_ID &&
            (service_id != IB_CM_ASSIGN_SERVICE_ID))
                return -EINVAL;

        cm_id_priv = container_of(cm_id, struct cm_id_private, id);
        if (cm_id->state != IB_CM_IDLE)
                return -EINVAL;

        if (compare_data) {
                cm_id_priv->compare_data = kzalloc(sizeof *compare_data,
                                                   GFP_KERNEL);
                if (!cm_id_priv->compare_data)
                        return -ENOMEM;
                cm_mask_copy(cm_id_priv->compare_data->data,
                             compare_data->data, compare_data->mask);
                memcpy(cm_id_priv->compare_data->mask, compare_data->mask,
                       IB_CM_COMPARE_SIZE);
        }

        cm_id->state = IB_CM_LISTEN;

        spin_lock_irqsave(&cm.lock, flags);
        if (service_id == IB_CM_ASSIGN_SERVICE_ID) {
                cm_id->service_id = cpu_to_be64(cm.listen_service_id++);
                cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
        } else {
                cm_id->service_id = service_id;
                cm_id->service_mask = service_mask;
        }
        cur_cm_id_priv = cm_insert_listen(cm_id_priv);
        spin_unlock_irqrestore(&cm.lock, flags);

        if (cur_cm_id_priv) {
                cm_id->state = IB_CM_IDLE;
                kfree(cm_id_priv->compare_data);
                cm_id_priv->compare_data = NULL;
                ret = -EBUSY;
        }
        return ret;
}
EXPORT_SYMBOL(ib_cm_listen);

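/*
 * Build a 64-bit transaction ID: the MAD agent's hi_tid in the upper
 * 32 bits, and the local communication ID with the message sequence in
 * bits 30-31 of the lower half.
 */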
static __be64 cm_form_tid(struct cm_id_private *cm_id_priv,
                          enum cm_msg_sequence msg_seq)
{
        u64 hi_tid, low_tid;

        hi_tid   = ((u64) cm_id_priv->av.port->mad_agent->hi_tid) << 32;
        low_tid  = (u64) ((__force u32)cm_id_priv->id.local_id |
                          (msg_seq << 30));
        return cpu_to_be64(hi_tid | low_tid);
}

static void cm_format_mad_hdr(struct ib_mad_hdr *hdr,
                              __be16 attr_id, __be64 tid)
{
        hdr->base_version  = IB_MGMT_BASE_VERSION;
        hdr->mgmt_class    = IB_MGMT_CLASS_CM;
        hdr->class_version = IB_CM_CLASS_VERSION;
        hdr->method        = IB_MGMT_METHOD_SEND;
        hdr->attr_id       = attr_id;
        hdr->tid           = tid;
}

static void cm_format_req(struct cm_req_msg *req_msg,
                          struct cm_id_private *cm_id_priv,
                          struct ib_cm_req_param *param)
{
        cm_format_mad_hdr(&req_msg->hdr, CM_REQ_ATTR_ID,
                          cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_REQ));

        req_msg->local_comm_id = cm_id_priv->id.local_id;
        req_msg->service_id = param->service_id;
        req_msg->local_ca_guid = cm_id_priv->av.port->cm_dev->ca_guid;
        cm_req_set_local_qpn(req_msg, cpu_to_be32(param->qp_num));
        cm_req_set_resp_res(req_msg, param->responder_resources);
        cm_req_set_init_depth(req_msg, param->initiator_depth);
        cm_req_set_remote_resp_timeout(req_msg,
                                       param->remote_cm_response_timeout);
        cm_req_set_qp_type(req_msg, param->qp_type);
        cm_req_set_flow_ctrl(req_msg, param->flow_control);
        cm_req_set_starting_psn(req_msg, cpu_to_be32(param->starting_psn));
        cm_req_set_local_resp_timeout(req_msg,
                                      param->local_cm_response_timeout);
        cm_req_set_retry_count(req_msg, param->retry_count);
        req_msg->pkey = param->primary_path->pkey;
        cm_req_set_path_mtu(req_msg, param->primary_path->mtu);
        cm_req_set_rnr_retry_count(req_msg, param->rnr_retry_count);
        cm_req_set_max_cm_retries(req_msg, param->max_cm_retries);
        cm_req_set_srq(req_msg, param->srq);

        req_msg->primary_local_lid = param->primary_path->slid;
        req_msg->primary_remote_lid = param->primary_path->dlid;
        req_msg->primary_local_gid = param->primary_path->sgid;
        req_msg->primary_remote_gid = param->primary_path->dgid;
        cm_req_set_primary_flow_label(req_msg, param->primary_path->flow_label);
        cm_req_set_primary_packet_rate(req_msg, param->primary_path->rate);
        req_msg->primary_traffic_class = param->primary_path->traffic_class;
        req_msg->primary_hop_limit = param->primary_path->hop_limit;
        cm_req_set_primary_sl(req_msg, param->primary_path->sl);
        cm_req_set_primary_subnet_local(req_msg, 1); /* local only... */
        cm_req_set_primary_local_ack_timeout(req_msg,
                min(31, param->primary_path->packet_life_time + 1));

        if (param->alternate_path) {
                req_msg->alt_local_lid = param->alternate_path->slid;
                req_msg->alt_remote_lid = param->alternate_path->dlid;
                req_msg->alt_local_gid = param->alternate_path->sgid;
                req_msg->alt_remote_gid = param->alternate_path->dgid;
                cm_req_set_alt_flow_label(req_msg,
                                          param->alternate_path->flow_label);
                cm_req_set_alt_packet_rate(req_msg, param->alternate_path->rate);
                req_msg->alt_traffic_class = param->alternate_path->traffic_class;
                req_msg->alt_hop_limit = param->alternate_path->hop_limit;
                cm_req_set_alt_sl(req_msg, param->alternate_path->sl);
                cm_req_set_alt_subnet_local(req_msg, 1); /* local only... */
                cm_req_set_alt_local_ack_timeout(req_msg,
                        min(31, param->alternate_path->packet_life_time + 1));
        }

        if (param->private_data && param->private_data_len)
                memcpy(req_msg->private_data, param->private_data,
                       param->private_data_len);
}

static int cm_validate_req_param(struct ib_cm_req_param *param)
{
        /* peer-to-peer not supported */
        if (param->peer_to_peer)
                return -EINVAL;

        if (!param->primary_path)
                return -EINVAL;

        if (param->qp_type != IB_QPT_RC && param->qp_type != IB_QPT_UC)
                return -EINVAL;

        if (param->private_data &&
            param->private_data_len > IB_CM_REQ_PRIVATE_DATA_SIZE)
                return -EINVAL;

        if (param->alternate_path &&
            (param->alternate_path->pkey != param->primary_path->pkey ||
             param->alternate_path->mtu != param->primary_path->mtu))
                return -EINVAL;

        return 0;
}

int ib_send_cm_req(struct ib_cm_id *cm_id,
                   struct ib_cm_req_param *param)
{
        struct cm_id_private *cm_id_priv;
        struct cm_req_msg *req_msg;
        unsigned long flags;
        int ret;

        ret = cm_validate_req_param(param);
        if (ret)
                return ret;

        /* Verify that we're not in timewait. */
        cm_id_priv = container_of(cm_id, struct cm_id_private, id);
        spin_lock_irqsave(&cm_id_priv->lock, flags);
        if (cm_id->state != IB_CM_IDLE) {
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                ret = -EINVAL;
                goto out;
        }
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
                                                            id.local_id);
        if (IS_ERR(cm_id_priv->timewait_info)) {
                ret = PTR_ERR(cm_id_priv->timewait_info);
                goto out;
        }

        ret = cm_init_av_by_path(param->primary_path, &cm_id_priv->av);
        if (ret)
                goto error1;
        if (param->alternate_path) {
                ret = cm_init_av_by_path(param->alternate_path,
                                         &cm_id_priv->alt_av);
                if (ret)
                        goto error1;
        }
        cm_id->service_id = param->service_id;
        cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
        cm_id_priv->timeout_ms = cm_convert_to_ms(
                                    param->primary_path->packet_life_time) * 2 +
                                 cm_convert_to_ms(
                                    param->remote_cm_response_timeout);
        cm_id_priv->max_cm_retries = param->max_cm_retries;
        cm_id_priv->initiator_depth = param->initiator_depth;
        cm_id_priv->responder_resources = param->responder_resources;
        cm_id_priv->retry_count = param->retry_count;
        cm_id_priv->path_mtu = param->primary_path->mtu;
        cm_id_priv->qp_type = param->qp_type;

        ret = cm_alloc_msg(cm_id_priv, &cm_id_priv->msg);
        if (ret)
                goto error1;

        req_msg = (struct cm_req_msg *) cm_id_priv->msg->mad;
        cm_format_req(req_msg, cm_id_priv, param);
        cm_id_priv->tid = req_msg->hdr.tid;
        cm_id_priv->msg->timeout_ms = cm_id_priv->timeout_ms;
        cm_id_priv->msg->context[1] = (void *) (unsigned long) IB_CM_REQ_SENT;

        cm_id_priv->local_qpn = cm_req_get_local_qpn(req_msg);
        cm_id_priv->rq_psn = cm_req_get_starting_psn(req_msg);
        cm_id_priv->local_ack_timeout =
                                cm_req_get_primary_local_ack_timeout(req_msg);

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        ret = ib_post_send_mad(cm_id_priv->msg, NULL);
        if (ret) {
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                goto error2;
        }
        BUG_ON(cm_id->state != IB_CM_IDLE);
        cm_id->state = IB_CM_REQ_SENT;
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        return 0;

error2: cm_free_msg(cm_id_priv->msg);
error1: kfree(cm_id_priv->timewait_info);
out:    return ret;
}
EXPORT_SYMBOL(ib_send_cm_req);

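/*
 * Send a REJ in response to a received MAD when no cm_id exists for the
 * connection, e.g. for stale or unmatched requests.  The reply reuses
 * the TID and communication IDs from the incoming message.
 */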
static int cm_issue_rej(struct cm_port *port,
                        struct ib_mad_recv_wc *mad_recv_wc,
                        enum ib_cm_rej_reason reason,
                        enum cm_msg_response msg_rejected,
                        void *ari, u8 ari_length)
{
        struct ib_mad_send_buf *msg = NULL;
        struct cm_rej_msg *rej_msg, *rcv_msg;
        int ret;

        ret = cm_alloc_response_msg(port, mad_recv_wc, &msg);
        if (ret)
                return ret;

        /* We just need common CM header information.  Cast to any message. */
        rcv_msg = (struct cm_rej_msg *) mad_recv_wc->recv_buf.mad;
        rej_msg = (struct cm_rej_msg *) msg->mad;

        cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, rcv_msg->hdr.tid);
        rej_msg->remote_comm_id = rcv_msg->local_comm_id;
        rej_msg->local_comm_id = rcv_msg->remote_comm_id;
        cm_rej_set_msg_rejected(rej_msg, msg_rejected);
        rej_msg->reason = cpu_to_be16(reason);

        if (ari && ari_length) {
                cm_rej_set_reject_info_len(rej_msg, ari_length);
                memcpy(rej_msg->ari, ari, ari_length);
        }

        ret = ib_post_send_mad(msg, NULL);
        if (ret)
                cm_free_msg(msg);

        return ret;
}

static inline int cm_is_active_peer(__be64 local_ca_guid, __be64 remote_ca_guid,
                                    __be32 local_qpn, __be32 remote_qpn)
{
        return (be64_to_cpu(local_ca_guid) > be64_to_cpu(remote_ca_guid) ||
                ((local_ca_guid == remote_ca_guid) &&
                 (be32_to_cpu(local_qpn) > be32_to_cpu(remote_qpn))));
}

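/*
 * Extract path records from a received REQ.  The REQ describes the path
 * from the active side's point of view, so local and remote fields are
 * swapped for use on the passive side.
 */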
static void cm_format_paths_from_req(struct cm_req_msg *req_msg,
                                     struct ib_sa_path_rec *primary_path,
                                     struct ib_sa_path_rec *alt_path)
{
        memset(primary_path, 0, sizeof *primary_path);
        primary_path->dgid = req_msg->primary_local_gid;
        primary_path->sgid = req_msg->primary_remote_gid;
        primary_path->dlid = req_msg->primary_local_lid;
        primary_path->slid = req_msg->primary_remote_lid;
        primary_path->flow_label = cm_req_get_primary_flow_label(req_msg);
        primary_path->hop_limit = req_msg->primary_hop_limit;
        primary_path->traffic_class = req_msg->primary_traffic_class;
        primary_path->reversible = 1;
        primary_path->pkey = req_msg->pkey;
        primary_path->sl = cm_req_get_primary_sl(req_msg);
        primary_path->mtu_selector = IB_SA_EQ;
        primary_path->mtu = cm_req_get_path_mtu(req_msg);
        primary_path->rate_selector = IB_SA_EQ;
        primary_path->rate = cm_req_get_primary_packet_rate(req_msg);
        primary_path->packet_life_time_selector = IB_SA_EQ;
        primary_path->packet_life_time =
                cm_req_get_primary_local_ack_timeout(req_msg);
        primary_path->packet_life_time -= (primary_path->packet_life_time > 0);

        if (req_msg->alt_local_lid) {
                memset(alt_path, 0, sizeof *alt_path);
                alt_path->dgid = req_msg->alt_local_gid;
                alt_path->sgid = req_msg->alt_remote_gid;
                alt_path->dlid = req_msg->alt_local_lid;
                alt_path->slid = req_msg->alt_remote_lid;
                alt_path->flow_label = cm_req_get_alt_flow_label(req_msg);
                alt_path->hop_limit = req_msg->alt_hop_limit;
                alt_path->traffic_class = req_msg->alt_traffic_class;
                alt_path->reversible = 1;
                alt_path->pkey = req_msg->pkey;
                alt_path->sl = cm_req_get_alt_sl(req_msg);
                alt_path->mtu_selector = IB_SA_EQ;
                alt_path->mtu = cm_req_get_path_mtu(req_msg);
                alt_path->rate_selector = IB_SA_EQ;
                alt_path->rate = cm_req_get_alt_packet_rate(req_msg);
                alt_path->packet_life_time_selector = IB_SA_EQ;
                alt_path->packet_life_time =
                        cm_req_get_alt_local_ack_timeout(req_msg);
                alt_path->packet_life_time -= (alt_path->packet_life_time > 0);
        }
}

static void cm_format_req_event(struct cm_work *work,
                                struct cm_id_private *cm_id_priv,
                                struct ib_cm_id *listen_id)
{
        struct cm_req_msg *req_msg;
        struct ib_cm_req_event_param *param;

        req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
        param = &work->cm_event.param.req_rcvd;
        param->listen_id = listen_id;
        param->port = cm_id_priv->av.port->port_num;
        param->primary_path = &work->path[0];
        if (req_msg->alt_local_lid)
                param->alternate_path = &work->path[1];
        else
                param->alternate_path = NULL;
        param->remote_ca_guid = req_msg->local_ca_guid;
        param->remote_qkey = be32_to_cpu(req_msg->local_qkey);
        param->remote_qpn = be32_to_cpu(cm_req_get_local_qpn(req_msg));
        param->qp_type = cm_req_get_qp_type(req_msg);
        param->starting_psn = be32_to_cpu(cm_req_get_starting_psn(req_msg));
        param->responder_resources = cm_req_get_init_depth(req_msg);
        param->initiator_depth = cm_req_get_resp_res(req_msg);
        param->local_cm_response_timeout =
                                        cm_req_get_remote_resp_timeout(req_msg);
        param->flow_control = cm_req_get_flow_ctrl(req_msg);
        param->remote_cm_response_timeout =
                                        cm_req_get_local_resp_timeout(req_msg);
        param->retry_count = cm_req_get_retry_count(req_msg);
        param->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
        param->srq = cm_req_get_srq(req_msg);
        work->cm_event.private_data = &req_msg->private_data;
}

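/*
 * Deliver an event to the consumer's callback, then drain any work that
 * was queued while the callback ran.  A nonzero return from the handler
 * destroys the cm_id.
 */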
static void cm_process_work(struct cm_id_private *cm_id_priv,
                            struct cm_work *work)
{
        unsigned long flags;
        int ret;

        /* We will typically only have the current event to report. */
        ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &work->cm_event);
        cm_free_work(work);

        while (!ret && !atomic_add_negative(-1, &cm_id_priv->work_count)) {
                spin_lock_irqsave(&cm_id_priv->lock, flags);
                work = cm_dequeue_work(cm_id_priv);
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                BUG_ON(!work);
                ret = cm_id_priv->id.cm_handler(&cm_id_priv->id,
                                                &work->cm_event);
                cm_free_work(work);
        }
        cm_deref_id(cm_id_priv);
        if (ret)
                cm_destroy_id(&cm_id_priv->id, ret);
}

static void cm_format_mra(struct cm_mra_msg *mra_msg,
                          struct cm_id_private *cm_id_priv,
                          enum cm_msg_response msg_mraed, u8 service_timeout,
                          const void *private_data, u8 private_data_len)
{
        cm_format_mad_hdr(&mra_msg->hdr, CM_MRA_ATTR_ID, cm_id_priv->tid);
        cm_mra_set_msg_mraed(mra_msg, msg_mraed);
        mra_msg->local_comm_id = cm_id_priv->id.local_id;
        mra_msg->remote_comm_id = cm_id_priv->id.remote_id;
        cm_mra_set_service_timeout(mra_msg, service_timeout);

        if (private_data && private_data_len)
                memcpy(mra_msg->private_data, private_data, private_data_len);
}

static void cm_format_rej(struct cm_rej_msg *rej_msg,
                          struct cm_id_private *cm_id_priv,
                          enum ib_cm_rej_reason reason,
                          void *ari,
                          u8 ari_length,
                          const void *private_data,
                          u8 private_data_len)
{
        cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, cm_id_priv->tid);
        rej_msg->remote_comm_id = cm_id_priv->id.remote_id;

        switch (cm_id_priv->id.state) {
        case IB_CM_REQ_RCVD:
                rej_msg->local_comm_id = 0;
                cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
                break;
        case IB_CM_MRA_REQ_SENT:
                rej_msg->local_comm_id = cm_id_priv->id.local_id;
                cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
                break;
        case IB_CM_REP_RCVD:
        case IB_CM_MRA_REP_SENT:
                rej_msg->local_comm_id = cm_id_priv->id.local_id;
                cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REP);
                break;
        default:
                rej_msg->local_comm_id = cm_id_priv->id.local_id;
                cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_OTHER);
                break;
        }

        rej_msg->reason = cpu_to_be16(reason);
        if (ari && ari_length) {
                cm_rej_set_reject_info_len(rej_msg, ari_length);
                memcpy(rej_msg->ari, ari, ari_length);
        }

        if (private_data && private_data_len)
                memcpy(rej_msg->private_data, private_data, private_data_len);
}

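/*
 * A duplicate REQ usually means our earlier response was lost.  Resend
 * an MRA if we are still processing the request, or a stale-connection
 * REJ if the connection has already moved to timewait.
 */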
static void cm_dup_req_handler(struct cm_work *work,
                               struct cm_id_private *cm_id_priv)
{
        struct ib_mad_send_buf *msg = NULL;
        unsigned long flags;
        int ret;

        /* Quick state check to discard duplicate REQs. */
        if (cm_id_priv->id.state == IB_CM_REQ_RCVD)
                return;

        ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
        if (ret)
                return;

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        switch (cm_id_priv->id.state) {
        case IB_CM_MRA_REQ_SENT:
                cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
                              CM_MSG_RESPONSE_REQ, cm_id_priv->service_timeout,
                              cm_id_priv->private_data,
                              cm_id_priv->private_data_len);
                break;
        case IB_CM_TIMEWAIT:
                cm_format_rej((struct cm_rej_msg *) msg->mad, cm_id_priv,
                              IB_CM_REJ_STALE_CONN, NULL, 0, NULL, 0);
                break;
        default:
                goto unlock;
        }
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        ret = ib_post_send_mad(msg, NULL);
        if (ret)
                goto free;
        return;

unlock: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
free:   cm_free_msg(msg);
}

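/*
 * Match a received REQ against existing state: detect duplicate REQs
 * and stale connections via the timewait tables, then look up a
 * listening cm_id for the service.  Returns the listener with a
 * reference held, or NULL after issuing the appropriate reject.
 */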
static struct cm_id_private * cm_match_req(struct cm_work *work,
                                           struct cm_id_private *cm_id_priv)
{
        struct cm_id_private *listen_cm_id_priv, *cur_cm_id_priv;
        struct cm_timewait_info *timewait_info;
        struct cm_req_msg *req_msg;
        unsigned long flags;

        req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;

        /* Check for duplicate REQ and stale connections. */
        spin_lock_irqsave(&cm.lock, flags);
        timewait_info = cm_insert_remote_id(cm_id_priv->timewait_info);
        if (!timewait_info)
                timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info);

        if (timewait_info) {
                cur_cm_id_priv = cm_get_id(timewait_info->work.local_id,
                                           timewait_info->work.remote_id);
                spin_unlock_irqrestore(&cm.lock, flags);
                if (cur_cm_id_priv) {
                        cm_dup_req_handler(work, cur_cm_id_priv);
                        cm_deref_id(cur_cm_id_priv);
                } else
                        cm_issue_rej(work->port, work->mad_recv_wc,
                                     IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REQ,
                                     NULL, 0);
                goto error;
        }

        /* Find matching listen request. */
        listen_cm_id_priv = cm_find_listen(cm_id_priv->id.device,
                                           req_msg->service_id,
                                           req_msg->private_data);
        if (!listen_cm_id_priv) {
                spin_unlock_irqrestore(&cm.lock, flags);
                cm_issue_rej(work->port, work->mad_recv_wc,
                             IB_CM_REJ_INVALID_SERVICE_ID, CM_MSG_RESPONSE_REQ,
                             NULL, 0);
                goto error;
        }
        atomic_inc(&listen_cm_id_priv->refcount);
        atomic_inc(&cm_id_priv->refcount);
        cm_id_priv->id.state = IB_CM_REQ_RCVD;
        atomic_inc(&cm_id_priv->work_count);
        spin_unlock_irqrestore(&cm.lock, flags);
        return listen_cm_id_priv;

error:  cm_cleanup_timewait(cm_id_priv->timewait_info);
        return NULL;
}

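/*
 * Handle a new connection request: create a cm_id for the passive side,
 * match it to a listener, build the forward and (optional) alternate
 * paths, and hand the REQ event to the listener's callback.
 */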
static int cm_req_handler(struct cm_work *work)
{
        struct ib_cm_id *cm_id;
        struct cm_id_private *cm_id_priv, *listen_cm_id_priv;
        struct cm_req_msg *req_msg;
        int ret;

        req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;

        cm_id = ib_create_cm_id(work->port->cm_dev->device, NULL, NULL);
        if (IS_ERR(cm_id))
                return PTR_ERR(cm_id);

        cm_id_priv = container_of(cm_id, struct cm_id_private, id);
        cm_id_priv->id.remote_id = req_msg->local_comm_id;
        cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
                                work->mad_recv_wc->recv_buf.grh,
                                &cm_id_priv->av);
        cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
                                                            id.local_id);
        if (IS_ERR(cm_id_priv->timewait_info)) {
                ret = PTR_ERR(cm_id_priv->timewait_info);
                goto destroy;
        }
        cm_id_priv->timewait_info->work.remote_id = req_msg->local_comm_id;
        cm_id_priv->timewait_info->remote_ca_guid = req_msg->local_ca_guid;
        cm_id_priv->timewait_info->remote_qpn = cm_req_get_local_qpn(req_msg);

        listen_cm_id_priv = cm_match_req(work, cm_id_priv);
        if (!listen_cm_id_priv) {
                ret = -EINVAL;
                kfree(cm_id_priv->timewait_info);
                goto destroy;
        }

        cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
        cm_id_priv->id.context = listen_cm_id_priv->id.context;
        cm_id_priv->id.service_id = req_msg->service_id;
        cm_id_priv->id.service_mask = __constant_cpu_to_be64(~0ULL);

        cm_format_paths_from_req(req_msg, &work->path[0], &work->path[1]);
        ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av);
        if (ret) {
                ib_get_cached_gid(work->port->cm_dev->device,
                                  work->port->port_num, 0, &work->path[0].sgid);
                ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_GID,
                               &work->path[0].sgid, sizeof work->path[0].sgid,
                               NULL, 0);
                goto rejected;
        }
        if (req_msg->alt_local_lid) {
                ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av);
                if (ret) {
                        ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_ALT_GID,
                                       &work->path[0].sgid,
                                       sizeof work->path[0].sgid, NULL, 0);
                        goto rejected;
                }
        }
        cm_id_priv->tid = req_msg->hdr.tid;
        cm_id_priv->timeout_ms = cm_convert_to_ms(
                                        cm_req_get_local_resp_timeout(req_msg));
        cm_id_priv->max_cm_retries = cm_req_get_max_cm_retries(req_msg);
        cm_id_priv->remote_qpn = cm_req_get_local_qpn(req_msg);
        cm_id_priv->initiator_depth = cm_req_get_resp_res(req_msg);
        cm_id_priv->responder_resources = cm_req_get_init_depth(req_msg);
        cm_id_priv->path_mtu = cm_req_get_path_mtu(req_msg);
        cm_id_priv->sq_psn = cm_req_get_starting_psn(req_msg);
        cm_id_priv->local_ack_timeout =
                                cm_req_get_primary_local_ack_timeout(req_msg);
        cm_id_priv->retry_count = cm_req_get_retry_count(req_msg);
1406         cm_id_priv->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
1407         cm_id_priv->qp_type = cm_req_get_qp_type(req_msg);
1408
1409         cm_format_req_event(work, cm_id_priv, &listen_cm_id_priv->id);
1410         cm_process_work(cm_id_priv, work);
1411         cm_deref_id(listen_cm_id_priv);
1412         return 0;
1413
1414 rejected:
1415         atomic_dec(&cm_id_priv->refcount);
1416         cm_deref_id(listen_cm_id_priv);
1417 destroy:
1418         ib_destroy_cm_id(cm_id);
1419         return ret;
1420 }
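
/*
 * Summary of the passive-side REQ flow above: a fresh cm_id is created,
 * the REQ's addressing and QP parameters are copied into it, and the work
 * is dispatched to the matching listener's handler as an
 * IB_CM_REQ_RECEIVED event.  If the primary path cannot be resolved, the
 * REQ is rejected with IB_CM_REJ_INVALID_GID and the port's GID at index
 * 0 is carried in the ARI as a hint to the initiator.
 */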
1421
1422 static void cm_format_rep(struct cm_rep_msg *rep_msg,
1423                           struct cm_id_private *cm_id_priv,
1424                           struct ib_cm_rep_param *param)
1425 {
1426         cm_format_mad_hdr(&rep_msg->hdr, CM_REP_ATTR_ID, cm_id_priv->tid);
1427         rep_msg->local_comm_id = cm_id_priv->id.local_id;
1428         rep_msg->remote_comm_id = cm_id_priv->id.remote_id;
1429         cm_rep_set_local_qpn(rep_msg, cpu_to_be32(param->qp_num));
1430         cm_rep_set_starting_psn(rep_msg, cpu_to_be32(param->starting_psn));
1431         rep_msg->resp_resources = param->responder_resources;
1432         rep_msg->initiator_depth = param->initiator_depth;
1433         cm_rep_set_target_ack_delay(rep_msg, param->target_ack_delay);
1434         cm_rep_set_failover(rep_msg, param->failover_accepted);
1435         cm_rep_set_flow_ctrl(rep_msg, param->flow_control);
1436         cm_rep_set_rnr_retry_count(rep_msg, param->rnr_retry_count);
1437         cm_rep_set_srq(rep_msg, param->srq);
1438         rep_msg->local_ca_guid = cm_id_priv->av.port->cm_dev->ca_guid;
1439
1440         if (param->private_data && param->private_data_len)
1441                 memcpy(rep_msg->private_data, param->private_data,
1442                        param->private_data_len);
1443 }
1444
1445 int ib_send_cm_rep(struct ib_cm_id *cm_id,
1446                    struct ib_cm_rep_param *param)
1447 {
1448         struct cm_id_private *cm_id_priv;
1449         struct ib_mad_send_buf *msg;
1450         struct cm_rep_msg *rep_msg;
1451         unsigned long flags;
1452         int ret;
1453
1454         if (param->private_data &&
1455             param->private_data_len > IB_CM_REP_PRIVATE_DATA_SIZE)
1456                 return -EINVAL;
1457
1458         cm_id_priv = container_of(cm_id, struct cm_id_private, id);
1459         spin_lock_irqsave(&cm_id_priv->lock, flags);
1460         if (cm_id->state != IB_CM_REQ_RCVD &&
1461             cm_id->state != IB_CM_MRA_REQ_SENT) {
1462                 ret = -EINVAL;
1463                 goto out;
1464         }
1465
1466         ret = cm_alloc_msg(cm_id_priv, &msg);
1467         if (ret)
1468                 goto out;
1469
1470         rep_msg = (struct cm_rep_msg *) msg->mad;
1471         cm_format_rep(rep_msg, cm_id_priv, param);
1472         msg->timeout_ms = cm_id_priv->timeout_ms;
1473         msg->context[1] = (void *) (unsigned long) IB_CM_REP_SENT;
1474
1475         ret = ib_post_send_mad(msg, NULL);
1476         if (ret) {
1477                 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1478                 cm_free_msg(msg);
1479                 return ret;
1480         }
1481
1482         cm_id->state = IB_CM_REP_SENT;
1483         cm_id_priv->msg = msg;
1484         cm_id_priv->initiator_depth = param->initiator_depth;
1485         cm_id_priv->responder_resources = param->responder_resources;
1486         cm_id_priv->rq_psn = cm_rep_get_starting_psn(rep_msg);
1487         cm_id_priv->local_qpn = cm_rep_get_local_qpn(rep_msg);
1488
1489 out:    spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1490         return ret;
1491 }
1492 EXPORT_SYMBOL(ib_send_cm_rep);
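
/*
 * Illustrative sketch only, not part of cm.c: a minimal passive-side
 * cm_handler that accepts an IB_CM_REQ_RECEIVED event by replying with a
 * REP.  "example_qp" and the chosen parameter values are assumptions for
 * the example; a real consumer would also transition its QP using the
 * event parameters.
 */
static struct ib_qp *example_qp;        /* hypothetical QP, set up elsewhere */

static int example_rep_handler(struct ib_cm_id *cm_id,
                               struct ib_cm_event *event)
{
        struct ib_cm_rep_param rep;

        if (event->event != IB_CM_REQ_RECEIVED)
                return 0;

        memset(&rep, 0, sizeof rep);
        rep.qp_num = example_qp->qp_num;
        rep.starting_psn = 1;
        rep.responder_resources = event->param.req_rcvd.responder_resources;
        rep.initiator_depth = event->param.req_rcvd.initiator_depth;
        rep.rnr_retry_count = 7;        /* 7 = retry RNR NAKs forever */
        /* a nonzero return here asks the CM to destroy this cm_id */
        return ib_send_cm_rep(cm_id, &rep);
}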
1493
1494 static void cm_format_rtu(struct cm_rtu_msg *rtu_msg,
1495                           struct cm_id_private *cm_id_priv,
1496                           const void *private_data,
1497                           u8 private_data_len)
1498 {
1499         cm_format_mad_hdr(&rtu_msg->hdr, CM_RTU_ATTR_ID, cm_id_priv->tid);
1500         rtu_msg->local_comm_id = cm_id_priv->id.local_id;
1501         rtu_msg->remote_comm_id = cm_id_priv->id.remote_id;
1502
1503         if (private_data && private_data_len)
1504                 memcpy(rtu_msg->private_data, private_data, private_data_len);
1505 }
1506
1507 int ib_send_cm_rtu(struct ib_cm_id *cm_id,
1508                    const void *private_data,
1509                    u8 private_data_len)
1510 {
1511         struct cm_id_private *cm_id_priv;
1512         struct ib_mad_send_buf *msg;
1513         unsigned long flags;
1514         void *data;
1515         int ret;
1516
1517         if (private_data && private_data_len > IB_CM_RTU_PRIVATE_DATA_SIZE)
1518                 return -EINVAL;
1519
1520         data = cm_copy_private_data(private_data, private_data_len);
1521         if (IS_ERR(data))
1522                 return PTR_ERR(data);
1523
1524         cm_id_priv = container_of(cm_id, struct cm_id_private, id);
1525         spin_lock_irqsave(&cm_id_priv->lock, flags);
1526         if (cm_id->state != IB_CM_REP_RCVD &&
1527             cm_id->state != IB_CM_MRA_REP_SENT) {
1528                 ret = -EINVAL;
1529                 goto error;
1530         }
1531
1532         ret = cm_alloc_msg(cm_id_priv, &msg);
1533         if (ret)
1534                 goto error;
1535
1536         cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
1537                       private_data, private_data_len);
1538
1539         ret = ib_post_send_mad(msg, NULL);
1540         if (ret) {
1541                 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1542                 cm_free_msg(msg);
1543                 kfree(data);
1544                 return ret;
1545         }
1546
1547         cm_id->state = IB_CM_ESTABLISHED;
1548         cm_set_private_data(cm_id_priv, data, private_data_len);
1549         spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1550         return 0;
1551
1552 error:  spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1553         kfree(data);
1554         return ret;
1555 }
1556 EXPORT_SYMBOL(ib_send_cm_rtu);
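
/*
 * Illustrative sketch, not part of cm.c: the active side completes the
 * three-way handshake from its cm_handler once the REP arrives.  Moving
 * the QP through RTR/RTS is the consumer's job and is omitted here.
 */
static int example_rtu_handler(struct ib_cm_id *cm_id,
                               struct ib_cm_event *event)
{
        if (event->event == IB_CM_REP_RECEIVED)
                return ib_send_cm_rtu(cm_id, NULL, 0);
        return 0;
}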
1557
1558 static void cm_format_rep_event(struct cm_work *work)
1559 {
1560         struct cm_rep_msg *rep_msg;
1561         struct ib_cm_rep_event_param *param;
1562
1563         rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
1564         param = &work->cm_event.param.rep_rcvd;
1565         param->remote_ca_guid = rep_msg->local_ca_guid;
1566         param->remote_qkey = be32_to_cpu(rep_msg->local_qkey);
1567         param->remote_qpn = be32_to_cpu(cm_rep_get_local_qpn(rep_msg));
1568         param->starting_psn = be32_to_cpu(cm_rep_get_starting_psn(rep_msg));
1569         param->responder_resources = rep_msg->initiator_depth;
1570         param->initiator_depth = rep_msg->resp_resources;
1571         param->target_ack_delay = cm_rep_get_target_ack_delay(rep_msg);
1572         param->failover_accepted = cm_rep_get_failover(rep_msg);
1573         param->flow_control = cm_rep_get_flow_ctrl(rep_msg);
1574         param->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);
1575         param->srq = cm_rep_get_srq(rep_msg);
1576         work->cm_event.private_data = &rep_msg->private_data;
1577 }
1578
1579 static void cm_dup_rep_handler(struct cm_work *work)
1580 {
1581         struct cm_id_private *cm_id_priv;
1582         struct cm_rep_msg *rep_msg;
1583         struct ib_mad_send_buf *msg = NULL;
1584         unsigned long flags;
1585         int ret;
1586
1587         rep_msg = (struct cm_rep_msg *) work->mad_recv_wc->recv_buf.mad;
1588         cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id,
1589                                    rep_msg->local_comm_id);
1590         if (!cm_id_priv)
1591                 return;
1592
1593         ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
1594         if (ret)
1595                 goto deref;
1596
1597         spin_lock_irqsave(&cm_id_priv->lock, flags);
1598         if (cm_id_priv->id.state == IB_CM_ESTABLISHED)
1599                 cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
1600                               cm_id_priv->private_data,
1601                               cm_id_priv->private_data_len);
1602         else if (cm_id_priv->id.state == IB_CM_MRA_REP_SENT)
1603                 cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
1604                               CM_MSG_RESPONSE_REP, cm_id_priv->service_timeout,
1605                               cm_id_priv->private_data,
1606                               cm_id_priv->private_data_len);
1607         else
1608                 goto unlock;
1609         spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1610
1611         ret = ib_post_send_mad(msg, NULL);
1612         if (ret)
1613                 goto free;
1614         goto deref;
1615
1616 unlock: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1617 free:   cm_free_msg(msg);
1618 deref:  cm_deref_id(cm_id_priv);
1619 }
1620
1621 static int cm_rep_handler(struct cm_work *work)
1622 {
1623         struct cm_id_private *cm_id_priv;
1624         struct cm_rep_msg *rep_msg;
1625         unsigned long flags;
1626         int ret;
1627
1628         rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
1629         cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id, 0);
1630         if (!cm_id_priv) {
1631                 cm_dup_rep_handler(work);
1632                 return -EINVAL;
1633         }
1634
1635         cm_format_rep_event(work);
1636
1637         spin_lock_irqsave(&cm_id_priv->lock, flags);
1638         switch (cm_id_priv->id.state) {
1639         case IB_CM_REQ_SENT:
1640         case IB_CM_MRA_REQ_RCVD:
1641                 break;
1642         default:
1643                 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1644                 ret = -EINVAL;
1645                 goto error;
1646         }
1647
1648         cm_id_priv->timewait_info->work.remote_id = rep_msg->local_comm_id;
1649         cm_id_priv->timewait_info->remote_ca_guid = rep_msg->local_ca_guid;
1650         cm_id_priv->timewait_info->remote_qpn = cm_rep_get_local_qpn(rep_msg);
1651
1652         spin_lock(&cm.lock);
1653         /* Check for duplicate REP. */
1654         if (cm_insert_remote_id(cm_id_priv->timewait_info)) {
1655                 spin_unlock(&cm.lock);
1656                 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1657                 ret = -EINVAL;
1658                 goto error;
1659         }
1660         /* Check for a stale connection. */
1661         if (cm_insert_remote_qpn(cm_id_priv->timewait_info)) {
1662                 rb_erase(&cm_id_priv->timewait_info->remote_id_node,
1663                          &cm.remote_id_table);
1664                 cm_id_priv->timewait_info->inserted_remote_id = 0;
1665                 spin_unlock(&cm.lock);
1666                 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1667                 cm_issue_rej(work->port, work->mad_recv_wc,
1668                              IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REP,
1669                              NULL, 0);
1670                 ret = -EINVAL;
1671                 goto error;
1672         }
1673         spin_unlock(&cm.lock);
1674
1675         cm_id_priv->id.state = IB_CM_REP_RCVD;
1676         cm_id_priv->id.remote_id = rep_msg->local_comm_id;
1677         cm_id_priv->remote_qpn = cm_rep_get_local_qpn(rep_msg);
1678         cm_id_priv->initiator_depth = rep_msg->resp_resources;
1679         cm_id_priv->responder_resources = rep_msg->initiator_depth;
1680         cm_id_priv->sq_psn = cm_rep_get_starting_psn(rep_msg);
1681         cm_id_priv->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);
1682
1683         /* todo: handle peer_to_peer */
1684
1685         ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
1686         ret = atomic_inc_and_test(&cm_id_priv->work_count);
1687         if (!ret)
1688                 list_add_tail(&work->list, &cm_id_priv->work_list);
1689         spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1690
1691         if (ret)
1692                 cm_process_work(cm_id_priv, work);
1693         else
1694                 cm_deref_id(cm_id_priv);
1695         return 0;
1696
1697 error:
1698         cm_deref_id(cm_id_priv);
1699         return ret;
1700 }
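
/*
 * Note on cm_rep_handler() above: a REP whose comm ID no longer matches a
 * connection waiting for one is treated as a retransmission and handed to
 * cm_dup_rep_handler(), which resends the earlier RTU or MRA.  For a
 * first REP, the timewait rb-trees catch duplicates (remote comm ID
 * already present) and stale connections (remote QPN already present,
 * answered with a REJ), and the outstanding REQ MAD is cancelled before
 * the event is delivered.
 */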
1701
1702 static int cm_establish_handler(struct cm_work *work)
1703 {
1704         struct cm_id_private *cm_id_priv;
1705         unsigned long flags;
1706         int ret;
1707
1708         /* See comment in ib_cm_establish about lookup. */
1709         cm_id_priv = cm_acquire_id(work->local_id, work->remote_id);
1710         if (!cm_id_priv)
1711                 return -EINVAL;
1712
1713         spin_lock_irqsave(&cm_id_priv->lock, flags);
1714         if (cm_id_priv->id.state != IB_CM_ESTABLISHED) {
1715                 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1716                 goto out;
1717         }
1718
1719         ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
1720         ret = atomic_inc_and_test(&cm_id_priv->work_count);
1721         if (!ret)
1722                 list_add_tail(&work->list, &cm_id_priv->work_list);
1723         spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1724
1725         if (ret)
1726                 cm_process_work(cm_id_priv, work);
1727         else
1728                 cm_deref_id(cm_id_priv);
1729         return 0;
1730 out:
1731         cm_deref_id(cm_id_priv);
1732         return -EINVAL;
1733 }
1734
1735 static int cm_rtu_handler(struct cm_work *work)
1736 {
1737         struct cm_id_private *cm_id_priv;
1738         struct cm_rtu_msg *rtu_msg;
1739         unsigned long flags;
1740         int ret;
1741
1742         rtu_msg = (struct cm_rtu_msg *)work->mad_recv_wc->recv_buf.mad;
1743         cm_id_priv = cm_acquire_id(rtu_msg->remote_comm_id,
1744                                    rtu_msg->local_comm_id);
1745         if (!cm_id_priv)
1746                 return -EINVAL;
1747
1748         work->cm_event.private_data = &rtu_msg->private_data;
1749
1750         spin_lock_irqsave(&cm_id_priv->lock, flags);
1751         if (cm_id_priv->id.state != IB_CM_REP_SENT &&
1752             cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
1753                 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1754                 goto out;
1755         }
1756         cm_id_priv->id.state = IB_CM_ESTABLISHED;
1757
1758         ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
1759         ret = atomic_inc_and_test(&cm_id_priv->work_count);
1760         if (!ret)
1761                 list_add_tail(&work->list, &cm_id_priv->work_list);
1762         spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1763
1764         if (ret)
1765                 cm_process_work(cm_id_priv, work);
1766         else
1767                 cm_deref_id(cm_id_priv);
1768         return 0;
1769 out:
1770         cm_deref_id(cm_id_priv);
1771         return -EINVAL;
1772 }
1773
1774 static void cm_format_dreq(struct cm_dreq_msg *dreq_msg,
1775                           struct cm_id_private *cm_id_priv,
1776                           const void *private_data,
1777                           u8 private_data_len)
1778 {
1779         cm_format_mad_hdr(&dreq_msg->hdr, CM_DREQ_ATTR_ID,
1780                           cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_DREQ));
1781         dreq_msg->local_comm_id = cm_id_priv->id.local_id;
1782         dreq_msg->remote_comm_id = cm_id_priv->id.remote_id;
1783         cm_dreq_set_remote_qpn(dreq_msg, cm_id_priv->remote_qpn);
1784
1785         if (private_data && private_data_len)
1786                 memcpy(dreq_msg->private_data, private_data, private_data_len);
1787 }
1788
1789 int ib_send_cm_dreq(struct ib_cm_id *cm_id,
1790                     const void *private_data,
1791                     u8 private_data_len)
1792 {
1793         struct cm_id_private *cm_id_priv;
1794         struct ib_mad_send_buf *msg;
1795         unsigned long flags;
1796         int ret;
1797
1798         if (private_data && private_data_len > IB_CM_DREQ_PRIVATE_DATA_SIZE)
1799                 return -EINVAL;
1800
1801         cm_id_priv = container_of(cm_id, struct cm_id_private, id);
1802         spin_lock_irqsave(&cm_id_priv->lock, flags);
1803         if (cm_id->state != IB_CM_ESTABLISHED) {
1804                 ret = -EINVAL;
1805                 goto out;
1806         }
1807
1808         ret = cm_alloc_msg(cm_id_priv, &msg);
1809         if (ret) {
1810                 cm_enter_timewait(cm_id_priv);
1811                 goto out;
1812         }
1813
1814         cm_format_dreq((struct cm_dreq_msg *) msg->mad, cm_id_priv,
1815                        private_data, private_data_len);
1816         msg->timeout_ms = cm_id_priv->timeout_ms;
1817         msg->context[1] = (void *) (unsigned long) IB_CM_DREQ_SENT;
1818
1819         ret = ib_post_send_mad(msg, NULL);
1820         if (ret) {
1821                 cm_enter_timewait(cm_id_priv);
1822                 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1823                 cm_free_msg(msg);
1824                 return ret;
1825         }
1826
1827         cm_id->state = IB_CM_DREQ_SENT;
1828         cm_id_priv->msg = msg;
1829 out:    spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1830         return ret;
1831 }
1832 EXPORT_SYMBOL(ib_send_cm_dreq);
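
/*
 * Illustrative sketch, not part of cm.c: orderly teardown of an
 * established connection.  Either side may send the DREQ; the peer's
 * handler sees IB_CM_DREQ_RECEIVED and answers with ib_send_cm_drep().
 * Note above that a send failure still moves the cm_id into timewait.
 */
static void example_disconnect(struct ib_cm_id *cm_id)
{
        if (ib_send_cm_dreq(cm_id, NULL, 0))
                printk(KERN_DEBUG "DREQ send failed, already in timewait\n");
}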
1833
1834 static void cm_format_drep(struct cm_drep_msg *drep_msg,
1835                           struct cm_id_private *cm_id_priv,
1836                           const void *private_data,
1837                           u8 private_data_len)
1838 {
1839         cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, cm_id_priv->tid);
1840         drep_msg->local_comm_id = cm_id_priv->id.local_id;
1841         drep_msg->remote_comm_id = cm_id_priv->id.remote_id;
1842
1843         if (private_data && private_data_len)
1844                 memcpy(drep_msg->private_data, private_data, private_data_len);
1845 }
1846
1847 int ib_send_cm_drep(struct ib_cm_id *cm_id,
1848                     const void *private_data,
1849                     u8 private_data_len)
1850 {
1851         struct cm_id_private *cm_id_priv;
1852         struct ib_mad_send_buf *msg;
1853         unsigned long flags;
1854         void *data;
1855         int ret;
1856
1857         if (private_data && private_data_len > IB_CM_DREP_PRIVATE_DATA_SIZE)
1858                 return -EINVAL;
1859
1860         data = cm_copy_private_data(private_data, private_data_len);
1861         if (IS_ERR(data))
1862                 return PTR_ERR(data);
1863
1864         cm_id_priv = container_of(cm_id, struct cm_id_private, id);
1865         spin_lock_irqsave(&cm_id_priv->lock, flags);
1866         if (cm_id->state != IB_CM_DREQ_RCVD) {
1867                 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1868                 kfree(data);
1869                 return -EINVAL;
1870         }
1871
1872         cm_set_private_data(cm_id_priv, data, private_data_len);
1873         cm_enter_timewait(cm_id_priv);
1874
1875         ret = cm_alloc_msg(cm_id_priv, &msg);
1876         if (ret)
1877                 goto out;
1878
1879         cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
1880                        private_data, private_data_len);
1881
1882         ret = ib_post_send_mad(msg, NULL);
1883         if (ret) {
1884                 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1885                 cm_free_msg(msg);
1886                 return ret;
1887         }
1888
1889 out:    spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1890         return ret;
1891 }
1892 EXPORT_SYMBOL(ib_send_cm_drep);
1893
1894 static int cm_dreq_handler(struct cm_work *work)
1895 {
1896         struct cm_id_private *cm_id_priv;
1897         struct cm_dreq_msg *dreq_msg;
1898         struct ib_mad_send_buf *msg = NULL;
1899         unsigned long flags;
1900         int ret;
1901
1902         dreq_msg = (struct cm_dreq_msg *)work->mad_recv_wc->recv_buf.mad;
1903         cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
1904                                    dreq_msg->local_comm_id);
1905         if (!cm_id_priv)
1906                 return -EINVAL;
1907
1908         work->cm_event.private_data = &dreq_msg->private_data;
1909
1910         spin_lock_irqsave(&cm_id_priv->lock, flags);
1911         if (cm_id_priv->local_qpn != cm_dreq_get_remote_qpn(dreq_msg))
1912                 goto unlock;
1913
1914         switch (cm_id_priv->id.state) {
1915         case IB_CM_REP_SENT:
1916         case IB_CM_DREQ_SENT:
1917                 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
1918                 break;
1919         case IB_CM_ESTABLISHED:
1920         case IB_CM_MRA_REP_RCVD:
1921                 break;
1922         case IB_CM_TIMEWAIT:
1923                 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
1924                         goto unlock;
1925
1926                 cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
1927                                cm_id_priv->private_data,
1928                                cm_id_priv->private_data_len);
1929                 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1930
1931                 if (ib_post_send_mad(msg, NULL))
1932                         cm_free_msg(msg);
1933                 goto deref;
1934         default:
1935                 goto unlock;
1936         }
1937         cm_id_priv->id.state = IB_CM_DREQ_RCVD;
1938         cm_id_priv->tid = dreq_msg->hdr.tid;
1939         ret = atomic_inc_and_test(&cm_id_priv->work_count);
1940         if (!ret)
1941                 list_add_tail(&work->list, &cm_id_priv->work_list);
1942         spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1943
1944         if (ret)
1945                 cm_process_work(cm_id_priv, work);
1946         else
1947                 cm_deref_id(cm_id_priv);
1948         return 0;
1949
1950 unlock: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1951 deref:  cm_deref_id(cm_id_priv);
1952         return -EINVAL;
1953 }
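
/*
 * Note on cm_dreq_handler() above: the remote QPN carried in the DREQ
 * must match our local QPN, which keeps a DREQ for an older incarnation
 * of the connection from tearing down the current one.  A DREQ arriving
 * while the cm_id is already in timewait (i.e. our DREP was lost) is
 * answered directly with a fresh DREP instead of raising another event.
 */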
1954
1955 static int cm_drep_handler(struct cm_work *work)
1956 {
1957         struct cm_id_private *cm_id_priv;
1958         struct cm_drep_msg *drep_msg;
1959         unsigned long flags;
1960         int ret;
1961
1962         drep_msg = (struct cm_drep_msg *)work->mad_recv_wc->recv_buf.mad;
1963         cm_id_priv = cm_acquire_id(drep_msg->remote_comm_id,
1964                                    drep_msg->local_comm_id);
1965         if (!cm_id_priv)
1966                 return -EINVAL;
1967
1968         work->cm_event.private_data = &drep_msg->private_data;
1969
1970         spin_lock_irqsave(&cm_id_priv->lock, flags);
1971         if (cm_id_priv->id.state != IB_CM_DREQ_SENT &&
1972             cm_id_priv->id.state != IB_CM_DREQ_RCVD) {
1973                 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1974                 goto out;
1975         }
1976         cm_enter_timewait(cm_id_priv);
1977
1978         ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
1979         ret = atomic_inc_and_test(&cm_id_priv->work_count);
1980         if (!ret)
1981                 list_add_tail(&work->list, &cm_id_priv->work_list);
1982         spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1983
1984         if (ret)
1985                 cm_process_work(cm_id_priv, work);
1986         else
1987                 cm_deref_id(cm_id_priv);
1988         return 0;
1989 out:
1990         cm_deref_id(cm_id_priv);
1991         return -EINVAL;
1992 }
1993
1994 int ib_send_cm_rej(struct ib_cm_id *cm_id,
1995                    enum ib_cm_rej_reason reason,
1996                    void *ari,
1997                    u8 ari_length,
1998                    const void *private_data,
1999                    u8 private_data_len)
2000 {
2001         struct cm_id_private *cm_id_priv;
2002         struct ib_mad_send_buf *msg;
2003         unsigned long flags;
2004         int ret;
2005
2006         if ((private_data && private_data_len > IB_CM_REJ_PRIVATE_DATA_SIZE) ||
2007             (ari && ari_length > IB_CM_REJ_ARI_LENGTH))
2008                 return -EINVAL;
2009
2010         cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2011
2012         spin_lock_irqsave(&cm_id_priv->lock, flags);
2013         switch (cm_id->state) {
2014         case IB_CM_REQ_SENT:
2015         case IB_CM_MRA_REQ_RCVD:
2016         case IB_CM_REQ_RCVD:
2017         case IB_CM_MRA_REQ_SENT:
2018         case IB_CM_REP_RCVD:
2019         case IB_CM_MRA_REP_SENT:
2020                 ret = cm_alloc_msg(cm_id_priv, &msg);
2021                 if (!ret)
2022                         cm_format_rej((struct cm_rej_msg *) msg->mad,
2023                                       cm_id_priv, reason, ari, ari_length,
2024                                       private_data, private_data_len);
2025
2026                 cm_reset_to_idle(cm_id_priv);
2027                 break;
2028         case IB_CM_REP_SENT:
2029         case IB_CM_MRA_REP_RCVD:
2030                 ret = cm_alloc_msg(cm_id_priv, &msg);
2031                 if (!ret)
2032                         cm_format_rej((struct cm_rej_msg *) msg->mad,
2033                                       cm_id_priv, reason, ari, ari_length,
2034                                       private_data, private_data_len);
2035
2036                 cm_enter_timewait(cm_id_priv);
2037                 break;
2038         default:
2039                 ret = -EINVAL;
2040                 goto out;
2041         }
2042
2043         if (ret)
2044                 goto out;
2045
2046         ret = ib_post_send_mad(msg, NULL);
2047         if (ret)
2048                 cm_free_msg(msg);
2049
2050 out:    spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2051         return ret;
2052 }
2053 EXPORT_SYMBOL(ib_send_cm_rej);
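
/*
 * Illustrative sketch, not part of cm.c: declining an incoming REQ from a
 * listener's cm_handler; example_can_accept() is a hypothetical policy
 * check.  As the state machine above shows, rejecting before our own REP
 * goes out resets the cm_id to idle, while rejecting after REP was sent
 * enters timewait.
 */
static int example_can_accept(void)
{
        return 0;       /* hypothetical policy: refuse everything */
}

static int example_reject(struct ib_cm_id *cm_id, struct ib_cm_event *event)
{
        if (event->event == IB_CM_REQ_RECEIVED && !example_can_accept())
                return ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
                                      NULL, 0, NULL, 0);
        return 0;
}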
2054
2055 static void cm_format_rej_event(struct cm_work *work)
2056 {
2057         struct cm_rej_msg *rej_msg;
2058         struct ib_cm_rej_event_param *param;
2059
2060         rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
2061         param = &work->cm_event.param.rej_rcvd;
2062         param->ari = rej_msg->ari;
2063         param->ari_length = cm_rej_get_reject_info_len(rej_msg);
2064         param->reason = __be16_to_cpu(rej_msg->reason);
2065         work->cm_event.private_data = &rej_msg->private_data;
2066 }
2067
2068 static struct cm_id_private * cm_acquire_rejected_id(struct cm_rej_msg *rej_msg)
2069 {
2070         struct cm_timewait_info *timewait_info;
2071         struct cm_id_private *cm_id_priv;
2072         unsigned long flags;
2073         __be32 remote_id;
2074
2075         remote_id = rej_msg->local_comm_id;
2076
2077         if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_TIMEOUT) {
2078                 spin_lock_irqsave(&cm.lock, flags);
2079                 timewait_info = cm_find_remote_id( *((__be64 *) rej_msg->ari),
2080                                                   remote_id);
2081                 if (!timewait_info) {
2082                         spin_unlock_irqrestore(&cm.lock, flags);
2083                         return NULL;
2084                 }
2085                 cm_id_priv = idr_find(&cm.local_id_table,
2086                                       (__force int) timewait_info->work.local_id);
2087                 if (cm_id_priv) {
2088                         if (cm_id_priv->id.remote_id == remote_id)
2089                                 atomic_inc(&cm_id_priv->refcount);
2090                         else
2091                                 cm_id_priv = NULL;
2092                 }
2093                 spin_unlock_irqrestore(&cm.lock, flags);
2094         } else if (cm_rej_get_msg_rejected(rej_msg) == CM_MSG_RESPONSE_REQ)
2095                 cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, 0);
2096         else
2097                 cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, remote_id);
2098
2099         return cm_id_priv;
2100 }
2101
2102 static int cm_rej_handler(struct cm_work *work)
2103 {
2104         struct cm_id_private *cm_id_priv;
2105         struct cm_rej_msg *rej_msg;
2106         unsigned long flags;
2107         int ret;
2108
2109         rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
2110         cm_id_priv = cm_acquire_rejected_id(rej_msg);
2111         if (!cm_id_priv)
2112                 return -EINVAL;
2113
2114         cm_format_rej_event(work);
2115
2116         spin_lock_irqsave(&cm_id_priv->lock, flags);
2117         switch (cm_id_priv->id.state) {
2118         case IB_CM_REQ_SENT:
2119         case IB_CM_MRA_REQ_RCVD:
2120         case IB_CM_REP_SENT:
2121         case IB_CM_MRA_REP_RCVD:
2122                 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2123                 /* fall through */
2124         case IB_CM_REQ_RCVD:
2125         case IB_CM_MRA_REQ_SENT:
2126                 if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_STALE_CONN)
2127                         cm_enter_timewait(cm_id_priv);
2128                 else
2129                         cm_reset_to_idle(cm_id_priv);
2130                 break;
2131         case IB_CM_DREQ_SENT:
2132                 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2133                 /* fall through */
2134         case IB_CM_REP_RCVD:
2135         case IB_CM_MRA_REP_SENT:
2136         case IB_CM_ESTABLISHED:
2137                 cm_enter_timewait(cm_id_priv);
2138                 break;
2139         default:
2140                 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2141                 ret = -EINVAL;
2142                 goto out;
2143         }
2144
2145         ret = atomic_inc_and_test(&cm_id_priv->work_count);
2146         if (!ret)
2147                 list_add_tail(&work->list, &cm_id_priv->work_list);
2148         spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2149
2150         if (ret)
2151                 cm_process_work(cm_id_priv, work);
2152         else
2153                 cm_deref_id(cm_id_priv);
2154         return 0;
2155 out:
2156         cm_deref_id(cm_id_priv);
2157         return -EINVAL;
2158 }
2159
2160 int ib_send_cm_mra(struct ib_cm_id *cm_id,
2161                    u8 service_timeout,
2162                    const void *private_data,
2163                    u8 private_data_len)
2164 {
2165         struct cm_id_private *cm_id_priv;
2166         struct ib_mad_send_buf *msg;
2167         void *data;
2168         unsigned long flags;
2169         int ret;
2170
2171         if (private_data && private_data_len > IB_CM_MRA_PRIVATE_DATA_SIZE)
2172                 return -EINVAL;
2173
2174         data = cm_copy_private_data(private_data, private_data_len);
2175         if (IS_ERR(data))
2176                 return PTR_ERR(data);
2177
2178         cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2179
2180         spin_lock_irqsave(&cm_id_priv->lock, flags);
2181         switch (cm_id_priv->id.state) {
2182         case IB_CM_REQ_RCVD:
2183                 ret = cm_alloc_msg(cm_id_priv, &msg);
2184                 if (ret)
2185                         goto error1;
2186
2187                 cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
2188                               CM_MSG_RESPONSE_REQ, service_timeout,
2189                               private_data, private_data_len);
2190                 ret = ib_post_send_mad(msg, NULL);
2191                 if (ret)
2192                         goto error2;
2193                 cm_id->state = IB_CM_MRA_REQ_SENT;
2194                 break;
2195         case IB_CM_REP_RCVD:
2196                 ret = cm_alloc_msg(cm_id_priv, &msg);
2197                 if (ret)
2198                         goto error1;
2199
2200                 cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
2201                               CM_MSG_RESPONSE_REP, service_timeout,
2202                               private_data, private_data_len);
2203                 ret = ib_post_send_mad(msg, NULL);
2204                 if (ret)
2205                         goto error2;
2206                 cm_id->state = IB_CM_MRA_REP_SENT;
2207                 break;
2208         case IB_CM_ESTABLISHED:
2209                 ret = cm_alloc_msg(cm_id_priv, &msg);
2210                 if (ret)
2211                         goto error1;
2212
2213                 cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
2214                               CM_MSG_RESPONSE_OTHER, service_timeout,
2215                               private_data, private_data_len);
2216                 ret = ib_post_send_mad(msg, NULL);
2217                 if (ret)
2218                         goto error2;
2219                 cm_id->lap_state = IB_CM_MRA_LAP_SENT;
2220                 break;
2221         default:
2222                 ret = -EINVAL;
2223                 goto error1;
2224         }
2225         cm_id_priv->service_timeout = service_timeout;
2226         cm_set_private_data(cm_id_priv, data, private_data_len);
2227         spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2228         return 0;
2229
2230 error1: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2231         kfree(data);
2232         return ret;
2233
2234 error2: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2235         kfree(data);
2236         cm_free_msg(msg);
2237         return ret;
2238 }
2239 EXPORT_SYMBOL(ib_send_cm_mra);
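
/*
 * Illustrative sketch, not part of cm.c: asking the peer for more time to
 * service a REQ.  The 5-bit service_timeout is an IBA exponent; the peer
 * waits roughly 4.096us * 2^timeout, so the value 20 used here buys about
 * 4.3 seconds.
 */
static int example_delay_req(struct ib_cm_id *cm_id,
                             struct ib_cm_event *event)
{
        if (event->event == IB_CM_REQ_RECEIVED)
                return ib_send_cm_mra(cm_id, 20, NULL, 0);
        return 0;
}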
2240
2241 static struct cm_id_private * cm_acquire_mraed_id(struct cm_mra_msg *mra_msg)
2242 {
2243         switch (cm_mra_get_msg_mraed(mra_msg)) {
2244         case CM_MSG_RESPONSE_REQ:
2245                 return cm_acquire_id(mra_msg->remote_comm_id, 0);
2246         case CM_MSG_RESPONSE_REP:
2247         case CM_MSG_RESPONSE_OTHER:
2248                 return cm_acquire_id(mra_msg->remote_comm_id,
2249                                      mra_msg->local_comm_id);
2250         default:
2251                 return NULL;
2252         }
2253 }
2254
2255 static int cm_mra_handler(struct cm_work *work)
2256 {
2257         struct cm_id_private *cm_id_priv;
2258         struct cm_mra_msg *mra_msg;
2259         unsigned long flags;
2260         int timeout, ret;
2261
2262         mra_msg = (struct cm_mra_msg *)work->mad_recv_wc->recv_buf.mad;
2263         cm_id_priv = cm_acquire_mraed_id(mra_msg);
2264         if (!cm_id_priv)
2265                 return -EINVAL;
2266
2267         work->cm_event.private_data = &mra_msg->private_data;
2268         work->cm_event.param.mra_rcvd.service_timeout =
2269                                         cm_mra_get_service_timeout(mra_msg);
2270         timeout = cm_convert_to_ms(cm_mra_get_service_timeout(mra_msg)) +
2271                   cm_convert_to_ms(cm_id_priv->av.packet_life_time);
2272
2273         spin_lock_irqsave(&cm_id_priv->lock, flags);
2274         switch (cm_id_priv->id.state) {
2275         case IB_CM_REQ_SENT:
2276                 if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REQ ||
2277                     ib_modify_mad(cm_id_priv->av.port->mad_agent,
2278                                   cm_id_priv->msg, timeout))
2279                         goto out;
2280                 cm_id_priv->id.state = IB_CM_MRA_REQ_RCVD;
2281                 break;
2282         case IB_CM_REP_SENT:
2283                 if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REP ||
2284                     ib_modify_mad(cm_id_priv->av.port->mad_agent,
2285                                   cm_id_priv->msg, timeout))
2286                         goto out;
2287                 cm_id_priv->id.state = IB_CM_MRA_REP_RCVD;
2288                 break;
2289         case IB_CM_ESTABLISHED:
2290                 if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_OTHER ||
2291                     cm_id_priv->id.lap_state != IB_CM_LAP_SENT ||
2292                     ib_modify_mad(cm_id_priv->av.port->mad_agent,
2293                                   cm_id_priv->msg, timeout))
2294                         goto out;
2295                 cm_id_priv->id.lap_state = IB_CM_MRA_LAP_RCVD;
2296                 break;
2297         default:
2298                 goto out;
2299         }
2300
2301         cm_id_priv->msg->context[1] = (void *) (unsigned long)
2302                                       cm_id_priv->id.state;
2303         ret = atomic_inc_and_test(&cm_id_priv->work_count);
2304         if (!ret)
2305                 list_add_tail(&work->list, &cm_id_priv->work_list);
2306         spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2307
2308         if (ret)
2309                 cm_process_work(cm_id_priv, work);
2310         else
2311                 cm_deref_id(cm_id_priv);
2312         return 0;
2313 out:
2314         spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2315         cm_deref_id(cm_id_priv);
2316         return -EINVAL;
2317 }
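
/*
 * Note on cm_mra_handler() above: an MRA generates no reply of its own.
 * It stretches the retry timeout of the MAD we already have outstanding
 * (via ib_modify_mad) by the peer's advertised service timeout plus the
 * path's packet lifetime, both converted from IBA exponents to
 * milliseconds.
 */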
2318
2319 static void cm_format_lap(struct cm_lap_msg *lap_msg,
2320                           struct cm_id_private *cm_id_priv,
2321                           struct ib_sa_path_rec *alternate_path,
2322                           const void *private_data,
2323                           u8 private_data_len)
2324 {
2325         cm_format_mad_hdr(&lap_msg->hdr, CM_LAP_ATTR_ID,
2326                           cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_LAP));
2327         lap_msg->local_comm_id = cm_id_priv->id.local_id;
2328         lap_msg->remote_comm_id = cm_id_priv->id.remote_id;
2329         cm_lap_set_remote_qpn(lap_msg, cm_id_priv->remote_qpn);
2330         /* todo: need remote CM response timeout */
2331         cm_lap_set_remote_resp_timeout(lap_msg, 0x1F);
2332         lap_msg->alt_local_lid = alternate_path->slid;
2333         lap_msg->alt_remote_lid = alternate_path->dlid;
2334         lap_msg->alt_local_gid = alternate_path->sgid;
2335         lap_msg->alt_remote_gid = alternate_path->dgid;
2336         cm_lap_set_flow_label(lap_msg, alternate_path->flow_label);
2337         cm_lap_set_traffic_class(lap_msg, alternate_path->traffic_class);
2338         lap_msg->alt_hop_limit = alternate_path->hop_limit;
2339         cm_lap_set_packet_rate(lap_msg, alternate_path->rate);
2340         cm_lap_set_sl(lap_msg, alternate_path->sl);
2341         cm_lap_set_subnet_local(lap_msg, 1); /* local only... */
2342         cm_lap_set_local_ack_timeout(lap_msg,
2343                 min(31, alternate_path->packet_life_time + 1));
2344
2345         if (private_data && private_data_len)
2346                 memcpy(lap_msg->private_data, private_data, private_data_len);
2347 }
2348
2349 int ib_send_cm_lap(struct ib_cm_id *cm_id,
2350                    struct ib_sa_path_rec *alternate_path,
2351                    const void *private_data,
2352                    u8 private_data_len)
2353 {
2354         struct cm_id_private *cm_id_priv;
2355         struct ib_mad_send_buf *msg;
2356         unsigned long flags;
2357         int ret;
2358
2359         if (private_data && private_data_len > IB_CM_LAP_PRIVATE_DATA_SIZE)
2360                 return -EINVAL;
2361
2362         cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2363         spin_lock_irqsave(&cm_id_priv->lock, flags);
2364         if (cm_id->state != IB_CM_ESTABLISHED ||
2365             cm_id->lap_state != IB_CM_LAP_IDLE) {
2366                 ret = -EINVAL;
2367                 goto out;
2368         }
2369
2370         ret = cm_alloc_msg(cm_id_priv, &msg);
2371         if (ret)
2372                 goto out;
2373
2374         cm_format_lap((struct cm_lap_msg *) msg->mad, cm_id_priv,
2375                       alternate_path, private_data, private_data_len);
2376         msg->timeout_ms = cm_id_priv->timeout_ms;
2377         msg->context[1] = (void *) (unsigned long) IB_CM_ESTABLISHED;
2378
2379         ret = ib_post_send_mad(msg, NULL);
2380         if (ret) {
2381                 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2382                 cm_free_msg(msg);
2383                 return ret;
2384         }
2385
2386         cm_id->lap_state = IB_CM_LAP_SENT;
2387         cm_id_priv->msg = msg;
2388
2389 out:    spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2390         return ret;
2391 }
2392 EXPORT_SYMBOL(ib_send_cm_lap);
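
/*
 * Illustrative sketch, not part of cm.c: loading an alternate path into
 * an established connection.  "alt_path" is assumed to be a path record
 * the consumer resolved beforehand; the peer's answer arrives as an APR,
 * handled below.
 */
static int example_load_alt_path(struct ib_cm_id *cm_id,
                                 struct ib_sa_path_rec *alt_path)
{
        return ib_send_cm_lap(cm_id, alt_path, NULL, 0);
}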
2393
2394 static void cm_format_path_from_lap(struct ib_sa_path_rec *path,
2395                                     struct cm_lap_msg *lap_msg)
2396 {
2397         memset(path, 0, sizeof *path);
2398         path->dgid = lap_msg->alt_local_gid;
2399         path->sgid = lap_msg->alt_remote_gid;
2400         path->dlid = lap_msg->alt_local_lid;
2401         path->slid = lap_msg->alt_remote_lid;
2402         path->flow_label = cm_lap_get_flow_label(lap_msg);
2403         path->hop_limit = lap_msg->alt_hop_limit;
2404         path->traffic_class = cm_lap_get_traffic_class(lap_msg);
2405         path->reversible = 1;
2406         /* pkey is same as in REQ */
2407         path->sl = cm_lap_get_sl(lap_msg);
2408         path->mtu_selector = IB_SA_EQ;
2409         /* mtu is same as in REQ */
2410         path->rate_selector = IB_SA_EQ;
2411         path->rate = cm_lap_get_packet_rate(lap_msg);
2412         path->packet_life_time_selector = IB_SA_EQ;
2413         path->packet_life_time = cm_lap_get_local_ack_timeout(lap_msg);
2414         path->packet_life_time -= (path->packet_life_time > 0);
2415 }
2416
2417 static int cm_lap_handler(struct cm_work *work)
2418 {
2419         struct cm_id_private *cm_id_priv;
2420         struct cm_lap_msg *lap_msg;
2421         struct ib_cm_lap_event_param *param;
2422         struct ib_mad_send_buf *msg = NULL;
2423         unsigned long flags;
2424         int ret;
2425
2426         /* todo: verify LAP request and send reject APR if invalid. */
2427         lap_msg = (struct cm_lap_msg *)work->mad_recv_wc->recv_buf.mad;
2428         cm_id_priv = cm_acquire_id(lap_msg->remote_comm_id,
2429                                    lap_msg->local_comm_id);
2430         if (!cm_id_priv)
2431                 return -EINVAL;
2432
2433         param = &work->cm_event.param.lap_rcvd;
2434         param->alternate_path = &work->path[0];
2435         cm_format_path_from_lap(param->alternate_path, lap_msg);
2436         work->cm_event.private_data = &lap_msg->private_data;
2437
2438         spin_lock_irqsave(&cm_id_priv->lock, flags);
2439         if (cm_id_priv->id.state != IB_CM_ESTABLISHED)
2440                 goto unlock;
2441
2442         switch (cm_id_priv->id.lap_state) {
2443         case IB_CM_LAP_IDLE:
2444                 break;
2445         case IB_CM_MRA_LAP_SENT:
2446                 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
2447                         goto unlock;
2448
2449                 cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
2450                               CM_MSG_RESPONSE_OTHER,
2451                               cm_id_priv->service_timeout,
2452                               cm_id_priv->private_data,
2453                               cm_id_priv->private_data_len);
2454                 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2455
2456                 if (ib_post_send_mad(msg, NULL))
2457                         cm_free_msg(msg);
2458                 goto deref;
2459         default:
2460                 goto unlock;
2461         }
2462
2463         cm_id_priv->id.lap_state = IB_CM_LAP_RCVD;
2464         cm_id_priv->tid = lap_msg->hdr.tid;
2465         ret = atomic_inc_and_test(&cm_id_priv->work_count);
2466         if (!ret)
2467                 list_add_tail(&work->list, &cm_id_priv->work_list);
2468         spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2469
2470         if (ret)
2471                 cm_process_work(cm_id_priv, work);
2472         else
2473                 cm_deref_id(cm_id_priv);
2474         return 0;
2475
2476 unlock: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2477 deref:  cm_deref_id(cm_id_priv);
2478         return -EINVAL;
2479 }
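
/*
 * Note on cm_lap_handler() above: as with duplicate REQs, a retransmitted
 * LAP that was already answered with an MRA simply has that MRA resent;
 * only a LAP arriving in the idle LAP state becomes an event.  Validating
 * the proposed path (and rejecting it with an APR) remains a todo.
 */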
2480
2481 static void cm_format_apr(struct cm_apr_msg *apr_msg,
2482                           struct cm_id_private *cm_id_priv,
2483                           enum ib_cm_apr_status status,
2484                           void *info,
2485                           u8 info_length,
2486                           const void *private_data,
2487                           u8 private_data_len)
2488 {
2489         cm_format_mad_hdr(&apr_msg->hdr, CM_APR_ATTR_ID, cm_id_priv->tid);
2490         apr_msg->local_comm_id = cm_id_priv->id.local_id;
2491         apr_msg->remote_comm_id = cm_id_priv->id.remote_id;
2492         apr_msg->ap_status = (u8) status;
2493
2494         if (info && info_length) {
2495                 apr_msg->info_length = info_length;
2496                 memcpy(apr_msg->info, info, info_length);
2497         }
2498
2499         if (private_data && private_data_len)
2500                 memcpy(apr_msg->private_data, private_data, private_data_len);
2501 }
2502
2503 int ib_send_cm_apr(struct ib_cm_id *cm_id,
2504                    enum ib_cm_apr_status status,
2505                    void *info,
2506                    u8 info_length,
2507                    const void *private_data,
2508                    u8 private_data_len)
2509 {
2510         struct cm_id_private *cm_id_priv;
2511         struct ib_mad_send_buf *msg;
2512         unsigned long flags;
2513         int ret;
2514
2515         if ((private_data && private_data_len > IB_CM_APR_PRIVATE_DATA_SIZE) ||
2516             (info && info_length > IB_CM_APR_INFO_LENGTH))
2517                 return -EINVAL;
2518
2519         cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2520         spin_lock_irqsave(&cm_id_priv->lock, flags);
2521         if (cm_id->state != IB_CM_ESTABLISHED ||
2522             (cm_id->lap_state != IB_CM_LAP_RCVD &&
2523              cm_id->lap_state != IB_CM_MRA_LAP_SENT)) {
2524                 ret = -EINVAL;
2525                 goto out;
2526         }
2527
2528         ret = cm_alloc_msg(cm_id_priv, &msg);
2529         if (ret)
2530                 goto out;
2531
2532         cm_format_apr((struct cm_apr_msg *) msg->mad, cm_id_priv, status,
2533                       info, info_length, private_data, private_data_len);
2534         ret = ib_post_send_mad(msg, NULL);
2535         if (ret) {
2536                 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2537                 cm_free_msg(msg);
2538                 return ret;
2539         }
2540
2541         cm_id->lap_state = IB_CM_LAP_IDLE;
2542 out:    spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2543         return ret;
2544 }
2545 EXPORT_SYMBOL(ib_send_cm_apr);
2546
2547 static int cm_apr_handler(struct cm_work *work)
2548 {
2549         struct cm_id_private *cm_id_priv;
2550         struct cm_apr_msg *apr_msg;
2551         unsigned long flags;
2552         int ret;
2553
2554         apr_msg = (struct cm_apr_msg *)work->mad_recv_wc->recv_buf.mad;
2555         cm_id_priv = cm_acquire_id(apr_msg->remote_comm_id,
2556                                    apr_msg->local_comm_id);
2557         if (!cm_id_priv)
2558                 return -EINVAL; /* Unmatched reply. */
2559
2560         work->cm_event.param.apr_rcvd.ap_status = apr_msg->ap_status;
2561         work->cm_event.param.apr_rcvd.apr_info = &apr_msg->info;
2562         work->cm_event.param.apr_rcvd.info_len = apr_msg->info_length;
2563         work->cm_event.private_data = &apr_msg->private_data;
2564
2565         spin_lock_irqsave(&cm_id_priv->lock, flags);
2566         if (cm_id_priv->id.state != IB_CM_ESTABLISHED ||
2567             (cm_id_priv->id.lap_state != IB_CM_LAP_SENT &&
2568              cm_id_priv->id.lap_state != IB_CM_MRA_LAP_RCVD)) {
2569                 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2570                 goto out;
2571         }
2572         cm_id_priv->id.lap_state = IB_CM_LAP_IDLE;
2573         ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2574         cm_id_priv->msg = NULL;
2575
2576         ret = atomic_inc_and_test(&cm_id_priv->work_count);
2577         if (!ret)
2578                 list_add_tail(&work->list, &cm_id_priv->work_list);
2579         spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2580
2581         if (ret)
2582                 cm_process_work(cm_id_priv, work);
2583         else
2584                 cm_deref_id(cm_id_priv);
2585         return 0;
2586 out:
2587         cm_deref_id(cm_id_priv);
2588         return -EINVAL;
2589 }
2590
2591 static int cm_timewait_handler(struct cm_work *work)
2592 {
2593         struct cm_timewait_info *timewait_info;
2594         struct cm_id_private *cm_id_priv;
2595         unsigned long flags;
2596         int ret;
2597
2598         timewait_info = (struct cm_timewait_info *)work;
2599         cm_cleanup_timewait(timewait_info);
2600
2601         cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
2602                                    timewait_info->work.remote_id);
2603         if (!cm_id_priv)
2604                 return -EINVAL;
2605
2606         spin_lock_irqsave(&cm_id_priv->lock, flags);
2607         if (cm_id_priv->id.state != IB_CM_TIMEWAIT ||
2608             cm_id_priv->remote_qpn != timewait_info->remote_qpn) {
2609                 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2610                 goto out;
2611         }
2612         cm_id_priv->id.state = IB_CM_IDLE;
2613         ret = atomic_inc_and_test(&cm_id_priv->work_count);
2614         if (!ret)
2615                 list_add_tail(&work->list, &cm_id_priv->work_list);
2616         spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2617
2618         if (ret)
2619                 cm_process_work(cm_id_priv, work);
2620         else
2621                 cm_deref_id(cm_id_priv);
2622         return 0;
2623 out:
2624         cm_deref_id(cm_id_priv);
2625         return -EINVAL;
2626 }
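
/*
 * Note on cm_timewait_handler() above: this runs when the timewait period
 * expires.  The remote QPN check discards stale timewait work if the
 * cm_id has since been reused for a new connection; otherwise the cm_id
 * returns to idle and the consumer sees a timewait-exit event.
 */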
2627
2628 static void cm_format_sidr_req(struct cm_sidr_req_msg *sidr_req_msg,
2629                                struct cm_id_private *cm_id_priv,
2630                                struct ib_cm_sidr_req_param *param)
2631 {
2632         cm_format_mad_hdr(&sidr_req_msg->hdr, CM_SIDR_REQ_ATTR_ID,
2633                           cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_SIDR));
2634         sidr_req_msg->request_id = cm_id_priv->id.local_id;
2635         sidr_req_msg->pkey = cpu_to_be16(param->path->pkey);
2636         sidr_req_msg->service_id = param->service_id;
2637
2638         if (param->private_data && param->private_data_len)
2639                 memcpy(sidr_req_msg->private_data, param->private_data,
2640                        param->private_data_len);
2641 }
2642
2643 int ib_send_cm_sidr_req(struct ib_cm_id *cm_id,
2644                         struct ib_cm_sidr_req_param *param)
2645 {
2646         struct cm_id_private *cm_id_priv;
2647         struct ib_mad_send_buf *msg;
2648         unsigned long flags;
2649         int ret;
2650
2651         if (!param->path || (param->private_data &&
2652              param->private_data_len > IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE))
2653                 return -EINVAL;
2654
2655         cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2656         ret = cm_init_av_by_path(param->path, &cm_id_priv->av);
2657         if (ret)
2658                 goto out;
2659
2660         cm_id->service_id = param->service_id;
2661         cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
2662         cm_id_priv->timeout_ms = param->timeout_ms;
2663         cm_id_priv->max_cm_retries = param->max_cm_retries;
2664         ret = cm_alloc_msg(cm_id_priv, &msg);
2665         if (ret)
2666                 goto out;
2667
2668         cm_format_sidr_req((struct cm_sidr_req_msg *) msg->mad, cm_id_priv,
2669                            param);
2670         msg->timeout_ms = cm_id_priv->timeout_ms;
2671         msg->context[1] = (void *) (unsigned long) IB_CM_SIDR_REQ_SENT;
2672
2673         spin_lock_irqsave(&cm_id_priv->lock, flags);
2674         if (cm_id->state == IB_CM_IDLE)
2675                 ret = ib_post_send_mad(msg, NULL);
2676         else
2677                 ret = -EINVAL;
2678
2679         if (ret) {
2680                 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2681                 cm_free_msg(msg);
2682                 goto out;
2683         }
2684         cm_id->state = IB_CM_SIDR_REQ_SENT;
2685         cm_id_priv->msg = msg;
2686         spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2687 out:
2688         return ret;
2689 }
2690 EXPORT_SYMBOL(ib_send_cm_sidr_req);
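
/*
 * Illustrative sketch, not part of cm.c: resolving a service ID to a
 * remote QPN/Q_Key with a SIDR REQ.  "path" is assumed to be a resolved
 * path record, and the timeout and retry values are arbitrary; the answer
 * arrives as an IB_CM_SIDR_REP_RECEIVED event.
 */
static int example_sidr_resolve(struct ib_cm_id *cm_id,
                                struct ib_sa_path_rec *path,
                                __be64 service_id)
{
        struct ib_cm_sidr_req_param param;

        memset(&param, 0, sizeof param);
        param.path = path;
        param.service_id = service_id;
        param.timeout_ms = 1000;
        param.max_cm_retries = 3;
        return ib_send_cm_sidr_req(cm_id, &param);
}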
2691
2692 static void cm_format_sidr_req_event(struct cm_work *work,
2693                                      struct ib_cm_id *listen_id)
2694 {
2695         struct cm_sidr_req_msg *sidr_req_msg;
2696         struct ib_cm_sidr_req_event_param *param;
2697
2698         sidr_req_msg = (struct cm_sidr_req_msg *)
2699                                 work->mad_recv_wc->recv_buf.mad;
2700         param = &work->cm_event.param.sidr_req_rcvd;
2701         param->pkey = __be16_to_cpu(sidr_req_msg->pkey);
2702         param->listen_id = listen_id;
2703         param->port = work->port->port_num;
2704         work->cm_event.private_data = &sidr_req_msg->private_data;
2705 }
2706
2707 static int cm_sidr_req_handler(struct cm_work *work)
2708 {
2709         struct ib_cm_id *cm_id;
2710         struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
2711         struct cm_sidr_req_msg *sidr_req_msg;
2712         struct ib_wc *wc;
2713         unsigned long flags;
2714
2715         cm_id = ib_create_cm_id(work->port->cm_dev->device, NULL, NULL);
2716         if (IS_ERR(cm_id))
2717                 return PTR_ERR(cm_id);
2718         cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2719
2720         /* Record the SLID (stashed in the dgid) and request ID for duplicate lookup. */
2721         sidr_req_msg = (struct cm_sidr_req_msg *)
2722                                 work->mad_recv_wc->recv_buf.mad;
2723         wc = work->mad_recv_wc->wc;
2724         cm_id_priv->av.dgid.global.subnet_prefix = cpu_to_be64(wc->slid);
2725         cm_id_priv->av.dgid.global.interface_id = 0;
2726         cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
2727                                 work->mad_recv_wc->recv_buf.grh,
2728                                 &cm_id_priv->av);
2729         cm_id_priv->id.remote_id = sidr_req_msg->request_id;
2730         cm_id_priv->id.state = IB_CM_SIDR_REQ_RCVD;
2731         cm_id_priv->tid = sidr_req_msg->hdr.tid;
2732         atomic_inc(&cm_id_priv->work_count);
2733
2734         spin_lock_irqsave(&cm.lock, flags);
2735         cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
2736         if (cur_cm_id_priv) {
2737                 spin_unlock_irqrestore(&cm.lock, flags);
2738                 goto out; /* Duplicate message. */
2739         }
2740         cur_cm_id_priv = cm_find_listen(cm_id->device,
2741                                         sidr_req_msg->service_id,
2742                                         sidr_req_msg->private_data);
2743         if (!cur_cm_id_priv) {
2744                 rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
2745                 spin_unlock_irqrestore(&cm.lock, flags);
2746                 /* todo: reply with no match */
2747                 goto out; /* No match. */
2748         }
2749         atomic_inc(&cur_cm_id_priv->refcount);
2750         spin_unlock_irqrestore(&cm.lock, flags);
2751
2752         cm_id_priv->id.cm_handler = cur_cm_id_priv->id.cm_handler;
2753         cm_id_priv->id.context = cur_cm_id_priv->id.context;
2754         cm_id_priv->id.service_id = sidr_req_msg->service_id;
2755         cm_id_priv->id.service_mask = __constant_cpu_to_be64(~0ULL);
2756
2757         cm_format_sidr_req_event(work, &cur_cm_id_priv->id);
2758         cm_process_work(cm_id_priv, work);
2759         cm_deref_id(cur_cm_id_priv);
2760         return 0;
2761 out:
2762         ib_destroy_cm_id(&cm_id_priv->id);
2763         return -EINVAL;
2764 }
2765
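/*
 * Build a SIDR REP MAD, echoing the peer's request ID and transaction
 * ID and carrying the resolved QPN, Q_Key and status.
 */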
2766 static void cm_format_sidr_rep(struct cm_sidr_rep_msg *sidr_rep_msg,
2767                                struct cm_id_private *cm_id_priv,
2768                                struct ib_cm_sidr_rep_param *param)
2769 {
2770         cm_format_mad_hdr(&sidr_rep_msg->hdr, CM_SIDR_REP_ATTR_ID,
2771                           cm_id_priv->tid);
2772         sidr_rep_msg->request_id = cm_id_priv->id.remote_id;
2773         sidr_rep_msg->status = param->status;
2774         cm_sidr_rep_set_qpn(sidr_rep_msg, cpu_to_be32(param->qp_num));
2775         sidr_rep_msg->service_id = cm_id_priv->id.service_id;
2776         sidr_rep_msg->qkey = cpu_to_be32(param->qkey);
2777
2778         if (param->info && param->info_length)
2779                 memcpy(sidr_rep_msg->info, param->info, param->info_length);
2780
2781         if (param->private_data && param->private_data_len)
2782                 memcpy(sidr_rep_msg->private_data, param->private_data,
2783                        param->private_data_len);
2784 }
2785
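/*
 * Reply to a received SIDR REQ.  Valid only in IB_CM_SIDR_REQ_RCVD;
 * on success the cm_id returns to IB_CM_IDLE and is removed from the
 * remote SIDR table.
 */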
2786 int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id,
2787                         struct ib_cm_sidr_rep_param *param)
2788 {
2789         struct cm_id_private *cm_id_priv;
2790         struct ib_mad_send_buf *msg;
2791         unsigned long flags;
2792         int ret;
2793
2794         if ((param->info && param->info_length > IB_CM_SIDR_REP_INFO_LENGTH) ||
2795             (param->private_data &&
2796              param->private_data_len > IB_CM_SIDR_REP_PRIVATE_DATA_SIZE))
2797                 return -EINVAL;
2798
2799         cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2800         spin_lock_irqsave(&cm_id_priv->lock, flags);
2801         if (cm_id->state != IB_CM_SIDR_REQ_RCVD) {
2802                 ret = -EINVAL;
2803                 goto error;
2804         }
2805
2806         ret = cm_alloc_msg(cm_id_priv, &msg);
2807         if (ret)
2808                 goto error;
2809
2810         cm_format_sidr_rep((struct cm_sidr_rep_msg *) msg->mad, cm_id_priv,
2811                            param);
2812         ret = ib_post_send_mad(msg, NULL);
2813         if (ret) {
2814                 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2815                 cm_free_msg(msg);
2816                 return ret;
2817         }
2818         cm_id->state = IB_CM_IDLE;
2819         spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2820
2821         spin_lock_irqsave(&cm.lock, flags);
2822         rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
2823         spin_unlock_irqrestore(&cm.lock, flags);
2824         return 0;
2825
2826 error:  spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2827         return ret;
2828 }
2829 EXPORT_SYMBOL(ib_send_cm_sidr_rep);
2830
2831 static void cm_format_sidr_rep_event(struct cm_work *work)
2832 {
2833         struct cm_sidr_rep_msg *sidr_rep_msg;
2834         struct ib_cm_sidr_rep_event_param *param;
2835
2836         sidr_rep_msg = (struct cm_sidr_rep_msg *)
2837                                 work->mad_recv_wc->recv_buf.mad;
2838         param = &work->cm_event.param.sidr_rep_rcvd;
2839         param->status = sidr_rep_msg->status;
2840         param->qkey = be32_to_cpu(sidr_rep_msg->qkey);
2841         param->qpn = be32_to_cpu(cm_sidr_rep_get_qpn(sidr_rep_msg));
2842         param->info = &sidr_rep_msg->info;
2843         param->info_len = sidr_rep_msg->info_length;
2844         work->cm_event.private_data = &sidr_rep_msg->private_data;
2845 }
2846
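/*
 * Handle a received SIDR REP.  The reply is matched to the
 * outstanding request by its request ID; unmatched replies, or
 * replies arriving in any state other than IB_CM_SIDR_REQ_SENT,
 * are dropped.
 */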
2847 static int cm_sidr_rep_handler(struct cm_work *work)
2848 {
2849         struct cm_sidr_rep_msg *sidr_rep_msg;
2850         struct cm_id_private *cm_id_priv;
2851         unsigned long flags;
2852
2853         sidr_rep_msg = (struct cm_sidr_rep_msg *)
2854                                 work->mad_recv_wc->recv_buf.mad;
2855         cm_id_priv = cm_acquire_id(sidr_rep_msg->request_id, 0);
2856         if (!cm_id_priv)
2857                 return -EINVAL; /* Unmatched reply. */
2858
2859         spin_lock_irqsave(&cm_id_priv->lock, flags);
2860         if (cm_id_priv->id.state != IB_CM_SIDR_REQ_SENT) {
2861                 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2862                 goto out;
2863         }
2864         cm_id_priv->id.state = IB_CM_IDLE;
2865         ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2866         spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2867
2868         cm_format_sidr_rep_event(work);
2869         cm_process_work(cm_id_priv, work);
2870         return 0;
2871 out:
2872         cm_deref_id(cm_id_priv);
2873         return -EINVAL;
2874 }
2875
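/*
 * Turn a failed MAD send into the CM error event appropriate to the
 * state in which the message was sent.  Stale sends (the message or
 * state no longer matches the cm_id) are discarded without an event.
 */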
2876 static void cm_process_send_error(struct ib_mad_send_buf *msg,
2877                                   enum ib_wc_status wc_status)
2878 {
2879         struct cm_id_private *cm_id_priv;
2880         struct ib_cm_event cm_event;
2881         enum ib_cm_state state;
2882         unsigned long flags;
2883         int ret;
2884
2885         memset(&cm_event, 0, sizeof cm_event);
2886         cm_id_priv = msg->context[0];
2887
2888         /* Discard old sends or ones without a response. */
2889         spin_lock_irqsave(&cm_id_priv->lock, flags);
2890         state = (enum ib_cm_state) (unsigned long) msg->context[1];
2891         if (msg != cm_id_priv->msg || state != cm_id_priv->id.state)
2892                 goto discard;
2893
2894         switch (state) {
2895         case IB_CM_REQ_SENT:
2896         case IB_CM_MRA_REQ_RCVD:
2897                 cm_reset_to_idle(cm_id_priv);
2898                 cm_event.event = IB_CM_REQ_ERROR;
2899                 break;
2900         case IB_CM_REP_SENT:
2901         case IB_CM_MRA_REP_RCVD:
2902                 cm_reset_to_idle(cm_id_priv);
2903                 cm_event.event = IB_CM_REP_ERROR;
2904                 break;
2905         case IB_CM_DREQ_SENT:
2906                 cm_enter_timewait(cm_id_priv);
2907                 cm_event.event = IB_CM_DREQ_ERROR;
2908                 break;
2909         case IB_CM_SIDR_REQ_SENT:
2910                 cm_id_priv->id.state = IB_CM_IDLE;
2911                 cm_event.event = IB_CM_SIDR_REQ_ERROR;
2912                 break;
2913         default:
2914                 goto discard;
2915         }
2916         spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2917         cm_event.param.send_status = wc_status;
2918
2919         /* No other events can occur on the cm_id at this point. */
2920         ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &cm_event);
2921         cm_free_msg(msg);
2922         if (ret)
2923                 ib_destroy_cm_id(&cm_id_priv->id);
2924         return;
2925 discard:
2926         spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2927         cm_free_msg(msg);
2928 }
2929
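/*
 * MAD agent send-completion callback.  Successful and flushed sends
 * just release the message; other failures are routed through
 * cm_process_send_error() when the message still carries its cm_id
 * and state context.
 */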
2930 static void cm_send_handler(struct ib_mad_agent *mad_agent,
2931                             struct ib_mad_send_wc *mad_send_wc)
2932 {
2933         struct ib_mad_send_buf *msg = mad_send_wc->send_buf;
2934
2935         switch (mad_send_wc->status) {
2936         case IB_WC_SUCCESS:
2937         case IB_WC_WR_FLUSH_ERR:
2938                 cm_free_msg(msg);
2939                 break;
2940         default:
2941                 if (msg->context[0] && msg->context[1])
2942                         cm_process_send_error(msg, mad_send_wc->status);
2943                 else
2944                         cm_free_msg(msg);
2945                 break;
2946         }
2947 }
2948
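/*
 * Workqueue handler: dispatch a queued CM event to the receive
 * handler for its message type.  Handlers that consume the work item
 * return 0; on any error the work item is freed here.
 */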
2949 static void cm_work_handler(void *data)
2950 {
2951         struct cm_work *work = data;
2952         int ret;
2953
2954         switch (work->cm_event.event) {
2955         case IB_CM_REQ_RECEIVED:
2956                 ret = cm_req_handler(work);
2957                 break;
2958         case IB_CM_MRA_RECEIVED:
2959                 ret = cm_mra_handler(work);
2960                 break;
2961         case IB_CM_REJ_RECEIVED:
2962                 ret = cm_rej_handler(work);
2963                 break;
2964         case IB_CM_REP_RECEIVED:
2965                 ret = cm_rep_handler(work);
2966                 break;
2967         case IB_CM_RTU_RECEIVED:
2968                 ret = cm_rtu_handler(work);
2969                 break;
2970         case IB_CM_USER_ESTABLISHED:
2971                 ret = cm_establish_handler(work);
2972                 break;
2973         case IB_CM_DREQ_RECEIVED:
2974                 ret = cm_dreq_handler(work);
2975                 break;
2976         case IB_CM_DREP_RECEIVED:
2977                 ret = cm_drep_handler(work);
2978                 break;
2979         case IB_CM_SIDR_REQ_RECEIVED:
2980                 ret = cm_sidr_req_handler(work);
2981                 break;
2982         case IB_CM_SIDR_REP_RECEIVED:
2983                 ret = cm_sidr_rep_handler(work);
2984                 break;
2985         case IB_CM_LAP_RECEIVED:
2986                 ret = cm_lap_handler(work);
2987                 break;
2988         case IB_CM_APR_RECEIVED:
2989                 ret = cm_apr_handler(work);
2990                 break;
2991         case IB_CM_TIMEWAIT_EXIT:
2992                 ret = cm_timewait_handler(work);
2993                 break;
2994         default:
2995                 ret = -EINVAL;
2996                 break;
2997         }
2998         if (ret)
2999                 cm_free_work(work);
3000 }
3001
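/*
 * Note that a connection is established, e.g. when the first data
 * arrives before the RTU.  The state change happens immediately; the
 * resulting IB_CM_USER_ESTABLISHED event is delivered from the CM
 * workqueue.
 */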
3002 int ib_cm_establish(struct ib_cm_id *cm_id)
3003 {
3004         struct cm_id_private *cm_id_priv;
3005         struct cm_work *work;
3006         unsigned long flags;
3007         int ret = 0;
3008
3009         work = kmalloc(sizeof *work, GFP_ATOMIC);
3010         if (!work)
3011                 return -ENOMEM;
3012
3013         cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3014         spin_lock_irqsave(&cm_id_priv->lock, flags);
3015         switch (cm_id->state) {
3017         case IB_CM_REP_SENT:
3018         case IB_CM_MRA_REP_RCVD:
3019                 cm_id->state = IB_CM_ESTABLISHED;
3020                 break;
3021         case IB_CM_ESTABLISHED:
3022                 ret = -EISCONN;
3023                 break;
3024         default:
3025                 ret = -EINVAL;
3026                 break;
3027         }
3028         spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3029
3030         if (ret) {
3031                 kfree(work);
3032                 goto out;
3033         }
3034
3035         /*
3036          * The CM worker thread may try to destroy the cm_id before it
3037          * can execute this work item.  To prevent potential deadlock,
3038          * we need to find the cm_id once we're in the context of the
3039          * worker thread, rather than holding a reference on it.
3040          */
3041         INIT_WORK(&work->work, cm_work_handler, work);
3042         work->local_id = cm_id->local_id;
3043         work->remote_id = cm_id->remote_id;
3044         work->mad_recv_wc = NULL;
3045         work->cm_event.event = IB_CM_USER_ESTABLISHED;
3046         queue_work(cm.wq, &work->work);
3047 out:
3048         return ret;
3049 }
3050 EXPORT_SYMBOL(ib_cm_establish);
3051
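/*
 * MAD agent receive callback.  Map the MAD attribute ID to a CM
 * event, allocate a work item with room for any path records a REQ
 * or LAP carries, and queue it to the CM workqueue.
 */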
3052 static void cm_recv_handler(struct ib_mad_agent *mad_agent,
3053                             struct ib_mad_recv_wc *mad_recv_wc)
3054 {
3055         struct cm_work *work;
3056         enum ib_cm_event_type event;
3057         int paths = 0;
3058
3059         switch (mad_recv_wc->recv_buf.mad->mad_hdr.attr_id) {
3060         case CM_REQ_ATTR_ID:
3061                 paths = 1 + (((struct cm_req_msg *) mad_recv_wc->recv_buf.mad)->
3062                                                     alt_local_lid != 0);
3063                 event = IB_CM_REQ_RECEIVED;
3064                 break;
3065         case CM_MRA_ATTR_ID:
3066                 event = IB_CM_MRA_RECEIVED;
3067                 break;
3068         case CM_REJ_ATTR_ID:
3069                 event = IB_CM_REJ_RECEIVED;
3070                 break;
3071         case CM_REP_ATTR_ID:
3072                 event = IB_CM_REP_RECEIVED;
3073                 break;
3074         case CM_RTU_ATTR_ID:
3075                 event = IB_CM_RTU_RECEIVED;
3076                 break;
3077         case CM_DREQ_ATTR_ID:
3078                 event = IB_CM_DREQ_RECEIVED;
3079                 break;
3080         case CM_DREP_ATTR_ID:
3081                 event = IB_CM_DREP_RECEIVED;
3082                 break;
3083         case CM_SIDR_REQ_ATTR_ID:
3084                 event = IB_CM_SIDR_REQ_RECEIVED;
3085                 break;
3086         case CM_SIDR_REP_ATTR_ID:
3087                 event = IB_CM_SIDR_REP_RECEIVED;
3088                 break;
3089         case CM_LAP_ATTR_ID:
3090                 paths = 1;
3091                 event = IB_CM_LAP_RECEIVED;
3092                 break;
3093         case CM_APR_ATTR_ID:
3094                 event = IB_CM_APR_RECEIVED;
3095                 break;
3096         default:
3097                 ib_free_recv_mad(mad_recv_wc);
3098                 return;
3099         }
3100
3101         work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
3102                        GFP_KERNEL);
3103         if (!work) {
3104                 ib_free_recv_mad(mad_recv_wc);
3105                 return;
3106         }
3107
3108         INIT_WORK(&work->work, cm_work_handler, work);
3109         work->cm_event.event = event;
3110         work->mad_recv_wc = mad_recv_wc;
3111         work->port = (struct cm_port *)mad_agent->context;
3112         queue_work(cm.wq, &work->work);
3113 }
3114
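/*
 * The three helpers below fill in the QP attributes a ULP needs for
 * the INIT, RTR and RTS transitions, based on what has been learned
 * from the connection exchange so far.
 */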
3115 static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv,
3116                                 struct ib_qp_attr *qp_attr,
3117                                 int *qp_attr_mask)
3118 {
3119         unsigned long flags;
3120         int ret;
3121
3122         spin_lock_irqsave(&cm_id_priv->lock, flags);
3123         switch (cm_id_priv->id.state) {
3124         case IB_CM_REQ_SENT:
3125         case IB_CM_MRA_REQ_RCVD:
3126         case IB_CM_REQ_RCVD:
3127         case IB_CM_MRA_REQ_SENT:
3128         case IB_CM_REP_RCVD:
3129         case IB_CM_MRA_REP_SENT:
3130         case IB_CM_REP_SENT:
3131         case IB_CM_MRA_REP_RCVD:
3132         case IB_CM_ESTABLISHED:
3133                 *qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS |
3134                                 IB_QP_PKEY_INDEX | IB_QP_PORT;
3135                 qp_attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE |
3136                                            IB_ACCESS_REMOTE_WRITE;
3137                 if (cm_id_priv->responder_resources)
3138                         qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_READ |
3139                                                     IB_ACCESS_REMOTE_ATOMIC;
3140                 qp_attr->pkey_index = cm_id_priv->av.pkey_index;
3141                 qp_attr->port_num = cm_id_priv->av.port->port_num;
3142                 ret = 0;
3143                 break;
3144         default:
3145                 ret = -EINVAL;
3146                 break;
3147         }
3148         spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3149         return ret;
3150 }
3151
3152 static int cm_init_qp_rtr_attr(struct cm_id_private *cm_id_priv,
3153                                struct ib_qp_attr *qp_attr,
3154                                int *qp_attr_mask)
3155 {
3156         unsigned long flags;
3157         int ret;
3158
3159         spin_lock_irqsave(&cm_id_priv->lock, flags);
3160         switch (cm_id_priv->id.state) {
3161         case IB_CM_REQ_RCVD:
3162         case IB_CM_MRA_REQ_SENT:
3163         case IB_CM_REP_RCVD:
3164         case IB_CM_MRA_REP_SENT:
3165         case IB_CM_REP_SENT:
3166         case IB_CM_MRA_REP_RCVD:
3167         case IB_CM_ESTABLISHED:
3168                 *qp_attr_mask = IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU |
3169                                 IB_QP_DEST_QPN | IB_QP_RQ_PSN;
3170                 qp_attr->ah_attr = cm_id_priv->av.ah_attr;
3171                 qp_attr->path_mtu = cm_id_priv->path_mtu;
3172                 qp_attr->dest_qp_num = be32_to_cpu(cm_id_priv->remote_qpn);
3173                 qp_attr->rq_psn = be32_to_cpu(cm_id_priv->rq_psn);
3174                 if (cm_id_priv->qp_type == IB_QPT_RC) {
3175                         *qp_attr_mask |= IB_QP_MAX_DEST_RD_ATOMIC |
3176                                          IB_QP_MIN_RNR_TIMER;
3177                         qp_attr->max_dest_rd_atomic =
3178                                         cm_id_priv->responder_resources;
3179                         qp_attr->min_rnr_timer = 0;
3180                 }
3181                 if (cm_id_priv->alt_av.ah_attr.dlid) {
3182                         *qp_attr_mask |= IB_QP_ALT_PATH;
3183                         qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num;
3184                         qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
3185                 }
3186                 ret = 0;
3187                 break;
3188         default:
3189                 ret = -EINVAL;
3190                 break;
3191         }
3192         spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3193         return ret;
3194 }
3195
3196 static int cm_init_qp_rts_attr(struct cm_id_private *cm_id_priv,
3197                                struct ib_qp_attr *qp_attr,
3198                                int *qp_attr_mask)
3199 {
3200         unsigned long flags;
3201         int ret;
3202
3203         spin_lock_irqsave(&cm_id_priv->lock, flags);
3204         switch (cm_id_priv->id.state) {
3205         case IB_CM_REP_RCVD:
3206         case IB_CM_MRA_REP_SENT:
3207         case IB_CM_REP_SENT:
3208         case IB_CM_MRA_REP_RCVD:
3209         case IB_CM_ESTABLISHED:
3210                 *qp_attr_mask = IB_QP_STATE | IB_QP_SQ_PSN;
3211                 qp_attr->sq_psn = be32_to_cpu(cm_id_priv->sq_psn);
3212                 if (cm_id_priv->qp_type == IB_QPT_RC) {
3213                         *qp_attr_mask |= IB_QP_TIMEOUT | IB_QP_RETRY_CNT |
3214                                          IB_QP_RNR_RETRY |
3215                                          IB_QP_MAX_QP_RD_ATOMIC;
3216                         qp_attr->timeout = cm_id_priv->local_ack_timeout;
3217                         qp_attr->retry_cnt = cm_id_priv->retry_count;
3218                         qp_attr->rnr_retry = cm_id_priv->rnr_retry_count;
3219                         qp_attr->max_rd_atomic = cm_id_priv->initiator_depth;
3220                 }
3221                 if (cm_id_priv->alt_av.ah_attr.dlid) {
3222                         *qp_attr_mask |= IB_QP_PATH_MIG_STATE;
3223                         qp_attr->path_mig_state = IB_MIG_REARM;
3224                 }
3225                 ret = 0;
3226                 break;
3227         default:
3228                 ret = -EINVAL;
3229                 break;
3230         }
3231         spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3232         return ret;
3233 }
3234
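/*
 * Return the connection-derived QP attributes for the transition
 * selected by qp_attr->qp_state.  A typical caller (sketch, error
 * handling omitted):
 *
 *	qp_attr.qp_state = IB_QPS_RTR;
 *	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
 *	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
 */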
3235 int ib_cm_init_qp_attr(struct ib_cm_id *cm_id,
3236                        struct ib_qp_attr *qp_attr,
3237                        int *qp_attr_mask)
3238 {
3239         struct cm_id_private *cm_id_priv;
3240         int ret;
3241
3242         cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3243         switch (qp_attr->qp_state) {
3244         case IB_QPS_INIT:
3245                 ret = cm_init_qp_init_attr(cm_id_priv, qp_attr, qp_attr_mask);
3246                 break;
3247         case IB_QPS_RTR:
3248                 ret = cm_init_qp_rtr_attr(cm_id_priv, qp_attr, qp_attr_mask);
3249                 break;
3250         case IB_QPS_RTS:
3251                 ret = cm_init_qp_rts_attr(cm_id_priv, qp_attr, qp_attr_mask);
3252                 break;
3253         default:
3254                 ret = -EINVAL;
3255                 break;
3256         }
3257         return ret;
3258 }
3259 EXPORT_SYMBOL(ib_cm_init_qp_attr);
3260
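/*
 * Per-device setup invoked by the IB client framework: register a
 * GSI MAD agent on each physical port and advertise CM support in
 * the port capability mask.  Ports already configured are unwound in
 * reverse order if any step fails.
 */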
3261 static void cm_add_one(struct ib_device *device)
3262 {
3263         struct cm_device *cm_dev;
3264         struct cm_port *port;
3265         struct ib_mad_reg_req reg_req = {
3266                 .mgmt_class = IB_MGMT_CLASS_CM,
3267                 .mgmt_class_version = IB_CM_CLASS_VERSION
3268         };
3269         struct ib_port_modify port_modify = {
3270                 .set_port_cap_mask = IB_PORT_CM_SUP
3271         };
3272         unsigned long flags;
3273         int ret;
3274         u8 i;
3275
3276         cm_dev = kmalloc(sizeof(*cm_dev) + sizeof(*port) *
3277                          device->phys_port_cnt, GFP_KERNEL);
3278         if (!cm_dev)
3279                 return;
3280
3281         cm_dev->device = device;
3282         cm_dev->ca_guid = device->node_guid;
3283
3284         set_bit(IB_MGMT_METHOD_SEND, reg_req.method_mask);
3285         for (i = 1; i <= device->phys_port_cnt; i++) {
3286                 port = &cm_dev->port[i-1];
3287                 port->cm_dev = cm_dev;
3288                 port->port_num = i;
3289                 port->mad_agent = ib_register_mad_agent(device, i,
3290                                                         IB_QPT_GSI,
3291                                                         &reg_req,
3292                                                         0,
3293                                                         cm_send_handler,
3294                                                         cm_recv_handler,
3295                                                         port);
3296                 if (IS_ERR(port->mad_agent))
3297                         goto error1;
3298
3299                 ret = ib_modify_port(device, i, 0, &port_modify);
3300                 if (ret)
3301                         goto error2;
3302         }
3303         ib_set_client_data(device, &cm_client, cm_dev);
3304
3305         write_lock_irqsave(&cm.device_lock, flags);
3306         list_add_tail(&cm_dev->list, &cm.device_list);
3307         write_unlock_irqrestore(&cm.device_lock, flags);
3308         return;
3309
3310 error2:
3311         ib_unregister_mad_agent(port->mad_agent);
3312 error1:
3313         port_modify.set_port_cap_mask = 0;
3314         port_modify.clr_port_cap_mask = IB_PORT_CM_SUP;
3315         while (--i) {
3316                 port = &cm_dev->port[i-1];
3317                 ib_modify_port(device, port->port_num, 0, &port_modify);
3318                 ib_unregister_mad_agent(port->mad_agent);
3319         }
3320         kfree(cm_dev);
3321 }
3322
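/*
 * Per-device teardown: clear the CM capability bit and unregister the
 * MAD agent on every port before freeing the device state.
 */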
3323 static void cm_remove_one(struct ib_device *device)
3324 {
3325         struct cm_device *cm_dev;
3326         struct cm_port *port;
3327         struct ib_port_modify port_modify = {
3328                 .clr_port_cap_mask = IB_PORT_CM_SUP
3329         };
3330         unsigned long flags;
3331         int i;
3332
3333         cm_dev = ib_get_client_data(device, &cm_client);
3334         if (!cm_dev)
3335                 return;
3336
3337         write_lock_irqsave(&cm.device_lock, flags);
3338         list_del(&cm_dev->list);
3339         write_unlock_irqrestore(&cm.device_lock, flags);
3340
3341         for (i = 1; i <= device->phys_port_cnt; i++) {
3342                 port = &cm_dev->port[i-1];
3343                 ib_modify_port(device, port->port_num, 0, &port_modify);
3344                 ib_unregister_mad_agent(port->mad_agent);
3345         }
3346         kfree(cm_dev);
3347 }
3348
3349 static int __init ib_cm_init(void)
3350 {
3351         int ret;
3352
3353         memset(&cm, 0, sizeof cm);
3354         INIT_LIST_HEAD(&cm.device_list);
3355         rwlock_init(&cm.device_lock);
3356         spin_lock_init(&cm.lock);
3357         cm.listen_service_table = RB_ROOT;
3358         cm.listen_service_id = __constant_be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID);
3359         cm.remote_id_table = RB_ROOT;
3360         cm.remote_qp_table = RB_ROOT;
3361         cm.remote_sidr_table = RB_ROOT;
3362         idr_init(&cm.local_id_table);
3363         idr_pre_get(&cm.local_id_table, GFP_KERNEL);
3364
3365         cm.wq = create_workqueue("ib_cm");
3366         if (!cm.wq)
3367                 return -ENOMEM;
3368
3369         ret = ib_register_client(&cm_client);
3370         if (ret)
3371                 goto error;
3372
3373         return 0;
3374 error:
3375         destroy_workqueue(cm.wq);
3376         return ret;
3377 }
3378
3379 static void __exit ib_cm_cleanup(void)
3380 {
3381         destroy_workqueue(cm.wq);
3382         ib_unregister_client(&cm_client);
3383         idr_destroy(&cm.local_id_table);
3384 }
3385
3386 module_init(ib_cm_init);
3387 module_exit(ib_cm_cleanup);
3388