/*
 * Copyright (c) 2004-2007 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004, 2005 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/random.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/workqueue.h>
#include <linux/kdev_t.h>
#include <linux/etherdevice.h>

#include <rdma/ib_cache.h>
#include <rdma/ib_cm.h>
#include "cm_msgs.h"

MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("InfiniBand CM");
MODULE_LICENSE("Dual BSD/GPL");
static void cm_add_one(struct ib_device *device);
static void cm_remove_one(struct ib_device *device, void *client_data);

static struct ib_client cm_client = {
        .name   = "cm",
        .add    = cm_add_one,
        .remove = cm_remove_one
};

static struct ib_cm {
        spinlock_t lock;
        struct list_head device_list;
        rwlock_t device_lock;
        struct rb_root listen_service_table;
        u64 listen_service_id;
        /* struct rb_root peer_service_table; todo: fix peer to peer */
        struct rb_root remote_qp_table;
        struct rb_root remote_id_table;
        struct rb_root remote_sidr_table;
        struct idr local_id_table;
        __be32 random_id_operand;
        struct list_head timewait_list;
        struct workqueue_struct *wq;
} cm;
/* Counter indexes ordered by attribute ID */
enum {
        CM_REQ_COUNTER,
        CM_MRA_COUNTER,
        CM_REJ_COUNTER,
        CM_REP_COUNTER,
        CM_RTU_COUNTER,
        CM_DREQ_COUNTER,
        CM_DREP_COUNTER,
        CM_SIDR_REQ_COUNTER,
        CM_SIDR_REP_COUNTER,
        CM_LAP_COUNTER,
        CM_APR_COUNTER,
        CM_ATTR_COUNT,
        CM_ATTR_ID_OFFSET = 0x0010,
};

enum {
        CM_XMIT,
        CM_XMIT_RETRIES,
        CM_RECV,
        CM_RECV_DUPLICATES,
        CM_COUNTER_GROUPS
};

static char const counter_group_names[CM_COUNTER_GROUPS]
                                     [sizeof("cm_rx_duplicates")] = {
        "cm_tx_msgs", "cm_tx_retries",
        "cm_rx_msgs", "cm_rx_duplicates"
};

struct cm_counter_group {
        struct kobject obj;
        atomic_long_t counter[CM_ATTR_COUNT];
};

struct cm_counter_attribute {
        struct attribute attr;
        int index;
};

#define CM_COUNTER_ATTR(_name, _index) \
struct cm_counter_attribute cm_##_name##_counter_attr = { \
        .attr = { .name = __stringify(_name), .mode = 0444 }, \
        .index = _index \
}

static CM_COUNTER_ATTR(req, CM_REQ_COUNTER);
static CM_COUNTER_ATTR(mra, CM_MRA_COUNTER);
static CM_COUNTER_ATTR(rej, CM_REJ_COUNTER);
static CM_COUNTER_ATTR(rep, CM_REP_COUNTER);
static CM_COUNTER_ATTR(rtu, CM_RTU_COUNTER);
static CM_COUNTER_ATTR(dreq, CM_DREQ_COUNTER);
static CM_COUNTER_ATTR(drep, CM_DREP_COUNTER);
static CM_COUNTER_ATTR(sidr_req, CM_SIDR_REQ_COUNTER);
static CM_COUNTER_ATTR(sidr_rep, CM_SIDR_REP_COUNTER);
static CM_COUNTER_ATTR(lap, CM_LAP_COUNTER);
static CM_COUNTER_ATTR(apr, CM_APR_COUNTER);

static struct attribute *cm_counter_default_attrs[] = {
        &cm_req_counter_attr.attr,
        &cm_mra_counter_attr.attr,
        &cm_rej_counter_attr.attr,
        &cm_rep_counter_attr.attr,
        &cm_rtu_counter_attr.attr,
        &cm_dreq_counter_attr.attr,
        &cm_drep_counter_attr.attr,
        &cm_sidr_req_counter_attr.attr,
        &cm_sidr_rep_counter_attr.attr,
        &cm_lap_counter_attr.attr,
        &cm_apr_counter_attr.attr,
        NULL
};
struct cm_port {
        struct cm_device *cm_dev;
        struct ib_mad_agent *mad_agent;
        struct kobject port_obj;
        u8 port_num;
        struct cm_counter_group counter_group[CM_COUNTER_GROUPS];
};

struct cm_device {
        struct list_head list;
        struct ib_device *ib_device;
        struct device *device;
        u8 ack_delay;
        int going_down;
        struct cm_port *port[0];
};

struct cm_av {
        struct cm_port *port;
        union ib_gid dgid;
        struct ib_ah_attr ah_attr;
        u16 pkey_index;
        u8 timeout;
};

struct cm_work {
        struct delayed_work work;
        struct list_head list;
        struct cm_port *port;
        struct ib_mad_recv_wc *mad_recv_wc;     /* Received MADs */
        __be32 local_id;                        /* Established / timewait */
        __be32 remote_id;
        struct ib_cm_event cm_event;
        struct ib_sa_path_rec path[0];
};

struct cm_timewait_info {
        struct cm_work work;                    /* Must be first. */
        struct list_head list;
        struct rb_node remote_qp_node;
        struct rb_node remote_id_node;
        __be64 remote_ca_guid;
        __be32 remote_qpn;
        u8 inserted_remote_qp;
        u8 inserted_remote_id;
};

struct cm_id_private {
        struct ib_cm_id id;

        struct rb_node service_node;
        struct rb_node sidr_id_node;
        spinlock_t lock;        /* Do not acquire inside cm.lock */
        struct completion comp;
        atomic_t refcount;
        /* Number of clients sharing this ib_cm_id. Only valid for listeners.
         * Protected by the cm.lock spinlock. */
        int listen_sharecount;

        struct ib_mad_send_buf *msg;
        struct cm_timewait_info *timewait_info;
        /* todo: use alternate port on send failure */
        struct cm_av av;
        struct cm_av alt_av;

        void *private_data;
        __be64 tid;
        __be32 local_qpn;
        __be32 remote_qpn;
        enum ib_qp_type qp_type;
        __be32 sq_psn;
        __be32 rq_psn;
        int timeout_ms;
        enum ib_mtu path_mtu;
        __be16 pkey;
        u8 private_data_len;
        u8 max_cm_retries;
        u8 peer_to_peer;
        u8 responder_resources;
        u8 initiator_depth;
        u8 retry_count;
        u8 rnr_retry_count;
        u8 service_timeout;
        u8 target_ack_delay;

        struct list_head work_list;
        atomic_t work_count;
};
static void cm_work_handler(struct work_struct *work);

static inline void cm_deref_id(struct cm_id_private *cm_id_priv)
{
        if (atomic_dec_and_test(&cm_id_priv->refcount))
                complete(&cm_id_priv->comp);
}

static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
                        struct ib_mad_send_buf **msg)
{
        struct ib_mad_agent *mad_agent;
        struct ib_mad_send_buf *m;
        struct ib_ah *ah;

        mad_agent = cm_id_priv->av.port->mad_agent;
        ah = ib_create_ah(mad_agent->qp->pd, &cm_id_priv->av.ah_attr);
        if (IS_ERR(ah))
                return PTR_ERR(ah);

        m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn,
                               cm_id_priv->av.pkey_index,
                               0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
                               GFP_ATOMIC,
                               IB_MGMT_BASE_VERSION);
        if (IS_ERR(m)) {
                ib_destroy_ah(ah);
                return PTR_ERR(m);
        }

        /* Timeout set by caller if response is expected. */
        m->ah = ah;
        m->retries = cm_id_priv->max_cm_retries;

        atomic_inc(&cm_id_priv->refcount);
        m->context[0] = cm_id_priv;
        *msg = m;
        return 0;
}

static int cm_alloc_response_msg(struct cm_port *port,
                                 struct ib_mad_recv_wc *mad_recv_wc,
                                 struct ib_mad_send_buf **msg)
{
        struct ib_mad_send_buf *m;
        struct ib_ah *ah;

        ah = ib_create_ah_from_wc(port->mad_agent->qp->pd, mad_recv_wc->wc,
                                  mad_recv_wc->recv_buf.grh, port->port_num);
        if (IS_ERR(ah))
                return PTR_ERR(ah);

        m = ib_create_send_mad(port->mad_agent, 1, mad_recv_wc->wc->pkey_index,
                               0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
                               GFP_ATOMIC,
                               IB_MGMT_BASE_VERSION);
        if (IS_ERR(m)) {
                ib_destroy_ah(ah);
                return PTR_ERR(m);
        }
        m->ah = ah;
        *msg = m;
        return 0;
}

static void cm_free_msg(struct ib_mad_send_buf *msg)
{
        ib_destroy_ah(msg->ah);
        if (msg->context[0])
                cm_deref_id(msg->context[0]);
        ib_free_send_mad(msg);
}
static void * cm_copy_private_data(const void *private_data,
                                   u8 private_data_len)
{
        void *data;

        if (!private_data || !private_data_len)
                return NULL;

        data = kmemdup(private_data, private_data_len, GFP_KERNEL);
        if (!data)
                return ERR_PTR(-ENOMEM);

        return data;
}

static void cm_set_private_data(struct cm_id_private *cm_id_priv,
                                void *private_data, u8 private_data_len)
{
        if (cm_id_priv->private_data && cm_id_priv->private_data_len)
                kfree(cm_id_priv->private_data);

        cm_id_priv->private_data = private_data;
        cm_id_priv->private_data_len = private_data_len;
}

static void cm_init_av_for_response(struct cm_port *port, struct ib_wc *wc,
                                    struct ib_grh *grh, struct cm_av *av)
{
        av->port = port;
        av->pkey_index = wc->pkey_index;
        ib_init_ah_from_wc(port->cm_dev->ib_device, port->port_num, wc,
                           grh, &av->ah_attr);
}
static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av)
{
        struct cm_device *cm_dev;
        struct cm_port *port = NULL;
        unsigned long flags;
        int ret;
        u8 p;
        struct net_device *ndev = ib_get_ndev_from_path(path);

        read_lock_irqsave(&cm.device_lock, flags);
        list_for_each_entry(cm_dev, &cm.device_list, list) {
                if (!ib_find_cached_gid(cm_dev->ib_device, &path->sgid,
                                        ndev, &p, NULL)) {
                        port = cm_dev->port[p-1];
                        break;
                }
        }
        read_unlock_irqrestore(&cm.device_lock, flags);

        if (ndev)
                dev_put(ndev);

        if (!port)
                return -EINVAL;

        ret = ib_find_cached_pkey(cm_dev->ib_device, port->port_num,
                                  be16_to_cpu(path->pkey), &av->pkey_index);
        if (ret)
                return ret;

        av->port = port;
        ib_init_ah_from_path(cm_dev->ib_device, port->port_num, path,
                             &av->ah_attr);
        av->timeout = path->packet_life_time + 1;

        return 0;
}
static int cm_alloc_id(struct cm_id_private *cm_id_priv)
{
        unsigned long flags;
        int id;

        idr_preload(GFP_KERNEL);
        spin_lock_irqsave(&cm.lock, flags);

        id = idr_alloc_cyclic(&cm.local_id_table, cm_id_priv, 0, 0, GFP_NOWAIT);

        spin_unlock_irqrestore(&cm.lock, flags);
        idr_preload_end();

        cm_id_priv->id.local_id = (__force __be32)id ^ cm.random_id_operand;
        return id < 0 ? id : 0;
}

static void cm_free_id(__be32 local_id)
{
        spin_lock_irq(&cm.lock);
        idr_remove(&cm.local_id_table,
                   (__force int) (local_id ^ cm.random_id_operand));
        spin_unlock_irq(&cm.lock);
}

static struct cm_id_private * cm_get_id(__be32 local_id, __be32 remote_id)
{
        struct cm_id_private *cm_id_priv;

        cm_id_priv = idr_find(&cm.local_id_table,
                              (__force int) (local_id ^ cm.random_id_operand));
        if (cm_id_priv) {
                if (cm_id_priv->id.remote_id == remote_id)
                        atomic_inc(&cm_id_priv->refcount);
                else
                        cm_id_priv = NULL;
        }

        return cm_id_priv;
}

static struct cm_id_private * cm_acquire_id(__be32 local_id, __be32 remote_id)
{
        struct cm_id_private *cm_id_priv;

        spin_lock_irq(&cm.lock);
        cm_id_priv = cm_get_id(local_id, remote_id);
        spin_unlock_irq(&cm.lock);

        return cm_id_priv;
}
/*
 * Trivial helpers to strip endian annotation and compare; the
 * endianness doesn't actually matter since we just need a stable
 * order for the RB tree.
 */
static int be32_lt(__be32 a, __be32 b)
{
        return (__force u32) a < (__force u32) b;
}

static int be32_gt(__be32 a, __be32 b)
{
        return (__force u32) a > (__force u32) b;
}

static int be64_lt(__be64 a, __be64 b)
{
        return (__force u64) a < (__force u64) b;
}

static int be64_gt(__be64 a, __be64 b)
{
        return (__force u64) a > (__force u64) b;
}
static struct cm_id_private * cm_insert_listen(struct cm_id_private *cm_id_priv)
{
        struct rb_node **link = &cm.listen_service_table.rb_node;
        struct rb_node *parent = NULL;
        struct cm_id_private *cur_cm_id_priv;
        __be64 service_id = cm_id_priv->id.service_id;
        __be64 service_mask = cm_id_priv->id.service_mask;

        while (*link) {
                parent = *link;
                cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
                                          service_node);
                if ((cur_cm_id_priv->id.service_mask & service_id) ==
                    (service_mask & cur_cm_id_priv->id.service_id) &&
                    (cm_id_priv->id.device == cur_cm_id_priv->id.device))
                        return cur_cm_id_priv;

                if (cm_id_priv->id.device < cur_cm_id_priv->id.device)
                        link = &(*link)->rb_left;
                else if (cm_id_priv->id.device > cur_cm_id_priv->id.device)
                        link = &(*link)->rb_right;
                else if (be64_lt(service_id, cur_cm_id_priv->id.service_id))
                        link = &(*link)->rb_left;
                else if (be64_gt(service_id, cur_cm_id_priv->id.service_id))
                        link = &(*link)->rb_right;
                else
                        link = &(*link)->rb_right;
        }
        rb_link_node(&cm_id_priv->service_node, parent, link);
        rb_insert_color(&cm_id_priv->service_node, &cm.listen_service_table);
        return NULL;
}
static struct cm_id_private * cm_find_listen(struct ib_device *device,
                                             __be64 service_id)
{
        struct rb_node *node = cm.listen_service_table.rb_node;
        struct cm_id_private *cm_id_priv;

        while (node) {
                cm_id_priv = rb_entry(node, struct cm_id_private, service_node);
                if ((cm_id_priv->id.service_mask & service_id) ==
                     cm_id_priv->id.service_id &&
                    (cm_id_priv->id.device == device))
                        return cm_id_priv;

                if (device < cm_id_priv->id.device)
                        node = node->rb_left;
                else if (device > cm_id_priv->id.device)
                        node = node->rb_right;
                else if (be64_lt(service_id, cm_id_priv->id.service_id))
                        node = node->rb_left;
                else if (be64_gt(service_id, cm_id_priv->id.service_id))
                        node = node->rb_right;
                else
                        node = node->rb_right;
        }
        return NULL;
}
static struct cm_timewait_info * cm_insert_remote_id(struct cm_timewait_info
                                                     *timewait_info)
{
        struct rb_node **link = &cm.remote_id_table.rb_node;
        struct rb_node *parent = NULL;
        struct cm_timewait_info *cur_timewait_info;
        __be64 remote_ca_guid = timewait_info->remote_ca_guid;
        __be32 remote_id = timewait_info->work.remote_id;

        while (*link) {
                parent = *link;
                cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
                                             remote_id_node);
                if (be32_lt(remote_id, cur_timewait_info->work.remote_id))
                        link = &(*link)->rb_left;
                else if (be32_gt(remote_id, cur_timewait_info->work.remote_id))
                        link = &(*link)->rb_right;
                else if (be64_lt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
                        link = &(*link)->rb_left;
                else if (be64_gt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
                        link = &(*link)->rb_right;
                else
                        return cur_timewait_info;
        }
        timewait_info->inserted_remote_id = 1;
        rb_link_node(&timewait_info->remote_id_node, parent, link);
        rb_insert_color(&timewait_info->remote_id_node, &cm.remote_id_table);
        return NULL;
}
static struct cm_timewait_info * cm_find_remote_id(__be64 remote_ca_guid,
                                                   __be32 remote_id)
{
        struct rb_node *node = cm.remote_id_table.rb_node;
        struct cm_timewait_info *timewait_info;

        while (node) {
                timewait_info = rb_entry(node, struct cm_timewait_info,
                                         remote_id_node);
                if (be32_lt(remote_id, timewait_info->work.remote_id))
                        node = node->rb_left;
                else if (be32_gt(remote_id, timewait_info->work.remote_id))
                        node = node->rb_right;
                else if (be64_lt(remote_ca_guid, timewait_info->remote_ca_guid))
                        node = node->rb_left;
                else if (be64_gt(remote_ca_guid, timewait_info->remote_ca_guid))
                        node = node->rb_right;
                else
                        return timewait_info;
        }
        return NULL;
}
static struct cm_timewait_info * cm_insert_remote_qpn(struct cm_timewait_info
                                                      *timewait_info)
{
        struct rb_node **link = &cm.remote_qp_table.rb_node;
        struct rb_node *parent = NULL;
        struct cm_timewait_info *cur_timewait_info;
        __be64 remote_ca_guid = timewait_info->remote_ca_guid;
        __be32 remote_qpn = timewait_info->remote_qpn;

        while (*link) {
                parent = *link;
                cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
                                             remote_qp_node);
                if (be32_lt(remote_qpn, cur_timewait_info->remote_qpn))
                        link = &(*link)->rb_left;
                else if (be32_gt(remote_qpn, cur_timewait_info->remote_qpn))
                        link = &(*link)->rb_right;
                else if (be64_lt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
                        link = &(*link)->rb_left;
                else if (be64_gt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
                        link = &(*link)->rb_right;
                else
                        return cur_timewait_info;
        }
        timewait_info->inserted_remote_qp = 1;
        rb_link_node(&timewait_info->remote_qp_node, parent, link);
        rb_insert_color(&timewait_info->remote_qp_node, &cm.remote_qp_table);
        return NULL;
}
static struct cm_id_private * cm_insert_remote_sidr(struct cm_id_private
                                                    *cm_id_priv)
{
        struct rb_node **link = &cm.remote_sidr_table.rb_node;
        struct rb_node *parent = NULL;
        struct cm_id_private *cur_cm_id_priv;
        union ib_gid *port_gid = &cm_id_priv->av.dgid;
        __be32 remote_id = cm_id_priv->id.remote_id;

        while (*link) {
                parent = *link;
                cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
                                          sidr_id_node);
                if (be32_lt(remote_id, cur_cm_id_priv->id.remote_id))
                        link = &(*link)->rb_left;
                else if (be32_gt(remote_id, cur_cm_id_priv->id.remote_id))
                        link = &(*link)->rb_right;
                else {
                        int cmp;
                        cmp = memcmp(port_gid, &cur_cm_id_priv->av.dgid,
                                     sizeof *port_gid);
                        if (cmp < 0)
                                link = &(*link)->rb_left;
                        else if (cmp > 0)
                                link = &(*link)->rb_right;
                        else
                                return cur_cm_id_priv;
                }
        }
        rb_link_node(&cm_id_priv->sidr_id_node, parent, link);
        rb_insert_color(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
        return NULL;
}
static void cm_reject_sidr_req(struct cm_id_private *cm_id_priv,
                               enum ib_cm_sidr_status status)
{
        struct ib_cm_sidr_rep_param param;

        memset(&param, 0, sizeof param);
        param.status = status;
        ib_send_cm_sidr_rep(&cm_id_priv->id, &param);
}
struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
                                 ib_cm_handler cm_handler,
                                 void *context)
{
        struct cm_id_private *cm_id_priv;
        int ret;

        cm_id_priv = kzalloc(sizeof *cm_id_priv, GFP_KERNEL);
        if (!cm_id_priv)
                return ERR_PTR(-ENOMEM);

        cm_id_priv->id.state = IB_CM_IDLE;
        cm_id_priv->id.device = device;
        cm_id_priv->id.cm_handler = cm_handler;
        cm_id_priv->id.context = context;
        cm_id_priv->id.remote_cm_qpn = 1;
        ret = cm_alloc_id(cm_id_priv);
        if (ret)
                goto error;

        spin_lock_init(&cm_id_priv->lock);
        init_completion(&cm_id_priv->comp);
        INIT_LIST_HEAD(&cm_id_priv->work_list);
        atomic_set(&cm_id_priv->work_count, -1);
        atomic_set(&cm_id_priv->refcount, 1);
        return &cm_id_priv->id;

error:
        kfree(cm_id_priv);
        return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(ib_create_cm_id);
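
/*
 * Example usage (a minimal sketch; my_cm_handler and my_ctx are
 * hypothetical consumer code, not part of this file):
 *
 *      static int my_cm_handler(struct ib_cm_id *id,
 *                               struct ib_cm_event *event)
 *      {
 *              return 0;       (returning non-zero destroys the id,
 *                               see cm_process_work() below)
 *      }
 *
 *      struct ib_cm_id *id = ib_create_cm_id(device, my_cm_handler, my_ctx);
 *      if (IS_ERR(id))
 *              return PTR_ERR(id);
 *      ...
 *      ib_destroy_cm_id(id);
 */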
static struct cm_work * cm_dequeue_work(struct cm_id_private *cm_id_priv)
{
        struct cm_work *work;

        if (list_empty(&cm_id_priv->work_list))
                return NULL;

        work = list_entry(cm_id_priv->work_list.next, struct cm_work, list);
        list_del(&work->list);
        return work;
}

static void cm_free_work(struct cm_work *work)
{
        if (work->mad_recv_wc)
                ib_free_recv_mad(work->mad_recv_wc);
        kfree(work);
}

static inline int cm_convert_to_ms(int iba_time)
{
        /* approximate conversion to ms from 4.096us x 2^iba_time */
        return 1 << max(iba_time - 8, 0);
}
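
/*
 * Worked example of the approximation above: for iba_time = 20 the
 * exact value is 4.096us * 2^20 ~= 4295 ms, while 1 << (20 - 8) gives
 * 4096 ms; for iba_time <= 8 the result is clamped to 1 ms.
 */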
/*
 * calculate: 4.096x2^ack_timeout = 4.096x2^ack_delay + 2x4.096x2^life_time
 * Because of how ack_timeout is stored, adding one doubles the timeout.
 * To avoid large timeouts, select the max(ack_delay, life_time + 1), and
 * increment it (round up) only if the other is within 50%.
 */
static u8 cm_ack_timeout(u8 ca_ack_delay, u8 packet_life_time)
{
        int ack_timeout = packet_life_time + 1;

        if (ack_timeout >= ca_ack_delay)
                ack_timeout += (ca_ack_delay >= (ack_timeout - 1));
        else
                ack_timeout = ca_ack_delay +
                              (ack_timeout >= (ca_ack_delay - 1));

        return min(31, ack_timeout);
}
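
/*
 * Worked example: ca_ack_delay = 15 and packet_life_time = 14 start
 * from ack_timeout = 15; since 15 >= 15 and 15 >= 14, the result is
 * rounded up to 16, i.e. 4.096us * 2^16, which matches
 * 4.096us * 2^15 + 2 * 4.096us * 2^14 exactly.  With ca_ack_delay = 5
 * the ack_delay term is negligible and the result stays 15.
 */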
static void cm_cleanup_timewait(struct cm_timewait_info *timewait_info)
{
        if (timewait_info->inserted_remote_id) {
                rb_erase(&timewait_info->remote_id_node, &cm.remote_id_table);
                timewait_info->inserted_remote_id = 0;
        }

        if (timewait_info->inserted_remote_qp) {
                rb_erase(&timewait_info->remote_qp_node, &cm.remote_qp_table);
                timewait_info->inserted_remote_qp = 0;
        }
}

static struct cm_timewait_info * cm_create_timewait_info(__be32 local_id)
{
        struct cm_timewait_info *timewait_info;

        timewait_info = kzalloc(sizeof *timewait_info, GFP_KERNEL);
        if (!timewait_info)
                return ERR_PTR(-ENOMEM);

        timewait_info->work.local_id = local_id;
        INIT_DELAYED_WORK(&timewait_info->work.work, cm_work_handler);
        timewait_info->work.cm_event.event = IB_CM_TIMEWAIT_EXIT;
        return timewait_info;
}
static void cm_enter_timewait(struct cm_id_private *cm_id_priv)
{
        int wait_time;
        unsigned long flags;
        struct cm_device *cm_dev;

        cm_dev = ib_get_client_data(cm_id_priv->id.device, &cm_client);
        if (!cm_dev)
                return;

        spin_lock_irqsave(&cm.lock, flags);
        cm_cleanup_timewait(cm_id_priv->timewait_info);
        list_add_tail(&cm_id_priv->timewait_info->list, &cm.timewait_list);
        spin_unlock_irqrestore(&cm.lock, flags);

        /*
         * The cm_id could be destroyed by the user before we exit timewait.
         * To protect against this, we search for the cm_id after exiting
         * timewait before notifying the user that we've exited timewait.
         */
        cm_id_priv->id.state = IB_CM_TIMEWAIT;
        wait_time = cm_convert_to_ms(cm_id_priv->av.timeout);

        /* Check if the device started its remove_one */
        spin_lock_irq(&cm.lock);
        if (!cm_dev->going_down)
                queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work,
                                   msecs_to_jiffies(wait_time));
        spin_unlock_irq(&cm.lock);

        cm_id_priv->timewait_info = NULL;
}

static void cm_reset_to_idle(struct cm_id_private *cm_id_priv)
{
        unsigned long flags;

        cm_id_priv->id.state = IB_CM_IDLE;
        if (cm_id_priv->timewait_info) {
                spin_lock_irqsave(&cm.lock, flags);
                cm_cleanup_timewait(cm_id_priv->timewait_info);
                spin_unlock_irqrestore(&cm.lock, flags);
                kfree(cm_id_priv->timewait_info);
                cm_id_priv->timewait_info = NULL;
        }
}
static void cm_destroy_id(struct ib_cm_id *cm_id, int err)
{
        struct cm_id_private *cm_id_priv;
        struct cm_work *work;

        cm_id_priv = container_of(cm_id, struct cm_id_private, id);
retest:
        spin_lock_irq(&cm_id_priv->lock);
        switch (cm_id->state) {
        case IB_CM_LISTEN:
                spin_unlock_irq(&cm_id_priv->lock);

                spin_lock_irq(&cm.lock);
                if (--cm_id_priv->listen_sharecount > 0) {
                        /* The id is still shared. */
                        cm_deref_id(cm_id_priv);
                        spin_unlock_irq(&cm.lock);
                        return;
                }
                rb_erase(&cm_id_priv->service_node, &cm.listen_service_table);
                spin_unlock_irq(&cm.lock);
                break;
        case IB_CM_SIDR_REQ_SENT:
                cm_id->state = IB_CM_IDLE;
                ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
                spin_unlock_irq(&cm_id_priv->lock);
                break;
        case IB_CM_SIDR_REQ_RCVD:
                spin_unlock_irq(&cm_id_priv->lock);
                cm_reject_sidr_req(cm_id_priv, IB_SIDR_REJECT);
                spin_lock_irq(&cm.lock);
                if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node))
                        rb_erase(&cm_id_priv->sidr_id_node,
                                 &cm.remote_sidr_table);
                spin_unlock_irq(&cm.lock);
                break;
        case IB_CM_REQ_SENT:
        case IB_CM_MRA_REQ_RCVD:
                ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
                spin_unlock_irq(&cm_id_priv->lock);
                ib_send_cm_rej(cm_id, IB_CM_REJ_TIMEOUT,
                               &cm_id_priv->id.device->node_guid,
                               sizeof cm_id_priv->id.device->node_guid,
                               NULL, 0);
                break;
        case IB_CM_REQ_RCVD:
                if (err == -ENOMEM) {
                        /* Do not reject to allow future retries. */
                        cm_reset_to_idle(cm_id_priv);
                        spin_unlock_irq(&cm_id_priv->lock);
                } else {
                        spin_unlock_irq(&cm_id_priv->lock);
                        ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
                                       NULL, 0, NULL, 0);
                }
                break;
        case IB_CM_REP_SENT:
        case IB_CM_MRA_REP_RCVD:
                ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
                /* Fall through */
        case IB_CM_MRA_REQ_SENT:
        case IB_CM_REP_RCVD:
        case IB_CM_MRA_REP_SENT:
                spin_unlock_irq(&cm_id_priv->lock);
                ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
                               NULL, 0, NULL, 0);
                break;
        case IB_CM_ESTABLISHED:
                spin_unlock_irq(&cm_id_priv->lock);
                if (cm_id_priv->qp_type == IB_QPT_XRC_TGT)
                        break;
                ib_send_cm_dreq(cm_id, NULL, 0);
                goto retest;
        case IB_CM_DREQ_SENT:
                ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
                cm_enter_timewait(cm_id_priv);
                spin_unlock_irq(&cm_id_priv->lock);
                break;
        case IB_CM_DREQ_RCVD:
                spin_unlock_irq(&cm_id_priv->lock);
                ib_send_cm_drep(cm_id, NULL, 0);
                break;
        default:
                spin_unlock_irq(&cm_id_priv->lock);
                break;
        }

        cm_free_id(cm_id->local_id);
        cm_deref_id(cm_id_priv);
        wait_for_completion(&cm_id_priv->comp);
        while ((work = cm_dequeue_work(cm_id_priv)) != NULL)
                cm_free_work(work);
        kfree(cm_id_priv->private_data);
        kfree(cm_id_priv);
}

void ib_destroy_cm_id(struct ib_cm_id *cm_id)
{
        cm_destroy_id(cm_id, 0);
}
EXPORT_SYMBOL(ib_destroy_cm_id);
/**
 * __ib_cm_listen - Initiates listening on the specified service ID for
 *   connection and service ID resolution requests.
 * @cm_id: Connection identifier associated with the listen request.
 * @service_id: Service identifier matched against incoming connection
 *   and service ID resolution requests.  The service ID should be specified
 *   in network-byte order.  If set to IB_CM_ASSIGN_SERVICE_ID, the CM will
 *   assign a service ID to the caller.
 * @service_mask: Mask applied to service ID used to listen across a
 *   range of service IDs.  If set to 0, the service ID is matched
 *   exactly.  This parameter is ignored if %service_id is set to
 *   IB_CM_ASSIGN_SERVICE_ID.
 */
static int __ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id,
                          __be64 service_mask)
{
        struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
        int ret = 0;

        service_mask = service_mask ? service_mask : ~cpu_to_be64(0);
        service_id &= service_mask;
        if ((service_id & IB_SERVICE_ID_AGN_MASK) == IB_CM_ASSIGN_SERVICE_ID &&
            (service_id != IB_CM_ASSIGN_SERVICE_ID))
                return -EINVAL;

        cm_id_priv = container_of(cm_id, struct cm_id_private, id);
        if (cm_id->state != IB_CM_IDLE)
                return -EINVAL;

        cm_id->state = IB_CM_LISTEN;
        ++cm_id_priv->listen_sharecount;

        if (service_id == IB_CM_ASSIGN_SERVICE_ID) {
                cm_id->service_id = cpu_to_be64(cm.listen_service_id++);
                cm_id->service_mask = ~cpu_to_be64(0);
        } else {
                cm_id->service_id = service_id;
                cm_id->service_mask = service_mask;
        }
        cur_cm_id_priv = cm_insert_listen(cm_id_priv);

        if (cur_cm_id_priv) {
                cm_id->state = IB_CM_IDLE;
                --cm_id_priv->listen_sharecount;
                ret = -EBUSY;
        }
        return ret;
}

int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id, __be64 service_mask)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&cm.lock, flags);
        ret = __ib_cm_listen(cm_id, service_id, service_mask);
        spin_unlock_irqrestore(&cm.lock, flags);
        return ret;
}
EXPORT_SYMBOL(ib_cm_listen);
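
/*
 * Example (a sketch; the service ID value is illustrative): a caller
 * that wants to accept connections on one exact service ID creates an
 * id with ib_create_cm_id() and then does
 *
 *      ret = ib_cm_listen(cm_id, cpu_to_be64(0x1234), 0);
 *
 * where a service_mask of 0 requests an exact match, per
 * __ib_cm_listen() above.
 */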
/**
 * Create a new listening ib_cm_id and listen on the given service ID.
 *
 * If there's an existing ID listening on that same device and service ID,
 * return it.
 *
 * @device: Device associated with the cm_id.  All related communication will
 * be associated with the specified device.
 * @cm_handler: Callback invoked to notify the user of CM events.
 * @service_id: Service identifier matched against incoming connection
 *   and service ID resolution requests.  The service ID should be specified
 *   in network-byte order.  If set to IB_CM_ASSIGN_SERVICE_ID, the CM will
 *   assign a service ID to the caller.
 *
 * Callers should call ib_destroy_cm_id when done with the listener ID.
 */
struct ib_cm_id *ib_cm_insert_listen(struct ib_device *device,
                                     ib_cm_handler cm_handler,
                                     __be64 service_id)
{
        struct cm_id_private *cm_id_priv;
        struct ib_cm_id *cm_id;
        unsigned long flags;
        int err = 0;

        /* Create an ID in advance, since the creation may sleep */
        cm_id = ib_create_cm_id(device, cm_handler, NULL);
        if (IS_ERR(cm_id))
                return cm_id;

        spin_lock_irqsave(&cm.lock, flags);

        if (service_id == IB_CM_ASSIGN_SERVICE_ID)
                goto new_id;

        /* Find an existing ID */
        cm_id_priv = cm_find_listen(device, service_id);
        if (cm_id_priv) {
                if (cm_id->cm_handler != cm_handler || cm_id->context) {
                        /* Sharing an ib_cm_id with different handlers is not
                         * supported */
                        spin_unlock_irqrestore(&cm.lock, flags);
                        return ERR_PTR(-EINVAL);
                }
                atomic_inc(&cm_id_priv->refcount);
                ++cm_id_priv->listen_sharecount;
                spin_unlock_irqrestore(&cm.lock, flags);

                ib_destroy_cm_id(cm_id);
                cm_id = &cm_id_priv->id;
                return cm_id;
        }

new_id:
        /* Use newly created ID */
        err = __ib_cm_listen(cm_id, service_id, 0);

        spin_unlock_irqrestore(&cm.lock, flags);

        if (err) {
                ib_destroy_cm_id(cm_id);
                return ERR_PTR(err);
        }
        return cm_id;
}
EXPORT_SYMBOL(ib_cm_insert_listen);
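
/*
 * Example (sketch; my_cm_handler is a hypothetical handler): unlike
 * ib_cm_listen(), this variant both creates and shares the listener,
 * so a typical caller only needs
 *
 *      id = ib_cm_insert_listen(device, my_cm_handler, cpu_to_be64(0x1234));
 *      if (IS_ERR(id))
 *              return PTR_ERR(id);
 *
 * and a later identical call using the same handler returns the same
 * (share-counted) id rather than -EBUSY.
 */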
static __be64 cm_form_tid(struct cm_id_private *cm_id_priv,
                          enum cm_msg_sequence msg_seq)
{
        u64 hi_tid, low_tid;

        hi_tid   = ((u64) cm_id_priv->av.port->mad_agent->hi_tid) << 32;
        low_tid  = (u64) ((__force u32)cm_id_priv->id.local_id |
                          (msg_seq << 30));
        return cpu_to_be64(hi_tid | low_tid);
}
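
/*
 * TID layout produced above: bits 63..32 carry the MAD agent's hi_tid,
 * bits 31..30 the message sequence, and the remaining bits the local
 * communication ID.  For example, assuming CM_MSG_SEQUENCE_DREQ == 2
 * (per the enum in cm_msgs.h), hi_tid = 0x5 and local_id = 0x00000007
 * yield 0x0000000580000007 before the cpu_to_be64() conversion.
 */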
static void cm_format_mad_hdr(struct ib_mad_hdr *hdr,
                              __be16 attr_id, __be64 tid)
{
        hdr->base_version  = IB_MGMT_BASE_VERSION;
        hdr->mgmt_class    = IB_MGMT_CLASS_CM;
        hdr->class_version = IB_CM_CLASS_VERSION;
        hdr->method        = IB_MGMT_METHOD_SEND;
        hdr->attr_id       = attr_id;
        hdr->tid           = tid;
}
static void cm_format_req(struct cm_req_msg *req_msg,
                          struct cm_id_private *cm_id_priv,
                          struct ib_cm_req_param *param)
{
        struct ib_sa_path_rec *pri_path = param->primary_path;
        struct ib_sa_path_rec *alt_path = param->alternate_path;

        cm_format_mad_hdr(&req_msg->hdr, CM_REQ_ATTR_ID,
                          cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_REQ));

        req_msg->local_comm_id = cm_id_priv->id.local_id;
        req_msg->service_id = param->service_id;
        req_msg->local_ca_guid = cm_id_priv->id.device->node_guid;
        cm_req_set_local_qpn(req_msg, cpu_to_be32(param->qp_num));
        cm_req_set_init_depth(req_msg, param->initiator_depth);
        cm_req_set_remote_resp_timeout(req_msg,
                                       param->remote_cm_response_timeout);
        cm_req_set_qp_type(req_msg, param->qp_type);
        cm_req_set_flow_ctrl(req_msg, param->flow_control);
        cm_req_set_starting_psn(req_msg, cpu_to_be32(param->starting_psn));
        cm_req_set_local_resp_timeout(req_msg,
                                      param->local_cm_response_timeout);
        req_msg->pkey = param->primary_path->pkey;
        cm_req_set_path_mtu(req_msg, param->primary_path->mtu);
        cm_req_set_max_cm_retries(req_msg, param->max_cm_retries);

        if (param->qp_type != IB_QPT_XRC_INI) {
                cm_req_set_resp_res(req_msg, param->responder_resources);
                cm_req_set_retry_count(req_msg, param->retry_count);
                cm_req_set_rnr_retry_count(req_msg, param->rnr_retry_count);
                cm_req_set_srq(req_msg, param->srq);
        }

        if (pri_path->hop_limit <= 1) {
                req_msg->primary_local_lid = pri_path->slid;
                req_msg->primary_remote_lid = pri_path->dlid;
        } else {
                /* Work-around until there's a way to obtain remote LID info */
                req_msg->primary_local_lid = IB_LID_PERMISSIVE;
                req_msg->primary_remote_lid = IB_LID_PERMISSIVE;
        }
        req_msg->primary_local_gid = pri_path->sgid;
        req_msg->primary_remote_gid = pri_path->dgid;
        cm_req_set_primary_flow_label(req_msg, pri_path->flow_label);
        cm_req_set_primary_packet_rate(req_msg, pri_path->rate);
        req_msg->primary_traffic_class = pri_path->traffic_class;
        req_msg->primary_hop_limit = pri_path->hop_limit;
        cm_req_set_primary_sl(req_msg, pri_path->sl);
        cm_req_set_primary_subnet_local(req_msg, (pri_path->hop_limit <= 1));
        cm_req_set_primary_local_ack_timeout(req_msg,
                cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
                               pri_path->packet_life_time));

        if (alt_path) {
                if (alt_path->hop_limit <= 1) {
                        req_msg->alt_local_lid = alt_path->slid;
                        req_msg->alt_remote_lid = alt_path->dlid;
                } else {
                        req_msg->alt_local_lid = IB_LID_PERMISSIVE;
                        req_msg->alt_remote_lid = IB_LID_PERMISSIVE;
                }
                req_msg->alt_local_gid = alt_path->sgid;
                req_msg->alt_remote_gid = alt_path->dgid;
                cm_req_set_alt_flow_label(req_msg,
                                          alt_path->flow_label);
                cm_req_set_alt_packet_rate(req_msg, alt_path->rate);
                req_msg->alt_traffic_class = alt_path->traffic_class;
                req_msg->alt_hop_limit = alt_path->hop_limit;
                cm_req_set_alt_sl(req_msg, alt_path->sl);
                cm_req_set_alt_subnet_local(req_msg, (alt_path->hop_limit <= 1));
                cm_req_set_alt_local_ack_timeout(req_msg,
                        cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
                                       alt_path->packet_life_time));
        }

        if (param->private_data && param->private_data_len)
                memcpy(req_msg->private_data, param->private_data,
                       param->private_data_len);
}
static int cm_validate_req_param(struct ib_cm_req_param *param)
{
        /* peer-to-peer not supported */
        if (param->peer_to_peer)
                return -EINVAL;

        if (!param->primary_path)
                return -EINVAL;

        if (param->qp_type != IB_QPT_RC && param->qp_type != IB_QPT_UC &&
            param->qp_type != IB_QPT_XRC_INI)
                return -EINVAL;

        if (param->private_data &&
            param->private_data_len > IB_CM_REQ_PRIVATE_DATA_SIZE)
                return -EINVAL;

        if (param->alternate_path &&
            (param->alternate_path->pkey != param->primary_path->pkey ||
             param->alternate_path->mtu != param->primary_path->mtu))
                return -EINVAL;

        return 0;
}
int ib_send_cm_req(struct ib_cm_id *cm_id,
                   struct ib_cm_req_param *param)
{
        struct cm_id_private *cm_id_priv;
        struct cm_req_msg *req_msg;
        unsigned long flags;
        int ret;

        ret = cm_validate_req_param(param);
        if (ret)
                return ret;

        /* Verify that we're not in timewait. */
        cm_id_priv = container_of(cm_id, struct cm_id_private, id);
        spin_lock_irqsave(&cm_id_priv->lock, flags);
        if (cm_id->state != IB_CM_IDLE) {
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                ret = -EINVAL;
                goto out;
        }
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
                                                            id.local_id);
        if (IS_ERR(cm_id_priv->timewait_info)) {
                ret = PTR_ERR(cm_id_priv->timewait_info);
                goto out;
        }

        ret = cm_init_av_by_path(param->primary_path, &cm_id_priv->av);
        if (ret)
                goto error1;
        if (param->alternate_path) {
                ret = cm_init_av_by_path(param->alternate_path,
                                         &cm_id_priv->alt_av);
                if (ret)
                        goto error1;
        }
        cm_id->service_id = param->service_id;
        cm_id->service_mask = ~cpu_to_be64(0);
        cm_id_priv->timeout_ms = cm_convert_to_ms(
                                    param->primary_path->packet_life_time) * 2 +
                                 cm_convert_to_ms(
                                    param->remote_cm_response_timeout);
        cm_id_priv->max_cm_retries = param->max_cm_retries;
        cm_id_priv->initiator_depth = param->initiator_depth;
        cm_id_priv->responder_resources = param->responder_resources;
        cm_id_priv->retry_count = param->retry_count;
        cm_id_priv->path_mtu = param->primary_path->mtu;
        cm_id_priv->pkey = param->primary_path->pkey;
        cm_id_priv->qp_type = param->qp_type;

        ret = cm_alloc_msg(cm_id_priv, &cm_id_priv->msg);
        if (ret)
                goto error1;

        req_msg = (struct cm_req_msg *) cm_id_priv->msg->mad;
        cm_format_req(req_msg, cm_id_priv, param);
        cm_id_priv->tid = req_msg->hdr.tid;
        cm_id_priv->msg->timeout_ms = cm_id_priv->timeout_ms;
        cm_id_priv->msg->context[1] = (void *) (unsigned long) IB_CM_REQ_SENT;

        cm_id_priv->local_qpn = cm_req_get_local_qpn(req_msg);
        cm_id_priv->rq_psn = cm_req_get_starting_psn(req_msg);

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        ret = ib_post_send_mad(cm_id_priv->msg, NULL);
        if (ret) {
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                goto error2;
        }
        BUG_ON(cm_id->state != IB_CM_IDLE);
        cm_id->state = IB_CM_REQ_SENT;
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        return 0;

error2: cm_free_msg(cm_id_priv->msg);
error1: kfree(cm_id_priv->timewait_info);
out:    return ret;
}
EXPORT_SYMBOL(ib_send_cm_req);
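
/*
 * Example flow on the active side (a sketch; my_* names are
 * hypothetical caller state):
 *
 *      struct ib_cm_req_param param = {
 *              .primary_path           = &my_path,
 *              .service_id             = cpu_to_be64(0x1234),
 *              .qp_num                 = my_qp->qp_num,
 *              .qp_type                = IB_QPT_RC,
 *              .responder_resources    = 4,
 *              .initiator_depth        = 4,
 *              .retry_count            = 7,
 *              .rnr_retry_count        = 7,
 *              .max_cm_retries         = 15,
 *      };
 *      ret = ib_send_cm_req(cm_id, &param);
 *
 * The REQ MAD is retried up to max_cm_retries times; the cm_handler
 * then sees IB_CM_REP_RECEIVED (or IB_CM_REJ_RECEIVED, or a send
 * timeout error event).
 */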
static int cm_issue_rej(struct cm_port *port,
                        struct ib_mad_recv_wc *mad_recv_wc,
                        enum ib_cm_rej_reason reason,
                        enum cm_msg_response msg_rejected,
                        void *ari, u8 ari_length)
{
        struct ib_mad_send_buf *msg = NULL;
        struct cm_rej_msg *rej_msg, *rcv_msg;
        int ret;

        ret = cm_alloc_response_msg(port, mad_recv_wc, &msg);
        if (ret)
                return ret;

        /* We just need common CM header information.  Cast to any message. */
        rcv_msg = (struct cm_rej_msg *) mad_recv_wc->recv_buf.mad;
        rej_msg = (struct cm_rej_msg *) msg->mad;

        cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, rcv_msg->hdr.tid);
        rej_msg->remote_comm_id = rcv_msg->local_comm_id;
        rej_msg->local_comm_id = rcv_msg->remote_comm_id;
        cm_rej_set_msg_rejected(rej_msg, msg_rejected);
        rej_msg->reason = cpu_to_be16(reason);

        if (ari && ari_length) {
                cm_rej_set_reject_info_len(rej_msg, ari_length);
                memcpy(rej_msg->ari, ari, ari_length);
        }

        ret = ib_post_send_mad(msg, NULL);
        if (ret)
                cm_free_msg(msg);

        return ret;
}
static inline int cm_is_active_peer(__be64 local_ca_guid, __be64 remote_ca_guid,
                                    __be32 local_qpn, __be32 remote_qpn)
{
        return (be64_to_cpu(local_ca_guid) > be64_to_cpu(remote_ca_guid) ||
                ((local_ca_guid == remote_ca_guid) &&
                 (be32_to_cpu(local_qpn) > be32_to_cpu(remote_qpn))));
}
static void cm_format_paths_from_req(struct cm_req_msg *req_msg,
                                     struct ib_sa_path_rec *primary_path,
                                     struct ib_sa_path_rec *alt_path)
{
        memset(primary_path, 0, sizeof *primary_path);
        primary_path->dgid = req_msg->primary_local_gid;
        primary_path->sgid = req_msg->primary_remote_gid;
        primary_path->dlid = req_msg->primary_local_lid;
        primary_path->slid = req_msg->primary_remote_lid;
        primary_path->flow_label = cm_req_get_primary_flow_label(req_msg);
        primary_path->hop_limit = req_msg->primary_hop_limit;
        primary_path->traffic_class = req_msg->primary_traffic_class;
        primary_path->reversible = 1;
        primary_path->pkey = req_msg->pkey;
        primary_path->sl = cm_req_get_primary_sl(req_msg);
        primary_path->mtu_selector = IB_SA_EQ;
        primary_path->mtu = cm_req_get_path_mtu(req_msg);
        primary_path->rate_selector = IB_SA_EQ;
        primary_path->rate = cm_req_get_primary_packet_rate(req_msg);
        primary_path->packet_life_time_selector = IB_SA_EQ;
        primary_path->packet_life_time =
                cm_req_get_primary_local_ack_timeout(req_msg);
        primary_path->packet_life_time -= (primary_path->packet_life_time > 0);
        primary_path->service_id = req_msg->service_id;

        if (req_msg->alt_local_lid) {
                memset(alt_path, 0, sizeof *alt_path);
                alt_path->dgid = req_msg->alt_local_gid;
                alt_path->sgid = req_msg->alt_remote_gid;
                alt_path->dlid = req_msg->alt_local_lid;
                alt_path->slid = req_msg->alt_remote_lid;
                alt_path->flow_label = cm_req_get_alt_flow_label(req_msg);
                alt_path->hop_limit = req_msg->alt_hop_limit;
                alt_path->traffic_class = req_msg->alt_traffic_class;
                alt_path->reversible = 1;
                alt_path->pkey = req_msg->pkey;
                alt_path->sl = cm_req_get_alt_sl(req_msg);
                alt_path->mtu_selector = IB_SA_EQ;
                alt_path->mtu = cm_req_get_path_mtu(req_msg);
                alt_path->rate_selector = IB_SA_EQ;
                alt_path->rate = cm_req_get_alt_packet_rate(req_msg);
                alt_path->packet_life_time_selector = IB_SA_EQ;
                alt_path->packet_life_time =
                        cm_req_get_alt_local_ack_timeout(req_msg);
                alt_path->packet_life_time -= (alt_path->packet_life_time > 0);
                alt_path->service_id = req_msg->service_id;
        }
}
static u16 cm_get_bth_pkey(struct cm_work *work)
{
        struct ib_device *ib_dev = work->port->cm_dev->ib_device;
        u8 port_num = work->port->port_num;
        u16 pkey_index = work->mad_recv_wc->wc->pkey_index;
        u16 pkey;
        int ret;

        ret = ib_get_cached_pkey(ib_dev, port_num, pkey_index, &pkey);
        if (ret) {
                dev_warn_ratelimited(&ib_dev->dev, "ib_cm: Couldn't retrieve pkey for incoming request (port %d, pkey index %d). %d\n",
                                     port_num, pkey_index, ret);
                return 0;
        }

        return pkey;
}
static void cm_format_req_event(struct cm_work *work,
                                struct cm_id_private *cm_id_priv,
                                struct ib_cm_id *listen_id)
{
        struct cm_req_msg *req_msg;
        struct ib_cm_req_event_param *param;

        req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
        param = &work->cm_event.param.req_rcvd;
        param->listen_id = listen_id;
        param->bth_pkey = cm_get_bth_pkey(work);
        param->port = cm_id_priv->av.port->port_num;
        param->primary_path = &work->path[0];
        if (req_msg->alt_local_lid)
                param->alternate_path = &work->path[1];
        else
                param->alternate_path = NULL;
        param->remote_ca_guid = req_msg->local_ca_guid;
        param->remote_qkey = be32_to_cpu(req_msg->local_qkey);
        param->remote_qpn = be32_to_cpu(cm_req_get_local_qpn(req_msg));
        param->qp_type = cm_req_get_qp_type(req_msg);
        param->starting_psn = be32_to_cpu(cm_req_get_starting_psn(req_msg));
        param->responder_resources = cm_req_get_init_depth(req_msg);
        param->initiator_depth = cm_req_get_resp_res(req_msg);
        param->local_cm_response_timeout =
                                        cm_req_get_remote_resp_timeout(req_msg);
        param->flow_control = cm_req_get_flow_ctrl(req_msg);
        param->remote_cm_response_timeout =
                                        cm_req_get_local_resp_timeout(req_msg);
        param->retry_count = cm_req_get_retry_count(req_msg);
        param->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
        param->srq = cm_req_get_srq(req_msg);
        work->cm_event.private_data = &req_msg->private_data;
}
static void cm_process_work(struct cm_id_private *cm_id_priv,
                            struct cm_work *work)
{
        int ret;

        /* We will typically only have the current event to report. */
        ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &work->cm_event);
        cm_free_work(work);

        while (!ret && !atomic_add_negative(-1, &cm_id_priv->work_count)) {
                spin_lock_irq(&cm_id_priv->lock);
                work = cm_dequeue_work(cm_id_priv);
                spin_unlock_irq(&cm_id_priv->lock);
                BUG_ON(!work);
                ret = cm_id_priv->id.cm_handler(&cm_id_priv->id,
                                                &work->cm_event);
                cm_free_work(work);
        }
        cm_deref_id(cm_id_priv);
        if (ret)
                cm_destroy_id(&cm_id_priv->id, ret);
}
static void cm_format_mra(struct cm_mra_msg *mra_msg,
                          struct cm_id_private *cm_id_priv,
                          enum cm_msg_response msg_mraed, u8 service_timeout,
                          const void *private_data, u8 private_data_len)
{
        cm_format_mad_hdr(&mra_msg->hdr, CM_MRA_ATTR_ID, cm_id_priv->tid);
        cm_mra_set_msg_mraed(mra_msg, msg_mraed);
        mra_msg->local_comm_id = cm_id_priv->id.local_id;
        mra_msg->remote_comm_id = cm_id_priv->id.remote_id;
        cm_mra_set_service_timeout(mra_msg, service_timeout);

        if (private_data && private_data_len)
                memcpy(mra_msg->private_data, private_data, private_data_len);
}
static void cm_format_rej(struct cm_rej_msg *rej_msg,
                          struct cm_id_private *cm_id_priv,
                          enum ib_cm_rej_reason reason,
                          void *ari,
                          u8 ari_length,
                          const void *private_data,
                          u8 private_data_len)
{
        cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, cm_id_priv->tid);
        rej_msg->remote_comm_id = cm_id_priv->id.remote_id;

        switch(cm_id_priv->id.state) {
        case IB_CM_REQ_RCVD:
                rej_msg->local_comm_id = 0;
                cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
                break;
        case IB_CM_MRA_REQ_SENT:
                rej_msg->local_comm_id = cm_id_priv->id.local_id;
                cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
                break;
        case IB_CM_REP_RCVD:
        case IB_CM_MRA_REP_SENT:
                rej_msg->local_comm_id = cm_id_priv->id.local_id;
                cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REP);
                break;
        default:
                rej_msg->local_comm_id = cm_id_priv->id.local_id;
                cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_OTHER);
                break;
        }

        rej_msg->reason = cpu_to_be16(reason);
        if (ari && ari_length) {
                cm_rej_set_reject_info_len(rej_msg, ari_length);
                memcpy(rej_msg->ari, ari, ari_length);
        }

        if (private_data && private_data_len)
                memcpy(rej_msg->private_data, private_data, private_data_len);
}
static void cm_dup_req_handler(struct cm_work *work,
                               struct cm_id_private *cm_id_priv)
{
        struct ib_mad_send_buf *msg = NULL;
        int ret;

        atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
                        counter[CM_REQ_COUNTER]);

        /* Quick state check to discard duplicate REQs. */
        if (cm_id_priv->id.state == IB_CM_REQ_RCVD)
                return;

        ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
        if (ret)
                return;

        spin_lock_irq(&cm_id_priv->lock);
        switch (cm_id_priv->id.state) {
        case IB_CM_MRA_REQ_SENT:
                cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
                              CM_MSG_RESPONSE_REQ, cm_id_priv->service_timeout,
                              cm_id_priv->private_data,
                              cm_id_priv->private_data_len);
                break;
        case IB_CM_TIMEWAIT:
                cm_format_rej((struct cm_rej_msg *) msg->mad, cm_id_priv,
                              IB_CM_REJ_STALE_CONN, NULL, 0, NULL, 0);
                break;
        default:
                goto unlock;
        }
        spin_unlock_irq(&cm_id_priv->lock);

        ret = ib_post_send_mad(msg, NULL);
        if (ret)
                goto free;
        return;

unlock: spin_unlock_irq(&cm_id_priv->lock);
free:   cm_free_msg(msg);
}
static struct cm_id_private * cm_match_req(struct cm_work *work,
                                           struct cm_id_private *cm_id_priv)
{
        struct cm_id_private *listen_cm_id_priv, *cur_cm_id_priv;
        struct cm_timewait_info *timewait_info;
        struct cm_req_msg *req_msg;

        req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;

        /* Check for possible duplicate REQ. */
        spin_lock_irq(&cm.lock);
        timewait_info = cm_insert_remote_id(cm_id_priv->timewait_info);
        if (timewait_info) {
                cur_cm_id_priv = cm_get_id(timewait_info->work.local_id,
                                           timewait_info->work.remote_id);
                spin_unlock_irq(&cm.lock);
                if (cur_cm_id_priv) {
                        cm_dup_req_handler(work, cur_cm_id_priv);
                        cm_deref_id(cur_cm_id_priv);
                }
                return NULL;
        }

        /* Check for stale connections. */
        timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info);
        if (timewait_info) {
                cm_cleanup_timewait(cm_id_priv->timewait_info);
                spin_unlock_irq(&cm.lock);
                cm_issue_rej(work->port, work->mad_recv_wc,
                             IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REQ,
                             NULL, 0);
                return NULL;
        }

        /* Find matching listen request. */
        listen_cm_id_priv = cm_find_listen(cm_id_priv->id.device,
                                           req_msg->service_id);
        if (!listen_cm_id_priv) {
                cm_cleanup_timewait(cm_id_priv->timewait_info);
                spin_unlock_irq(&cm.lock);
                cm_issue_rej(work->port, work->mad_recv_wc,
                             IB_CM_REJ_INVALID_SERVICE_ID, CM_MSG_RESPONSE_REQ,
                             NULL, 0);
                goto out;
        }
        atomic_inc(&listen_cm_id_priv->refcount);
        atomic_inc(&cm_id_priv->refcount);
        cm_id_priv->id.state = IB_CM_REQ_RCVD;
        atomic_inc(&cm_id_priv->work_count);
        spin_unlock_irq(&cm.lock);
out:
        return listen_cm_id_priv;
}
/*
 * Work-around for inter-subnet connections.  If the LIDs are permissive,
 * we need to override the LID/SL data in the REQ with the LID information
 * in the work completion.
 */
static void cm_process_routed_req(struct cm_req_msg *req_msg, struct ib_wc *wc)
{
        if (!cm_req_get_primary_subnet_local(req_msg)) {
                if (req_msg->primary_local_lid == IB_LID_PERMISSIVE) {
                        req_msg->primary_local_lid = cpu_to_be16(wc->slid);
                        cm_req_set_primary_sl(req_msg, wc->sl);
                }

                if (req_msg->primary_remote_lid == IB_LID_PERMISSIVE)
                        req_msg->primary_remote_lid = cpu_to_be16(wc->dlid_path_bits);
        }

        if (!cm_req_get_alt_subnet_local(req_msg)) {
                if (req_msg->alt_local_lid == IB_LID_PERMISSIVE) {
                        req_msg->alt_local_lid = cpu_to_be16(wc->slid);
                        cm_req_set_alt_sl(req_msg, wc->sl);
                }

                if (req_msg->alt_remote_lid == IB_LID_PERMISSIVE)
                        req_msg->alt_remote_lid = cpu_to_be16(wc->dlid_path_bits);
        }
}
static int cm_req_handler(struct cm_work *work)
{
        struct ib_cm_id *cm_id;
        struct cm_id_private *cm_id_priv, *listen_cm_id_priv;
        struct cm_req_msg *req_msg;
        int ret;

        req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;

        cm_id = ib_create_cm_id(work->port->cm_dev->ib_device, NULL, NULL);
        if (IS_ERR(cm_id))
                return PTR_ERR(cm_id);

        cm_id_priv = container_of(cm_id, struct cm_id_private, id);
        cm_id_priv->id.remote_id = req_msg->local_comm_id;
        cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
                                work->mad_recv_wc->recv_buf.grh,
                                &cm_id_priv->av);
        cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
                                                            id.local_id);
        if (IS_ERR(cm_id_priv->timewait_info)) {
                ret = PTR_ERR(cm_id_priv->timewait_info);
                goto destroy;
        }
        cm_id_priv->timewait_info->work.remote_id = req_msg->local_comm_id;
        cm_id_priv->timewait_info->remote_ca_guid = req_msg->local_ca_guid;
        cm_id_priv->timewait_info->remote_qpn = cm_req_get_local_qpn(req_msg);

        listen_cm_id_priv = cm_match_req(work, cm_id_priv);
        if (!listen_cm_id_priv) {
                ret = -EINVAL;
                kfree(cm_id_priv->timewait_info);
                goto destroy;
        }

        cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
        cm_id_priv->id.context = listen_cm_id_priv->id.context;
        cm_id_priv->id.service_id = req_msg->service_id;
        cm_id_priv->id.service_mask = ~cpu_to_be64(0);

        cm_process_routed_req(req_msg, work->mad_recv_wc->wc);
        cm_format_paths_from_req(req_msg, &work->path[0], &work->path[1]);

        memcpy(work->path[0].dmac, cm_id_priv->av.ah_attr.dmac, ETH_ALEN);
        work->path[0].vlan_id = cm_id_priv->av.ah_attr.vlan_id;
        ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av);
        if (ret) {
                ib_get_cached_gid(work->port->cm_dev->ib_device,
                                  work->port->port_num, 0, &work->path[0].sgid,
                                  NULL);
                ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_GID,
                               &work->path[0].sgid, sizeof work->path[0].sgid,
                               NULL, 0);
                goto rejected;
        }
        if (req_msg->alt_local_lid) {
                ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av);
                if (ret) {
                        ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_ALT_GID,
                                       &work->path[0].sgid,
                                       sizeof work->path[0].sgid, NULL, 0);
                        goto rejected;
                }
        }
        cm_id_priv->tid = req_msg->hdr.tid;
        cm_id_priv->timeout_ms = cm_convert_to_ms(
                                        cm_req_get_local_resp_timeout(req_msg));
        cm_id_priv->max_cm_retries = cm_req_get_max_cm_retries(req_msg);
        cm_id_priv->remote_qpn = cm_req_get_local_qpn(req_msg);
        cm_id_priv->initiator_depth = cm_req_get_resp_res(req_msg);
        cm_id_priv->responder_resources = cm_req_get_init_depth(req_msg);
        cm_id_priv->path_mtu = cm_req_get_path_mtu(req_msg);
        cm_id_priv->pkey = req_msg->pkey;
        cm_id_priv->sq_psn = cm_req_get_starting_psn(req_msg);
        cm_id_priv->retry_count = cm_req_get_retry_count(req_msg);
        cm_id_priv->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
        cm_id_priv->qp_type = cm_req_get_qp_type(req_msg);

        cm_format_req_event(work, cm_id_priv, &listen_cm_id_priv->id);
        cm_process_work(cm_id_priv, work);
        cm_deref_id(listen_cm_id_priv);
        return 0;

rejected:
        atomic_dec(&cm_id_priv->refcount);
        cm_deref_id(listen_cm_id_priv);
destroy:
        ib_destroy_cm_id(cm_id);
        return ret;
}
static void cm_format_rep(struct cm_rep_msg *rep_msg,
                          struct cm_id_private *cm_id_priv,
                          struct ib_cm_rep_param *param)
{
        cm_format_mad_hdr(&rep_msg->hdr, CM_REP_ATTR_ID, cm_id_priv->tid);
        rep_msg->local_comm_id = cm_id_priv->id.local_id;
        rep_msg->remote_comm_id = cm_id_priv->id.remote_id;
        cm_rep_set_starting_psn(rep_msg, cpu_to_be32(param->starting_psn));
        rep_msg->resp_resources = param->responder_resources;
        cm_rep_set_target_ack_delay(rep_msg,
                                    cm_id_priv->av.port->cm_dev->ack_delay);
        cm_rep_set_failover(rep_msg, param->failover_accepted);
        cm_rep_set_rnr_retry_count(rep_msg, param->rnr_retry_count);
        rep_msg->local_ca_guid = cm_id_priv->id.device->node_guid;

        if (cm_id_priv->qp_type != IB_QPT_XRC_TGT) {
                rep_msg->initiator_depth = param->initiator_depth;
                cm_rep_set_flow_ctrl(rep_msg, param->flow_control);
                cm_rep_set_srq(rep_msg, param->srq);
                cm_rep_set_local_qpn(rep_msg, cpu_to_be32(param->qp_num));
        } else {
                cm_rep_set_srq(rep_msg, 1);
                cm_rep_set_local_eecn(rep_msg, cpu_to_be32(param->qp_num));
        }

        if (param->private_data && param->private_data_len)
                memcpy(rep_msg->private_data, param->private_data,
                       param->private_data_len);
}
int ib_send_cm_rep(struct ib_cm_id *cm_id,
                   struct ib_cm_rep_param *param)
{
        struct cm_id_private *cm_id_priv;
        struct ib_mad_send_buf *msg;
        struct cm_rep_msg *rep_msg;
        unsigned long flags;
        int ret;

        if (param->private_data &&
            param->private_data_len > IB_CM_REP_PRIVATE_DATA_SIZE)
                return -EINVAL;

        cm_id_priv = container_of(cm_id, struct cm_id_private, id);
        spin_lock_irqsave(&cm_id_priv->lock, flags);
        if (cm_id->state != IB_CM_REQ_RCVD &&
            cm_id->state != IB_CM_MRA_REQ_SENT) {
                ret = -EINVAL;
                goto out;
        }

        ret = cm_alloc_msg(cm_id_priv, &msg);
        if (ret)
                goto out;

        rep_msg = (struct cm_rep_msg *) msg->mad;
        cm_format_rep(rep_msg, cm_id_priv, param);
        msg->timeout_ms = cm_id_priv->timeout_ms;
        msg->context[1] = (void *) (unsigned long) IB_CM_REP_SENT;

        ret = ib_post_send_mad(msg, NULL);
        if (ret) {
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                cm_free_msg(msg);
                return ret;
        }

        cm_id->state = IB_CM_REP_SENT;
        cm_id_priv->msg = msg;
        cm_id_priv->initiator_depth = param->initiator_depth;
        cm_id_priv->responder_resources = param->responder_resources;
        cm_id_priv->rq_psn = cm_rep_get_starting_psn(rep_msg);
        cm_id_priv->local_qpn = cpu_to_be32(param->qp_num & 0xFFFFFF);

out:    spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        return ret;
}
EXPORT_SYMBOL(ib_send_cm_rep);
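
/*
 * Example flow on the passive side (a sketch; my_* names are
 * hypothetical caller state): from the cm_handler, on
 * IB_CM_REQ_RECEIVED, accept with
 *
 *      struct ib_cm_rep_param rep = {
 *              .qp_num                 = my_qp->qp_num,
 *              .starting_psn           = my_psn,
 *              .responder_resources    = req->responder_resources,
 *              .initiator_depth        = req->initiator_depth,
 *              .rnr_retry_count        = 7,
 *      };
 *      ret = ib_send_cm_rep(cm_id, &rep);
 *
 * where req is the ib_cm_req_event_param from the event; the connection
 * becomes established once the RTU arrives (cm_rtu_handler below).
 */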
static void cm_format_rtu(struct cm_rtu_msg *rtu_msg,
                          struct cm_id_private *cm_id_priv,
                          const void *private_data,
                          u8 private_data_len)
{
        cm_format_mad_hdr(&rtu_msg->hdr, CM_RTU_ATTR_ID, cm_id_priv->tid);
        rtu_msg->local_comm_id = cm_id_priv->id.local_id;
        rtu_msg->remote_comm_id = cm_id_priv->id.remote_id;

        if (private_data && private_data_len)
                memcpy(rtu_msg->private_data, private_data, private_data_len);
}
int ib_send_cm_rtu(struct ib_cm_id *cm_id,
                   const void *private_data,
                   u8 private_data_len)
{
        struct cm_id_private *cm_id_priv;
        struct ib_mad_send_buf *msg;
        unsigned long flags;
        void *data;
        int ret;

        if (private_data && private_data_len > IB_CM_RTU_PRIVATE_DATA_SIZE)
                return -EINVAL;

        data = cm_copy_private_data(private_data, private_data_len);
        if (IS_ERR(data))
                return PTR_ERR(data);

        cm_id_priv = container_of(cm_id, struct cm_id_private, id);
        spin_lock_irqsave(&cm_id_priv->lock, flags);
        if (cm_id->state != IB_CM_REP_RCVD &&
            cm_id->state != IB_CM_MRA_REP_SENT) {
                ret = -EINVAL;
                goto error;
        }

        ret = cm_alloc_msg(cm_id_priv, &msg);
        if (ret)
                goto error;

        cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
                      private_data, private_data_len);

        ret = ib_post_send_mad(msg, NULL);
        if (ret) {
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                cm_free_msg(msg);
                kfree(data);
                return ret;
        }

        cm_id->state = IB_CM_ESTABLISHED;
        cm_set_private_data(cm_id_priv, data, private_data_len);
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        return 0;

error:  spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        kfree(data);
        return ret;
}
EXPORT_SYMBOL(ib_send_cm_rtu);
static void cm_format_rep_event(struct cm_work *work, enum ib_qp_type qp_type)
{
        struct cm_rep_msg *rep_msg;
        struct ib_cm_rep_event_param *param;

        rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
        param = &work->cm_event.param.rep_rcvd;
        param->remote_ca_guid = rep_msg->local_ca_guid;
        param->remote_qkey = be32_to_cpu(rep_msg->local_qkey);
        param->remote_qpn = be32_to_cpu(cm_rep_get_qpn(rep_msg, qp_type));
        param->starting_psn = be32_to_cpu(cm_rep_get_starting_psn(rep_msg));
        param->responder_resources = rep_msg->initiator_depth;
        param->initiator_depth = rep_msg->resp_resources;
        param->target_ack_delay = cm_rep_get_target_ack_delay(rep_msg);
        param->failover_accepted = cm_rep_get_failover(rep_msg);
        param->flow_control = cm_rep_get_flow_ctrl(rep_msg);
        param->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);
        param->srq = cm_rep_get_srq(rep_msg);
        work->cm_event.private_data = &rep_msg->private_data;
}
static void cm_dup_rep_handler(struct cm_work *work)
{
        struct cm_id_private *cm_id_priv;
        struct cm_rep_msg *rep_msg;
        struct ib_mad_send_buf *msg = NULL;
        int ret;

        rep_msg = (struct cm_rep_msg *) work->mad_recv_wc->recv_buf.mad;
        cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id,
                                   rep_msg->local_comm_id);
        if (!cm_id_priv)
                return;

        atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
                        counter[CM_REP_COUNTER]);
        ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
        if (ret)
                goto deref;

        spin_lock_irq(&cm_id_priv->lock);
        if (cm_id_priv->id.state == IB_CM_ESTABLISHED)
                cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
                              cm_id_priv->private_data,
                              cm_id_priv->private_data_len);
        else if (cm_id_priv->id.state == IB_CM_MRA_REP_SENT)
                cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
                              CM_MSG_RESPONSE_REP, cm_id_priv->service_timeout,
                              cm_id_priv->private_data,
                              cm_id_priv->private_data_len);
        else
                goto unlock;
        spin_unlock_irq(&cm_id_priv->lock);

        ret = ib_post_send_mad(msg, NULL);
        if (ret)
                goto free;
        goto deref;

unlock: spin_unlock_irq(&cm_id_priv->lock);
free:   cm_free_msg(msg);
deref:  cm_deref_id(cm_id_priv);
}
static int cm_rep_handler(struct cm_work *work)
{
        struct cm_id_private *cm_id_priv;
        struct cm_rep_msg *rep_msg;
        int ret;

        rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
        cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id, 0);
        if (!cm_id_priv) {
                cm_dup_rep_handler(work);
                return -EINVAL;
        }

        cm_format_rep_event(work, cm_id_priv->qp_type);

        spin_lock_irq(&cm_id_priv->lock);
        switch (cm_id_priv->id.state) {
        case IB_CM_REQ_SENT:
        case IB_CM_MRA_REQ_RCVD:
                break;
        default:
                spin_unlock_irq(&cm_id_priv->lock);
                ret = -EINVAL;
                goto error;
        }

        cm_id_priv->timewait_info->work.remote_id = rep_msg->local_comm_id;
        cm_id_priv->timewait_info->remote_ca_guid = rep_msg->local_ca_guid;
        cm_id_priv->timewait_info->remote_qpn = cm_rep_get_qpn(rep_msg, cm_id_priv->qp_type);

        spin_lock(&cm.lock);
        /* Check for duplicate REP. */
        if (cm_insert_remote_id(cm_id_priv->timewait_info)) {
                spin_unlock(&cm.lock);
                spin_unlock_irq(&cm_id_priv->lock);
                ret = -EINVAL;
                goto error;
        }
        /* Check for a stale connection. */
        if (cm_insert_remote_qpn(cm_id_priv->timewait_info)) {
                rb_erase(&cm_id_priv->timewait_info->remote_id_node,
                         &cm.remote_id_table);
                cm_id_priv->timewait_info->inserted_remote_id = 0;
                spin_unlock(&cm.lock);
                spin_unlock_irq(&cm_id_priv->lock);
                cm_issue_rej(work->port, work->mad_recv_wc,
                             IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REP,
                             NULL, 0);
                ret = -EINVAL;
                goto error;
        }
        spin_unlock(&cm.lock);

        cm_id_priv->id.state = IB_CM_REP_RCVD;
        cm_id_priv->id.remote_id = rep_msg->local_comm_id;
        cm_id_priv->remote_qpn = cm_rep_get_qpn(rep_msg, cm_id_priv->qp_type);
        cm_id_priv->initiator_depth = rep_msg->resp_resources;
        cm_id_priv->responder_resources = rep_msg->initiator_depth;
        cm_id_priv->sq_psn = cm_rep_get_starting_psn(rep_msg);
        cm_id_priv->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);
        cm_id_priv->target_ack_delay = cm_rep_get_target_ack_delay(rep_msg);
        cm_id_priv->av.timeout =
                        cm_ack_timeout(cm_id_priv->target_ack_delay,
                                       cm_id_priv->av.timeout - 1);
        cm_id_priv->alt_av.timeout =
                        cm_ack_timeout(cm_id_priv->target_ack_delay,
                                       cm_id_priv->alt_av.timeout - 1);

        /* todo: handle peer_to_peer */

        ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
        ret = atomic_inc_and_test(&cm_id_priv->work_count);
        if (!ret)
                list_add_tail(&work->list, &cm_id_priv->work_list);
        spin_unlock_irq(&cm_id_priv->lock);

        if (ret)
                cm_process_work(cm_id_priv, work);
        else
                cm_deref_id(cm_id_priv);
        return 0;

error:
        cm_deref_id(cm_id_priv);
        return ret;
}
static int cm_establish_handler(struct cm_work *work)
{
        struct cm_id_private *cm_id_priv;
        int ret;

        /* See comment in cm_establish about lookup. */
        cm_id_priv = cm_acquire_id(work->local_id, work->remote_id);
        if (!cm_id_priv)
                return -EINVAL;

        spin_lock_irq(&cm_id_priv->lock);
        if (cm_id_priv->id.state != IB_CM_ESTABLISHED) {
                spin_unlock_irq(&cm_id_priv->lock);
                goto out;
        }

        ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
        ret = atomic_inc_and_test(&cm_id_priv->work_count);
        if (!ret)
                list_add_tail(&work->list, &cm_id_priv->work_list);
        spin_unlock_irq(&cm_id_priv->lock);

        if (ret)
                cm_process_work(cm_id_priv, work);
        else
                cm_deref_id(cm_id_priv);
        return 0;
out:
        cm_deref_id(cm_id_priv);
        return -EINVAL;
}
static int cm_rtu_handler(struct cm_work *work)
{
        struct cm_id_private *cm_id_priv;
        struct cm_rtu_msg *rtu_msg;
        int ret;

        rtu_msg = (struct cm_rtu_msg *)work->mad_recv_wc->recv_buf.mad;
        cm_id_priv = cm_acquire_id(rtu_msg->remote_comm_id,
                                   rtu_msg->local_comm_id);
        if (!cm_id_priv)
                return -EINVAL;

        work->cm_event.private_data = &rtu_msg->private_data;

        spin_lock_irq(&cm_id_priv->lock);
        if (cm_id_priv->id.state != IB_CM_REP_SENT &&
            cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
                spin_unlock_irq(&cm_id_priv->lock);
                atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
                                counter[CM_RTU_COUNTER]);
                goto out;
        }
        cm_id_priv->id.state = IB_CM_ESTABLISHED;

        ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
        ret = atomic_inc_and_test(&cm_id_priv->work_count);
        if (!ret)
                list_add_tail(&work->list, &cm_id_priv->work_list);
        spin_unlock_irq(&cm_id_priv->lock);

        if (ret)
                cm_process_work(cm_id_priv, work);
        else
                cm_deref_id(cm_id_priv);
        return 0;
out:
        cm_deref_id(cm_id_priv);
        return -EINVAL;
}
2058 static void cm_format_dreq(struct cm_dreq_msg *dreq_msg,
2059 struct cm_id_private *cm_id_priv,
2060 const void *private_data,
2061 u8 private_data_len)
2063 cm_format_mad_hdr(&dreq_msg->hdr, CM_DREQ_ATTR_ID,
2064 cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_DREQ));
2065 dreq_msg->local_comm_id = cm_id_priv->id.local_id;
2066 dreq_msg->remote_comm_id = cm_id_priv->id.remote_id;
2067 cm_dreq_set_remote_qpn(dreq_msg, cm_id_priv->remote_qpn);
2069 if (private_data && private_data_len)
2070 memcpy(dreq_msg->private_data, private_data, private_data_len);
2073 int ib_send_cm_dreq(struct ib_cm_id *cm_id,
2074 const void *private_data,
2075 u8 private_data_len)
2077 struct cm_id_private *cm_id_priv;
2078 struct ib_mad_send_buf *msg;
2079 unsigned long flags;
2082 if (private_data && private_data_len > IB_CM_DREQ_PRIVATE_DATA_SIZE)
2085 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2086 spin_lock_irqsave(&cm_id_priv->lock, flags);
2087 if (cm_id->state != IB_CM_ESTABLISHED) {
2092 if (cm_id->lap_state == IB_CM_LAP_SENT ||
2093 cm_id->lap_state == IB_CM_MRA_LAP_RCVD)
2094 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2096 ret = cm_alloc_msg(cm_id_priv, &msg);
2098 cm_enter_timewait(cm_id_priv);
2102 cm_format_dreq((struct cm_dreq_msg *) msg->mad, cm_id_priv,
2103 private_data, private_data_len);
2104 msg->timeout_ms = cm_id_priv->timeout_ms;
2105 msg->context[1] = (void *) (unsigned long) IB_CM_DREQ_SENT;
2107 ret = ib_post_send_mad(msg, NULL);
2109 cm_enter_timewait(cm_id_priv);
2110 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2115 cm_id->state = IB_CM_DREQ_SENT;
2116 cm_id_priv->msg = msg;
2117 out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2120 EXPORT_SYMBOL(ib_send_cm_dreq);
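/*
 * Example (illustrative sketch, not part of this file): a consumer
 * holding an established cm_id initiates disconnection as below.  The
 * function name is an assumption; the result of the exchange arrives
 * later as an IB_CM_DREP_RECEIVED or IB_CM_DREQ_ERROR event.
 *
 *	static void example_disconnect(struct ib_cm_id *cm_id)
 *	{
 *		int ret;
 *
 *		ret = ib_send_cm_dreq(cm_id, NULL, 0);
 *		if (ret)
 *			pr_debug("failed to send DREQ: %d\n", ret);
 *	}
 */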
2122 static void cm_format_drep(struct cm_drep_msg *drep_msg,
2123 struct cm_id_private *cm_id_priv,
2124 const void *private_data,
2125 u8 private_data_len)
2127 cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, cm_id_priv->tid);
2128 drep_msg->local_comm_id = cm_id_priv->id.local_id;
2129 drep_msg->remote_comm_id = cm_id_priv->id.remote_id;
2131 if (private_data && private_data_len)
2132 memcpy(drep_msg->private_data, private_data, private_data_len);
2135 int ib_send_cm_drep(struct ib_cm_id *cm_id,
2136 const void *private_data,
2137 u8 private_data_len)
2139 struct cm_id_private *cm_id_priv;
2140 struct ib_mad_send_buf *msg;
2141 unsigned long flags;
2145 if (private_data && private_data_len > IB_CM_DREP_PRIVATE_DATA_SIZE)
2148 data = cm_copy_private_data(private_data, private_data_len);
2150 return PTR_ERR(data);
2152 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2153 spin_lock_irqsave(&cm_id_priv->lock, flags);
2154 if (cm_id->state != IB_CM_DREQ_RCVD) {
2155 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2160 cm_set_private_data(cm_id_priv, data, private_data_len);
2161 cm_enter_timewait(cm_id_priv);
2163 ret = cm_alloc_msg(cm_id_priv, &msg);
2167 cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
2168 private_data, private_data_len);
2170 ret = ib_post_send_mad(msg, NULL);
2172 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2177 out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2180 EXPORT_SYMBOL(ib_send_cm_drep);
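/*
 * Example (illustrative sketch): the passive side of a disconnect
 * normally replies from its CM event handler once IB_CM_DREQ_RECEIVED
 * is delivered.  The handler name is an assumption; note that a
 * non-zero return from a cm_handler asks the CM to destroy the id.
 *
 *	static int example_cm_handler(struct ib_cm_id *cm_id,
 *				      struct ib_cm_event *event)
 *	{
 *		if (event->event == IB_CM_DREQ_RECEIVED)
 *			ib_send_cm_drep(cm_id, NULL, 0);
 *		return 0;
 *	}
 */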
2182 static int cm_issue_drep(struct cm_port *port,
2183 struct ib_mad_recv_wc *mad_recv_wc)
2185 struct ib_mad_send_buf *msg = NULL;
2186 struct cm_dreq_msg *dreq_msg;
2187 struct cm_drep_msg *drep_msg;
2190 ret = cm_alloc_response_msg(port, mad_recv_wc, &msg);
2194 dreq_msg = (struct cm_dreq_msg *) mad_recv_wc->recv_buf.mad;
2195 drep_msg = (struct cm_drep_msg *) msg->mad;
2197 cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, dreq_msg->hdr.tid);
2198 drep_msg->remote_comm_id = dreq_msg->local_comm_id;
2199 drep_msg->local_comm_id = dreq_msg->remote_comm_id;
2201 ret = ib_post_send_mad(msg, NULL);
2208 static int cm_dreq_handler(struct cm_work *work)
2210 struct cm_id_private *cm_id_priv;
2211 struct cm_dreq_msg *dreq_msg;
2212 struct ib_mad_send_buf *msg = NULL;
2215 dreq_msg = (struct cm_dreq_msg *)work->mad_recv_wc->recv_buf.mad;
2216 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
2217 dreq_msg->local_comm_id);
2219 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
2220 counter[CM_DREQ_COUNTER]);
2221 cm_issue_drep(work->port, work->mad_recv_wc);
2225 work->cm_event.private_data = &dreq_msg->private_data;
2227 spin_lock_irq(&cm_id_priv->lock);
2228 if (cm_id_priv->local_qpn != cm_dreq_get_remote_qpn(dreq_msg))
2231 switch (cm_id_priv->id.state) {
2232 case IB_CM_REP_SENT:
2233 case IB_CM_DREQ_SENT:
2234 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2236 case IB_CM_ESTABLISHED:
2237 if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT ||
2238 cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
2239 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2241 case IB_CM_MRA_REP_RCVD:
2243 case IB_CM_TIMEWAIT:
2244 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
2245 counter[CM_DREQ_COUNTER]);
2246 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
2249 cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
2250 cm_id_priv->private_data,
2251 cm_id_priv->private_data_len);
2252 spin_unlock_irq(&cm_id_priv->lock);
2254 if (ib_post_send_mad(msg, NULL))
2257 case IB_CM_DREQ_RCVD:
2258 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
2259 counter[CM_DREQ_COUNTER]);
2264 cm_id_priv->id.state = IB_CM_DREQ_RCVD;
2265 cm_id_priv->tid = dreq_msg->hdr.tid;
2266 ret = atomic_inc_and_test(&cm_id_priv->work_count);
2268 list_add_tail(&work->list, &cm_id_priv->work_list);
2269 spin_unlock_irq(&cm_id_priv->lock);
2272 cm_process_work(cm_id_priv, work);
2274 cm_deref_id(cm_id_priv);
2277 unlock: spin_unlock_irq(&cm_id_priv->lock);
2278 deref: cm_deref_id(cm_id_priv);
2282 static int cm_drep_handler(struct cm_work *work)
2284 struct cm_id_private *cm_id_priv;
2285 struct cm_drep_msg *drep_msg;
2288 drep_msg = (struct cm_drep_msg *)work->mad_recv_wc->recv_buf.mad;
2289 cm_id_priv = cm_acquire_id(drep_msg->remote_comm_id,
2290 drep_msg->local_comm_id);
2294 work->cm_event.private_data = &drep_msg->private_data;
2296 spin_lock_irq(&cm_id_priv->lock);
2297 if (cm_id_priv->id.state != IB_CM_DREQ_SENT &&
2298 cm_id_priv->id.state != IB_CM_DREQ_RCVD) {
2299 spin_unlock_irq(&cm_id_priv->lock);
2302 cm_enter_timewait(cm_id_priv);
2304 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2305 ret = atomic_inc_and_test(&cm_id_priv->work_count);
2307 list_add_tail(&work->list, &cm_id_priv->work_list);
2308 spin_unlock_irq(&cm_id_priv->lock);
2311 cm_process_work(cm_id_priv, work);
2313 cm_deref_id(cm_id_priv);
2316 cm_deref_id(cm_id_priv);
2320 int ib_send_cm_rej(struct ib_cm_id *cm_id,
2321 enum ib_cm_rej_reason reason,
2324 const void *private_data,
2325 u8 private_data_len)
2327 struct cm_id_private *cm_id_priv;
2328 struct ib_mad_send_buf *msg;
2329 unsigned long flags;
2332 if ((private_data && private_data_len > IB_CM_REJ_PRIVATE_DATA_SIZE) ||
2333 (ari && ari_length > IB_CM_REJ_ARI_LENGTH))
2336 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2338 spin_lock_irqsave(&cm_id_priv->lock, flags);
2339 switch (cm_id->state) {
2340 case IB_CM_REQ_SENT:
2341 case IB_CM_MRA_REQ_RCVD:
2342 case IB_CM_REQ_RCVD:
2343 case IB_CM_MRA_REQ_SENT:
2344 case IB_CM_REP_RCVD:
2345 case IB_CM_MRA_REP_SENT:
2346 ret = cm_alloc_msg(cm_id_priv, &msg);
2348 cm_format_rej((struct cm_rej_msg *) msg->mad,
2349 cm_id_priv, reason, ari, ari_length,
2350 private_data, private_data_len);
2352 cm_reset_to_idle(cm_id_priv);
2354 case IB_CM_REP_SENT:
2355 case IB_CM_MRA_REP_RCVD:
2356 ret = cm_alloc_msg(cm_id_priv, &msg);
2358 cm_format_rej((struct cm_rej_msg *) msg->mad,
2359 cm_id_priv, reason, ari, ari_length,
2360 private_data, private_data_len);
2362 cm_enter_timewait(cm_id_priv);
2372 ret = ib_post_send_mad(msg, NULL);
2376 out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2379 EXPORT_SYMBOL(ib_send_cm_rej);
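/*
 * Example (illustrative sketch): rejecting a connection from a REQ or
 * REP handler.  The two middle arguments carry optional additional
 * rejection info (ARI); passing NULL/0 omits it.  The reason code used
 * here is one arbitrary choice from enum ib_cm_rej_reason.
 *
 *	ret = ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
 *			     NULL, 0, NULL, 0);
 */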
2381 static void cm_format_rej_event(struct cm_work *work)
2383 struct cm_rej_msg *rej_msg;
2384 struct ib_cm_rej_event_param *param;
2386 rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
2387 param = &work->cm_event.param.rej_rcvd;
2388 param->ari = rej_msg->ari;
2389 param->ari_length = cm_rej_get_reject_info_len(rej_msg);
2390 param->reason = __be16_to_cpu(rej_msg->reason);
2391 work->cm_event.private_data = &rej_msg->private_data;
2394 static struct cm_id_private * cm_acquire_rejected_id(struct cm_rej_msg *rej_msg)
2396 struct cm_timewait_info *timewait_info;
2397 struct cm_id_private *cm_id_priv;
2400 remote_id = rej_msg->local_comm_id;
2402 if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_TIMEOUT) {
2403 spin_lock_irq(&cm.lock);
2404 timewait_info = cm_find_remote_id( *((__be64 *) rej_msg->ari),
2406 if (!timewait_info) {
2407 spin_unlock_irq(&cm.lock);
2410 cm_id_priv = idr_find(&cm.local_id_table, (__force int)
2411 (timewait_info->work.local_id ^
2412 cm.random_id_operand));
2414 if (cm_id_priv->id.remote_id == remote_id)
2415 atomic_inc(&cm_id_priv->refcount);
2419 spin_unlock_irq(&cm.lock);
2420 } else if (cm_rej_get_msg_rejected(rej_msg) == CM_MSG_RESPONSE_REQ)
2421 cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, 0);
2423 cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, remote_id);
2428 static int cm_rej_handler(struct cm_work *work)
2430 struct cm_id_private *cm_id_priv;
2431 struct cm_rej_msg *rej_msg;
2434 rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
2435 cm_id_priv = cm_acquire_rejected_id(rej_msg);
2439 cm_format_rej_event(work);
2441 spin_lock_irq(&cm_id_priv->lock);
2442 switch (cm_id_priv->id.state) {
2443 case IB_CM_REQ_SENT:
2444 case IB_CM_MRA_REQ_RCVD:
2445 case IB_CM_REP_SENT:
2446 case IB_CM_MRA_REP_RCVD:
2447 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2449 case IB_CM_REQ_RCVD:
2450 case IB_CM_MRA_REQ_SENT:
2451 if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_STALE_CONN)
2452 cm_enter_timewait(cm_id_priv);
2454 cm_reset_to_idle(cm_id_priv);
2456 case IB_CM_DREQ_SENT:
2457 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2459 case IB_CM_REP_RCVD:
2460 case IB_CM_MRA_REP_SENT:
2461 cm_enter_timewait(cm_id_priv);
2463 case IB_CM_ESTABLISHED:
2464 if (cm_id_priv->id.lap_state == IB_CM_LAP_UNINIT ||
2465 cm_id_priv->id.lap_state == IB_CM_LAP_SENT) {
2466 if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT)
2467 ib_cancel_mad(cm_id_priv->av.port->mad_agent,
2469 cm_enter_timewait(cm_id_priv);
2474 spin_unlock_irq(&cm_id_priv->lock);
2479 ret = atomic_inc_and_test(&cm_id_priv->work_count);
2481 list_add_tail(&work->list, &cm_id_priv->work_list);
2482 spin_unlock_irq(&cm_id_priv->lock);
2485 cm_process_work(cm_id_priv, work);
2487 cm_deref_id(cm_id_priv);
2490 cm_deref_id(cm_id_priv);
2494 int ib_send_cm_mra(struct ib_cm_id *cm_id,
2496 const void *private_data,
2497 u8 private_data_len)
2499 struct cm_id_private *cm_id_priv;
2500 struct ib_mad_send_buf *msg;
2501 enum ib_cm_state cm_state;
2502 enum ib_cm_lap_state lap_state;
2503 enum cm_msg_response msg_response;
2505 unsigned long flags;
2508 if (private_data && private_data_len > IB_CM_MRA_PRIVATE_DATA_SIZE)
2511 data = cm_copy_private_data(private_data, private_data_len);
2513 return PTR_ERR(data);
2515 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2517 spin_lock_irqsave(&cm_id_priv->lock, flags);
2518 switch(cm_id_priv->id.state) {
2519 case IB_CM_REQ_RCVD:
2520 cm_state = IB_CM_MRA_REQ_SENT;
2521 lap_state = cm_id->lap_state;
2522 msg_response = CM_MSG_RESPONSE_REQ;
2524 case IB_CM_REP_RCVD:
2525 cm_state = IB_CM_MRA_REP_SENT;
2526 lap_state = cm_id->lap_state;
2527 msg_response = CM_MSG_RESPONSE_REP;
2529 case IB_CM_ESTABLISHED:
2530 if (cm_id->lap_state == IB_CM_LAP_RCVD) {
2531 cm_state = cm_id->state;
2532 lap_state = IB_CM_MRA_LAP_SENT;
2533 msg_response = CM_MSG_RESPONSE_OTHER;
2541 if (!(service_timeout & IB_CM_MRA_FLAG_DELAY)) {
2542 ret = cm_alloc_msg(cm_id_priv, &msg);
2546 cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
2547 msg_response, service_timeout,
2548 private_data, private_data_len);
2549 ret = ib_post_send_mad(msg, NULL);
2554 cm_id->state = cm_state;
2555 cm_id->lap_state = lap_state;
2556 cm_id_priv->service_timeout = service_timeout;
2557 cm_set_private_data(cm_id_priv, data, private_data_len);
2558 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2561 error1: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2565 error2: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2570 EXPORT_SYMBOL(ib_send_cm_mra);
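/*
 * Example (illustrative sketch): asking the peer to extend its timeout
 * while a REQ or REP is processed.  service_timeout is a 5-bit encoded
 * timeout value; as the check above shows, OR-ing in
 * IB_CM_MRA_FLAG_DELAY records the timeout without transmitting an MRA
 * unless a duplicate message later arrives.
 *
 *	ret = ib_send_cm_mra(cm_id, service_timeout, NULL, 0);
 */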
2572 static struct cm_id_private * cm_acquire_mraed_id(struct cm_mra_msg *mra_msg)
2574 switch (cm_mra_get_msg_mraed(mra_msg)) {
2575 case CM_MSG_RESPONSE_REQ:
2576 return cm_acquire_id(mra_msg->remote_comm_id, 0);
2577 case CM_MSG_RESPONSE_REP:
2578 case CM_MSG_RESPONSE_OTHER:
2579 return cm_acquire_id(mra_msg->remote_comm_id,
2580 mra_msg->local_comm_id);
2586 static int cm_mra_handler(struct cm_work *work)
2588 struct cm_id_private *cm_id_priv;
2589 struct cm_mra_msg *mra_msg;
2592 mra_msg = (struct cm_mra_msg *)work->mad_recv_wc->recv_buf.mad;
2593 cm_id_priv = cm_acquire_mraed_id(mra_msg);
2597 work->cm_event.private_data = &mra_msg->private_data;
2598 work->cm_event.param.mra_rcvd.service_timeout =
2599 cm_mra_get_service_timeout(mra_msg);
2600 timeout = cm_convert_to_ms(cm_mra_get_service_timeout(mra_msg)) +
2601 cm_convert_to_ms(cm_id_priv->av.timeout);
2603 spin_lock_irq(&cm_id_priv->lock);
2604 switch (cm_id_priv->id.state) {
2605 case IB_CM_REQ_SENT:
2606 if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REQ ||
2607 ib_modify_mad(cm_id_priv->av.port->mad_agent,
2608 cm_id_priv->msg, timeout))
2610 cm_id_priv->id.state = IB_CM_MRA_REQ_RCVD;
2612 case IB_CM_REP_SENT:
2613 if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REP ||
2614 ib_modify_mad(cm_id_priv->av.port->mad_agent,
2615 cm_id_priv->msg, timeout))
2617 cm_id_priv->id.state = IB_CM_MRA_REP_RCVD;
2619 case IB_CM_ESTABLISHED:
2620 if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_OTHER ||
2621 cm_id_priv->id.lap_state != IB_CM_LAP_SENT ||
2622 ib_modify_mad(cm_id_priv->av.port->mad_agent,
2623 cm_id_priv->msg, timeout)) {
2624 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
2625 atomic_long_inc(&work->port->
2626 counter_group[CM_RECV_DUPLICATES].
2627 counter[CM_MRA_COUNTER]);
2630 cm_id_priv->id.lap_state = IB_CM_MRA_LAP_RCVD;
2632 case IB_CM_MRA_REQ_RCVD:
2633 case IB_CM_MRA_REP_RCVD:
2634 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
2635 counter[CM_MRA_COUNTER]);
2641 cm_id_priv->msg->context[1] = (void *) (unsigned long)
2642 cm_id_priv->id.state;
2643 ret = atomic_inc_and_test(&cm_id_priv->work_count);
2645 list_add_tail(&work->list, &cm_id_priv->work_list);
2646 spin_unlock_irq(&cm_id_priv->lock);
2649 cm_process_work(cm_id_priv, work);
2651 cm_deref_id(cm_id_priv);
2654 spin_unlock_irq(&cm_id_priv->lock);
2655 cm_deref_id(cm_id_priv);
2659 static void cm_format_lap(struct cm_lap_msg *lap_msg,
2660 struct cm_id_private *cm_id_priv,
2661 struct ib_sa_path_rec *alternate_path,
2662 const void *private_data,
2663 u8 private_data_len)
2665 cm_format_mad_hdr(&lap_msg->hdr, CM_LAP_ATTR_ID,
2666 cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_LAP));
2667 lap_msg->local_comm_id = cm_id_priv->id.local_id;
2668 lap_msg->remote_comm_id = cm_id_priv->id.remote_id;
2669 cm_lap_set_remote_qpn(lap_msg, cm_id_priv->remote_qpn);
2670 /* todo: need remote CM response timeout */
2671 cm_lap_set_remote_resp_timeout(lap_msg, 0x1F);
2672 lap_msg->alt_local_lid = alternate_path->slid;
2673 lap_msg->alt_remote_lid = alternate_path->dlid;
2674 lap_msg->alt_local_gid = alternate_path->sgid;
2675 lap_msg->alt_remote_gid = alternate_path->dgid;
2676 cm_lap_set_flow_label(lap_msg, alternate_path->flow_label);
2677 cm_lap_set_traffic_class(lap_msg, alternate_path->traffic_class);
2678 lap_msg->alt_hop_limit = alternate_path->hop_limit;
2679 cm_lap_set_packet_rate(lap_msg, alternate_path->rate);
2680 cm_lap_set_sl(lap_msg, alternate_path->sl);
2681 cm_lap_set_subnet_local(lap_msg, 1); /* local only... */
2682 cm_lap_set_local_ack_timeout(lap_msg,
2683 cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
2684 alternate_path->packet_life_time));
2686 if (private_data && private_data_len)
2687 memcpy(lap_msg->private_data, private_data, private_data_len);
2690 int ib_send_cm_lap(struct ib_cm_id *cm_id,
2691 struct ib_sa_path_rec *alternate_path,
2692 const void *private_data,
2693 u8 private_data_len)
2695 struct cm_id_private *cm_id_priv;
2696 struct ib_mad_send_buf *msg;
2697 unsigned long flags;
2700 if (private_data && private_data_len > IB_CM_LAP_PRIVATE_DATA_SIZE)
2703 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2704 spin_lock_irqsave(&cm_id_priv->lock, flags);
2705 if (cm_id->state != IB_CM_ESTABLISHED ||
2706 (cm_id->lap_state != IB_CM_LAP_UNINIT &&
2707 cm_id->lap_state != IB_CM_LAP_IDLE)) {
2712 ret = cm_init_av_by_path(alternate_path, &cm_id_priv->alt_av);
2715 cm_id_priv->alt_av.timeout =
2716 cm_ack_timeout(cm_id_priv->target_ack_delay,
2717 cm_id_priv->alt_av.timeout - 1);
2719 ret = cm_alloc_msg(cm_id_priv, &msg);
2723 cm_format_lap((struct cm_lap_msg *) msg->mad, cm_id_priv,
2724 alternate_path, private_data, private_data_len);
2725 msg->timeout_ms = cm_id_priv->timeout_ms;
2726 msg->context[1] = (void *) (unsigned long) IB_CM_ESTABLISHED;
2728 ret = ib_post_send_mad(msg, NULL);
2730 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2735 cm_id->lap_state = IB_CM_LAP_SENT;
2736 cm_id_priv->msg = msg;
2738 out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2741 EXPORT_SYMBOL(ib_send_cm_lap);
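/*
 * Example (illustrative sketch): loading an alternate path onto an
 * established connection.  The path record is assumed to have been
 * resolved by the caller, e.g. through an SA path record query.
 *
 *	struct ib_sa_path_rec alt_path;
 *
 *	... resolve alt_path ...
 *	ret = ib_send_cm_lap(cm_id, &alt_path, NULL, 0);
 */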
2743 static void cm_format_path_from_lap(struct cm_id_private *cm_id_priv,
2744 struct ib_sa_path_rec *path,
2745 struct cm_lap_msg *lap_msg)
2747 memset(path, 0, sizeof *path);
2748 path->dgid = lap_msg->alt_local_gid;
2749 path->sgid = lap_msg->alt_remote_gid;
2750 path->dlid = lap_msg->alt_local_lid;
2751 path->slid = lap_msg->alt_remote_lid;
2752 path->flow_label = cm_lap_get_flow_label(lap_msg);
2753 path->hop_limit = lap_msg->alt_hop_limit;
2754 path->traffic_class = cm_lap_get_traffic_class(lap_msg);
2755 path->reversible = 1;
2756 path->pkey = cm_id_priv->pkey;
2757 path->sl = cm_lap_get_sl(lap_msg);
2758 path->mtu_selector = IB_SA_EQ;
2759 path->mtu = cm_id_priv->path_mtu;
2760 path->rate_selector = IB_SA_EQ;
2761 path->rate = cm_lap_get_packet_rate(lap_msg);
2762 path->packet_life_time_selector = IB_SA_EQ;
2763 path->packet_life_time = cm_lap_get_local_ack_timeout(lap_msg);
2764 path->packet_life_time -= (path->packet_life_time > 0);
2767 static int cm_lap_handler(struct cm_work *work)
2769 struct cm_id_private *cm_id_priv;
2770 struct cm_lap_msg *lap_msg;
2771 struct ib_cm_lap_event_param *param;
2772 struct ib_mad_send_buf *msg = NULL;
2775 /* todo: verify LAP request and send reject APR if invalid. */
2776 lap_msg = (struct cm_lap_msg *)work->mad_recv_wc->recv_buf.mad;
2777 cm_id_priv = cm_acquire_id(lap_msg->remote_comm_id,
2778 lap_msg->local_comm_id);
2782 param = &work->cm_event.param.lap_rcvd;
2783 param->alternate_path = &work->path[0];
2784 cm_format_path_from_lap(cm_id_priv, param->alternate_path, lap_msg);
2785 work->cm_event.private_data = &lap_msg->private_data;
2787 spin_lock_irq(&cm_id_priv->lock);
2788 if (cm_id_priv->id.state != IB_CM_ESTABLISHED)
2791 switch (cm_id_priv->id.lap_state) {
2792 case IB_CM_LAP_UNINIT:
2793 case IB_CM_LAP_IDLE:
2795 case IB_CM_MRA_LAP_SENT:
2796 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
2797 counter[CM_LAP_COUNTER]);
2798 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
2801 cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
2802 CM_MSG_RESPONSE_OTHER,
2803 cm_id_priv->service_timeout,
2804 cm_id_priv->private_data,
2805 cm_id_priv->private_data_len);
2806 spin_unlock_irq(&cm_id_priv->lock);
2808 if (ib_post_send_mad(msg, NULL))
2811 case IB_CM_LAP_RCVD:
2812 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
2813 counter[CM_LAP_COUNTER]);
2819 cm_id_priv->id.lap_state = IB_CM_LAP_RCVD;
2820 cm_id_priv->tid = lap_msg->hdr.tid;
2821 cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
2822 work->mad_recv_wc->recv_buf.grh,
2824 cm_init_av_by_path(param->alternate_path, &cm_id_priv->alt_av);
2825 ret = atomic_inc_and_test(&cm_id_priv->work_count);
2827 list_add_tail(&work->list, &cm_id_priv->work_list);
2828 spin_unlock_irq(&cm_id_priv->lock);
2831 cm_process_work(cm_id_priv, work);
2833 cm_deref_id(cm_id_priv);
2836 unlock: spin_unlock_irq(&cm_id_priv->lock);
2837 deref: cm_deref_id(cm_id_priv);
2841 static void cm_format_apr(struct cm_apr_msg *apr_msg,
2842 struct cm_id_private *cm_id_priv,
2843 enum ib_cm_apr_status status,
2846 const void *private_data,
2847 u8 private_data_len)
2849 cm_format_mad_hdr(&apr_msg->hdr, CM_APR_ATTR_ID, cm_id_priv->tid);
2850 apr_msg->local_comm_id = cm_id_priv->id.local_id;
2851 apr_msg->remote_comm_id = cm_id_priv->id.remote_id;
2852 apr_msg->ap_status = (u8) status;
2854 if (info && info_length) {
2855 apr_msg->info_length = info_length;
2856 memcpy(apr_msg->info, info, info_length);
2859 if (private_data && private_data_len)
2860 memcpy(apr_msg->private_data, private_data, private_data_len);
2863 int ib_send_cm_apr(struct ib_cm_id *cm_id,
2864 enum ib_cm_apr_status status,
2867 const void *private_data,
2868 u8 private_data_len)
2870 struct cm_id_private *cm_id_priv;
2871 struct ib_mad_send_buf *msg;
2872 unsigned long flags;
2875 if ((private_data && private_data_len > IB_CM_APR_PRIVATE_DATA_SIZE) ||
2876 (info && info_length > IB_CM_APR_INFO_LENGTH))
2879 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2880 spin_lock_irqsave(&cm_id_priv->lock, flags);
2881 if (cm_id->state != IB_CM_ESTABLISHED ||
2882 (cm_id->lap_state != IB_CM_LAP_RCVD &&
2883 cm_id->lap_state != IB_CM_MRA_LAP_SENT)) {
2888 ret = cm_alloc_msg(cm_id_priv, &msg);
2892 cm_format_apr((struct cm_apr_msg *) msg->mad, cm_id_priv, status,
2893 info, info_length, private_data, private_data_len);
2894 ret = ib_post_send_mad(msg, NULL);
2896 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2901 cm_id->lap_state = IB_CM_LAP_IDLE;
2902 out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2905 EXPORT_SYMBOL(ib_send_cm_apr);
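/*
 * Example (illustrative sketch): accepting a received LAP from the CM
 * event handler.  The two middle arguments carry optional APR
 * additional information; NULL/0 omits it.
 *
 *	if (event->event == IB_CM_LAP_RECEIVED)
 *		ib_send_cm_apr(cm_id, IB_CM_APR_SUCCESS, NULL, 0, NULL, 0);
 */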
2907 static int cm_apr_handler(struct cm_work *work)
2909 struct cm_id_private *cm_id_priv;
2910 struct cm_apr_msg *apr_msg;
2913 apr_msg = (struct cm_apr_msg *)work->mad_recv_wc->recv_buf.mad;
2914 cm_id_priv = cm_acquire_id(apr_msg->remote_comm_id,
2915 apr_msg->local_comm_id);
2917 return -EINVAL; /* Unmatched reply. */
2919 work->cm_event.param.apr_rcvd.ap_status = apr_msg->ap_status;
2920 work->cm_event.param.apr_rcvd.apr_info = &apr_msg->info;
2921 work->cm_event.param.apr_rcvd.info_len = apr_msg->info_length;
2922 work->cm_event.private_data = &apr_msg->private_data;
2924 spin_lock_irq(&cm_id_priv->lock);
2925 if (cm_id_priv->id.state != IB_CM_ESTABLISHED ||
2926 (cm_id_priv->id.lap_state != IB_CM_LAP_SENT &&
2927 cm_id_priv->id.lap_state != IB_CM_MRA_LAP_RCVD)) {
2928 spin_unlock_irq(&cm_id_priv->lock);
2931 cm_id_priv->id.lap_state = IB_CM_LAP_IDLE;
2932 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2933 cm_id_priv->msg = NULL;
2935 ret = atomic_inc_and_test(&cm_id_priv->work_count);
2937 list_add_tail(&work->list, &cm_id_priv->work_list);
2938 spin_unlock_irq(&cm_id_priv->lock);
2941 cm_process_work(cm_id_priv, work);
2943 cm_deref_id(cm_id_priv);
2946 cm_deref_id(cm_id_priv);
2950 static int cm_timewait_handler(struct cm_work *work)
2952 struct cm_timewait_info *timewait_info;
2953 struct cm_id_private *cm_id_priv;
2956 timewait_info = (struct cm_timewait_info *)work;
2957 spin_lock_irq(&cm.lock);
2958 list_del(&timewait_info->list);
2959 spin_unlock_irq(&cm.lock);
2961 cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
2962 timewait_info->work.remote_id);
2966 spin_lock_irq(&cm_id_priv->lock);
2967 if (cm_id_priv->id.state != IB_CM_TIMEWAIT ||
2968 cm_id_priv->remote_qpn != timewait_info->remote_qpn) {
2969 spin_unlock_irq(&cm_id_priv->lock);
2972 cm_id_priv->id.state = IB_CM_IDLE;
2973 ret = atomic_inc_and_test(&cm_id_priv->work_count);
2975 list_add_tail(&work->list, &cm_id_priv->work_list);
2976 spin_unlock_irq(&cm_id_priv->lock);
2979 cm_process_work(cm_id_priv, work);
2981 cm_deref_id(cm_id_priv);
2984 cm_deref_id(cm_id_priv);
2988 static void cm_format_sidr_req(struct cm_sidr_req_msg *sidr_req_msg,
2989 struct cm_id_private *cm_id_priv,
2990 struct ib_cm_sidr_req_param *param)
2992 cm_format_mad_hdr(&sidr_req_msg->hdr, CM_SIDR_REQ_ATTR_ID,
2993 cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_SIDR));
2994 sidr_req_msg->request_id = cm_id_priv->id.local_id;
2995 sidr_req_msg->pkey = param->path->pkey;
2996 sidr_req_msg->service_id = param->service_id;
2998 if (param->private_data && param->private_data_len)
2999 memcpy(sidr_req_msg->private_data, param->private_data,
3000 param->private_data_len);
3003 int ib_send_cm_sidr_req(struct ib_cm_id *cm_id,
3004 struct ib_cm_sidr_req_param *param)
3006 struct cm_id_private *cm_id_priv;
3007 struct ib_mad_send_buf *msg;
3008 unsigned long flags;
3011 if (!param->path || (param->private_data &&
3012 param->private_data_len > IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE))
3015 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3016 ret = cm_init_av_by_path(param->path, &cm_id_priv->av);
3020 cm_id->service_id = param->service_id;
3021 cm_id->service_mask = ~cpu_to_be64(0);
3022 cm_id_priv->timeout_ms = param->timeout_ms;
3023 cm_id_priv->max_cm_retries = param->max_cm_retries;
3024 ret = cm_alloc_msg(cm_id_priv, &msg);
3028 cm_format_sidr_req((struct cm_sidr_req_msg *) msg->mad, cm_id_priv,
3030 msg->timeout_ms = cm_id_priv->timeout_ms;
3031 msg->context[1] = (void *) (unsigned long) IB_CM_SIDR_REQ_SENT;
3033 spin_lock_irqsave(&cm_id_priv->lock, flags);
3034 if (cm_id->state == IB_CM_IDLE)
3035 ret = ib_post_send_mad(msg, NULL);
3040 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3044 cm_id->state = IB_CM_SIDR_REQ_SENT;
3045 cm_id_priv->msg = msg;
3046 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3050 EXPORT_SYMBOL(ib_send_cm_sidr_req);
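/*
 * Example (illustrative sketch): resolving a service's QPN and Q_Key
 * over SIDR.  path_rec is assumed to be a previously resolved
 * struct ib_sa_path_rec; the service ID and timeout values are
 * arbitrary.
 *
 *	struct ib_cm_sidr_req_param param = {
 *		.path		= &path_rec,
 *		.service_id	= cpu_to_be64(0x1234ULL),
 *		.timeout_ms	= 1000,
 *		.max_cm_retries	= 3,
 *	};
 *
 *	ret = ib_send_cm_sidr_req(cm_id, &param);
 */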
3052 static void cm_format_sidr_req_event(struct cm_work *work,
3053 struct ib_cm_id *listen_id)
3055 struct cm_sidr_req_msg *sidr_req_msg;
3056 struct ib_cm_sidr_req_event_param *param;
3058 sidr_req_msg = (struct cm_sidr_req_msg *)
3059 work->mad_recv_wc->recv_buf.mad;
3060 param = &work->cm_event.param.sidr_req_rcvd;
3061 param->pkey = __be16_to_cpu(sidr_req_msg->pkey);
3062 param->listen_id = listen_id;
3063 param->service_id = sidr_req_msg->service_id;
3064 param->bth_pkey = cm_get_bth_pkey(work);
3065 param->port = work->port->port_num;
3066 work->cm_event.private_data = &sidr_req_msg->private_data;
3069 static int cm_sidr_req_handler(struct cm_work *work)
3071 struct ib_cm_id *cm_id;
3072 struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
3073 struct cm_sidr_req_msg *sidr_req_msg;
3076 cm_id = ib_create_cm_id(work->port->cm_dev->ib_device, NULL, NULL);
3078 return PTR_ERR(cm_id);
3079 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3081 /* Record SGID/SLID and request ID for lookup. */
3082 sidr_req_msg = (struct cm_sidr_req_msg *)
3083 work->mad_recv_wc->recv_buf.mad;
3084 wc = work->mad_recv_wc->wc;
3085 cm_id_priv->av.dgid.global.subnet_prefix = cpu_to_be64(wc->slid);
3086 cm_id_priv->av.dgid.global.interface_id = 0;
3087 cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
3088 work->mad_recv_wc->recv_buf.grh,
3090 cm_id_priv->id.remote_id = sidr_req_msg->request_id;
3091 cm_id_priv->tid = sidr_req_msg->hdr.tid;
3092 atomic_inc(&cm_id_priv->work_count);
3094 spin_lock_irq(&cm.lock);
3095 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
3096 if (cur_cm_id_priv) {
3097 spin_unlock_irq(&cm.lock);
3098 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
3099 counter[CM_SIDR_REQ_COUNTER]);
3100 goto out; /* Duplicate message. */
3102 cm_id_priv->id.state = IB_CM_SIDR_REQ_RCVD;
3103 cur_cm_id_priv = cm_find_listen(cm_id->device,
3104 sidr_req_msg->service_id);
3105 if (!cur_cm_id_priv) {
3106 spin_unlock_irq(&cm.lock);
3107 cm_reject_sidr_req(cm_id_priv, IB_SIDR_UNSUPPORTED);
3108 goto out; /* No match. */
3110 atomic_inc(&cur_cm_id_priv->refcount);
3111 atomic_inc(&cm_id_priv->refcount);
3112 spin_unlock_irq(&cm.lock);
3114 cm_id_priv->id.cm_handler = cur_cm_id_priv->id.cm_handler;
3115 cm_id_priv->id.context = cur_cm_id_priv->id.context;
3116 cm_id_priv->id.service_id = sidr_req_msg->service_id;
3117 cm_id_priv->id.service_mask = ~cpu_to_be64(0);
3119 cm_format_sidr_req_event(work, &cur_cm_id_priv->id);
3120 cm_process_work(cm_id_priv, work);
3121 cm_deref_id(cur_cm_id_priv);
3124 ib_destroy_cm_id(&cm_id_priv->id);
3128 static void cm_format_sidr_rep(struct cm_sidr_rep_msg *sidr_rep_msg,
3129 struct cm_id_private *cm_id_priv,
3130 struct ib_cm_sidr_rep_param *param)
3132 cm_format_mad_hdr(&sidr_rep_msg->hdr, CM_SIDR_REP_ATTR_ID,
3134 sidr_rep_msg->request_id = cm_id_priv->id.remote_id;
3135 sidr_rep_msg->status = param->status;
3136 cm_sidr_rep_set_qpn(sidr_rep_msg, cpu_to_be32(param->qp_num));
3137 sidr_rep_msg->service_id = cm_id_priv->id.service_id;
3138 sidr_rep_msg->qkey = cpu_to_be32(param->qkey);
3140 if (param->info && param->info_length)
3141 memcpy(sidr_rep_msg->info, param->info, param->info_length);
3143 if (param->private_data && param->private_data_len)
3144 memcpy(sidr_rep_msg->private_data, param->private_data,
3145 param->private_data_len);
3148 int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id,
3149 struct ib_cm_sidr_rep_param *param)
3151 struct cm_id_private *cm_id_priv;
3152 struct ib_mad_send_buf *msg;
3153 unsigned long flags;
3156 if ((param->info && param->info_length > IB_CM_SIDR_REP_INFO_LENGTH) ||
3157 (param->private_data &&
3158 param->private_data_len > IB_CM_SIDR_REP_PRIVATE_DATA_SIZE))
3161 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3162 spin_lock_irqsave(&cm_id_priv->lock, flags);
3163 if (cm_id->state != IB_CM_SIDR_REQ_RCVD) {
3168 ret = cm_alloc_msg(cm_id_priv, &msg);
3172 cm_format_sidr_rep((struct cm_sidr_rep_msg *) msg->mad, cm_id_priv,
3174 ret = ib_post_send_mad(msg, NULL);
3176 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3180 cm_id->state = IB_CM_IDLE;
3181 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3183 spin_lock_irqsave(&cm.lock, flags);
3184 if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node)) {
3185 rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
3186 RB_CLEAR_NODE(&cm_id_priv->sidr_id_node);
3188 spin_unlock_irqrestore(&cm.lock, flags);
3191 error: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3194 EXPORT_SYMBOL(ib_send_cm_sidr_rep);
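/*
 * Example (illustrative sketch): answering a SIDR REQ from a listen
 * handler with the QP number and Q_Key a client should use.  qp and
 * MY_QKEY are assumptions for illustration.
 *
 *	struct ib_cm_sidr_rep_param rep = {
 *		.qp_num	= qp->qp_num,
 *		.qkey	= MY_QKEY,
 *		.status	= IB_SIDR_SUCCESS,
 *	};
 *
 *	ret = ib_send_cm_sidr_rep(cm_id, &rep);
 */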
3196 static void cm_format_sidr_rep_event(struct cm_work *work)
3198 struct cm_sidr_rep_msg *sidr_rep_msg;
3199 struct ib_cm_sidr_rep_event_param *param;
3201 sidr_rep_msg = (struct cm_sidr_rep_msg *)
3202 work->mad_recv_wc->recv_buf.mad;
3203 param = &work->cm_event.param.sidr_rep_rcvd;
3204 param->status = sidr_rep_msg->status;
3205 param->qkey = be32_to_cpu(sidr_rep_msg->qkey);
3206 param->qpn = be32_to_cpu(cm_sidr_rep_get_qpn(sidr_rep_msg));
3207 param->info = &sidr_rep_msg->info;
3208 param->info_len = sidr_rep_msg->info_length;
3209 work->cm_event.private_data = &sidr_rep_msg->private_data;
3212 static int cm_sidr_rep_handler(struct cm_work *work)
3214 struct cm_sidr_rep_msg *sidr_rep_msg;
3215 struct cm_id_private *cm_id_priv;
3217 sidr_rep_msg = (struct cm_sidr_rep_msg *)
3218 work->mad_recv_wc->recv_buf.mad;
3219 cm_id_priv = cm_acquire_id(sidr_rep_msg->request_id, 0);
3221 return -EINVAL; /* Unmatched reply. */
3223 spin_lock_irq(&cm_id_priv->lock);
3224 if (cm_id_priv->id.state != IB_CM_SIDR_REQ_SENT) {
3225 spin_unlock_irq(&cm_id_priv->lock);
3228 cm_id_priv->id.state = IB_CM_IDLE;
3229 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
3230 spin_unlock_irq(&cm_id_priv->lock);
3232 cm_format_sidr_rep_event(work);
3233 cm_process_work(cm_id_priv, work);
3236 cm_deref_id(cm_id_priv);
3240 static void cm_process_send_error(struct ib_mad_send_buf *msg,
3241 enum ib_wc_status wc_status)
3243 struct cm_id_private *cm_id_priv;
3244 struct ib_cm_event cm_event;
3245 enum ib_cm_state state;
3248 memset(&cm_event, 0, sizeof cm_event);
3249 cm_id_priv = msg->context[0];
3251 /* Discard old sends or ones without a response. */
3252 spin_lock_irq(&cm_id_priv->lock);
3253 state = (enum ib_cm_state) (unsigned long) msg->context[1];
3254 if (msg != cm_id_priv->msg || state != cm_id_priv->id.state)
3258 case IB_CM_REQ_SENT:
3259 case IB_CM_MRA_REQ_RCVD:
3260 cm_reset_to_idle(cm_id_priv);
3261 cm_event.event = IB_CM_REQ_ERROR;
3263 case IB_CM_REP_SENT:
3264 case IB_CM_MRA_REP_RCVD:
3265 cm_reset_to_idle(cm_id_priv);
3266 cm_event.event = IB_CM_REP_ERROR;
3268 case IB_CM_DREQ_SENT:
3269 cm_enter_timewait(cm_id_priv);
3270 cm_event.event = IB_CM_DREQ_ERROR;
3272 case IB_CM_SIDR_REQ_SENT:
3273 cm_id_priv->id.state = IB_CM_IDLE;
3274 cm_event.event = IB_CM_SIDR_REQ_ERROR;
3279 spin_unlock_irq(&cm_id_priv->lock);
3280 cm_event.param.send_status = wc_status;
3282 /* No other events can occur on the cm_id at this point. */
3283 ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &cm_event);
3286 ib_destroy_cm_id(&cm_id_priv->id);
3289 spin_unlock_irq(&cm_id_priv->lock);
3293 static void cm_send_handler(struct ib_mad_agent *mad_agent,
3294 struct ib_mad_send_wc *mad_send_wc)
3296 struct ib_mad_send_buf *msg = mad_send_wc->send_buf;
3297 struct cm_port *port;
3300 port = mad_agent->context;
3301 attr_index = be16_to_cpu(((struct ib_mad_hdr *)
3302 msg->mad)->attr_id) - CM_ATTR_ID_OFFSET;
3305 * If the send was in response to a received message (context[0] is not
3306 * set to a cm_id), and is not a REJ, then it is a send that was
3307 * manually retried.
3308 */
3309 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
3312 atomic_long_add(1 + msg->retries,
3313 &port->counter_group[CM_XMIT].counter[attr_index]);
3315 atomic_long_add(msg->retries,
3316 &port->counter_group[CM_XMIT_RETRIES].
3317 counter[attr_index]);
3319 switch (mad_send_wc->status) {
3321 case IB_WC_WR_FLUSH_ERR:
3325 if (msg->context[0] && msg->context[1])
3326 cm_process_send_error(msg, mad_send_wc->status);
3333 static void cm_work_handler(struct work_struct *_work)
3335 struct cm_work *work = container_of(_work, struct cm_work, work.work);
3338 switch (work->cm_event.event) {
3339 case IB_CM_REQ_RECEIVED:
3340 ret = cm_req_handler(work);
3342 case IB_CM_MRA_RECEIVED:
3343 ret = cm_mra_handler(work);
3345 case IB_CM_REJ_RECEIVED:
3346 ret = cm_rej_handler(work);
3348 case IB_CM_REP_RECEIVED:
3349 ret = cm_rep_handler(work);
3351 case IB_CM_RTU_RECEIVED:
3352 ret = cm_rtu_handler(work);
3354 case IB_CM_USER_ESTABLISHED:
3355 ret = cm_establish_handler(work);
3357 case IB_CM_DREQ_RECEIVED:
3358 ret = cm_dreq_handler(work);
3360 case IB_CM_DREP_RECEIVED:
3361 ret = cm_drep_handler(work);
3363 case IB_CM_SIDR_REQ_RECEIVED:
3364 ret = cm_sidr_req_handler(work);
3366 case IB_CM_SIDR_REP_RECEIVED:
3367 ret = cm_sidr_rep_handler(work);
3369 case IB_CM_LAP_RECEIVED:
3370 ret = cm_lap_handler(work);
3372 case IB_CM_APR_RECEIVED:
3373 ret = cm_apr_handler(work);
3375 case IB_CM_TIMEWAIT_EXIT:
3376 ret = cm_timewait_handler(work);
3386 static int cm_establish(struct ib_cm_id *cm_id)
3388 struct cm_id_private *cm_id_priv;
3389 struct cm_work *work;
3390 unsigned long flags;
3392 struct cm_device *cm_dev;
3394 cm_dev = ib_get_client_data(cm_id->device, &cm_client);
3398 work = kmalloc(sizeof *work, GFP_ATOMIC);
3402 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3403 spin_lock_irqsave(&cm_id_priv->lock, flags);
3404 switch (cm_id->state)
3406 case IB_CM_REP_SENT:
3407 case IB_CM_MRA_REP_RCVD:
3408 cm_id->state = IB_CM_ESTABLISHED;
3410 case IB_CM_ESTABLISHED:
3417 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3425 * The CM worker thread may try to destroy the cm_id before it
3426 * can execute this work item. To prevent potential deadlock,
3427 * we need to find the cm_id once we're in the context of the
3428 * worker thread, rather than holding a reference on it.
3430 INIT_DELAYED_WORK(&work->work, cm_work_handler);
3431 work->local_id = cm_id->local_id;
3432 work->remote_id = cm_id->remote_id;
3433 work->mad_recv_wc = NULL;
3434 work->cm_event.event = IB_CM_USER_ESTABLISHED;
3436 /* Check if the device has started its remove_one */
3437 spin_lock_irq(&cm.lock);
3438 if (!cm_dev->going_down) {
3439 queue_delayed_work(cm.wq, &work->work, 0);
3444 spin_unlock_irq(&cm.lock);
3450 static int cm_migrate(struct ib_cm_id *cm_id)
3452 struct cm_id_private *cm_id_priv;
3453 unsigned long flags;
3456 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3457 spin_lock_irqsave(&cm_id_priv->lock, flags);
3458 if (cm_id->state == IB_CM_ESTABLISHED &&
3459 (cm_id->lap_state == IB_CM_LAP_UNINIT ||
3460 cm_id->lap_state == IB_CM_LAP_IDLE)) {
3461 cm_id->lap_state = IB_CM_LAP_IDLE;
3462 cm_id_priv->av = cm_id_priv->alt_av;
3465 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3470 int ib_cm_notify(struct ib_cm_id *cm_id, enum ib_event_type event)
3475 case IB_EVENT_COMM_EST:
3476 ret = cm_establish(cm_id);
3478 case IB_EVENT_PATH_MIG:
3479 ret = cm_migrate(cm_id);
3486 EXPORT_SYMBOL(ib_cm_notify);
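/*
 * Example (illustrative sketch): forwarding asynchronous QP events to
 * the CM from a consumer's QP event handler (handler name assumed).
 *
 *	static void example_qp_event(struct ib_event *event, void *context)
 *	{
 *		struct ib_cm_id *cm_id = context;
 *
 *		if (event->event == IB_EVENT_COMM_EST ||
 *		    event->event == IB_EVENT_PATH_MIG)
 *			ib_cm_notify(cm_id, event->event);
 *	}
 */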
3488 static void cm_recv_handler(struct ib_mad_agent *mad_agent,
3489 struct ib_mad_recv_wc *mad_recv_wc)
3491 struct cm_port *port = mad_agent->context;
3492 struct cm_work *work;
3493 enum ib_cm_event_type event;
3498 switch (mad_recv_wc->recv_buf.mad->mad_hdr.attr_id) {
3499 case CM_REQ_ATTR_ID:
3500 paths = 1 + (((struct cm_req_msg *) mad_recv_wc->recv_buf.mad)->
3501 alt_local_lid != 0);
3502 event = IB_CM_REQ_RECEIVED;
3504 case CM_MRA_ATTR_ID:
3505 event = IB_CM_MRA_RECEIVED;
3507 case CM_REJ_ATTR_ID:
3508 event = IB_CM_REJ_RECEIVED;
3510 case CM_REP_ATTR_ID:
3511 event = IB_CM_REP_RECEIVED;
3513 case CM_RTU_ATTR_ID:
3514 event = IB_CM_RTU_RECEIVED;
3516 case CM_DREQ_ATTR_ID:
3517 event = IB_CM_DREQ_RECEIVED;
3519 case CM_DREP_ATTR_ID:
3520 event = IB_CM_DREP_RECEIVED;
3522 case CM_SIDR_REQ_ATTR_ID:
3523 event = IB_CM_SIDR_REQ_RECEIVED;
3525 case CM_SIDR_REP_ATTR_ID:
3526 event = IB_CM_SIDR_REP_RECEIVED;
3528 case CM_LAP_ATTR_ID:
3530 event = IB_CM_LAP_RECEIVED;
3532 case CM_APR_ATTR_ID:
3533 event = IB_CM_APR_RECEIVED;
3536 ib_free_recv_mad(mad_recv_wc);
3540 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
3541 atomic_long_inc(&port->counter_group[CM_RECV].
3542 counter[attr_id - CM_ATTR_ID_OFFSET]);
3544 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
3547 ib_free_recv_mad(mad_recv_wc);
3551 INIT_DELAYED_WORK(&work->work, cm_work_handler);
3552 work->cm_event.event = event;
3553 work->mad_recv_wc = mad_recv_wc;
3556 /* Check if the device has started its remove_one */
3557 spin_lock_irq(&cm.lock);
3558 if (!port->cm_dev->going_down)
3559 queue_delayed_work(cm.wq, &work->work, 0);
3562 spin_unlock_irq(&cm.lock);
3566 ib_free_recv_mad(mad_recv_wc);
3570 static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv,
3571 struct ib_qp_attr *qp_attr,
3574 unsigned long flags;
3577 spin_lock_irqsave(&cm_id_priv->lock, flags);
3578 switch (cm_id_priv->id.state) {
3579 case IB_CM_REQ_SENT:
3580 case IB_CM_MRA_REQ_RCVD:
3581 case IB_CM_REQ_RCVD:
3582 case IB_CM_MRA_REQ_SENT:
3583 case IB_CM_REP_RCVD:
3584 case IB_CM_MRA_REP_SENT:
3585 case IB_CM_REP_SENT:
3586 case IB_CM_MRA_REP_RCVD:
3587 case IB_CM_ESTABLISHED:
3588 *qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS |
3589 IB_QP_PKEY_INDEX | IB_QP_PORT;
3590 qp_attr->qp_access_flags = IB_ACCESS_REMOTE_WRITE;
3591 if (cm_id_priv->responder_resources)
3592 qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_READ |
3593 IB_ACCESS_REMOTE_ATOMIC;
3594 qp_attr->pkey_index = cm_id_priv->av.pkey_index;
3595 qp_attr->port_num = cm_id_priv->av.port->port_num;
3602 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3606 static int cm_init_qp_rtr_attr(struct cm_id_private *cm_id_priv,
3607 struct ib_qp_attr *qp_attr,
3610 unsigned long flags;
3613 spin_lock_irqsave(&cm_id_priv->lock, flags);
3614 switch (cm_id_priv->id.state) {
3615 case IB_CM_REQ_RCVD:
3616 case IB_CM_MRA_REQ_SENT:
3617 case IB_CM_REP_RCVD:
3618 case IB_CM_MRA_REP_SENT:
3619 case IB_CM_REP_SENT:
3620 case IB_CM_MRA_REP_RCVD:
3621 case IB_CM_ESTABLISHED:
3622 *qp_attr_mask = IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU |
3623 IB_QP_DEST_QPN | IB_QP_RQ_PSN;
3624 qp_attr->ah_attr = cm_id_priv->av.ah_attr;
3625 if (!cm_id_priv->av.valid) {
3626 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3629 if (cm_id_priv->av.ah_attr.vlan_id != 0xffff) {
3630 qp_attr->vlan_id = cm_id_priv->av.ah_attr.vlan_id;
3631 *qp_attr_mask |= IB_QP_VID;
3633 if (!is_zero_ether_addr(cm_id_priv->av.smac)) {
3634 memcpy(qp_attr->smac, cm_id_priv->av.smac,
3635 sizeof(qp_attr->smac));
3636 *qp_attr_mask |= IB_QP_SMAC;
3638 if (cm_id_priv->alt_av.valid) {
3639 if (cm_id_priv->alt_av.ah_attr.vlan_id != 0xffff) {
3640 qp_attr->alt_vlan_id =
3641 cm_id_priv->alt_av.ah_attr.vlan_id;
3642 *qp_attr_mask |= IB_QP_ALT_VID;
3644 if (!is_zero_ether_addr(cm_id_priv->alt_av.smac)) {
3645 memcpy(qp_attr->alt_smac,
3646 cm_id_priv->alt_av.smac,
3647 sizeof(qp_attr->alt_smac));
3648 *qp_attr_mask |= IB_QP_ALT_SMAC;
3651 qp_attr->path_mtu = cm_id_priv->path_mtu;
3652 qp_attr->dest_qp_num = be32_to_cpu(cm_id_priv->remote_qpn);
3653 qp_attr->rq_psn = be32_to_cpu(cm_id_priv->rq_psn);
3654 if (cm_id_priv->qp_type == IB_QPT_RC ||
3655 cm_id_priv->qp_type == IB_QPT_XRC_TGT) {
3656 *qp_attr_mask |= IB_QP_MAX_DEST_RD_ATOMIC |
3657 IB_QP_MIN_RNR_TIMER;
3658 qp_attr->max_dest_rd_atomic =
3659 cm_id_priv->responder_resources;
3660 qp_attr->min_rnr_timer = 0;
3662 if (cm_id_priv->alt_av.ah_attr.dlid) {
3663 *qp_attr_mask |= IB_QP_ALT_PATH;
3664 qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num;
3665 qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index;
3666 qp_attr->alt_timeout = cm_id_priv->alt_av.timeout;
3667 qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
3675 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3679 static int cm_init_qp_rts_attr(struct cm_id_private *cm_id_priv,
3680 struct ib_qp_attr *qp_attr,
3683 unsigned long flags;
3686 spin_lock_irqsave(&cm_id_priv->lock, flags);
3687 switch (cm_id_priv->id.state) {
3688 /* Allow transition to RTS before sending REP */
3689 case IB_CM_REQ_RCVD:
3690 case IB_CM_MRA_REQ_SENT:
3692 case IB_CM_REP_RCVD:
3693 case IB_CM_MRA_REP_SENT:
3694 case IB_CM_REP_SENT:
3695 case IB_CM_MRA_REP_RCVD:
3696 case IB_CM_ESTABLISHED:
3697 if (cm_id_priv->id.lap_state == IB_CM_LAP_UNINIT) {
3698 *qp_attr_mask = IB_QP_STATE | IB_QP_SQ_PSN;
3699 qp_attr->sq_psn = be32_to_cpu(cm_id_priv->sq_psn);
3700 switch (cm_id_priv->qp_type) {
3702 case IB_QPT_XRC_INI:
3703 *qp_attr_mask |= IB_QP_RETRY_CNT | IB_QP_RNR_RETRY |
3704 IB_QP_MAX_QP_RD_ATOMIC;
3705 qp_attr->retry_cnt = cm_id_priv->retry_count;
3706 qp_attr->rnr_retry = cm_id_priv->rnr_retry_count;
3707 qp_attr->max_rd_atomic = cm_id_priv->initiator_depth;
3709 case IB_QPT_XRC_TGT:
3710 *qp_attr_mask |= IB_QP_TIMEOUT;
3711 qp_attr->timeout = cm_id_priv->av.timeout;
3716 if (cm_id_priv->alt_av.ah_attr.dlid) {
3717 *qp_attr_mask |= IB_QP_PATH_MIG_STATE;
3718 qp_attr->path_mig_state = IB_MIG_REARM;
3721 *qp_attr_mask = IB_QP_ALT_PATH | IB_QP_PATH_MIG_STATE;
3722 qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num;
3723 qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index;
3724 qp_attr->alt_timeout = cm_id_priv->alt_av.timeout;
3725 qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
3726 qp_attr->path_mig_state = IB_MIG_REARM;
3734 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3738 int ib_cm_init_qp_attr(struct ib_cm_id *cm_id,
3739 struct ib_qp_attr *qp_attr,
3742 struct cm_id_private *cm_id_priv;
3745 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3746 switch (qp_attr->qp_state) {
3748 ret = cm_init_qp_init_attr(cm_id_priv, qp_attr, qp_attr_mask);
3751 ret = cm_init_qp_rtr_attr(cm_id_priv, qp_attr, qp_attr_mask);
3754 ret = cm_init_qp_rts_attr(cm_id_priv, qp_attr, qp_attr_mask);
3762 EXPORT_SYMBOL(ib_cm_init_qp_attr);
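/*
 * Example (illustrative sketch): the usual consumer sequence for
 * transitioning a connected QP -- query the CM-derived attributes for
 * each state and apply them with ib_modify_qp().  Error handling is
 * omitted for brevity.
 *
 *	struct ib_qp_attr qp_attr;
 *	int qp_attr_mask;
 *
 *	qp_attr.qp_state = IB_QPS_INIT;
 *	ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
 *	ib_modify_qp(qp, &qp_attr, qp_attr_mask);
 *
 *	qp_attr.qp_state = IB_QPS_RTR;
 *	ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
 *	ib_modify_qp(qp, &qp_attr, qp_attr_mask);
 *
 *	qp_attr.qp_state = IB_QPS_RTS;
 *	ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
 *	ib_modify_qp(qp, &qp_attr, qp_attr_mask);
 */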
3764 static void cm_get_ack_delay(struct cm_device *cm_dev)
3766 struct ib_device_attr attr;
3768 if (ib_query_device(cm_dev->ib_device, &attr))
3769 cm_dev->ack_delay = 0; /* acks will rely on packet life time */
3771 cm_dev->ack_delay = attr.local_ca_ack_delay;
3774 static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
3777 struct cm_counter_group *group;
3778 struct cm_counter_attribute *cm_attr;
3780 group = container_of(obj, struct cm_counter_group, obj);
3781 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
3783 return sprintf(buf, "%ld\n",
3784 atomic_long_read(&group->counter[cm_attr->index]));
3787 static const struct sysfs_ops cm_counter_ops = {
3788 .show = cm_show_counter
3791 static struct kobj_type cm_counter_obj_type = {
3792 .sysfs_ops = &cm_counter_ops,
3793 .default_attrs = cm_counter_default_attrs
3796 static void cm_release_port_obj(struct kobject *obj)
3798 struct cm_port *cm_port;
3800 cm_port = container_of(obj, struct cm_port, port_obj);
3804 static struct kobj_type cm_port_obj_type = {
3805 .release = cm_release_port_obj
3808 static char *cm_devnode(struct device *dev, umode_t *mode)
3812 return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev));
3815 struct class cm_class = {
3816 .owner = THIS_MODULE,
3817 .name = "infiniband_cm",
3818 .devnode = cm_devnode,
3820 EXPORT_SYMBOL(cm_class);
3822 static int cm_create_port_fs(struct cm_port *port)
3826 ret = kobject_init_and_add(&port->port_obj, &cm_port_obj_type,
3827 &port->cm_dev->device->kobj,
3828 "%d", port->port_num);
3834 for (i = 0; i < CM_COUNTER_GROUPS; i++) {
3835 ret = kobject_init_and_add(&port->counter_group[i].obj,
3836 &cm_counter_obj_type,
3838 "%s", counter_group_names[i]);
3847 kobject_put(&port->counter_group[i].obj);
3848 kobject_put(&port->port_obj);
3853 static void cm_remove_port_fs(struct cm_port *port)
3857 for (i = 0; i < CM_COUNTER_GROUPS; i++)
3858 kobject_put(&port->counter_group[i].obj);
3860 kobject_put(&port->port_obj);
3863 static void cm_add_one(struct ib_device *ib_device)
3865 struct cm_device *cm_dev;
3866 struct cm_port *port;
3867 struct ib_mad_reg_req reg_req = {
3868 .mgmt_class = IB_MGMT_CLASS_CM,
3869 .mgmt_class_version = IB_CM_CLASS_VERSION,
3871 struct ib_port_modify port_modify = {
3872 .set_port_cap_mask = IB_PORT_CM_SUP
3874 unsigned long flags;
3879 cm_dev = kzalloc(sizeof(*cm_dev) + sizeof(*port) *
3880 ib_device->phys_port_cnt, GFP_KERNEL);
3884 cm_dev->ib_device = ib_device;
3885 cm_get_ack_delay(cm_dev);
3886 cm_dev->going_down = 0;
3887 cm_dev->device = device_create(&cm_class, &ib_device->dev,
3889 "%s", ib_device->name);
3890 if (IS_ERR(cm_dev->device)) {
3895 set_bit(IB_MGMT_METHOD_SEND, reg_req.method_mask);
3896 for (i = 1; i <= ib_device->phys_port_cnt; i++) {
3897 if (!rdma_cap_ib_cm(ib_device, i))
3900 port = kzalloc(sizeof *port, GFP_KERNEL);
3904 cm_dev->port[i-1] = port;
3905 port->cm_dev = cm_dev;
3908 ret = cm_create_port_fs(port);
3912 port->mad_agent = ib_register_mad_agent(ib_device, i,
3920 if (IS_ERR(port->mad_agent))
3923 ret = ib_modify_port(ib_device, i, 0, &port_modify);
3933 ib_set_client_data(ib_device, &cm_client, cm_dev);
3935 write_lock_irqsave(&cm.device_lock, flags);
3936 list_add_tail(&cm_dev->list, &cm.device_list);
3937 write_unlock_irqrestore(&cm.device_lock, flags);
3941 ib_unregister_mad_agent(port->mad_agent);
3943 cm_remove_port_fs(port);
3945 port_modify.set_port_cap_mask = 0;
3946 port_modify.clr_port_cap_mask = IB_PORT_CM_SUP;
3948 if (!rdma_cap_ib_cm(ib_device, i))
3951 port = cm_dev->port[i-1];
3952 ib_modify_port(ib_device, port->port_num, 0, &port_modify);
3953 ib_unregister_mad_agent(port->mad_agent);
3954 cm_remove_port_fs(port);
3957 device_unregister(cm_dev->device);
3961 static void cm_remove_one(struct ib_device *ib_device, void *client_data)
3963 struct cm_device *cm_dev = client_data;
3964 struct cm_port *port;
3965 struct ib_port_modify port_modify = {
3966 .clr_port_cap_mask = IB_PORT_CM_SUP
3968 unsigned long flags;
3974 write_lock_irqsave(&cm.device_lock, flags);
3975 list_del(&cm_dev->list);
3976 write_unlock_irqrestore(&cm.device_lock, flags);
3978 spin_lock_irq(&cm.lock);
3979 cm_dev->going_down = 1;
3980 spin_unlock_irq(&cm.lock);
3982 for (i = 1; i <= ib_device->phys_port_cnt; i++) {
3983 if (!rdma_cap_ib_cm(ib_device, i))
3986 port = cm_dev->port[i-1];
3987 ib_modify_port(ib_device, port->port_num, 0, &port_modify);
3989 * We flush the queue here after setting going_down; this
3990 * verifies that no new work will be queued in the recv handler.
3991 * After that it is safe to call ib_unregister_mad_agent().
3993 flush_workqueue(cm.wq);
3994 ib_unregister_mad_agent(port->mad_agent);
3995 cm_remove_port_fs(port);
3997 device_unregister(cm_dev->device);
4001 static int __init ib_cm_init(void)
4005 memset(&cm, 0, sizeof cm);
4006 INIT_LIST_HEAD(&cm.device_list);
4007 rwlock_init(&cm.device_lock);
4008 spin_lock_init(&cm.lock);
4009 cm.listen_service_table = RB_ROOT;
4010 cm.listen_service_id = be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID);
4011 cm.remote_id_table = RB_ROOT;
4012 cm.remote_qp_table = RB_ROOT;
4013 cm.remote_sidr_table = RB_ROOT;
4014 idr_init(&cm.local_id_table);
4015 get_random_bytes(&cm.random_id_operand, sizeof cm.random_id_operand);
4016 INIT_LIST_HEAD(&cm.timewait_list);
4018 ret = class_register(&cm_class);
4024 cm.wq = create_workqueue("ib_cm");
4030 ret = ib_register_client(&cm_client);
4036 destroy_workqueue(cm.wq);
4038 class_unregister(&cm_class);
4040 idr_destroy(&cm.local_id_table);
4044 static void __exit ib_cm_cleanup(void)
4046 struct cm_timewait_info *timewait_info, *tmp;
4048 spin_lock_irq(&cm.lock);
4049 list_for_each_entry(timewait_info, &cm.timewait_list, list)
4050 cancel_delayed_work(&timewait_info->work.work);
4051 spin_unlock_irq(&cm.lock);
4053 ib_unregister_client(&cm_client);
4054 destroy_workqueue(cm.wq);
4056 list_for_each_entry_safe(timewait_info, tmp, &cm.timewait_list, list) {
4057 list_del(&timewait_info->list);
4058 kfree(timewait_info);
4061 class_unregister(&cm_class);
4062 idr_destroy(&cm.local_id_table);
4065 module_init(ib_cm_init);
4066 module_exit(ib_cm_cleanup);