/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Voltaire, Inc.  All rights reserved.
 * Copyright (c) 2006 Intel Corporation.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/random.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/kref.h>
#include <linux/idr.h>
#include <linux/workqueue.h>
#include <uapi/linux/if_ether.h>
#include <rdma/ib_pack.h>
#include <rdma/ib_cache.h>
#include <rdma/rdma_netlink.h>
#include <net/netlink.h>
#include <uapi/rdma/ib_user_sa.h>
#include <rdma/ib_marshall.h>
#include <rdma/ib_addr.h>

#include "core_priv.h"

#define IB_SA_LOCAL_SVC_TIMEOUT_MIN		100
#define IB_SA_LOCAL_SVC_TIMEOUT_DEFAULT		2000
#define IB_SA_LOCAL_SVC_TIMEOUT_MAX		200000

static int sa_local_svc_timeout_ms = IB_SA_LOCAL_SVC_TIMEOUT_DEFAULT;
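
/*
 * sa_local_svc_timeout_ms is the per-request deadline, in milliseconds, for
 * the user-space path resolution service; ib_nl_handle_set_timeout() below
 * clamps run-time updates of it to the [MIN, MAX] range defined above.
 */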

struct ib_sa_sm_ah {
	struct ib_ah        *ah;
	struct kref          ref;
	u16		     pkey_index;
	u8		     src_path_mask;
};

struct ib_sa_port {
	struct ib_mad_agent *agent;
	struct ib_sa_sm_ah  *sm_ah;
	struct work_struct   update_task;
	spinlock_t           ah_lock;
	u8                   port_num;
};

struct ib_sa_device {
	int                     start_port, end_port;
	struct ib_event_handler event_handler;
	struct ib_sa_port port[0];
};

struct ib_sa_query {
	void (*callback)(struct ib_sa_query *, int, struct ib_sa_mad *);
	void (*release)(struct ib_sa_query *);
	struct ib_sa_client    *client;
	struct ib_sa_port      *port;
	struct ib_mad_send_buf *mad_buf;
	struct ib_sa_sm_ah     *sm_ah;
	int			id;
	u32			flags;
	struct list_head	list; /* Local svc request list */
	u32			seq; /* Local svc request sequence number */
	unsigned long		timeout; /* Local svc timeout */
	u8			path_use; /* How will the pathrecord be used */
};

#define IB_SA_ENABLE_LOCAL_SERVICE	0x00000001
#define IB_SA_CANCEL			0x00000002
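
/*
 * Query flag semantics, as used below: IB_SA_ENABLE_LOCAL_SERVICE marks a
 * query as eligible for user-space resolution over netlink (send_mad() only
 * tries ib_nl_make_request() when the flag is set and a listener exists);
 * IB_SA_CANCEL marks a queued netlink request so that the timeout routine,
 * rather than the response handler, completes it.
 */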

struct ib_sa_service_query {
	void (*callback)(int, struct ib_sa_service_rec *, void *);
	void *context;
	struct ib_sa_query sa_query;
};

struct ib_sa_path_query {
	void (*callback)(int, struct ib_sa_path_rec *, void *);
	void *context;
	struct ib_sa_query sa_query;
};

struct ib_sa_guidinfo_query {
	void (*callback)(int, struct ib_sa_guidinfo_rec *, void *);
	void *context;
	struct ib_sa_query sa_query;
};

struct ib_sa_mcmember_query {
	void (*callback)(int, struct ib_sa_mcmember_rec *, void *);
	void *context;
	struct ib_sa_query sa_query;
};

static LIST_HEAD(ib_nl_request_list);
static DEFINE_SPINLOCK(ib_nl_request_lock);
static atomic_t ib_nl_sa_request_seq;
static struct workqueue_struct *ib_nl_wq;
static struct delayed_work ib_nl_timed_work;
static const struct nla_policy ib_nl_policy[LS_NLA_TYPE_MAX] = {
	[LS_NLA_TYPE_PATH_RECORD]	= {.type = NLA_BINARY,
					   .len = sizeof(struct ib_path_rec_data)},
	[LS_NLA_TYPE_TIMEOUT]		= {.type = NLA_U32},
	[LS_NLA_TYPE_SERVICE_ID]	= {.type = NLA_U64},
	[LS_NLA_TYPE_DGID]		= {.type = NLA_BINARY,
					   .len = sizeof(struct rdma_nla_ls_gid)},
	[LS_NLA_TYPE_SGID]		= {.type = NLA_BINARY,
					   .len = sizeof(struct rdma_nla_ls_gid)},
	[LS_NLA_TYPE_TCLASS]		= {.type = NLA_U8},
	[LS_NLA_TYPE_PKEY]		= {.type = NLA_U16},
	[LS_NLA_TYPE_QOS_CLASS]		= {.type = NLA_U16},
};

static void ib_sa_add_one(struct ib_device *device);
static void ib_sa_remove_one(struct ib_device *device, void *client_data);

static struct ib_client sa_client = {
	.name   = "sa",
	.add    = ib_sa_add_one,
	.remove = ib_sa_remove_one
};

static DEFINE_SPINLOCK(idr_lock);
static DEFINE_IDR(query_idr);

static DEFINE_SPINLOCK(tid_lock);
static u32 tid;

#define PATH_REC_FIELD(field) \
	.struct_offset_bytes = offsetof(struct ib_sa_path_rec, field),		\
	.struct_size_bytes   = sizeof ((struct ib_sa_path_rec *) 0)->field,	\
	.field_name          = "sa_path_rec:" #field
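
/*
 * The tables below describe the on-the-wire layout of each SA record for
 * ib_pack()/ib_unpack(): every struct ib_field entry pairs a member of the
 * host structure (via the *_FIELD macros) with its big-endian position in
 * the MAD payload.  Illustrative sketch (not from the original file) of a
 * fully written-out entry, using the PathRecord DLID as the example:
 *
 *	{ PATH_REC_FIELD(dlid),
 *	  .offset_words = 10,	// 32-bit word offset into the attribute
 *	  .offset_bits  = 0,	// bit offset within that word
 *	  .size_bits    = 16 },	// field width in bits
 */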

static const struct ib_field path_rec_table[] = {
	{ PATH_REC_FIELD(service_id),	.offset_words = 0,  .offset_bits = 0,  .size_bits = 64 },
	{ PATH_REC_FIELD(dgid),		.offset_words = 2,  .offset_bits = 0,  .size_bits = 128 },
	{ PATH_REC_FIELD(sgid),		.offset_words = 6,  .offset_bits = 0,  .size_bits = 128 },
	{ PATH_REC_FIELD(dlid),		.offset_words = 10, .offset_bits = 0,  .size_bits = 16 },
	{ PATH_REC_FIELD(slid),		.offset_words = 10, .offset_bits = 16, .size_bits = 16 },
	{ PATH_REC_FIELD(raw_traffic),	.offset_words = 11, .offset_bits = 0,  .size_bits = 1 },
	/* 3 reserved bits */
	{ PATH_REC_FIELD(flow_label),	.offset_words = 11, .offset_bits = 4,  .size_bits = 20 },
	{ PATH_REC_FIELD(hop_limit),	.offset_words = 11, .offset_bits = 24, .size_bits = 8 },
	{ PATH_REC_FIELD(traffic_class),.offset_words = 12, .offset_bits = 0,  .size_bits = 8 },
	{ PATH_REC_FIELD(reversible),	.offset_words = 12, .offset_bits = 8,  .size_bits = 1 },
	{ PATH_REC_FIELD(numb_path),	.offset_words = 12, .offset_bits = 9,  .size_bits = 7 },
	{ PATH_REC_FIELD(pkey),		.offset_words = 12, .offset_bits = 16, .size_bits = 16 },
	{ PATH_REC_FIELD(qos_class),	.offset_words = 13, .offset_bits = 0,  .size_bits = 12 },
	{ PATH_REC_FIELD(sl),		.offset_words = 13, .offset_bits = 12, .size_bits = 4 },
	{ PATH_REC_FIELD(mtu_selector),	.offset_words = 13, .offset_bits = 16, .size_bits = 2 },
	{ PATH_REC_FIELD(mtu),		.offset_words = 13, .offset_bits = 18, .size_bits = 6 },
	{ PATH_REC_FIELD(rate_selector),.offset_words = 13, .offset_bits = 24, .size_bits = 2 },
	{ PATH_REC_FIELD(rate),		.offset_words = 13, .offset_bits = 26, .size_bits = 6 },
	{ PATH_REC_FIELD(packet_life_time_selector),
					.offset_words = 14, .offset_bits = 0,  .size_bits = 2 },
	{ PATH_REC_FIELD(packet_life_time),
					.offset_words = 14, .offset_bits = 2,  .size_bits = 6 },
	{ PATH_REC_FIELD(preference),	.offset_words = 14, .offset_bits = 8,  .size_bits = 8 },
};

#define MCMEMBER_REC_FIELD(field) \
	.struct_offset_bytes = offsetof(struct ib_sa_mcmember_rec, field),	\
	.struct_size_bytes   = sizeof ((struct ib_sa_mcmember_rec *) 0)->field,	\
	.field_name          = "sa_mcmember_rec:" #field

static const struct ib_field mcmember_rec_table[] = {
	{ MCMEMBER_REC_FIELD(mgid),	.offset_words = 0,  .offset_bits = 0,  .size_bits = 128 },
	{ MCMEMBER_REC_FIELD(port_gid),	.offset_words = 4,  .offset_bits = 0,  .size_bits = 128 },
	{ MCMEMBER_REC_FIELD(qkey),	.offset_words = 8,  .offset_bits = 0,  .size_bits = 32 },
	{ MCMEMBER_REC_FIELD(mlid),	.offset_words = 9,  .offset_bits = 0,  .size_bits = 16 },
	{ MCMEMBER_REC_FIELD(mtu_selector),
					.offset_words = 9,  .offset_bits = 16, .size_bits = 2 },
	{ MCMEMBER_REC_FIELD(mtu),	.offset_words = 9,  .offset_bits = 18, .size_bits = 6 },
	{ MCMEMBER_REC_FIELD(traffic_class),
					.offset_words = 9,  .offset_bits = 24, .size_bits = 8 },
	{ MCMEMBER_REC_FIELD(pkey),	.offset_words = 10, .offset_bits = 0,  .size_bits = 16 },
	{ MCMEMBER_REC_FIELD(rate_selector),
					.offset_words = 10, .offset_bits = 16, .size_bits = 2 },
	{ MCMEMBER_REC_FIELD(rate),	.offset_words = 10, .offset_bits = 18, .size_bits = 6 },
	{ MCMEMBER_REC_FIELD(packet_life_time_selector),
					.offset_words = 10, .offset_bits = 24, .size_bits = 2 },
	{ MCMEMBER_REC_FIELD(packet_life_time),
					.offset_words = 10, .offset_bits = 26, .size_bits = 6 },
	{ MCMEMBER_REC_FIELD(sl),	.offset_words = 11, .offset_bits = 0,  .size_bits = 4 },
	{ MCMEMBER_REC_FIELD(flow_label),
					.offset_words = 11, .offset_bits = 4,  .size_bits = 20 },
	{ MCMEMBER_REC_FIELD(hop_limit),
					.offset_words = 11, .offset_bits = 24, .size_bits = 8 },
	{ MCMEMBER_REC_FIELD(scope),	.offset_words = 12, .offset_bits = 0,  .size_bits = 4 },
	{ MCMEMBER_REC_FIELD(join_state),
					.offset_words = 12, .offset_bits = 4,  .size_bits = 4 },
	{ MCMEMBER_REC_FIELD(proxy_join),
					.offset_words = 12, .offset_bits = 8,  .size_bits = 1 },
};

#define SERVICE_REC_FIELD(field) \
	.struct_offset_bytes = offsetof(struct ib_sa_service_rec, field),	\
	.struct_size_bytes   = sizeof ((struct ib_sa_service_rec *) 0)->field,	\
	.field_name          = "sa_service_rec:" #field

static const struct ib_field service_rec_table[] = {
	{ SERVICE_REC_FIELD(id),	.offset_words = 0,  .offset_bits = 0, .size_bits = 64 },
	{ SERVICE_REC_FIELD(gid),	.offset_words = 2,  .offset_bits = 0, .size_bits = 128 },
	{ SERVICE_REC_FIELD(pkey),	.offset_words = 6,  .offset_bits = 0, .size_bits = 16 },
	{ SERVICE_REC_FIELD(lease),	.offset_words = 7,  .offset_bits = 0, .size_bits = 32 },
	{ SERVICE_REC_FIELD(key),	.offset_words = 8,  .offset_bits = 0, .size_bits = 128 },
	{ SERVICE_REC_FIELD(name),	.offset_words = 12, .offset_bits = 0, .size_bits = 64*8 },
	{ SERVICE_REC_FIELD(data8),	.offset_words = 28, .offset_bits = 0, .size_bits = 16*8 },
	{ SERVICE_REC_FIELD(data16),	.offset_words = 32, .offset_bits = 0, .size_bits = 8*16 },
	{ SERVICE_REC_FIELD(data32),	.offset_words = 36, .offset_bits = 0, .size_bits = 4*32 },
	{ SERVICE_REC_FIELD(data64),	.offset_words = 40, .offset_bits = 0, .size_bits = 2*64 },
};

#define GUIDINFO_REC_FIELD(field) \
	.struct_offset_bytes = offsetof(struct ib_sa_guidinfo_rec, field),	\
	.struct_size_bytes   = sizeof((struct ib_sa_guidinfo_rec *) 0)->field,	\
	.field_name          = "sa_guidinfo_rec:" #field

static const struct ib_field guidinfo_rec_table[] = {
	{ GUIDINFO_REC_FIELD(lid),	.offset_words = 0, .offset_bits = 0,  .size_bits = 16 },
	{ GUIDINFO_REC_FIELD(block_num),.offset_words = 0, .offset_bits = 16, .size_bits = 8 },
	{ GUIDINFO_REC_FIELD(res1),	.offset_words = 0, .offset_bits = 24, .size_bits = 8 },
	{ GUIDINFO_REC_FIELD(res2),	.offset_words = 1, .offset_bits = 0,  .size_bits = 32 },
	{ GUIDINFO_REC_FIELD(guid_info_list),
					.offset_words = 2, .offset_bits = 0,  .size_bits = 512 },
};

static inline void ib_sa_disable_local_svc(struct ib_sa_query *query)
{
	query->flags &= ~IB_SA_ENABLE_LOCAL_SERVICE;
}

static inline int ib_sa_query_cancelled(struct ib_sa_query *query)
{
	return (query->flags & IB_SA_CANCEL);
}

static void ib_nl_set_path_rec_attrs(struct sk_buff *skb,
				     struct ib_sa_query *query)
{
	struct ib_sa_path_rec *sa_rec = query->mad_buf->context[1];
	struct ib_sa_mad *mad = query->mad_buf->mad;
	ib_sa_comp_mask comp_mask = mad->sa_hdr.comp_mask;
	u16 val16;
	u64 val64;
	struct rdma_ls_resolve_header *header;

	query->mad_buf->context[1] = NULL;

	/* Construct the family header first */
	header = (struct rdma_ls_resolve_header *)
		skb_put(skb, NLMSG_ALIGN(sizeof(*header)));
	memcpy(header->device_name, query->port->agent->device->name,
	       LS_DEVICE_NAME_MAX);
	header->port_num = query->port->port_num;

	if ((comp_mask & IB_SA_PATH_REC_REVERSIBLE) &&
	    sa_rec->reversible != 0)
		query->path_use = LS_RESOLVE_PATH_USE_GMP;
	else
		query->path_use = LS_RESOLVE_PATH_USE_UNIDIRECTIONAL;
	header->path_use = query->path_use;

	/* Now build the attributes */
	if (comp_mask & IB_SA_PATH_REC_SERVICE_ID) {
		val64 = be64_to_cpu(sa_rec->service_id);
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_SERVICE_ID,
			sizeof(val64), &val64);
	}
	if (comp_mask & IB_SA_PATH_REC_DGID)
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_DGID,
			sizeof(sa_rec->dgid), &sa_rec->dgid);
	if (comp_mask & IB_SA_PATH_REC_SGID)
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_SGID,
			sizeof(sa_rec->sgid), &sa_rec->sgid);
	if (comp_mask & IB_SA_PATH_REC_TRAFFIC_CLASS)
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_TCLASS,
			sizeof(sa_rec->traffic_class), &sa_rec->traffic_class);

	if (comp_mask & IB_SA_PATH_REC_PKEY) {
		val16 = be16_to_cpu(sa_rec->pkey);
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_PKEY,
			sizeof(val16), &val16);
	}
	if (comp_mask & IB_SA_PATH_REC_QOS_CLASS) {
		val16 = be16_to_cpu(sa_rec->qos_class);
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_QOS_CLASS,
			sizeof(val16), &val16);
	}
}
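
/*
 * The resulting netlink payload is a struct rdma_ls_resolve_header followed
 * by one attribute per comp_mask bit handled above; every attribute carries
 * RDMA_NLA_F_MANDATORY, signalling that the user-space resolver must
 * understand it rather than silently ignore it.
 */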

static int ib_nl_get_path_rec_attrs_len(ib_sa_comp_mask comp_mask)
{
	int len = 0;

	if (comp_mask & IB_SA_PATH_REC_SERVICE_ID)
		len += nla_total_size(sizeof(u64));
	if (comp_mask & IB_SA_PATH_REC_DGID)
		len += nla_total_size(sizeof(struct rdma_nla_ls_gid));
	if (comp_mask & IB_SA_PATH_REC_SGID)
		len += nla_total_size(sizeof(struct rdma_nla_ls_gid));
	if (comp_mask & IB_SA_PATH_REC_TRAFFIC_CLASS)
		len += nla_total_size(sizeof(u8));
	if (comp_mask & IB_SA_PATH_REC_PKEY)
		len += nla_total_size(sizeof(u16));
	if (comp_mask & IB_SA_PATH_REC_QOS_CLASS)
		len += nla_total_size(sizeof(u16));

	/*
	 * Make sure that at least some of the required comp_mask bits are
	 * set.
	 */
	if (WARN_ON(len == 0))
		return -EINVAL;

	/* Add the family header */
	len += NLMSG_ALIGN(sizeof(struct rdma_ls_resolve_header));

	return len;
}

static int ib_nl_send_msg(struct ib_sa_query *query, gfp_t gfp_mask)
{
	struct sk_buff *skb = NULL;
	struct nlmsghdr *nlh;
	void *data;
	int ret = 0;
	struct ib_sa_mad *mad;
	int len;

	mad = query->mad_buf->mad;
	len = ib_nl_get_path_rec_attrs_len(mad->sa_hdr.comp_mask);
	if (len <= 0)
		return -EMSGSIZE;

	skb = nlmsg_new(len, gfp_mask);
	if (!skb)
		return -ENOMEM;

	/* Put nlmsg header only for now */
	data = ibnl_put_msg(skb, &nlh, query->seq, 0, RDMA_NL_LS,
			    RDMA_NL_LS_OP_RESOLVE, NLM_F_REQUEST);
	if (!data) {
		nlmsg_free(skb);
		return -EMSGSIZE;
	}

	ib_nl_set_path_rec_attrs(skb, query);

	/* Repair the nlmsg header length */
	nlmsg_end(skb, nlh);

	ret = ibnl_multicast(skb, nlh, RDMA_NL_GROUP_LS, gfp_mask);
	if (!ret)
		ret = len;
	else
		ret = 0;

	return ret;
}

static int ib_nl_make_request(struct ib_sa_query *query, gfp_t gfp_mask)
{
	unsigned long flags;
	unsigned long delay;
	int ret;

	INIT_LIST_HEAD(&query->list);
	query->seq = (u32)atomic_inc_return(&ib_nl_sa_request_seq);

	/* Put the request on the list first.*/
	spin_lock_irqsave(&ib_nl_request_lock, flags);
	delay = msecs_to_jiffies(sa_local_svc_timeout_ms);
	query->timeout = delay + jiffies;
	list_add_tail(&query->list, &ib_nl_request_list);
	/* Start the timeout if this is the only request */
	if (ib_nl_request_list.next == &query->list)
		queue_delayed_work(ib_nl_wq, &ib_nl_timed_work, delay);
	spin_unlock_irqrestore(&ib_nl_request_lock, flags);

	ret = ib_nl_send_msg(query, gfp_mask);
	if (ret <= 0) {
		ret = -EIO;
		/* Remove the request */
		spin_lock_irqsave(&ib_nl_request_lock, flags);
		list_del(&query->list);
		spin_unlock_irqrestore(&ib_nl_request_lock, flags);
	} else {
		ret = 0;
	}

	return ret;
}

static int ib_nl_cancel_request(struct ib_sa_query *query)
{
	unsigned long flags;
	struct ib_sa_query *wait_query;
	int found = 0;

	spin_lock_irqsave(&ib_nl_request_lock, flags);
	list_for_each_entry(wait_query, &ib_nl_request_list, list) {
		/* Let the timeout routine take care of the callback */
		if (query == wait_query) {
			query->flags |= IB_SA_CANCEL;
			query->timeout = jiffies;
			list_move(&query->list, &ib_nl_request_list);
			found = 1;
			mod_delayed_work(ib_nl_wq, &ib_nl_timed_work, 1);
			break;
		}
	}
	spin_unlock_irqrestore(&ib_nl_request_lock, flags);

	return found;
}
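
/*
 * Note the cancellation scheme: rather than completing the query here with
 * the request lock held, the entry is stamped IB_SA_CANCEL with an already
 * expired timeout, moved to the head of the list, and the delayed work is
 * kicked, so ib_nl_request_timeout() performs the actual completion in
 * workqueue context.
 */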

static void send_handler(struct ib_mad_agent *agent,
			 struct ib_mad_send_wc *mad_send_wc);

static void ib_nl_process_good_resolve_rsp(struct ib_sa_query *query,
					   const struct nlmsghdr *nlh)
{
	struct ib_mad_send_wc mad_send_wc;
	struct ib_sa_mad *mad = NULL;
	const struct nlattr *head, *curr;
	struct ib_path_rec_data *rec;
	int len, rem;
	u32 mask = 0;
	int status = -EIO;

	if (query->callback) {
		head = (const struct nlattr *) nlmsg_data(nlh);
		len = nlmsg_len(nlh);
		switch (query->path_use) {
		case LS_RESOLVE_PATH_USE_UNIDIRECTIONAL:
			mask = IB_PATH_PRIMARY | IB_PATH_OUTBOUND;
			break;

		case LS_RESOLVE_PATH_USE_ALL:
		case LS_RESOLVE_PATH_USE_GMP:
		default:
			mask = IB_PATH_PRIMARY | IB_PATH_GMP |
				IB_PATH_BIDIRECTIONAL;
			break;
		}
		nla_for_each_attr(curr, head, len, rem) {
			if (curr->nla_type == LS_NLA_TYPE_PATH_RECORD) {
				rec = nla_data(curr);
				/*
				 * Get the first one. In the future, we may
				 * need to get up to 6 pathrecords.
				 */
				if ((rec->flags & mask) == mask) {
					mad = query->mad_buf->mad;
					mad->mad_hdr.method |=
						IB_MGMT_METHOD_RESP;
					memcpy(mad->data, rec->path_rec,
					       sizeof(rec->path_rec));
					status = 0;
					break;
				}
			}
		}
		query->callback(query, status, mad);
	}

	mad_send_wc.send_buf = query->mad_buf;
	mad_send_wc.status = IB_WC_SUCCESS;
	send_handler(query->mad_buf->mad_agent, &mad_send_wc);
}

static void ib_nl_request_timeout(struct work_struct *work)
{
	unsigned long flags;
	struct ib_sa_query *query;
	unsigned long delay;
	struct ib_mad_send_wc mad_send_wc;
	int ret;

	spin_lock_irqsave(&ib_nl_request_lock, flags);
	while (!list_empty(&ib_nl_request_list)) {
		query = list_entry(ib_nl_request_list.next,
				   struct ib_sa_query, list);

		if (time_after(query->timeout, jiffies)) {
			delay = query->timeout - jiffies;
			if ((long)delay <= 0)
				delay = 1;
			queue_delayed_work(ib_nl_wq, &ib_nl_timed_work, delay);
			break;
		}

		list_del(&query->list);
		ib_sa_disable_local_svc(query);
		/* Hold the lock to protect against query cancellation */
		if (ib_sa_query_cancelled(query))
			ret = -1;
		else
			ret = ib_post_send_mad(query->mad_buf, NULL);
		if (ret) {
			mad_send_wc.send_buf = query->mad_buf;
			mad_send_wc.status = IB_WC_WR_FLUSH_ERR;
			spin_unlock_irqrestore(&ib_nl_request_lock, flags);
			send_handler(query->port->agent, &mad_send_wc);
			spin_lock_irqsave(&ib_nl_request_lock, flags);
		}
	}
	spin_unlock_irqrestore(&ib_nl_request_lock, flags);
}
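
/*
 * send_handler() completes the query and may call back into client code, so
 * the request lock is dropped around it above and re-taken before the list
 * is examined again.
 */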

int ib_nl_handle_set_timeout(struct sk_buff *skb,
			     struct netlink_callback *cb)
{
	const struct nlmsghdr *nlh = (struct nlmsghdr *)cb->nlh;
	int timeout, delta, abs_delta;
	const struct nlattr *attr;
	unsigned long flags;
	struct ib_sa_query *query;
	long delay = 0;
	struct nlattr *tb[LS_NLA_TYPE_MAX];
	int ret;

	if (!(nlh->nlmsg_flags & NLM_F_REQUEST) ||
	    !(NETLINK_CB(skb).sk) ||
	    !netlink_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	ret = nla_parse(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh),
			nlmsg_len(nlh), ib_nl_policy);
	attr = (const struct nlattr *)tb[LS_NLA_TYPE_TIMEOUT];
	if (ret || !attr)
		goto settimeout_out;

	timeout = *(int *) nla_data(attr);
	if (timeout < IB_SA_LOCAL_SVC_TIMEOUT_MIN)
		timeout = IB_SA_LOCAL_SVC_TIMEOUT_MIN;
	if (timeout > IB_SA_LOCAL_SVC_TIMEOUT_MAX)
		timeout = IB_SA_LOCAL_SVC_TIMEOUT_MAX;

	delta = timeout - sa_local_svc_timeout_ms;
	if (delta < 0)
		abs_delta = -delta;
	else
		abs_delta = delta;

	if (delta != 0) {
		spin_lock_irqsave(&ib_nl_request_lock, flags);
		sa_local_svc_timeout_ms = timeout;
		list_for_each_entry(query, &ib_nl_request_list, list) {
			if (delta < 0 && abs_delta > query->timeout)
				query->timeout = 0;
			else
				query->timeout += delta;

			/* Get the new delay from the first entry */
			if (!delay) {
				delay = query->timeout - jiffies;
				if (delay <= 0)
					delay = 1;
			}
		}
		if (delay)
			mod_delayed_work(ib_nl_wq, &ib_nl_timed_work,
					 (unsigned long)delay);
		spin_unlock_irqrestore(&ib_nl_request_lock, flags);
	}

settimeout_out:
	return skb->len;
}
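
/*
 * ib_nl_handle_set_timeout() serves RDMA_NL_LS_OP_SET_TIMEOUT requests from
 * a privileged (CAP_NET_ADMIN) netlink client: the LS_NLA_TYPE_TIMEOUT
 * attribute replaces sa_local_svc_timeout_ms, and the deadlines of requests
 * already queued are shifted by the same delta.
 */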

static inline int ib_nl_is_good_resolve_resp(const struct nlmsghdr *nlh)
{
	struct nlattr *tb[LS_NLA_TYPE_MAX];
	int ret;

	if (nlh->nlmsg_flags & RDMA_NL_LS_F_ERR)
		return 0;

	ret = nla_parse(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh),
			nlmsg_len(nlh), ib_nl_policy);
	if (ret)
		return 0;

	return 1;
}

int ib_nl_handle_resolve_resp(struct sk_buff *skb,
			      struct netlink_callback *cb)
{
	const struct nlmsghdr *nlh = (struct nlmsghdr *)cb->nlh;
	unsigned long flags;
	struct ib_sa_query *query;
	struct ib_mad_send_buf *send_buf;
	struct ib_mad_send_wc mad_send_wc;
	int found = 0;
	int ret;

	if ((nlh->nlmsg_flags & NLM_F_REQUEST) ||
	    !(NETLINK_CB(skb).sk) ||
	    !netlink_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	spin_lock_irqsave(&ib_nl_request_lock, flags);
	list_for_each_entry(query, &ib_nl_request_list, list) {
		/*
		 * If the query is cancelled, let the timeout routine
		 * take care of it.
		 */
		if (nlh->nlmsg_seq == query->seq) {
			found = !ib_sa_query_cancelled(query);
			if (found)
				list_del(&query->list);
			break;
		}
	}

	if (!found) {
		spin_unlock_irqrestore(&ib_nl_request_lock, flags);
		goto resp_out;
	}

	send_buf = query->mad_buf;

	if (!ib_nl_is_good_resolve_resp(nlh)) {
		/* if the result is a failure, send out the packet via IB */
		ib_sa_disable_local_svc(query);
		ret = ib_post_send_mad(query->mad_buf, NULL);
		spin_unlock_irqrestore(&ib_nl_request_lock, flags);
		if (ret) {
			mad_send_wc.send_buf = send_buf;
			mad_send_wc.status = IB_WC_GENERAL_ERR;
			send_handler(query->port->agent, &mad_send_wc);
		}
	} else {
		spin_unlock_irqrestore(&ib_nl_request_lock, flags);
		ib_nl_process_good_resolve_rsp(query, nlh);
	}

resp_out:
	return skb->len;
}

static void free_sm_ah(struct kref *kref)
{
	struct ib_sa_sm_ah *sm_ah = container_of(kref, struct ib_sa_sm_ah, ref);

	ib_destroy_ah(sm_ah->ah);
	kfree(sm_ah);
}

static void update_sm_ah(struct work_struct *work)
{
	struct ib_sa_port *port =
		container_of(work, struct ib_sa_port, update_task);
	struct ib_sa_sm_ah *new_ah;
	struct ib_port_attr port_attr;
	struct ib_ah_attr   ah_attr;

	if (ib_query_port(port->agent->device, port->port_num, &port_attr)) {
		pr_warn("Couldn't query port\n");
		return;
	}

	new_ah = kmalloc(sizeof *new_ah, GFP_KERNEL);
	if (!new_ah)
		return;

	kref_init(&new_ah->ref);
	new_ah->src_path_mask = (1 << port_attr.lmc) - 1;

	new_ah->pkey_index = 0;
	if (ib_find_pkey(port->agent->device, port->port_num,
			 IB_DEFAULT_PKEY_FULL, &new_ah->pkey_index))
		pr_err("Couldn't find index for default PKey\n");

	memset(&ah_attr, 0, sizeof ah_attr);
	ah_attr.dlid     = port_attr.sm_lid;
	ah_attr.sl       = port_attr.sm_sl;
	ah_attr.port_num = port->port_num;
	if (port_attr.grh_required) {
		ah_attr.ah_flags = IB_AH_GRH;
		ah_attr.grh.dgid.global.subnet_prefix = cpu_to_be64(port_attr.subnet_prefix);
		ah_attr.grh.dgid.global.interface_id = cpu_to_be64(IB_SA_WELL_KNOWN_GUID);
	}

	new_ah->ah = ib_create_ah(port->agent->qp->pd, &ah_attr);
	if (IS_ERR(new_ah->ah)) {
		pr_warn("Couldn't create new SM AH\n");
		kfree(new_ah);
		return;
	}

	spin_lock_irq(&port->ah_lock);
	if (port->sm_ah)
		kref_put(&port->sm_ah->ref, free_sm_ah);
	port->sm_ah = new_ah;
	spin_unlock_irq(&port->ah_lock);
}

static void ib_sa_event(struct ib_event_handler *handler, struct ib_event *event)
{
	if (event->event == IB_EVENT_PORT_ERR    ||
	    event->event == IB_EVENT_PORT_ACTIVE ||
	    event->event == IB_EVENT_LID_CHANGE  ||
	    event->event == IB_EVENT_PKEY_CHANGE ||
	    event->event == IB_EVENT_SM_CHANGE   ||
	    event->event == IB_EVENT_CLIENT_REREGISTER) {
		unsigned long flags;
		struct ib_sa_device *sa_dev =
			container_of(handler, typeof(*sa_dev), event_handler);
		struct ib_sa_port *port =
			&sa_dev->port[event->element.port_num - sa_dev->start_port];

		if (!rdma_cap_ib_sa(handler->device, port->port_num))
			return;

		spin_lock_irqsave(&port->ah_lock, flags);
		if (port->sm_ah)
			kref_put(&port->sm_ah->ref, free_sm_ah);
		port->sm_ah = NULL;
		spin_unlock_irqrestore(&port->ah_lock, flags);

		queue_work(ib_wq, &sa_dev->port[event->element.port_num -
						sa_dev->start_port].update_task);
	}
}

void ib_sa_register_client(struct ib_sa_client *client)
{
	atomic_set(&client->users, 1);
	init_completion(&client->comp);
}
EXPORT_SYMBOL(ib_sa_register_client);

void ib_sa_unregister_client(struct ib_sa_client *client)
{
	ib_sa_client_put(client);
	wait_for_completion(&client->comp);
}
EXPORT_SYMBOL(ib_sa_unregister_client);
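
/*
 * Client lifetime protocol: ib_sa_register_client() starts the reference
 * count at one, each outstanding query takes a reference through
 * ib_sa_client_get(), and ib_sa_unregister_client() drops the initial
 * reference and then blocks on the completion until the last query's
 * ib_sa_client_put() signals it.
 */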

/**
 * ib_sa_cancel_query - try to cancel an SA query
 * @id:ID of query to cancel
 * @query:query pointer to cancel
 *
 * Try to cancel an SA query.  If the id and query don't match up or
 * the query has already completed, nothing is done.  Otherwise the
 * query is canceled and will complete with a status of -EINTR.
 */
void ib_sa_cancel_query(int id, struct ib_sa_query *query)
{
	unsigned long flags;
	struct ib_mad_agent *agent;
	struct ib_mad_send_buf *mad_buf;

	spin_lock_irqsave(&idr_lock, flags);
	if (idr_find(&query_idr, id) != query) {
		spin_unlock_irqrestore(&idr_lock, flags);
		return;
	}
	agent = query->port->agent;
	mad_buf = query->mad_buf;
	spin_unlock_irqrestore(&idr_lock, flags);

	/*
	 * If the query is still on the netlink request list, schedule
	 * it to be cancelled by the timeout routine. Otherwise, it has been
	 * sent to the MAD layer and has to be cancelled from there.
	 */
	if (!ib_nl_cancel_request(query))
		ib_cancel_mad(agent, mad_buf);
}
EXPORT_SYMBOL(ib_sa_cancel_query);
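
/*
 * Illustrative sketch (not part of this file): a caller that gives up on a
 * pending query cancels it with the ID returned at submission time, and the
 * query then completes through its callback with -EINTR:
 *
 *	ib_sa_cancel_query(id, query);
 */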

static u8 get_src_path_mask(struct ib_device *device, u8 port_num)
{
	struct ib_sa_device *sa_dev;
	struct ib_sa_port   *port;
	unsigned long flags;
	u8 src_path_mask;

	sa_dev = ib_get_client_data(device, &sa_client);
	if (!sa_dev)
		return 0x7f;

	port  = &sa_dev->port[port_num - sa_dev->start_port];
	spin_lock_irqsave(&port->ah_lock, flags);
	src_path_mask = port->sm_ah ? port->sm_ah->src_path_mask : 0x7f;
	spin_unlock_irqrestore(&port->ah_lock, flags);

	return src_path_mask;
}

int ib_init_ah_from_path(struct ib_device *device, u8 port_num,
			 struct ib_sa_path_rec *rec, struct ib_ah_attr *ah_attr)
{
	int ret;
	u16 gid_index;
	int use_roce;
	struct net_device *ndev = NULL;

	memset(ah_attr, 0, sizeof *ah_attr);
	ah_attr->dlid = be16_to_cpu(rec->dlid);
	ah_attr->sl = rec->sl;
	ah_attr->src_path_bits = be16_to_cpu(rec->slid) &
				 get_src_path_mask(device, port_num);
	ah_attr->port_num = port_num;
	ah_attr->static_rate = rec->rate;

	use_roce = rdma_cap_eth_ah(device, port_num);

	if (use_roce) {
		struct net_device *idev;
		struct net_device *resolved_dev;
		struct rdma_dev_addr dev_addr = {.bound_dev_if = rec->ifindex,
						 .net = rec->net ? rec->net :
							&init_net};
		union {
			struct sockaddr     _sockaddr;
			struct sockaddr_in  _sockaddr_in;
			struct sockaddr_in6 _sockaddr_in6;
		} sgid_addr, dgid_addr;

		if (!device->get_netdev)
			return -EOPNOTSUPP;

		rdma_gid2ip(&sgid_addr._sockaddr, &rec->sgid);
		rdma_gid2ip(&dgid_addr._sockaddr, &rec->dgid);

		/* validate the route */
		ret = rdma_resolve_ip_route(&sgid_addr._sockaddr,
					    &dgid_addr._sockaddr, &dev_addr);
		if (ret)
			return ret;

		if ((dev_addr.network == RDMA_NETWORK_IPV4 ||
		     dev_addr.network == RDMA_NETWORK_IPV6) &&
		    rec->gid_type != IB_GID_TYPE_ROCE_UDP_ENCAP)
			return -EINVAL;

		idev = device->get_netdev(device, port_num);
		if (!idev)
			return -ENODEV;

		resolved_dev = dev_get_by_index(dev_addr.net,
						dev_addr.bound_dev_if);
		if (resolved_dev->flags & IFF_LOOPBACK) {
			dev_put(resolved_dev);
			resolved_dev = idev;
			dev_hold(resolved_dev);
		}
		ndev = ib_get_ndev_from_path(rec);
		rcu_read_lock();
		if ((ndev && ndev != resolved_dev) ||
		    (resolved_dev != idev &&
		     !rdma_is_upper_dev_rcu(idev, resolved_dev)))
			ret = -EHOSTUNREACH;
		rcu_read_unlock();
		dev_put(idev);
		dev_put(resolved_dev);
		if (ret) {
			if (ndev)
				dev_put(ndev);
			return ret;
		}
	}

	if (rec->hop_limit > 0 || use_roce) {
		ah_attr->ah_flags = IB_AH_GRH;
		ah_attr->grh.dgid = rec->dgid;

		ret = ib_find_cached_gid_by_port(device, &rec->sgid,
						 rec->gid_type, port_num, ndev,
						 &gid_index);
		if (ret) {
			if (ndev)
				dev_put(ndev);
			return ret;
		}

		ah_attr->grh.sgid_index    = gid_index;
		ah_attr->grh.flow_label    = be32_to_cpu(rec->flow_label);
		ah_attr->grh.hop_limit     = rec->hop_limit;
		ah_attr->grh.traffic_class = rec->traffic_class;
		if (ndev)
			dev_put(ndev);
	}

	if (use_roce)
		memcpy(ah_attr->dmac, rec->dmac, ETH_ALEN);

	return 0;
}
EXPORT_SYMBOL(ib_init_ah_from_path);

static int alloc_mad(struct ib_sa_query *query, gfp_t gfp_mask)
{
	unsigned long flags;

	spin_lock_irqsave(&query->port->ah_lock, flags);
	if (!query->port->sm_ah) {
		spin_unlock_irqrestore(&query->port->ah_lock, flags);
		return -EAGAIN;
	}
	kref_get(&query->port->sm_ah->ref);
	query->sm_ah = query->port->sm_ah;
	spin_unlock_irqrestore(&query->port->ah_lock, flags);

	query->mad_buf = ib_create_send_mad(query->port->agent, 1,
					    query->sm_ah->pkey_index,
					    0, IB_MGMT_SA_HDR, IB_MGMT_SA_DATA,
					    gfp_mask,
					    IB_MGMT_BASE_VERSION);
	if (IS_ERR(query->mad_buf)) {
		kref_put(&query->sm_ah->ref, free_sm_ah);
		return -ENOMEM;
	}

	query->mad_buf->ah = query->sm_ah->ah;

	return 0;
}

static void free_mad(struct ib_sa_query *query)
{
	ib_free_send_mad(query->mad_buf);
	kref_put(&query->sm_ah->ref, free_sm_ah);
}

static void init_mad(struct ib_sa_mad *mad, struct ib_mad_agent *agent)
{
	unsigned long flags;

	memset(mad, 0, sizeof *mad);

	mad->mad_hdr.base_version  = IB_MGMT_BASE_VERSION;
	mad->mad_hdr.mgmt_class    = IB_MGMT_CLASS_SUBN_ADM;
	mad->mad_hdr.class_version = IB_SA_CLASS_VERSION;

	spin_lock_irqsave(&tid_lock, flags);
	mad->mad_hdr.tid           =
		cpu_to_be64(((u64) agent->hi_tid) << 32 | tid++);
	spin_unlock_irqrestore(&tid_lock, flags);
}
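
/*
 * Transaction IDs combine the MAD agent's hi_tid in the upper 32 bits with
 * a global counter in the lower 32, so IDs stay unique across agents while
 * tid_lock serializes the counter increment.
 */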

static int send_mad(struct ib_sa_query *query, int timeout_ms, gfp_t gfp_mask)
{
	bool preload = gfpflags_allow_blocking(gfp_mask);
	unsigned long flags;
	int ret, id;

	if (preload)
		idr_preload(gfp_mask);
	spin_lock_irqsave(&idr_lock, flags);

	id = idr_alloc(&query_idr, query, 0, 0, GFP_NOWAIT);

	spin_unlock_irqrestore(&idr_lock, flags);
	if (preload)
		idr_preload_end();
	if (id < 0)
		return id;

	query->mad_buf->timeout_ms  = timeout_ms;
	query->mad_buf->context[0] = query;
	query->id = id;

	if (query->flags & IB_SA_ENABLE_LOCAL_SERVICE) {
		if (!ibnl_chk_listeners(RDMA_NL_GROUP_LS)) {
			if (!ib_nl_make_request(query, gfp_mask))
				return id;
		}
		ib_sa_disable_local_svc(query);
	}

	ret = ib_post_send_mad(query->mad_buf, NULL);
	if (ret) {
		spin_lock_irqsave(&idr_lock, flags);
		idr_remove(&query_idr, id);
		spin_unlock_irqrestore(&idr_lock, flags);
	}

	/*
	 * It's not safe to dereference query any more, because the
	 * send may already have completed and freed the query in
	 * another context.
	 */
	return ret ? ret : id;
}

void ib_sa_unpack_path(void *attribute, struct ib_sa_path_rec *rec)
{
	ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table), attribute, rec);
}
EXPORT_SYMBOL(ib_sa_unpack_path);

void ib_sa_pack_path(struct ib_sa_path_rec *rec, void *attribute)
{
	ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table), rec, attribute);
}
EXPORT_SYMBOL(ib_sa_pack_path);

static void ib_sa_path_rec_callback(struct ib_sa_query *sa_query,
				    int status,
				    struct ib_sa_mad *mad)
{
	struct ib_sa_path_query *query =
		container_of(sa_query, struct ib_sa_path_query, sa_query);

	if (mad) {
		struct ib_sa_path_rec rec;

		ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table),
			  mad->data, &rec);
		rec.net = NULL;
		rec.ifindex = 0;
		rec.gid_type = IB_GID_TYPE_IB;
		eth_zero_addr(rec.dmac);
		query->callback(status, &rec, query->context);
	} else
		query->callback(status, NULL, query->context);
}

static void ib_sa_path_rec_release(struct ib_sa_query *sa_query)
{
	kfree(container_of(sa_query, struct ib_sa_path_query, sa_query));
}

/**
 * ib_sa_path_rec_get - Start a Path get query
 * @client:SA client
 * @device:device to send query on
 * @port_num: port number to send query on
 * @rec:Path Record to send in query
 * @comp_mask:component mask to send in query
 * @timeout_ms:time to wait for response
 * @gfp_mask:GFP mask to use for internal allocations
 * @callback:function called when query completes, times out or is
 * canceled
 * @context:opaque user context passed to callback
 * @sa_query:query context, used to cancel query
 *
 * Send a Path Record Get query to the SA to look up a path.  The
 * callback function will be called when the query completes (or
 * fails); status is 0 for a successful response, -EINTR if the query
 * is canceled, -ETIMEDOUT if the query timed out, or -EIO if an error
 * occurred sending the query.  The resp parameter of the callback is
 * only valid if status is 0.
 *
 * If the return value of ib_sa_path_rec_get() is negative, it is an
 * error code.  Otherwise it is a query ID that can be used to cancel
 * the query.
 */
int ib_sa_path_rec_get(struct ib_sa_client *client,
		       struct ib_device *device, u8 port_num,
		       struct ib_sa_path_rec *rec,
		       ib_sa_comp_mask comp_mask,
		       int timeout_ms, gfp_t gfp_mask,
		       void (*callback)(int status,
					struct ib_sa_path_rec *resp,
					void *context),
		       void *context,
		       struct ib_sa_query **sa_query)
{
	struct ib_sa_path_query *query;
	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
	struct ib_sa_port   *port;
	struct ib_mad_agent *agent;
	struct ib_sa_mad *mad;
	int ret;

	if (!sa_dev)
		return -ENODEV;

	port  = &sa_dev->port[port_num - sa_dev->start_port];
	agent = port->agent;

	query = kzalloc(sizeof(*query), gfp_mask);
	if (!query)
		return -ENOMEM;

	query->sa_query.port     = port;
	ret = alloc_mad(&query->sa_query, gfp_mask);
	if (ret)
		goto err1;

	ib_sa_client_get(client);
	query->sa_query.client = client;
	query->callback        = callback;
	query->context         = context;

	mad = query->sa_query.mad_buf->mad;
	init_mad(mad, agent);

	query->sa_query.callback = callback ? ib_sa_path_rec_callback : NULL;
	query->sa_query.release  = ib_sa_path_rec_release;
	mad->mad_hdr.method	 = IB_MGMT_METHOD_GET;
	mad->mad_hdr.attr_id	 = cpu_to_be16(IB_SA_ATTR_PATH_REC);
	mad->sa_hdr.comp_mask	 = comp_mask;

	ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table), rec, mad->data);

	*sa_query = &query->sa_query;

	query->sa_query.flags |= IB_SA_ENABLE_LOCAL_SERVICE;
	query->sa_query.mad_buf->context[1] = rec;

	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
	if (ret < 0)
		goto err2;

	return ret;

err2:
	*sa_query = NULL;
	ib_sa_client_put(query->sa_query.client);
	free_mad(&query->sa_query);

err1:
	kfree(query);
	return ret;
}
EXPORT_SYMBOL(ib_sa_path_rec_get);
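
/*
 * Example usage (illustrative sketch only; "my_sa_client" and
 * "my_path_handler" are hypothetical caller-side names, and the client is
 * assumed to have been set up with ib_sa_register_client()):
 *
 *	static void my_path_handler(int status, struct ib_sa_path_rec *resp,
 *				    void *context)
 *	{
 *		if (!status)
 *			pr_info("path resolved, dlid 0x%x\n",
 *				be16_to_cpu(resp->dlid));
 *	}
 *
 *	struct ib_sa_path_rec rec = { .sgid = sgid, .dgid = dgid,
 *				      .numb_path = 1 };
 *	struct ib_sa_query *query;
 *	int id = ib_sa_path_rec_get(&my_sa_client, device, port_num, &rec,
 *				    IB_SA_PATH_REC_SGID | IB_SA_PATH_REC_DGID |
 *				    IB_SA_PATH_REC_NUMB_PATH,
 *				    1000, GFP_KERNEL, my_path_handler, NULL,
 *				    &query);
 *	if (id < 0)
 *		pr_err("path query failed: %d\n", id);
 */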

static void ib_sa_service_rec_callback(struct ib_sa_query *sa_query,
				       int status,
				       struct ib_sa_mad *mad)
{
	struct ib_sa_service_query *query =
		container_of(sa_query, struct ib_sa_service_query, sa_query);

	if (mad) {
		struct ib_sa_service_rec rec;

		ib_unpack(service_rec_table, ARRAY_SIZE(service_rec_table),
			  mad->data, &rec);
		query->callback(status, &rec, query->context);
	} else
		query->callback(status, NULL, query->context);
}

static void ib_sa_service_rec_release(struct ib_sa_query *sa_query)
{
	kfree(container_of(sa_query, struct ib_sa_service_query, sa_query));
}

/**
 * ib_sa_service_rec_query - Start Service Record operation
 * @client:SA client
 * @device:device to send request on
 * @port_num: port number to send request on
 * @method:SA method - should be get, set, or delete
 * @rec:Service Record to send in request
 * @comp_mask:component mask to send in request
 * @timeout_ms:time to wait for response
 * @gfp_mask:GFP mask to use for internal allocations
 * @callback:function called when request completes, times out or is
 * canceled
 * @context:opaque user context passed to callback
 * @sa_query:request context, used to cancel request
 *
 * Send a Service Record set/get/delete to the SA to register,
 * unregister or query a service record.
 * The callback function will be called when the request completes (or
 * fails); status is 0 for a successful response, -EINTR if the query
 * is canceled, -ETIMEDOUT if the query timed out, or -EIO if an error
 * occurred sending the query.  The resp parameter of the callback is
 * only valid if status is 0.
 *
 * If the return value of ib_sa_service_rec_query() is negative, it is an
 * error code.  Otherwise it is a request ID that can be used to cancel
 * the request.
 */
int ib_sa_service_rec_query(struct ib_sa_client *client,
			    struct ib_device *device, u8 port_num, u8 method,
			    struct ib_sa_service_rec *rec,
			    ib_sa_comp_mask comp_mask,
			    int timeout_ms, gfp_t gfp_mask,
			    void (*callback)(int status,
					     struct ib_sa_service_rec *resp,
					     void *context),
			    void *context,
			    struct ib_sa_query **sa_query)
{
	struct ib_sa_service_query *query;
	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
	struct ib_sa_port   *port;
	struct ib_mad_agent *agent;
	struct ib_sa_mad *mad;
	int ret;

	if (!sa_dev)
		return -ENODEV;

	port  = &sa_dev->port[port_num - sa_dev->start_port];
	agent = port->agent;

	if (method != IB_MGMT_METHOD_GET &&
	    method != IB_MGMT_METHOD_SET &&
	    method != IB_SA_METHOD_DELETE)
		return -EINVAL;

	query = kzalloc(sizeof(*query), gfp_mask);
	if (!query)
		return -ENOMEM;

	query->sa_query.port     = port;
	ret = alloc_mad(&query->sa_query, gfp_mask);
	if (ret)
		goto err1;

	ib_sa_client_get(client);
	query->sa_query.client = client;
	query->callback        = callback;
	query->context         = context;

	mad = query->sa_query.mad_buf->mad;
	init_mad(mad, agent);

	query->sa_query.callback = callback ? ib_sa_service_rec_callback : NULL;
	query->sa_query.release  = ib_sa_service_rec_release;
	mad->mad_hdr.method	 = method;
	mad->mad_hdr.attr_id	 = cpu_to_be16(IB_SA_ATTR_SERVICE_REC);
	mad->sa_hdr.comp_mask	 = comp_mask;

	ib_pack(service_rec_table, ARRAY_SIZE(service_rec_table),
		rec, mad->data);

	*sa_query = &query->sa_query;

	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
	if (ret < 0)
		goto err2;

	return ret;

err2:
	*sa_query = NULL;
	ib_sa_client_put(query->sa_query.client);
	free_mad(&query->sa_query);

err1:
	kfree(query);
	return ret;
}
EXPORT_SYMBOL(ib_sa_service_rec_query);

static void ib_sa_mcmember_rec_callback(struct ib_sa_query *sa_query,
					int status,
					struct ib_sa_mad *mad)
{
	struct ib_sa_mcmember_query *query =
		container_of(sa_query, struct ib_sa_mcmember_query, sa_query);

	if (mad) {
		struct ib_sa_mcmember_rec rec;

		ib_unpack(mcmember_rec_table, ARRAY_SIZE(mcmember_rec_table),
			  mad->data, &rec);
		query->callback(status, &rec, query->context);
	} else
		query->callback(status, NULL, query->context);
}

static void ib_sa_mcmember_rec_release(struct ib_sa_query *sa_query)
{
	kfree(container_of(sa_query, struct ib_sa_mcmember_query, sa_query));
}

int ib_sa_mcmember_rec_query(struct ib_sa_client *client,
			     struct ib_device *device, u8 port_num,
			     u8 method,
			     struct ib_sa_mcmember_rec *rec,
			     ib_sa_comp_mask comp_mask,
			     int timeout_ms, gfp_t gfp_mask,
			     void (*callback)(int status,
					      struct ib_sa_mcmember_rec *resp,
					      void *context),
			     void *context,
			     struct ib_sa_query **sa_query)
{
	struct ib_sa_mcmember_query *query;
	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
	struct ib_sa_port   *port;
	struct ib_mad_agent *agent;
	struct ib_sa_mad *mad;
	int ret;

	if (!sa_dev)
		return -ENODEV;

	port  = &sa_dev->port[port_num - sa_dev->start_port];
	agent = port->agent;

	query = kzalloc(sizeof(*query), gfp_mask);
	if (!query)
		return -ENOMEM;

	query->sa_query.port     = port;
	ret = alloc_mad(&query->sa_query, gfp_mask);
	if (ret)
		goto err1;

	ib_sa_client_get(client);
	query->sa_query.client = client;
	query->callback        = callback;
	query->context         = context;

	mad = query->sa_query.mad_buf->mad;
	init_mad(mad, agent);

	query->sa_query.callback = callback ? ib_sa_mcmember_rec_callback : NULL;
	query->sa_query.release  = ib_sa_mcmember_rec_release;
	mad->mad_hdr.method	 = method;
	mad->mad_hdr.attr_id	 = cpu_to_be16(IB_SA_ATTR_MC_MEMBER_REC);
	mad->sa_hdr.comp_mask	 = comp_mask;

	ib_pack(mcmember_rec_table, ARRAY_SIZE(mcmember_rec_table),
		rec, mad->data);

	*sa_query = &query->sa_query;

	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
	if (ret < 0)
		goto err2;

	return ret;

err2:
	*sa_query = NULL;
	ib_sa_client_put(query->sa_query.client);
	free_mad(&query->sa_query);

err1:
	kfree(query);
	return ret;
}

/* Support GuidInfoRecord */
static void ib_sa_guidinfo_rec_callback(struct ib_sa_query *sa_query,
					int status,
					struct ib_sa_mad *mad)
{
	struct ib_sa_guidinfo_query *query =
		container_of(sa_query, struct ib_sa_guidinfo_query, sa_query);

	if (mad) {
		struct ib_sa_guidinfo_rec rec;

		ib_unpack(guidinfo_rec_table, ARRAY_SIZE(guidinfo_rec_table),
			  mad->data, &rec);
		query->callback(status, &rec, query->context);
	} else
		query->callback(status, NULL, query->context);
}

static void ib_sa_guidinfo_rec_release(struct ib_sa_query *sa_query)
{
	kfree(container_of(sa_query, struct ib_sa_guidinfo_query, sa_query));
}

int ib_sa_guid_info_rec_query(struct ib_sa_client *client,
			      struct ib_device *device, u8 port_num,
			      struct ib_sa_guidinfo_rec *rec,
			      ib_sa_comp_mask comp_mask, u8 method,
			      int timeout_ms, gfp_t gfp_mask,
			      void (*callback)(int status,
					       struct ib_sa_guidinfo_rec *resp,
					       void *context),
			      void *context,
			      struct ib_sa_query **sa_query)
{
	struct ib_sa_guidinfo_query *query;
	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
	struct ib_sa_port *port;
	struct ib_mad_agent *agent;
	struct ib_sa_mad *mad;
	int ret;

	if (!sa_dev)
		return -ENODEV;

	/* Limit GUID rec operations to get/set/delete */
	if (method != IB_MGMT_METHOD_GET &&
	    method != IB_MGMT_METHOD_SET &&
	    method != IB_SA_METHOD_DELETE) {
		return -EINVAL;
	}

	port  = &sa_dev->port[port_num - sa_dev->start_port];
	agent = port->agent;

	query = kzalloc(sizeof(*query), gfp_mask);
	if (!query)
		return -ENOMEM;

	query->sa_query.port = port;
	ret = alloc_mad(&query->sa_query, gfp_mask);
	if (ret)
		goto err1;

	ib_sa_client_get(client);
	query->sa_query.client = client;
	query->callback        = callback;
	query->context         = context;

	mad = query->sa_query.mad_buf->mad;
	init_mad(mad, agent);

	query->sa_query.callback = callback ? ib_sa_guidinfo_rec_callback : NULL;
	query->sa_query.release  = ib_sa_guidinfo_rec_release;

	mad->mad_hdr.method	 = method;
	mad->mad_hdr.attr_id	 = cpu_to_be16(IB_SA_ATTR_GUID_INFO_REC);
	mad->sa_hdr.comp_mask	 = comp_mask;

	ib_pack(guidinfo_rec_table, ARRAY_SIZE(guidinfo_rec_table), rec,
		mad->data);

	*sa_query = &query->sa_query;

	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
	if (ret < 0)
		goto err2;

	return ret;

err2:
	*sa_query = NULL;
	ib_sa_client_put(query->sa_query.client);
	free_mad(&query->sa_query);

err1:
	kfree(query);
	return ret;
}
EXPORT_SYMBOL(ib_sa_guid_info_rec_query);

static void send_handler(struct ib_mad_agent *agent,
			 struct ib_mad_send_wc *mad_send_wc)
{
	struct ib_sa_query *query = mad_send_wc->send_buf->context[0];
	unsigned long flags;

	if (query->callback)
		switch (mad_send_wc->status) {
		case IB_WC_SUCCESS:
			/* No callback -- already got recv */
			break;
		case IB_WC_RESP_TIMEOUT_ERR:
			query->callback(query, -ETIMEDOUT, NULL);
			break;
		case IB_WC_WR_FLUSH_ERR:
			query->callback(query, -EINTR, NULL);
			break;
		default:
			query->callback(query, -EIO, NULL);
			break;
		}

	spin_lock_irqsave(&idr_lock, flags);
	idr_remove(&query_idr, query->id);
	spin_unlock_irqrestore(&idr_lock, flags);

	free_mad(query);
	ib_sa_client_put(query->client);
	query->release(query);
}
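
/*
 * send_handler() runs exactly once for every query, whether the MAD
 * completed, timed out or was flushed (the netlink paths above synthesize a
 * struct ib_mad_send_wc to funnel through it too), which makes it the single
 * point where the idr entry, the MAD buffer, the client reference and the
 * query itself are released.
 */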

static void recv_handler(struct ib_mad_agent *mad_agent,
			 struct ib_mad_send_buf *send_buf,
			 struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_sa_query *query;

	if (!send_buf)
		return;

	query = send_buf->context[0];
	if (query->callback) {
		if (mad_recv_wc->wc->status == IB_WC_SUCCESS)
			query->callback(query,
					mad_recv_wc->recv_buf.mad->mad_hdr.status ?
					-EINVAL : 0,
					(struct ib_sa_mad *) mad_recv_wc->recv_buf.mad);
		else
			query->callback(query, -EIO, NULL);
	}

	ib_free_recv_mad(mad_recv_wc);
}

static void ib_sa_add_one(struct ib_device *device)
{
	struct ib_sa_device *sa_dev;
	int s, e, i;
	int count = 0;

	s = rdma_start_port(device);
	e = rdma_end_port(device);

	sa_dev = kzalloc(sizeof *sa_dev +
			 (e - s + 1) * sizeof (struct ib_sa_port),
			 GFP_KERNEL);
	if (!sa_dev)
		return;

	sa_dev->start_port = s;
	sa_dev->end_port   = e;

	for (i = 0; i <= e - s; ++i) {
		spin_lock_init(&sa_dev->port[i].ah_lock);
		if (!rdma_cap_ib_sa(device, i + 1))
			continue;

		sa_dev->port[i].sm_ah    = NULL;
		sa_dev->port[i].port_num = i + s;

		sa_dev->port[i].agent =
			ib_register_mad_agent(device, i + s, IB_QPT_GSI,
					      NULL, 0, send_handler,
					      recv_handler, sa_dev, 0);
		if (IS_ERR(sa_dev->port[i].agent))
			goto err;

		INIT_WORK(&sa_dev->port[i].update_task, update_sm_ah);

		count++;
	}

	if (!count)
		goto free;

	ib_set_client_data(device, &sa_client, sa_dev);

	/*
	 * We register our event handler after everything is set up,
	 * and then update our cached info after the event handler is
	 * registered to avoid any problems if a port changes state
	 * during our initialization.
	 */

	INIT_IB_EVENT_HANDLER(&sa_dev->event_handler, device, ib_sa_event);
	if (ib_register_event_handler(&sa_dev->event_handler))
		goto err;

	for (i = 0; i <= e - s; ++i) {
		if (rdma_cap_ib_sa(device, i + 1))
			update_sm_ah(&sa_dev->port[i].update_task);
	}

	return;

err:
	while (--i >= 0) {
		if (rdma_cap_ib_sa(device, i + 1))
			ib_unregister_mad_agent(sa_dev->port[i].agent);
	}
free:
	kfree(sa_dev);
	return;
}

static void ib_sa_remove_one(struct ib_device *device, void *client_data)
{
	struct ib_sa_device *sa_dev = client_data;
	int i;

	if (!sa_dev)
		return;

	ib_unregister_event_handler(&sa_dev->event_handler);

	flush_workqueue(ib_wq);

	for (i = 0; i <= sa_dev->end_port - sa_dev->start_port; ++i) {
		if (rdma_cap_ib_sa(device, i + 1)) {
			ib_unregister_mad_agent(sa_dev->port[i].agent);
			if (sa_dev->port[i].sm_ah)
				kref_put(&sa_dev->port[i].sm_ah->ref, free_sm_ah);
		}
	}

	kfree(sa_dev);
}

int ib_sa_init(void)
{
	int ret;

	get_random_bytes(&tid, sizeof tid);

	atomic_set(&ib_nl_sa_request_seq, 0);

	ret = ib_register_client(&sa_client);
	if (ret) {
		pr_err("Couldn't register ib_sa client\n");
		goto err1;
	}

	ret = mcast_init();
	if (ret) {
		pr_err("Couldn't initialize multicast handling\n");
		goto err2;
	}

	ib_nl_wq = create_singlethread_workqueue("ib_nl_sa_wq");
	if (!ib_nl_wq) {
		ret = -ENOMEM;
		goto err3;
	}

	INIT_DELAYED_WORK(&ib_nl_timed_work, ib_nl_request_timeout);

	return 0;

err3:
	mcast_cleanup();
err2:
	ib_unregister_client(&sa_client);
err1:
	return ret;
}

void ib_sa_cleanup(void)
{
	cancel_delayed_work(&ib_nl_timed_work);
	flush_workqueue(ib_nl_wq);
	destroy_workqueue(ib_nl_wq);
	mcast_cleanup();
	ib_unregister_client(&sa_client);
	idr_destroy(&query_idr);
}