drivers/infiniband/hw/mlx4/main.c
/*
 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <net/ipv6.h>
#include <net/addrconf.h>

#include <rdma/ib_smi.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_addr.h>

#include <linux/mlx4/driver.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/qp.h>

#include "mlx4_ib.h"
#include "user.h"

#define DRV_NAME        MLX4_IB_DRV_NAME
#define DRV_VERSION     "2.2-1"
#define DRV_RELDATE     "Feb 2014"

#define MLX4_IB_FLOW_MAX_PRIO 0xFFF
#define MLX4_IB_FLOW_QPN_MASK 0xFFFFFF
#define MLX4_IB_CARD_REV_A0   0xA0

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("Mellanox ConnectX HCA InfiniBand driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);

int mlx4_ib_sm_guid_assign = 1;
module_param_named(sm_guid_assign, mlx4_ib_sm_guid_assign, int, 0444);
MODULE_PARM_DESC(sm_guid_assign, "Enable SM alias_GUID assignment if sm_guid_assign > 0 (Default: 1)");

static const char mlx4_ib_version[] =
        DRV_NAME ": Mellanox ConnectX InfiniBand driver v"
        DRV_VERSION " (" DRV_RELDATE ")\n";

struct update_gid_work {
        struct work_struct      work;
        union ib_gid            gids[128];
        struct mlx4_ib_dev     *dev;
        int                     port;
};

static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init);

static struct workqueue_struct *wq;

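/*
 * Prepare a LID-routed SubnGet MAD; the caller fills in attr_id and
 * attr_mod before handing it to mlx4_MAD_IFC().
 */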
static void init_query_mad(struct ib_smp *mad)
{
        mad->base_version  = 1;
        mad->mgmt_class    = IB_MGMT_CLASS_SUBN_LID_ROUTED;
        mad->class_version = 1;
        mad->method        = IB_MGMT_METHOD_GET;
}

static union ib_gid zgid;

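/*
 * Device managed flow steering (DMFS) is reported as available only if
 * the firmware supports it for every port type actually present, and
 * never for IB ports in a multi-function (SR-IOV) environment.
 */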
static int check_flow_steering_support(struct mlx4_dev *dev)
{
        int eth_num_ports = 0;
        int ib_num_ports = 0;

        int dmfs = dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED;

        if (dmfs) {
                int i;
                mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH)
                        eth_num_ports++;
                mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
                        ib_num_ports++;
                dmfs &= (!ib_num_ports ||
                         (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DMFS_IPOIB)) &&
                        (!eth_num_ports ||
                         (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FS_EN));
                if (ib_num_ports && mlx4_is_mfunc(dev)) {
                        pr_warn("Device managed flow steering is unavailable for IB port in multifunction env.\n");
                        dmfs = 0;
                }
        }
        return dmfs;
}

static int num_ib_ports(struct mlx4_dev *dev)
{
        int ib_ports = 0;
        int i;

        mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
                ib_ports++;

        return ib_ports;
}

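/*
 * Build ib_device_attr from a NODE_INFO query plus cached firmware
 * capability flags and resource quotas.
 */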
static int mlx4_ib_query_device(struct ib_device *ibdev,
                                struct ib_device_attr *props)
{
        struct mlx4_ib_dev *dev = to_mdev(ibdev);
        struct ib_smp *in_mad  = NULL;
        struct ib_smp *out_mad = NULL;
        int err = -ENOMEM;
        int have_ib_ports;

        in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
        out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
        if (!in_mad || !out_mad)
                goto out;

        init_query_mad(in_mad);
        in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

        err = mlx4_MAD_IFC(to_mdev(ibdev), MLX4_MAD_IFC_IGNORE_KEYS,
                           1, NULL, NULL, in_mad, out_mad);
        if (err)
                goto out;

        memset(props, 0, sizeof *props);

        have_ib_ports = num_ib_ports(dev->dev);

        props->fw_ver = dev->dev->caps.fw_ver;
        props->device_cap_flags    = IB_DEVICE_CHANGE_PHY_PORT |
                IB_DEVICE_PORT_ACTIVE_EVENT             |
                IB_DEVICE_SYS_IMAGE_GUID                |
                IB_DEVICE_RC_RNR_NAK_GEN                |
                IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;
        if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_PKEY_CNTR)
                props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
        if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR)
                props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
        if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_APM && have_ib_ports)
                props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
        if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UD_AV_PORT)
                props->device_cap_flags |= IB_DEVICE_UD_AV_PORT_ENFORCE;
        if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM)
                props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
        if (dev->dev->caps.max_gso_sz &&
            (dev->dev->rev_id != MLX4_IB_CARD_REV_A0) &&
            (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BLH))
                props->device_cap_flags |= IB_DEVICE_UD_TSO;
        if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_RESERVED_LKEY)
                props->device_cap_flags |= IB_DEVICE_LOCAL_DMA_LKEY;
        if ((dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_LOCAL_INV) &&
            (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_REMOTE_INV) &&
            (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_FAST_REG_WR))
                props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
        if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC)
                props->device_cap_flags |= IB_DEVICE_XRC;
        if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW)
                props->device_cap_flags |= IB_DEVICE_MEM_WINDOW;
        if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN) {
                if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_WIN_TYPE_2B)
                        props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2B;
                else
                        props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2A;
        }
        if (dev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED)
                props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING;

        props->vendor_id           = be32_to_cpup((__be32 *) (out_mad->data + 36)) &
                0xffffff;
        props->vendor_part_id      = dev->dev->pdev->device;
        props->hw_ver              = be32_to_cpup((__be32 *) (out_mad->data + 32));
        memcpy(&props->sys_image_guid, out_mad->data +  4, 8);

        props->max_mr_size         = ~0ull;
        props->page_size_cap       = dev->dev->caps.page_size_cap;
        props->max_qp              = dev->dev->quotas.qp;
        props->max_qp_wr           = dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE;
        props->max_sge             = min(dev->dev->caps.max_sq_sg,
                                         dev->dev->caps.max_rq_sg);
        props->max_cq              = dev->dev->quotas.cq;
        props->max_cqe             = dev->dev->caps.max_cqes;
        props->max_mr              = dev->dev->quotas.mpt;
        props->max_pd              = dev->dev->caps.num_pds - dev->dev->caps.reserved_pds;
        props->max_qp_rd_atom      = dev->dev->caps.max_qp_dest_rdma;
        props->max_qp_init_rd_atom = dev->dev->caps.max_qp_init_rdma;
        props->max_res_rd_atom     = props->max_qp_rd_atom * props->max_qp;
        props->max_srq             = dev->dev->quotas.srq;
        props->max_srq_wr          = dev->dev->caps.max_srq_wqes - 1;
        props->max_srq_sge         = dev->dev->caps.max_srq_sge;
        props->max_fast_reg_page_list_len = MLX4_MAX_FAST_REG_PAGES;
        props->local_ca_ack_delay  = dev->dev->caps.local_ca_ack_delay;
        props->atomic_cap          = dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_ATOMIC ?
                IB_ATOMIC_HCA : IB_ATOMIC_NONE;
        props->masked_atomic_cap   = props->atomic_cap;
        props->max_pkeys           = dev->dev->caps.pkey_table_len[1];
        props->max_mcast_grp       = dev->dev->caps.num_mgms + dev->dev->caps.num_amgms;
        props->max_mcast_qp_attach = dev->dev->caps.num_qp_per_mgm;
        props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
                                           props->max_mcast_grp;
        props->max_map_per_fmr = dev->dev->caps.max_fmr_maps;

out:
        kfree(in_mad);
        kfree(out_mad);

        return err;
}

static enum rdma_link_layer
mlx4_ib_port_link_layer(struct ib_device *device, u8 port_num)
{
        struct mlx4_dev *dev = to_mdev(device)->dev;

        return dev->caps.port_mask[port_num] == MLX4_PORT_TYPE_IB ?
                IB_LINK_LAYER_INFINIBAND : IB_LINK_LAYER_ETHERNET;
}

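/*
 * Query an IB port.  The byte offsets into out_mad->data follow the
 * IBA PortInfo attribute layout; netw_view requests the network (SM)
 * view instead of the host view under SR-IOV.
 */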
static int ib_link_query_port(struct ib_device *ibdev, u8 port,
                              struct ib_port_attr *props, int netw_view)
{
        struct ib_smp *in_mad  = NULL;
        struct ib_smp *out_mad = NULL;
        int ext_active_speed;
        int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
        int err = -ENOMEM;

        in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
        out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
        if (!in_mad || !out_mad)
                goto out;

        init_query_mad(in_mad);
        in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
        in_mad->attr_mod = cpu_to_be32(port);

        if (mlx4_is_mfunc(to_mdev(ibdev)->dev) && netw_view)
                mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;

        err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL,
                                in_mad, out_mad);
        if (err)
                goto out;

        props->lid              = be16_to_cpup((__be16 *) (out_mad->data + 16));
        props->lmc              = out_mad->data[34] & 0x7;
        props->sm_lid           = be16_to_cpup((__be16 *) (out_mad->data + 18));
        props->sm_sl            = out_mad->data[36] & 0xf;
        props->state            = out_mad->data[32] & 0xf;
        props->phys_state       = out_mad->data[33] >> 4;
        props->port_cap_flags   = be32_to_cpup((__be32 *) (out_mad->data + 20));
        if (netw_view)
                props->gid_tbl_len = out_mad->data[50];
        else
                props->gid_tbl_len = to_mdev(ibdev)->dev->caps.gid_table_len[port];
        props->max_msg_sz       = to_mdev(ibdev)->dev->caps.max_msg_sz;
        props->pkey_tbl_len     = to_mdev(ibdev)->dev->caps.pkey_table_len[port];
        props->bad_pkey_cntr    = be16_to_cpup((__be16 *) (out_mad->data + 46));
        props->qkey_viol_cntr   = be16_to_cpup((__be16 *) (out_mad->data + 48));
        props->active_width     = out_mad->data[31] & 0xf;
        props->active_speed     = out_mad->data[35] >> 4;
        props->max_mtu          = out_mad->data[41] & 0xf;
        props->active_mtu       = out_mad->data[36] >> 4;
        props->subnet_timeout   = out_mad->data[51] & 0x1f;
        props->max_vl_num       = out_mad->data[37] >> 4;
        props->init_type_reply  = out_mad->data[41] >> 4;

        /* Check if extended speeds (EDR/FDR/...) are supported */
        if (props->port_cap_flags & IB_PORT_EXTENDED_SPEEDS_SUP) {
                ext_active_speed = out_mad->data[62] >> 4;

                switch (ext_active_speed) {
                case 1:
                        props->active_speed = IB_SPEED_FDR;
                        break;
                case 2:
                        props->active_speed = IB_SPEED_EDR;
                        break;
                }
        }

        /* If the reported active speed is QDR, check if it is really FDR-10 */
        if (props->active_speed == IB_SPEED_QDR) {
                init_query_mad(in_mad);
                in_mad->attr_id = MLX4_ATTR_EXTENDED_PORT_INFO;
                in_mad->attr_mod = cpu_to_be32(port);

                err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port,
                                   NULL, NULL, in_mad, out_mad);
                if (err)
                        goto out;

                /* Checking LinkSpeedActive for FDR-10 */
                if (out_mad->data[15] & 0x1)
                        props->active_speed = IB_SPEED_FDR10;
        }

        /* Avoid wrong speed value returned by FW if the IB link is down. */
        if (props->state == IB_PORT_DOWN)
                props->active_speed = IB_SPEED_SDR;

out:
        kfree(in_mad);
        kfree(out_mad);
        return err;
}

static u8 state_to_phys_state(enum ib_port_state state)
{
        return state == IB_PORT_ACTIVE ? 5 : 3;
}

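/*
 * Query an Ethernet (RoCE) port: static attributes come from the
 * QUERY_PORT firmware command, link state and MTU from the attached
 * net_device.
 */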
static int eth_link_query_port(struct ib_device *ibdev, u8 port,
                               struct ib_port_attr *props, int netw_view)
{
        struct mlx4_ib_dev *mdev = to_mdev(ibdev);
        struct mlx4_ib_iboe *iboe = &mdev->iboe;
        struct net_device *ndev;
        enum ib_mtu tmp;
        struct mlx4_cmd_mailbox *mailbox;
        int err = 0;

        mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
        if (IS_ERR(mailbox))
                return PTR_ERR(mailbox);

        err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, port, 0,
                           MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
                           MLX4_CMD_WRAPPED);
        if (err)
                goto out;

        props->active_width     = (((u8 *)mailbox->buf)[5] == 0x40) ?
                                                IB_WIDTH_4X : IB_WIDTH_1X;
        props->active_speed     = IB_SPEED_QDR;
        props->port_cap_flags   = IB_PORT_CM_SUP | IB_PORT_IP_BASED_GIDS;
        props->gid_tbl_len      = mdev->dev->caps.gid_table_len[port];
        props->max_msg_sz       = mdev->dev->caps.max_msg_sz;
        props->pkey_tbl_len     = 1;
        props->max_mtu          = IB_MTU_4096;
        props->max_vl_num       = 2;
        props->state            = IB_PORT_DOWN;
        props->phys_state       = state_to_phys_state(props->state);
        props->active_mtu       = IB_MTU_256;
        spin_lock_bh(&iboe->lock);
        ndev = iboe->netdevs[port - 1];
        if (!ndev)
                goto out_unlock;

        tmp = iboe_get_mtu(ndev->mtu);
        props->active_mtu = tmp ? min(props->max_mtu, tmp) : IB_MTU_256;

        props->state            = (netif_running(ndev) && netif_carrier_ok(ndev)) ?
                                        IB_PORT_ACTIVE : IB_PORT_DOWN;
        props->phys_state       = state_to_phys_state(props->state);
out_unlock:
        spin_unlock_bh(&iboe->lock);
out:
        mlx4_free_cmd_mailbox(mdev->dev, mailbox);
        return err;
}

int __mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
                         struct ib_port_attr *props, int netw_view)
{
        int err;

        memset(props, 0, sizeof *props);

        err = mlx4_ib_port_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND ?
                ib_link_query_port(ibdev, port, props, netw_view) :
                                eth_link_query_port(ibdev, port, props, netw_view);

        return err;
}

static int mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
                              struct ib_port_attr *props)
{
        /* returns host view */
        return __mlx4_ib_query_port(ibdev, port, props, 0);
}

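/*
 * Read a GID via PORT_INFO (prefix) and GUID_INFO (GUID) MADs.  In the
 * host view of a multi-function device only index 0 carries a real
 * GUID; higher indices return the prefix with a zeroed GUID.
 */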
int __mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
                        union ib_gid *gid, int netw_view)
{
        struct ib_smp *in_mad  = NULL;
        struct ib_smp *out_mad = NULL;
        int err = -ENOMEM;
        struct mlx4_ib_dev *dev = to_mdev(ibdev);
        int clear = 0;
        int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;

        in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
        out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
        if (!in_mad || !out_mad)
                goto out;

        init_query_mad(in_mad);
        in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
        in_mad->attr_mod = cpu_to_be32(port);

        if (mlx4_is_mfunc(dev->dev) && netw_view)
                mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;

        err = mlx4_MAD_IFC(dev, mad_ifc_flags, port, NULL, NULL, in_mad, out_mad);
        if (err)
                goto out;

        memcpy(gid->raw, out_mad->data + 8, 8);

        if (mlx4_is_mfunc(dev->dev) && !netw_view) {
                if (index) {
                        /* For any index > 0, return the null guid */
                        err = 0;
                        clear = 1;
                        goto out;
                }
        }

        init_query_mad(in_mad);
        in_mad->attr_id  = IB_SMP_ATTR_GUID_INFO;
        in_mad->attr_mod = cpu_to_be32(index / 8);

        err = mlx4_MAD_IFC(dev, mad_ifc_flags, port,
                           NULL, NULL, in_mad, out_mad);
        if (err)
                goto out;

        memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8);

out:
        if (clear)
                memset(gid->raw + 8, 0, 8);
        kfree(in_mad);
        kfree(out_mad);
        return err;
}

static int iboe_query_gid(struct ib_device *ibdev, u8 port, int index,
                          union ib_gid *gid)
{
        struct mlx4_ib_dev *dev = to_mdev(ibdev);

        *gid = dev->iboe.gid_table[port - 1][index];

        return 0;
}

static int mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
                             union ib_gid *gid)
{
        if (rdma_port_get_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND)
                return __mlx4_ib_query_gid(ibdev, port, index, gid, 0);
        else
                return iboe_query_gid(ibdev, port, index, gid);
}

int __mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
                         u16 *pkey, int netw_view)
{
        struct ib_smp *in_mad  = NULL;
        struct ib_smp *out_mad = NULL;
        int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
        int err = -ENOMEM;

        in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
        out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
        if (!in_mad || !out_mad)
                goto out;

        init_query_mad(in_mad);
        in_mad->attr_id  = IB_SMP_ATTR_PKEY_TABLE;
        in_mad->attr_mod = cpu_to_be32(index / 32);

        if (mlx4_is_mfunc(to_mdev(ibdev)->dev) && netw_view)
                mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;

        err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL,
                           in_mad, out_mad);
        if (err)
                goto out;

        *pkey = be16_to_cpu(((__be16 *) out_mad->data)[index % 32]);

out:
        kfree(in_mad);
        kfree(out_mad);
        return err;
}

static int mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
{
        return __mlx4_ib_query_pkey(ibdev, port, index, pkey, 0);
}

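/* Only node description changes are supported, and only on the master. */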
static int mlx4_ib_modify_device(struct ib_device *ibdev, int mask,
                                 struct ib_device_modify *props)
{
        struct mlx4_cmd_mailbox *mailbox;
        unsigned long flags;

        if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
                return -EOPNOTSUPP;

        if (!(mask & IB_DEVICE_MODIFY_NODE_DESC))
                return 0;

        if (mlx4_is_slave(to_mdev(ibdev)->dev))
                return -EOPNOTSUPP;

        spin_lock_irqsave(&to_mdev(ibdev)->sm_lock, flags);
        memcpy(ibdev->node_desc, props->node_desc, 64);
        spin_unlock_irqrestore(&to_mdev(ibdev)->sm_lock, flags);

        /*
         * If possible, pass node desc to FW, so it can generate
         * a 144 trap.  If cmd fails, just ignore.
         */
        mailbox = mlx4_alloc_cmd_mailbox(to_mdev(ibdev)->dev);
        if (IS_ERR(mailbox))
                return 0;

        memcpy(mailbox->buf, props->node_desc, 64);
        mlx4_cmd(to_mdev(ibdev)->dev, mailbox->dma, 1, 0,
                 MLX4_CMD_SET_NODE, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);

        mlx4_free_cmd_mailbox(to_mdev(ibdev)->dev, mailbox);

        return 0;
}

static int mlx4_ib_SET_PORT(struct mlx4_ib_dev *dev, u8 port, int reset_qkey_viols,
                            u32 cap_mask)
{
        struct mlx4_cmd_mailbox *mailbox;
        int err;

        mailbox = mlx4_alloc_cmd_mailbox(dev->dev);
        if (IS_ERR(mailbox))
                return PTR_ERR(mailbox);

        if (dev->dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
                *(u8 *) mailbox->buf         = !!reset_qkey_viols << 6;
                ((__be32 *) mailbox->buf)[2] = cpu_to_be32(cap_mask);
        } else {
                ((u8 *) mailbox->buf)[3]     = !!reset_qkey_viols;
                ((__be32 *) mailbox->buf)[1] = cpu_to_be32(cap_mask);
        }

        err = mlx4_cmd(dev->dev, mailbox->dma, port, 0, MLX4_CMD_SET_PORT,
                       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);

        mlx4_free_cmd_mailbox(dev->dev, mailbox);
        return err;
}

static int mlx4_ib_modify_port(struct ib_device *ibdev, u8 port, int mask,
                               struct ib_port_modify *props)
{
        struct mlx4_ib_dev *mdev = to_mdev(ibdev);
        u8 is_eth = mdev->dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH;
        struct ib_port_attr attr;
        u32 cap_mask;
        int err;

        /* return OK if this is RoCE. CM calls ib_modify_port() regardless
         * of whether port link layer is ETH or IB. For ETH ports, qkey
         * violations and port capabilities are not meaningful.
         */
        if (is_eth)
                return 0;

        mutex_lock(&mdev->cap_mask_mutex);

        err = mlx4_ib_query_port(ibdev, port, &attr);
        if (err)
                goto out;

        cap_mask = (attr.port_cap_flags | props->set_port_cap_mask) &
                ~props->clr_port_cap_mask;

        err = mlx4_ib_SET_PORT(mdev, port,
                               !!(mask & IB_PORT_RESET_QKEY_CNTR),
                               cap_mask);

out:
        mutex_unlock(&mdev->cap_mask_mutex);
        return err;
}

static struct ib_ucontext *mlx4_ib_alloc_ucontext(struct ib_device *ibdev,
                                                  struct ib_udata *udata)
{
        struct mlx4_ib_dev *dev = to_mdev(ibdev);
        struct mlx4_ib_ucontext *context;
        struct mlx4_ib_alloc_ucontext_resp_v3 resp_v3;
        struct mlx4_ib_alloc_ucontext_resp resp;
        int err;

        if (!dev->ib_active)
                return ERR_PTR(-EAGAIN);

        if (ibdev->uverbs_abi_ver == MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION) {
                resp_v3.qp_tab_size      = dev->dev->caps.num_qps;
                resp_v3.bf_reg_size      = dev->dev->caps.bf_reg_size;
                resp_v3.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;
        } else {
                resp.dev_caps         = dev->dev->caps.userspace_caps;
                resp.qp_tab_size      = dev->dev->caps.num_qps;
                resp.bf_reg_size      = dev->dev->caps.bf_reg_size;
                resp.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;
                resp.cqe_size         = dev->dev->caps.cqe_size;
        }

        context = kmalloc(sizeof *context, GFP_KERNEL);
        if (!context)
                return ERR_PTR(-ENOMEM);

        err = mlx4_uar_alloc(to_mdev(ibdev)->dev, &context->uar);
        if (err) {
                kfree(context);
                return ERR_PTR(err);
        }

        INIT_LIST_HEAD(&context->db_page_list);
        mutex_init(&context->db_page_mutex);

        if (ibdev->uverbs_abi_ver == MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION)
                err = ib_copy_to_udata(udata, &resp_v3, sizeof(resp_v3));
        else
                err = ib_copy_to_udata(udata, &resp, sizeof(resp));

        if (err) {
                mlx4_uar_free(to_mdev(ibdev)->dev, &context->uar);
                kfree(context);
                return ERR_PTR(-EFAULT);
        }

        return &context->ibucontext;
}

static int mlx4_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
{
        struct mlx4_ib_ucontext *context = to_mucontext(ibcontext);

        mlx4_uar_free(to_mdev(ibcontext->device)->dev, &context->uar);
        kfree(context);

        return 0;
}

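/*
 * Map the UAR doorbell page (offset 0) or its blue flame page
 * (offset 1) into user space; exactly one page per call.
 */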
static int mlx4_ib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
        struct mlx4_ib_dev *dev = to_mdev(context->device);

        if (vma->vm_end - vma->vm_start != PAGE_SIZE)
                return -EINVAL;

        if (vma->vm_pgoff == 0) {
                vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

                if (io_remap_pfn_range(vma, vma->vm_start,
                                       to_mucontext(context)->uar.pfn,
                                       PAGE_SIZE, vma->vm_page_prot))
                        return -EAGAIN;
        } else if (vma->vm_pgoff == 1 && dev->dev->caps.bf_reg_size != 0) {
                vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

                if (io_remap_pfn_range(vma, vma->vm_start,
                                       to_mucontext(context)->uar.pfn +
                                       dev->dev->caps.num_uars,
                                       PAGE_SIZE, vma->vm_page_prot))
                        return -EAGAIN;
        } else
                return -EINVAL;

        return 0;
}

static struct ib_pd *mlx4_ib_alloc_pd(struct ib_device *ibdev,
                                      struct ib_ucontext *context,
                                      struct ib_udata *udata)
{
        struct mlx4_ib_pd *pd;
        int err;

        pd = kmalloc(sizeof *pd, GFP_KERNEL);
        if (!pd)
                return ERR_PTR(-ENOMEM);

        err = mlx4_pd_alloc(to_mdev(ibdev)->dev, &pd->pdn);
        if (err) {
                kfree(pd);
                return ERR_PTR(err);
        }

        if (context)
                if (ib_copy_to_udata(udata, &pd->pdn, sizeof (__u32))) {
                        mlx4_pd_free(to_mdev(ibdev)->dev, pd->pdn);
                        kfree(pd);
                        return ERR_PTR(-EFAULT);
                }

        return &pd->ibpd;
}

static int mlx4_ib_dealloc_pd(struct ib_pd *pd)
{
        mlx4_pd_free(to_mdev(pd->device)->dev, to_mpd(pd)->pdn);
        kfree(pd);

        return 0;
}

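/*
 * An XRC domain carries its own PD and CQ; these back the XRC target
 * QPs that are later attached to the domain.
 */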
static struct ib_xrcd *mlx4_ib_alloc_xrcd(struct ib_device *ibdev,
                                          struct ib_ucontext *context,
                                          struct ib_udata *udata)
{
        struct mlx4_ib_xrcd *xrcd;
        int err;

        if (!(to_mdev(ibdev)->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC))
                return ERR_PTR(-ENOSYS);

        xrcd = kmalloc(sizeof *xrcd, GFP_KERNEL);
        if (!xrcd)
                return ERR_PTR(-ENOMEM);

        err = mlx4_xrcd_alloc(to_mdev(ibdev)->dev, &xrcd->xrcdn);
        if (err)
                goto err1;

        xrcd->pd = ib_alloc_pd(ibdev);
        if (IS_ERR(xrcd->pd)) {
                err = PTR_ERR(xrcd->pd);
                goto err2;
        }

        xrcd->cq = ib_create_cq(ibdev, NULL, NULL, xrcd, 1, 0);
        if (IS_ERR(xrcd->cq)) {
                err = PTR_ERR(xrcd->cq);
                goto err3;
        }

        return &xrcd->ibxrcd;

err3:
        ib_dealloc_pd(xrcd->pd);
err2:
        mlx4_xrcd_free(to_mdev(ibdev)->dev, xrcd->xrcdn);
err1:
        kfree(xrcd);
        return ERR_PTR(err);
}

static int mlx4_ib_dealloc_xrcd(struct ib_xrcd *xrcd)
{
        ib_destroy_cq(to_mxrcd(xrcd)->cq);
        ib_dealloc_pd(to_mxrcd(xrcd)->pd);
        mlx4_xrcd_free(to_mdev(xrcd->device)->dev, to_mxrcd(xrcd)->xrcdn);
        kfree(xrcd);

        return 0;
}

static int add_gid_entry(struct ib_qp *ibqp, union ib_gid *gid)
{
        struct mlx4_ib_qp *mqp = to_mqp(ibqp);
        struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
        struct mlx4_ib_gid_entry *ge;

        ge = kzalloc(sizeof *ge, GFP_KERNEL);
        if (!ge)
                return -ENOMEM;

        ge->gid = *gid;
        if (mlx4_ib_add_mc(mdev, mqp, gid)) {
                ge->port = mqp->port;
                ge->added = 1;
        }

        mutex_lock(&mqp->mutex);
        list_add_tail(&ge->list, &mqp->gid_list);
        mutex_unlock(&mqp->mutex);

        return 0;
}

int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
                   union ib_gid *gid)
{
        struct net_device *ndev;
        int ret = 0;

        if (!mqp->port)
                return 0;

        spin_lock_bh(&mdev->iboe.lock);
        ndev = mdev->iboe.netdevs[mqp->port - 1];
        if (ndev)
                dev_hold(ndev);
        spin_unlock_bh(&mdev->iboe.lock);

        if (ndev) {
                ret = 1;
                dev_put(ndev);
        }

        return ret;
}

struct mlx4_ib_steering {
        struct list_head list;
        u64 reg_id;
        union ib_gid gid;
};

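/*
 * Translate a single ib_flow_spec into the corresponding mlx4 hardware
 * steering segment.  Returns the number of bytes written into the rule
 * buffer, or -EINVAL for spec types the device cannot express.
 */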
static int parse_flow_attr(struct mlx4_dev *dev,
                           u32 qp_num,
                           union ib_flow_spec *ib_spec,
                           struct _rule_hw *mlx4_spec)
{
        enum mlx4_net_trans_rule_id type;

        switch (ib_spec->type) {
        case IB_FLOW_SPEC_ETH:
                type = MLX4_NET_TRANS_RULE_ID_ETH;
                memcpy(mlx4_spec->eth.dst_mac, ib_spec->eth.val.dst_mac,
                       ETH_ALEN);
                memcpy(mlx4_spec->eth.dst_mac_msk, ib_spec->eth.mask.dst_mac,
                       ETH_ALEN);
                mlx4_spec->eth.vlan_tag = ib_spec->eth.val.vlan_tag;
                mlx4_spec->eth.vlan_tag_msk = ib_spec->eth.mask.vlan_tag;
                break;

        case IB_FLOW_SPEC_IB:
                type = MLX4_NET_TRANS_RULE_ID_IB;
                mlx4_spec->ib.l3_qpn =
                        cpu_to_be32(qp_num);
                mlx4_spec->ib.qpn_mask =
                        cpu_to_be32(MLX4_IB_FLOW_QPN_MASK);
                break;

        case IB_FLOW_SPEC_IPV4:
                type = MLX4_NET_TRANS_RULE_ID_IPV4;
                mlx4_spec->ipv4.src_ip = ib_spec->ipv4.val.src_ip;
                mlx4_spec->ipv4.src_ip_msk = ib_spec->ipv4.mask.src_ip;
                mlx4_spec->ipv4.dst_ip = ib_spec->ipv4.val.dst_ip;
                mlx4_spec->ipv4.dst_ip_msk = ib_spec->ipv4.mask.dst_ip;
                break;

        case IB_FLOW_SPEC_TCP:
        case IB_FLOW_SPEC_UDP:
                type = ib_spec->type == IB_FLOW_SPEC_TCP ?
                                        MLX4_NET_TRANS_RULE_ID_TCP :
                                        MLX4_NET_TRANS_RULE_ID_UDP;
                mlx4_spec->tcp_udp.dst_port = ib_spec->tcp_udp.val.dst_port;
                mlx4_spec->tcp_udp.dst_port_msk = ib_spec->tcp_udp.mask.dst_port;
                mlx4_spec->tcp_udp.src_port = ib_spec->tcp_udp.val.src_port;
                mlx4_spec->tcp_udp.src_port_msk = ib_spec->tcp_udp.mask.src_port;
                break;

        default:
                return -EINVAL;
        }
        if (mlx4_map_sw_to_hw_steering_id(dev, type) < 0 ||
            mlx4_hw_rule_sz(dev, type) < 0)
                return -EINVAL;
        mlx4_spec->id = cpu_to_be16(mlx4_map_sw_to_hw_steering_id(dev, type));
        mlx4_spec->size = mlx4_hw_rule_sz(dev, type) >> 2;
        return mlx4_hw_rule_sz(dev, type);
}

struct default_rules {
        __u32 mandatory_fields[IB_FLOW_SPEC_SUPPORT_LAYERS];
        __u32 mandatory_not_fields[IB_FLOW_SPEC_SUPPORT_LAYERS];
        __u32 rules_create_list[IB_FLOW_SPEC_SUPPORT_LAYERS];
        __u8  link_layer;
};

static const struct default_rules default_table[] = {
        {
                .mandatory_fields = {IB_FLOW_SPEC_IPV4},
                .mandatory_not_fields = {IB_FLOW_SPEC_ETH},
                .rules_create_list = {IB_FLOW_SPEC_IB},
                .link_layer = IB_LINK_LAYER_INFINIBAND
        }
};

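/*
 * Find the default_table entry matching this flow attribute: every
 * mandatory field must appear (with the exact type) and no forbidden
 * field may appear.  Returns the table index, or -1 if nothing matches.
 */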
static int __mlx4_ib_default_rules_match(struct ib_qp *qp,
                                         struct ib_flow_attr *flow_attr)
{
        int i, j, k;
        void *ib_flow;
        const struct default_rules *pdefault_rules = default_table;
        u8 link_layer = rdma_port_get_link_layer(qp->device, flow_attr->port);

        for (i = 0; i < ARRAY_SIZE(default_table); i++, pdefault_rules++) {
                __u32 field_types[IB_FLOW_SPEC_SUPPORT_LAYERS];
                memset(&field_types, 0, sizeof(field_types));

                if (link_layer != pdefault_rules->link_layer)
                        continue;

                ib_flow = flow_attr + 1;
                /* we assume the specs are sorted */
                for (j = 0, k = 0; k < IB_FLOW_SPEC_SUPPORT_LAYERS &&
                     j < flow_attr->num_of_specs; k++) {
                        union ib_flow_spec *current_flow =
                                (union ib_flow_spec *)ib_flow;

                        /* same layer but different type */
                        if (((current_flow->type & IB_FLOW_SPEC_LAYER_MASK) ==
                             (pdefault_rules->mandatory_fields[k] &
                              IB_FLOW_SPEC_LAYER_MASK)) &&
                            (current_flow->type !=
                             pdefault_rules->mandatory_fields[k]))
                                goto out;

                        /* same layer, try match next one */
                        if (current_flow->type ==
                            pdefault_rules->mandatory_fields[k]) {
                                j++;
                                ib_flow +=
                                        ((union ib_flow_spec *)ib_flow)->size;
                        }
                }

                ib_flow = flow_attr + 1;
                for (j = 0; j < flow_attr->num_of_specs;
                     j++, ib_flow += ((union ib_flow_spec *)ib_flow)->size)
                        for (k = 0; k < IB_FLOW_SPEC_SUPPORT_LAYERS; k++)
                                /* same layer and same type */
                                if (((union ib_flow_spec *)ib_flow)->type ==
                                    pdefault_rules->mandatory_not_fields[k])
                                        goto out;

                return i;
        }
out:
        return -1;
}

static int __mlx4_ib_create_default_rules(
                struct mlx4_ib_dev *mdev,
                struct ib_qp *qp,
                const struct default_rules *pdefault_rules,
                struct _rule_hw *mlx4_spec)
{
        int size = 0;
        int i;

        for (i = 0; i < ARRAY_SIZE(pdefault_rules->rules_create_list); i++) {
                int ret;
                union ib_flow_spec ib_spec;
                switch (pdefault_rules->rules_create_list[i]) {
                case 0:
                        /* no rule */
                        continue;
                case IB_FLOW_SPEC_IB:
                        ib_spec.type = IB_FLOW_SPEC_IB;
                        ib_spec.size = sizeof(struct ib_flow_spec_ib);

                        break;
                default:
                        /* invalid rule */
                        return -EINVAL;
                }
                /* We must put an empty rule; the qpn is being ignored */
                ret = parse_flow_attr(mdev->dev, 0, &ib_spec,
                                      mlx4_spec);
                if (ret < 0) {
                        pr_info("invalid parsing\n");
                        return -EINVAL;
                }

                mlx4_spec = (void *)mlx4_spec + ret;
                size += ret;
        }
        return size;
}

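/*
 * Build the complete hardware rule in a command mailbox: control
 * segment, any default rules, then the caller's specs; attach it with
 * QP_FLOW_STEERING_ATTACH.  *reg_id identifies the rule for detach.
 */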
static int __mlx4_ib_create_flow(struct ib_qp *qp, struct ib_flow_attr *flow_attr,
                          int domain,
                          enum mlx4_net_trans_promisc_mode flow_type,
                          u64 *reg_id)
{
        int ret, i;
        int size = 0;
        void *ib_flow;
        struct mlx4_ib_dev *mdev = to_mdev(qp->device);
        struct mlx4_cmd_mailbox *mailbox;
        struct mlx4_net_trans_rule_hw_ctrl *ctrl;
        int default_flow;

        static const u16 __mlx4_domain[] = {
                [IB_FLOW_DOMAIN_USER] = MLX4_DOMAIN_UVERBS,
                [IB_FLOW_DOMAIN_ETHTOOL] = MLX4_DOMAIN_ETHTOOL,
                [IB_FLOW_DOMAIN_RFS] = MLX4_DOMAIN_RFS,
                [IB_FLOW_DOMAIN_NIC] = MLX4_DOMAIN_NIC,
        };

        if (flow_attr->priority > MLX4_IB_FLOW_MAX_PRIO) {
                pr_err("Invalid priority value %d\n", flow_attr->priority);
                return -EINVAL;
        }

        if (domain >= IB_FLOW_DOMAIN_NUM) {
                pr_err("Invalid domain value %d\n", domain);
                return -EINVAL;
        }

        if (mlx4_map_sw_to_hw_steering_mode(mdev->dev, flow_type) < 0)
                return -EINVAL;

        mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
        if (IS_ERR(mailbox))
                return PTR_ERR(mailbox);
        ctrl = mailbox->buf;

        ctrl->prio = cpu_to_be16(__mlx4_domain[domain] |
                                 flow_attr->priority);
        ctrl->type = mlx4_map_sw_to_hw_steering_mode(mdev->dev, flow_type);
        ctrl->port = flow_attr->port;
        ctrl->qpn = cpu_to_be32(qp->qp_num);

        ib_flow = flow_attr + 1;
        size += sizeof(struct mlx4_net_trans_rule_hw_ctrl);
        /* Add default flows */
        default_flow = __mlx4_ib_default_rules_match(qp, flow_attr);
        if (default_flow >= 0) {
                ret = __mlx4_ib_create_default_rules(
                                mdev, qp, default_table + default_flow,
                                mailbox->buf + size);
                if (ret < 0) {
                        mlx4_free_cmd_mailbox(mdev->dev, mailbox);
                        return -EINVAL;
                }
                size += ret;
        }
        for (i = 0; i < flow_attr->num_of_specs; i++) {
                ret = parse_flow_attr(mdev->dev, qp->qp_num, ib_flow,
                                      mailbox->buf + size);
                if (ret < 0) {
                        mlx4_free_cmd_mailbox(mdev->dev, mailbox);
                        return -EINVAL;
                }
                ib_flow += ((union ib_flow_spec *) ib_flow)->size;
                size += ret;
        }

        ret = mlx4_cmd_imm(mdev->dev, mailbox->dma, reg_id, size >> 2, 0,
                           MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
                           MLX4_CMD_NATIVE);
        if (ret == -ENOMEM)
                pr_err("mcg table is full. Failed to register network rule.\n");
        else if (ret == -ENXIO)
                pr_err("Device managed flow steering is disabled. Failed to register network rule.\n");
        else if (ret)
                pr_err("Invalid argument. Failed to register network rule.\n");

        mlx4_free_cmd_mailbox(mdev->dev, mailbox);
        return ret;
}

static int __mlx4_ib_destroy_flow(struct mlx4_dev *dev, u64 reg_id)
{
        int err;
        err = mlx4_cmd(dev, reg_id, 0, 0,
                       MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
                       MLX4_CMD_NATIVE);
        if (err)
                pr_err("Failed to detach network rule. registration id = 0x%llx\n",
                       reg_id);
        return err;
}

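/*
 * With VXLAN tunnel offload enabled, a flow consisting of a single L2
 * spec additionally gets a tunnel steering rule keyed on the
 * destination MAC.
 */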
static int mlx4_ib_tunnel_steer_add(struct ib_qp *qp, struct ib_flow_attr *flow_attr,
                                    u64 *reg_id)
{
        void *ib_flow;
        union ib_flow_spec *ib_spec;
        struct mlx4_dev *dev = to_mdev(qp->device)->dev;
        int err = 0;

        if (dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
                return 0; /* do nothing */

        ib_flow = flow_attr + 1;
        ib_spec = (union ib_flow_spec *)ib_flow;

        if (ib_spec->type != IB_FLOW_SPEC_ETH || flow_attr->num_of_specs != 1)
                return 0; /* do nothing */

        err = mlx4_tunnel_steer_add(to_mdev(qp->device)->dev, ib_spec->eth.val.dst_mac,
                                    flow_attr->port, qp->qp_num,
                                    MLX4_DOMAIN_UVERBS | (flow_attr->priority & 0xff),
                                    reg_id);
        return err;
}

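/*
 * Verbs flow-creation entry point.  One flow attribute may expand into
 * several firmware rules (sniffer mode registers both unicast and
 * multicast sniffer rules); every registration id is kept in mflow so
 * destroy can unwind them.
 */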
static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
                                    struct ib_flow_attr *flow_attr,
                                    int domain)
{
        int err = 0, i = 0;
        struct mlx4_ib_flow *mflow;
        enum mlx4_net_trans_promisc_mode type[2];

        memset(type, 0, sizeof(type));

        mflow = kzalloc(sizeof(*mflow), GFP_KERNEL);
        if (!mflow) {
                err = -ENOMEM;
                goto err_free;
        }

        switch (flow_attr->type) {
        case IB_FLOW_ATTR_NORMAL:
                type[0] = MLX4_FS_REGULAR;
                break;

        case IB_FLOW_ATTR_ALL_DEFAULT:
                type[0] = MLX4_FS_ALL_DEFAULT;
                break;

        case IB_FLOW_ATTR_MC_DEFAULT:
                type[0] = MLX4_FS_MC_DEFAULT;
                break;

        case IB_FLOW_ATTR_SNIFFER:
                type[0] = MLX4_FS_UC_SNIFFER;
                type[1] = MLX4_FS_MC_SNIFFER;
                break;

        default:
                err = -EINVAL;
                goto err_free;
        }

        while (i < ARRAY_SIZE(type) && type[i]) {
                err = __mlx4_ib_create_flow(qp, flow_attr, domain, type[i],
                                            &mflow->reg_id[i]);
                if (err)
                        goto err_create_flow;
                i++;
        }

        if (i < ARRAY_SIZE(type) && flow_attr->type == IB_FLOW_ATTR_NORMAL) {
                err = mlx4_ib_tunnel_steer_add(qp, flow_attr, &mflow->reg_id[i]);
                if (err)
                        goto err_create_flow;
                i++;
        }

        return &mflow->ibflow;

err_create_flow:
        /* Unwind only the rules that were actually attached (slots 0..i-1). */
        while (i) {
                i--;
                (void)__mlx4_ib_destroy_flow(to_mdev(qp->device)->dev,
                                             mflow->reg_id[i]);
        }
err_free:
        kfree(mflow);
        return ERR_PTR(err);
}

static int mlx4_ib_destroy_flow(struct ib_flow *flow_id)
{
        int err, ret = 0;
        int i = 0;
        struct mlx4_ib_dev *mdev = to_mdev(flow_id->qp->device);
        struct mlx4_ib_flow *mflow = to_mflow(flow_id);

        while (i < ARRAY_SIZE(mflow->reg_id) && mflow->reg_id[i]) {
                err = __mlx4_ib_destroy_flow(mdev->dev, mflow->reg_id[i]);
                if (err)
                        ret = err;
                i++;
        }

        kfree(mflow);
        return ret;
}

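/*
 * Attach a QP to a multicast group.  MGIDs whose second byte is 0x0e
 * are treated as IPv4 multicast; with device managed steering the
 * firmware registration id is saved so detach can find the rule again.
 */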
static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
        int err;
        struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
        struct mlx4_ib_qp *mqp = to_mqp(ibqp);
        u64 reg_id;
        struct mlx4_ib_steering *ib_steering = NULL;
        enum mlx4_protocol prot = (gid->raw[1] == 0x0e) ?
                MLX4_PROT_IB_IPV4 : MLX4_PROT_IB_IPV6;

        if (mdev->dev->caps.steering_mode ==
            MLX4_STEERING_MODE_DEVICE_MANAGED) {
                ib_steering = kmalloc(sizeof(*ib_steering), GFP_KERNEL);
                if (!ib_steering)
                        return -ENOMEM;
        }

        err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw, mqp->port,
                                    !!(mqp->flags &
                                       MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
                                    prot, &reg_id);
        if (err)
                goto err_malloc;

        err = add_gid_entry(ibqp, gid);
        if (err)
                goto err_add;

        if (ib_steering) {
                memcpy(ib_steering->gid.raw, gid->raw, 16);
                ib_steering->reg_id = reg_id;
                mutex_lock(&mqp->mutex);
                list_add(&ib_steering->list, &mqp->steering_rules);
                mutex_unlock(&mqp->mutex);
        }
        return 0;

err_add:
        mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
                              prot, reg_id);
err_malloc:
        kfree(ib_steering);

        return err;
}

static struct mlx4_ib_gid_entry *find_gid_entry(struct mlx4_ib_qp *qp, u8 *raw)
{
        struct mlx4_ib_gid_entry *ge;
        struct mlx4_ib_gid_entry *tmp;
        struct mlx4_ib_gid_entry *ret = NULL;

        list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) {
                if (!memcmp(raw, ge->gid.raw, 16)) {
                        ret = ge;
                        break;
                }
        }

        return ret;
}

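/*
 * Reverse of mlx4_ib_mcg_attach: look up the saved registration id
 * (device managed steering), detach in firmware, then drop the
 * matching entry from the QP's GID list.
 */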
static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
        int err;
        struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
        struct mlx4_ib_qp *mqp = to_mqp(ibqp);
        struct net_device *ndev;
        struct mlx4_ib_gid_entry *ge;
        u64 reg_id = 0;
        enum mlx4_protocol prot = (gid->raw[1] == 0x0e) ?
                MLX4_PROT_IB_IPV4 : MLX4_PROT_IB_IPV6;

        if (mdev->dev->caps.steering_mode ==
            MLX4_STEERING_MODE_DEVICE_MANAGED) {
                struct mlx4_ib_steering *ib_steering;

                mutex_lock(&mqp->mutex);
                list_for_each_entry(ib_steering, &mqp->steering_rules, list) {
                        if (!memcmp(ib_steering->gid.raw, gid->raw, 16)) {
                                list_del(&ib_steering->list);
                                break;
                        }
                }
                mutex_unlock(&mqp->mutex);
                if (&ib_steering->list == &mqp->steering_rules) {
                        pr_err("Couldn't find reg_id for mgid. Steering rule is left attached\n");
                        return -EINVAL;
                }
                reg_id = ib_steering->reg_id;
                kfree(ib_steering);
        }

        err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
                                    prot, reg_id);
        if (err)
                return err;

        mutex_lock(&mqp->mutex);
        ge = find_gid_entry(mqp, gid->raw);
        if (ge) {
                spin_lock_bh(&mdev->iboe.lock);
                ndev = ge->added ? mdev->iboe.netdevs[ge->port - 1] : NULL;
                if (ndev)
                        dev_hold(ndev);
                spin_unlock_bh(&mdev->iboe.lock);
                if (ndev)
                        dev_put(ndev);
                list_del(&ge->list);
                kfree(ge);
        } else
                pr_warn("could not find mgid entry\n");

        mutex_unlock(&mqp->mutex);

        return 0;
}

static int init_node_data(struct mlx4_ib_dev *dev)
{
        struct ib_smp *in_mad  = NULL;
        struct ib_smp *out_mad = NULL;
        int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
        int err = -ENOMEM;

        in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
        out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
        if (!in_mad || !out_mad)
                goto out;

        init_query_mad(in_mad);
        in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;
        if (mlx4_is_master(dev->dev))
                mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;

        err = mlx4_MAD_IFC(dev, mad_ifc_flags, 1, NULL, NULL, in_mad, out_mad);
        if (err)
                goto out;

        memcpy(dev->ib_dev.node_desc, out_mad->data, 64);

        in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

        err = mlx4_MAD_IFC(dev, mad_ifc_flags, 1, NULL, NULL, in_mad, out_mad);
        if (err)
                goto out;

        dev->dev->rev_id = be32_to_cpup((__be32 *) (out_mad->data + 32));
        memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8);

out:
        kfree(in_mad);
        kfree(out_mad);
        return err;
}

static ssize_t show_hca(struct device *device, struct device_attribute *attr,
                        char *buf)
{
        struct mlx4_ib_dev *dev =
                container_of(device, struct mlx4_ib_dev, ib_dev.dev);
        return sprintf(buf, "MT%d\n", dev->dev->pdev->device);
}

static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr,
                           char *buf)
{
        struct mlx4_ib_dev *dev =
                container_of(device, struct mlx4_ib_dev, ib_dev.dev);
        return sprintf(buf, "%d.%d.%d\n", (int) (dev->dev->caps.fw_ver >> 32),
                       (int) (dev->dev->caps.fw_ver >> 16) & 0xffff,
                       (int) dev->dev->caps.fw_ver & 0xffff);
}

static ssize_t show_rev(struct device *device, struct device_attribute *attr,
                        char *buf)
{
        struct mlx4_ib_dev *dev =
                container_of(device, struct mlx4_ib_dev, ib_dev.dev);
        return sprintf(buf, "%x\n", dev->dev->rev_id);
}

static ssize_t show_board(struct device *device, struct device_attribute *attr,
                          char *buf)
{
        struct mlx4_ib_dev *dev =
                container_of(device, struct mlx4_ib_dev, ib_dev.dev);
        return sprintf(buf, "%.*s\n", MLX4_BOARD_ID_LEN,
                       dev->dev->board_id);
}

static DEVICE_ATTR(hw_rev,   S_IRUGO, show_rev,    NULL);
static DEVICE_ATTR(fw_ver,   S_IRUGO, show_fw_ver, NULL);
static DEVICE_ATTR(hca_type, S_IRUGO, show_hca,    NULL);
static DEVICE_ATTR(board_id, S_IRUGO, show_board,  NULL);

static struct device_attribute *mlx4_class_attributes[] = {
        &dev_attr_hw_rev,
        &dev_attr_fw_ver,
        &dev_attr_hca_type,
        &dev_attr_board_id
};

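/*
 * Derive a modified EUI-64 interface id from the 48-bit MAC: the VLAN
 * id (or ff:fe when untagged) fills the middle two bytes, and the
 * universal/local bit is flipped.
 */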
1420 static void mlx4_addrconf_ifid_eui48(u8 *eui, u16 vlan_id,
1421                                      struct net_device *dev)
1422 {
1423         memcpy(eui, dev->dev_addr, 3);
1424         memcpy(eui + 5, dev->dev_addr + 3, 3);
1425         if (vlan_id < 0x1000) {
1426                 eui[3] = vlan_id >> 8;
1427                 eui[4] = vlan_id & 0xff;
1428         } else {
1429                 eui[3] = 0xff;
1430                 eui[4] = 0xfe;
1431         }
1432         eui[0] ^= 2;
1433 }
1434
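/*
 * Deferred work: copy the software GID table snapshot into a command
 * mailbox, program it into the HCA port via SET_PORT, and notify
 * listeners with IB_EVENT_GID_CHANGE.  Running on the workqueue lets
 * callers schedule GID updates from atomic context.
 */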
1435 static void update_gids_task(struct work_struct *work)
1436 {
1437         struct update_gid_work *gw = container_of(work, struct update_gid_work, work);
1438         struct mlx4_cmd_mailbox *mailbox;
1439         union ib_gid *gids;
1440         int err;
1441         struct mlx4_dev *dev = gw->dev->dev;
1442
1443         if (!gw->dev->ib_active)
1444                 goto free;
1445
1446         mailbox = mlx4_alloc_cmd_mailbox(dev);
1447         if (IS_ERR(mailbox)) {
1448                 pr_warn("update gid table failed %ld\n", PTR_ERR(mailbox));
1449                 goto free;
1450         }
1451
1452         gids = mailbox->buf;
1453         memcpy(gids, gw->gids, sizeof gw->gids);
1454
1455         err = mlx4_cmd(dev, mailbox->dma, MLX4_SET_PORT_GID_TABLE << 8 | gw->port,
1456                        1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
1457                        MLX4_CMD_WRAPPED);
1458         if (err)
1459                 pr_warn("set port command failed\n");
1460         else
1461                 mlx4_ib_dispatch_event(gw->dev, gw->port, IB_EVENT_GID_CHANGE);
1462
1463         mlx4_free_cmd_mailbox(dev, mailbox);
free:
1464         kfree(gw);
1465 }
1466
1467 static void reset_gids_task(struct work_struct *work)
1468 {
1469         struct update_gid_work *gw =
1470                         container_of(work, struct update_gid_work, work);
1471         struct mlx4_cmd_mailbox *mailbox;
1472         union ib_gid *gids;
1473         int err;
1474         struct mlx4_dev *dev = gw->dev->dev;
1475
1476         if (!gw->dev->ib_active)
1477                 goto free;
1478
1479         mailbox = mlx4_alloc_cmd_mailbox(dev);
1480         if (IS_ERR(mailbox)) {
1481                 pr_warn("reset gid table failed\n");
1482                 goto free;
1483         }
1484
1485         gids = mailbox->buf;
1486         memcpy(gids, gw->gids, sizeof(gw->gids));
1487
1488         if (mlx4_ib_port_link_layer(&gw->dev->ib_dev, gw->port) ==
1489                                     IB_LINK_LAYER_ETHERNET) {
1490                 err = mlx4_cmd(dev, mailbox->dma,
1491                                MLX4_SET_PORT_GID_TABLE << 8 | gw->port,
1492                                1, MLX4_CMD_SET_PORT,
1493                                MLX4_CMD_TIME_CLASS_B,
1494                                MLX4_CMD_WRAPPED);
1495                 if (err)
1496                         pr_warn("set port %d command failed\n",
1497                                 gw->port);
1498         }
1499
1500         mlx4_free_cmd_mailbox(dev, mailbox);
1501 free:
1502         kfree(gw);
1503 }
1504
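/*
 * Add or clear @gid in the cached table for @port.  Slot 0 is
 * reserved for the default (link-local) gid; other slots are matched
 * or allocated by scanning from index 1.  The hardware update is
 * deferred to update_gids_task() because this runs under the iboe
 * spinlock, hence the GFP_ATOMIC allocation below.
 */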
1505 static int update_gid_table(struct mlx4_ib_dev *dev, int port,
1506                             union ib_gid *gid, int clear,
1507                             int default_gid)
1508 {
1509         struct update_gid_work *work;
1510         int i;
1511         int need_update = 0;
1512         int free = -1;
1513         int found = -1;
1514         int max_gids;
1515
1516         if (default_gid) {
1517                 free = 0;
1518         } else {
1519                 max_gids = dev->dev->caps.gid_table_len[port];
1520                 for (i = 1; i < max_gids; ++i) {
1521                         if (!memcmp(&dev->iboe.gid_table[port - 1][i], gid,
1522                                     sizeof(*gid)))
1523                                 found = i;
1524
1525                         if (clear) {
1526                                 if (found >= 0) {
1527                                         need_update = 1;
1528                                         dev->iboe.gid_table[port - 1][found] =
1529                                                 zgid;
1530                                         break;
1531                                 }
1532                         } else {
1533                                 if (found >= 0)
1534                                         break;
1535
1536                                 if (free < 0 &&
1537                                     !memcmp(&dev->iboe.gid_table[port - 1][i],
1538                                             &zgid, sizeof(*gid)))
1539                                         free = i;
1540                         }
1541                 }
1542         }
1543
1544         if (found == -1 && !clear && free >= 0) {
1545                 dev->iboe.gid_table[port - 1][free] = *gid;
1546                 need_update = 1;
1547         }
1548
1549         if (!need_update)
1550                 return 0;
1551
1552         work = kzalloc(sizeof(*work), GFP_ATOMIC);
1553         if (!work)
1554                 return -ENOMEM;
1555
1556         memcpy(work->gids, dev->iboe.gid_table[port - 1], sizeof(work->gids));
1557         INIT_WORK(&work->work, update_gids_task);
1558         work->port = port;
1559         work->dev = dev;
1560         queue_work(wq, &work->work);
1561
1562         return 0;
1563 }
1564
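/* default gid: fe80::/64 prefix plus the MAC-derived EUI-64, no VLAN */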
1565 static void mlx4_make_default_gid(struct  net_device *dev, union ib_gid *gid)
1566 {
1567         gid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
1568         mlx4_addrconf_ifid_eui48(&gid->raw[8], 0xffff, dev);
1569 }
1570
1572 static int reset_gid_table(struct mlx4_ib_dev *dev, u8 port)
1573 {
1574         struct update_gid_work *work;
1575
1576         work = kzalloc(sizeof(*work), GFP_ATOMIC);
1577         if (!work)
1578                 return -ENOMEM;
1579
1580         memset(dev->iboe.gid_table[port - 1], 0, sizeof(work->gids));
1581         memset(work->gids, 0, sizeof(work->gids));
1582         INIT_WORK(&work->work, reset_gids_task);
1583         work->dev = dev;
1584         work->port = port;
1585         queue_work(wq, &work->work);
1586         return 0;
1587 }
1588
1589 static int mlx4_ib_addr_event(int event, struct net_device *event_netdev,
1590                               struct mlx4_ib_dev *ibdev, union ib_gid *gid)
1591 {
1592         struct mlx4_ib_iboe *iboe;
1593         int port = 0;
1594         struct net_device *real_dev = rdma_vlan_dev_real_dev(event_netdev) ?
1595                                 rdma_vlan_dev_real_dev(event_netdev) :
1596                                 event_netdev;
1597         union ib_gid default_gid;
1598
1599         mlx4_make_default_gid(real_dev, &default_gid);
1600
1601         if (!memcmp(gid, &default_gid, sizeof(*gid)))
1602                 return 0;
1603
1604         if (event != NETDEV_DOWN && event != NETDEV_UP)
1605                 return 0;
1606
1607         if ((real_dev != event_netdev) &&
1608             (event == NETDEV_DOWN) &&
1609             rdma_link_local_addr((struct in6_addr *)gid))
1610                 return 0;
1611
1612         iboe = &ibdev->iboe;
1613         spin_lock_bh(&iboe->lock);
1614
1615         for (port = 1; port <= ibdev->dev->caps.num_ports; ++port)
1616                 if ((netif_is_bond_master(real_dev) &&
1617                      (real_dev == iboe->masters[port - 1])) ||
1618                      (!netif_is_bond_master(real_dev) &&
1619                      (real_dev == iboe->netdevs[port - 1])))
1620                         update_gid_table(ibdev, port, gid,
1621                                          event == NETDEV_DOWN, 0);
1622
1623         spin_unlock_bh(&iboe->lock);
1624         return 0;
1626 }
1627
1628 static u8 mlx4_ib_get_dev_port(struct net_device *dev,
1629                                struct mlx4_ib_dev *ibdev)
1630 {
1631         u8 port = 0;
1632         struct mlx4_ib_iboe *iboe;
1633         struct net_device *real_dev = rdma_vlan_dev_real_dev(dev) ?
1634                                 rdma_vlan_dev_real_dev(dev) : dev;
1635
1636         iboe = &ibdev->iboe;
1637
1638         for (port = 1; port <= ibdev->dev->caps.num_ports; ++port)
1639                 if ((netif_is_bond_master(real_dev) &&
1640                      (real_dev == iboe->masters[port - 1])) ||
1641                      (!netif_is_bond_master(real_dev) &&
1642                      (real_dev == iboe->netdevs[port - 1])))
1643                         break;
1644
1645         if ((port == 0) || (port > ibdev->dev->caps.num_ports))
1646                 return 0;
1647         else
1648                 return port;
1649 }
1650
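/* IPv4 addresses enter the gid table in v4-mapped form (::ffff:a.b.c.d) */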
1651 static int mlx4_ib_inet_event(struct notifier_block *this, unsigned long event,
1652                                 void *ptr)
1653 {
1654         struct mlx4_ib_dev *ibdev;
1655         struct in_ifaddr *ifa = ptr;
1656         union ib_gid gid;
1657         struct net_device *event_netdev = ifa->ifa_dev->dev;
1658
1659         ipv6_addr_set_v4mapped(ifa->ifa_address, (struct in6_addr *)&gid);
1660
1661         ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb_inet);
1662
1663         mlx4_ib_addr_event(event, event_netdev, ibdev, &gid);
1664         return NOTIFY_DONE;
1665 }
1666
1667 #if IS_ENABLED(CONFIG_IPV6)
1668 static int mlx4_ib_inet6_event(struct notifier_block *this, unsigned long event,
1669                                 void *ptr)
1670 {
1671         struct mlx4_ib_dev *ibdev;
1672         struct inet6_ifaddr *ifa = ptr;
1673         union  ib_gid *gid = (union ib_gid *)&ifa->addr;
1674         struct net_device *event_netdev = ifa->idev->dev;
1675
1676         ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb_inet6);
1677
1678         mlx4_ib_addr_event(event, event_netdev, ibdev, gid);
1679         return NOTIFY_DONE;
1680 }
1681 #endif
1682
1683 #define MLX4_IB_INVALID_MAC     ((u64)-1)
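/*
 * Track the port's current source MAC and, under SR-IOV, keep the
 * proxy QP1 usable across address changes: register the new MAC,
 * repoint the QP with MLX4_UPDATE_QP_SMAC, and unregister whichever
 * MAC is no longer referenced.
 */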
1684 static void mlx4_ib_update_qps(struct mlx4_ib_dev *ibdev,
1685                                struct net_device *dev,
1686                                int port)
1687 {
1688         u64 new_smac = 0;
1689         u64 release_mac = MLX4_IB_INVALID_MAC;
1690         struct mlx4_ib_qp *qp;
1691
1692         read_lock(&dev_base_lock);
1693         new_smac = mlx4_mac_to_u64(dev->dev_addr);
1694         read_unlock(&dev_base_lock);
1695
1696         atomic64_set(&ibdev->iboe.mac[port - 1], new_smac);
1697
1698         /* no need to update QP1 or register the MAC in non-SRIOV mode */
1699         if (!mlx4_is_mfunc(ibdev->dev))
1700                 return;
1701
1702         mutex_lock(&ibdev->qp1_proxy_lock[port - 1]);
1703         qp = ibdev->qp1_proxy[port - 1];
1704         if (qp) {
1705                 int new_smac_index;
1706                 u64 old_smac;
1707                 struct mlx4_update_qp_params update_params;
1708
1709                 mutex_lock(&qp->mutex);
1710                 old_smac = qp->pri.smac;
1711                 if (new_smac == old_smac)
1712                         goto unlock;
1713
1714                 new_smac_index = mlx4_register_mac(ibdev->dev, port, new_smac);
1715
1716                 if (new_smac_index < 0)
1717                         goto unlock;
1718
1719                 update_params.smac_index = new_smac_index;
1720                 if (mlx4_update_qp(ibdev->dev, qp->mqp.qpn, MLX4_UPDATE_QP_SMAC,
1721                                    &update_params)) {
1722                         release_mac = new_smac;
1723                         goto unlock;
1724                 }
1725                 /* if old port was zero, no mac was yet registered for this QP */
1726                 if (qp->pri.smac_port)
1727                         release_mac = old_smac;
1728                 qp->pri.smac = new_smac;
1729                 qp->pri.smac_port = port;
1730                 qp->pri.smac_index = new_smac_index;
1731         }
1732
1733 unlock:
1734         if (release_mac != MLX4_IB_INVALID_MAC)
1735                 mlx4_unregister_mac(ibdev->dev, port, release_mac);
1736         if (qp)
1737                 mutex_unlock(&qp->mutex);
1738         mutex_unlock(&ibdev->qp1_proxy_lock[port - 1]);
1739 }
1740
1741 static void mlx4_ib_get_dev_addr(struct net_device *dev,
1742                                  struct mlx4_ib_dev *ibdev, u8 port)
1743 {
1744         struct in_device *in_dev;
1745 #if IS_ENABLED(CONFIG_IPV6)
1746         struct inet6_dev *in6_dev;
1747         union ib_gid  *pgid;
1748         struct inet6_ifaddr *ifp;
1749         union ib_gid default_gid;
1750 #endif
1751         union ib_gid gid;
1752
1754         if ((port == 0) || (port > ibdev->dev->caps.num_ports))
1755                 return;
1756
1757         /* IPv4 gids */
1758         in_dev = in_dev_get(dev);
1759         if (in_dev) {
1760                 for_ifa(in_dev) {
1761                         /* publish each IPv4 address as a v4-mapped gid */
1762                         ipv6_addr_set_v4mapped(ifa->ifa_address,
1763                                                (struct in6_addr *)&gid);
1764                         update_gid_table(ibdev, port, &gid, 0, 0);
1765                 }
1766                 endfor_ifa(in_dev);
1767                 in_dev_put(in_dev);
1768         }
1769 #if IS_ENABLED(CONFIG_IPV6)
1770         mlx4_make_default_gid(dev, &default_gid);
1771         /* IPv6 gids */
1772         in6_dev = in6_dev_get(dev);
1773         if (in6_dev) {
1774                 read_lock_bh(&in6_dev->lock);
1775                 list_for_each_entry(ifp, &in6_dev->addr_list, if_list) {
1776                         pgid = (union ib_gid *)&ifp->addr;
1777                         if (!memcmp(pgid, &default_gid, sizeof(*pgid)))
1778                                 continue;
1779                         update_gid_table(ibdev, port, pgid, 0, 0);
1780                 }
1781                 read_unlock_bh(&in6_dev->lock);
1782                 in6_dev_put(in6_dev);
1783         }
1784 #endif
1785 }
1786
1787 static void mlx4_ib_set_default_gid(struct mlx4_ib_dev *ibdev,
1788                                  struct  net_device *dev, u8 port)
1789 {
1790         union ib_gid gid;
1791         mlx4_make_default_gid(dev, &gid);
1792         update_gid_table(ibdev, port, &gid, 0, 1);
1793 }
1794
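/*
 * One-shot table population at device init: reset every ethernet
 * port's gid table, then walk init_net's netdevs and load the default
 * gid plus address-derived gids for each port we own.
 */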
1795 static int mlx4_ib_init_gid_table(struct mlx4_ib_dev *ibdev)
1796 {
1797         struct  net_device *dev;
1798         struct mlx4_ib_iboe *iboe = &ibdev->iboe;
1799         int i;
1800         int err = 0;
1801
1802         for (i = 1; i <= ibdev->num_ports; ++i) {
1803                 if (rdma_port_get_link_layer(&ibdev->ib_dev, i) ==
1804                     IB_LINK_LAYER_ETHERNET) {
1805                         err = reset_gid_table(ibdev, i);
1806                         if (err)
1807                                 goto out;
1808                 }
1809         }
1810
1811         read_lock(&dev_base_lock);
1812         spin_lock_bh(&iboe->lock);
1813
1814         for_each_netdev(&init_net, dev) {
1815                 u8 port = mlx4_ib_get_dev_port(dev, ibdev);
1816                 /* port will be non-zero only for ETH ports */
1817                 if (port) {
1818                         mlx4_ib_set_default_gid(ibdev, dev, port);
1819                         mlx4_ib_get_dev_addr(dev, ibdev, port);
1820                 }
1821         }
1822
1823         spin_unlock_bh(&iboe->lock);
1824         read_unlock(&dev_base_lock);
1825 out:
1826         return err;
1827 }
1828
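/*
 * Rescan per-port netdev and bond-master bindings after a netdev
 * event and rebuild the affected gid tables; when the port's own
 * netdev changed address or came up, its proxy QP1 SMAC is refreshed
 * afterwards via mlx4_ib_update_qps().
 */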
1829 static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev,
1830                                  struct net_device *dev,
1831                                  unsigned long event)
1832 {
1834         struct mlx4_ib_iboe *iboe;
1835         int update_qps_port = -1;
1836         int port;
1837
1838         iboe = &ibdev->iboe;
1839
1840         spin_lock_bh(&iboe->lock);
1841         mlx4_foreach_ib_transport_port(port, ibdev->dev) {
1842                 enum ib_port_state      port_state = IB_PORT_NOP;
1843                 struct net_device *old_master = iboe->masters[port - 1];
1844                 struct net_device *curr_netdev;
1845                 struct net_device *curr_master;
1846
1847                 iboe->netdevs[port - 1] =
1848                         mlx4_get_protocol_dev(ibdev->dev, MLX4_PROT_ETH, port);
1849                 if (iboe->netdevs[port - 1])
1850                         mlx4_ib_set_default_gid(ibdev,
1851                                                 iboe->netdevs[port - 1], port);
1852                 curr_netdev = iboe->netdevs[port - 1];
1853
1854                 if (iboe->netdevs[port - 1] &&
1855                     netif_is_bond_slave(iboe->netdevs[port - 1])) {
1856                         iboe->masters[port - 1] = netdev_master_upper_dev_get(
1857                                 iboe->netdevs[port - 1]);
1858                 } else {
1859                         iboe->masters[port - 1] = NULL;
1860                 }
1861                 curr_master = iboe->masters[port - 1];
1862
1863                 if (dev == iboe->netdevs[port - 1] &&
1864                     (event == NETDEV_CHANGEADDR || event == NETDEV_REGISTER ||
1865                      event == NETDEV_UP || event == NETDEV_CHANGE))
1866                         update_qps_port = port;
1867
1868                 if (curr_netdev) {
1869                         port_state = (netif_running(curr_netdev) && netif_carrier_ok(curr_netdev)) ?
1870                                                 IB_PORT_ACTIVE : IB_PORT_DOWN;
1871                         mlx4_ib_set_default_gid(ibdev, curr_netdev, port);
1872                         if (curr_master) {
1873                                 /* if using bonding/team and a slave port is down, we
1874                                  * don't want the bond IP based gids in the table since
1875                                  * flows that select port by gid may get the down port.
1876                                 */
1877                                 if (port_state == IB_PORT_DOWN) {
1878                                         reset_gid_table(ibdev, port);
1879                                         mlx4_ib_set_default_gid(ibdev,
1880                                                                 curr_netdev,
1881                                                                 port);
1882                                 } else {
1883                                         /* gids from the upper dev (bond/team)
1884                                          * should appear in port's gid table
1885                                         */
1886                                         mlx4_ib_get_dev_addr(curr_master,
1887                                                              ibdev, port);
1888                                 }
1889                         }
1890                         /* with bonding, the master may be tracked only
1891                          * after an IP address was already assigned to the
1892                          * bond interface, so re-sync gids on master change.
1893                         */
1894                         if (curr_master && (old_master != curr_master)) {
1895                                 reset_gid_table(ibdev, port);
1896                                 mlx4_ib_set_default_gid(ibdev,
1897                                                         curr_netdev, port);
1898                                 mlx4_ib_get_dev_addr(curr_master, ibdev, port);
1899                         }
1900
1901                         if (!curr_master && (old_master != curr_master)) {
1902                                 reset_gid_table(ibdev, port);
1903                                 mlx4_ib_set_default_gid(ibdev,
1904                                                         curr_netdev, port);
1905                                 mlx4_ib_get_dev_addr(curr_netdev, ibdev, port);
1906                         }
1907                 } else {
1908                         reset_gid_table(ibdev, port);
1909                 }
1910         }
1911
1912         spin_unlock_bh(&iboe->lock);
1913
1914         if (update_qps_port > 0)
1915                 mlx4_ib_update_qps(ibdev, dev, update_qps_port);
1916 }
1917
1918 static int mlx4_ib_netdev_event(struct notifier_block *this,
1919                                 unsigned long event, void *ptr)
1920 {
1921         struct net_device *dev = netdev_notifier_info_to_dev(ptr);
1922         struct mlx4_ib_dev *ibdev;
1923
1924         if (!net_eq(dev_net(dev), &init_net))
1925                 return NOTIFY_DONE;
1926
1927         ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb);
1928         mlx4_ib_scan_netdevs(ibdev, dev, event);
1929
1930         return NOTIFY_DONE;
1931 }
1932
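/*
 * Seed the paravirtual pkey tables on the master: index 0 always maps
 * to the physical default pkey, the master itself gets an identity
 * virt2phys mapping, and all other slave indexes point at the last
 * physical entry.  The physical pkey cache starts as 0xFFFF at index
 * 0 and zero elsewhere.
 */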
1933 static void init_pkeys(struct mlx4_ib_dev *ibdev)
1934 {
1935         int port;
1936         int slave;
1937         int i;
1938
1939         if (mlx4_is_master(ibdev->dev)) {
1940                 for (slave = 0; slave <= ibdev->dev->num_vfs; ++slave) {
1941                         for (port = 1; port <= ibdev->dev->caps.num_ports; ++port) {
1942                                 for (i = 0;
1943                                      i < ibdev->dev->phys_caps.pkey_phys_table_len[port];
1944                                      ++i) {
1945                                         /* master has the identity virt2phys pkey mapping */
1946                                         ibdev->pkeys.virt2phys_pkey[slave][port - 1][i] =
1947                                                 (slave == mlx4_master_func_num(ibdev->dev) || !i) ? i :
1948                                                         ibdev->dev->phys_caps.pkey_phys_table_len[port] - 1;
1949                                         mlx4_sync_pkey_table(ibdev->dev, slave, port, i,
1950                                                              ibdev->pkeys.virt2phys_pkey[slave][port - 1][i]);
1951                                 }
1952                         }
1953                 }
1954                 /* initialize pkey cache */
1955                 for (port = 1; port <= ibdev->dev->caps.num_ports; ++port) {
1956                         for (i = 0;
1957                              i < ibdev->dev->phys_caps.pkey_phys_table_len[port];
1958                              ++i)
1959                                 ibdev->pkeys.phys_pkey_cache[port-1][i] =
1960                                         (i) ? 0 : 0xFFFF;
1961                 }
1962         }
1963 }
1964
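/*
 * Carve dedicated completion EQs out of the device's EQ pool, spread
 * evenly across IB ports (comp_pool / num_ports each).  The vector
 * table lists the added EQs first and the legacy vectors after them;
 * an EQ that cannot be assigned falls back to a legacy vector.
 */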
1965 static void mlx4_ib_alloc_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
1966 {
1967         char name[80];
1968         int eq_per_port = 0;
1969         int added_eqs = 0;
1970         int total_eqs = 0;
1971         int i, j, eq;
1972
1973         /* Legacy mode or comp_pool is not large enough */
1974         if (dev->caps.comp_pool == 0 ||
1975             dev->caps.num_ports > dev->caps.comp_pool)
1976                 return;
1977
1978         eq_per_port = dev->caps.comp_pool / dev->caps.num_ports;
1979
1980         /* Init eq table */
1981         added_eqs = 0;
1982         mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
1983                 added_eqs += eq_per_port;
1984
1985         total_eqs = dev->caps.num_comp_vectors + added_eqs;
1986
1987         ibdev->eq_table = kcalloc(total_eqs, sizeof(int), GFP_KERNEL);
1988         if (!ibdev->eq_table)
1989                 return;
1990
1991         ibdev->eq_added = added_eqs;
1992
1993         eq = 0;
1994         mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB) {
1995                 for (j = 0; j < eq_per_port; j++) {
1996                         snprintf(name, sizeof(name), "mlx4-ib-%d-%d@%s",
1997                                  i, j, dev->pdev->bus->name);
1998                         /* Set IRQ for specific name (per ring) */
1999                         if (mlx4_assign_eq(dev, name, NULL,
2000                                            &ibdev->eq_table[eq])) {
2001                                 /* Use legacy (same as mlx4_en driver) */
2002                                 pr_warn("Can't allocate EQ %d; reverting to legacy\n", eq);
2003                                 ibdev->eq_table[eq] =
2004                                         (eq % dev->caps.num_comp_vectors);
2005                         }
2006                         eq++;
2007                 }
2008         }
2009
2010         /* Fill the rest of the vector with legacy EQs */
2011         for (i = 0, eq = added_eqs; i < dev->caps.num_comp_vectors; i++)
2012                 ibdev->eq_table[eq++] = i;
2013
2014         /* Advertise the new number of EQs to clients */
2015         ibdev->ib_dev.num_comp_vectors = total_eqs;
2016 }
2017
2018 static void mlx4_ib_free_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
2019 {
2020         int i;
2021
2022         /* no additional eqs were added */
2023         if (!ibdev->eq_table)
2024                 return;
2025
2026         /* Reset the advertised EQ number */
2027         ibdev->ib_dev.num_comp_vectors = dev->caps.num_comp_vectors;
2028
2029         /* Free only the added eqs */
2030         for (i = 0; i < ibdev->eq_added; i++) {
2031                 /* Don't free legacy eqs if used */
2032                 if (ibdev->eq_table[i] <= dev->caps.num_comp_vectors)
2033                         continue;
2034                 mlx4_release_eq(dev, ibdev->eq_table[i]);
2035         }
2036
2037         kfree(ibdev->eq_table);
2038 }
2039
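/*
 * mlx4 core add callback: allocate the IB device, claim a PD and UAR,
 * publish the verbs entry points and optional capabilities (FMR,
 * memory windows, XRC, device-managed flow steering), register with
 * the IB core, and hook the netdev/inet/inet6 notifiers that drive
 * RoCE gid management.
 */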
2040 static void *mlx4_ib_add(struct mlx4_dev *dev)
2041 {
2042         struct mlx4_ib_dev *ibdev;
2043         int num_ports = 0;
2044         int i, j;
2045         int err;
2046         struct mlx4_ib_iboe *iboe;
2047         int ib_num_ports = 0;
2048
2049         pr_info_once("%s", mlx4_ib_version);
2050
2052         mlx4_foreach_ib_transport_port(i, dev)
2053                 num_ports++;
2054
2055         /* No point in registering a device with no ports... */
2056         if (num_ports == 0)
2057                 return NULL;
2058
2059         ibdev = (struct mlx4_ib_dev *) ib_alloc_device(sizeof *ibdev);
2060         if (!ibdev) {
2061                 dev_err(&dev->pdev->dev, "Device struct alloc failed\n");
2062                 return NULL;
2063         }
2064
2065         iboe = &ibdev->iboe;
2066
2067         if (mlx4_pd_alloc(dev, &ibdev->priv_pdn))
2068                 goto err_dealloc;
2069
2070         if (mlx4_uar_alloc(dev, &ibdev->priv_uar))
2071                 goto err_pd;
2072
2073         ibdev->uar_map = ioremap((phys_addr_t) ibdev->priv_uar.pfn << PAGE_SHIFT,
2074                                  PAGE_SIZE);
2075         if (!ibdev->uar_map)
2076                 goto err_uar;
2077         MLX4_INIT_DOORBELL_LOCK(&ibdev->uar_lock);
2078
2079         ibdev->dev = dev;
2080
2081         strlcpy(ibdev->ib_dev.name, "mlx4_%d", IB_DEVICE_NAME_MAX);
2082         ibdev->ib_dev.owner             = THIS_MODULE;
2083         ibdev->ib_dev.node_type         = RDMA_NODE_IB_CA;
2084         ibdev->ib_dev.local_dma_lkey    = dev->caps.reserved_lkey;
2085         ibdev->num_ports                = num_ports;
2086         ibdev->ib_dev.phys_port_cnt     = ibdev->num_ports;
2087         ibdev->ib_dev.num_comp_vectors  = dev->caps.num_comp_vectors;
2088         ibdev->ib_dev.dma_device        = &dev->pdev->dev;
2089
2090         if (dev->caps.userspace_caps)
2091                 ibdev->ib_dev.uverbs_abi_ver = MLX4_IB_UVERBS_ABI_VERSION;
2092         else
2093                 ibdev->ib_dev.uverbs_abi_ver = MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION;
2094
2095         ibdev->ib_dev.uverbs_cmd_mask   =
2096                 (1ull << IB_USER_VERBS_CMD_GET_CONTEXT)         |
2097                 (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE)        |
2098                 (1ull << IB_USER_VERBS_CMD_QUERY_PORT)          |
2099                 (1ull << IB_USER_VERBS_CMD_ALLOC_PD)            |
2100                 (1ull << IB_USER_VERBS_CMD_DEALLOC_PD)          |
2101                 (1ull << IB_USER_VERBS_CMD_REG_MR)              |
2102                 (1ull << IB_USER_VERBS_CMD_REREG_MR)            |
2103                 (1ull << IB_USER_VERBS_CMD_DEREG_MR)            |
2104                 (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
2105                 (1ull << IB_USER_VERBS_CMD_CREATE_CQ)           |
2106                 (1ull << IB_USER_VERBS_CMD_RESIZE_CQ)           |
2107                 (1ull << IB_USER_VERBS_CMD_DESTROY_CQ)          |
2108                 (1ull << IB_USER_VERBS_CMD_CREATE_QP)           |
2109                 (1ull << IB_USER_VERBS_CMD_MODIFY_QP)           |
2110                 (1ull << IB_USER_VERBS_CMD_QUERY_QP)            |
2111                 (1ull << IB_USER_VERBS_CMD_DESTROY_QP)          |
2112                 (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST)        |
2113                 (1ull << IB_USER_VERBS_CMD_DETACH_MCAST)        |
2114                 (1ull << IB_USER_VERBS_CMD_CREATE_SRQ)          |
2115                 (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ)          |
2116                 (1ull << IB_USER_VERBS_CMD_QUERY_SRQ)           |
2117                 (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ)         |
2118                 (1ull << IB_USER_VERBS_CMD_CREATE_XSRQ)         |
2119                 (1ull << IB_USER_VERBS_CMD_OPEN_QP);
2120
2121         ibdev->ib_dev.query_device      = mlx4_ib_query_device;
2122         ibdev->ib_dev.query_port        = mlx4_ib_query_port;
2123         ibdev->ib_dev.get_link_layer    = mlx4_ib_port_link_layer;
2124         ibdev->ib_dev.query_gid         = mlx4_ib_query_gid;
2125         ibdev->ib_dev.query_pkey        = mlx4_ib_query_pkey;
2126         ibdev->ib_dev.modify_device     = mlx4_ib_modify_device;
2127         ibdev->ib_dev.modify_port       = mlx4_ib_modify_port;
2128         ibdev->ib_dev.alloc_ucontext    = mlx4_ib_alloc_ucontext;
2129         ibdev->ib_dev.dealloc_ucontext  = mlx4_ib_dealloc_ucontext;
2130         ibdev->ib_dev.mmap              = mlx4_ib_mmap;
2131         ibdev->ib_dev.alloc_pd          = mlx4_ib_alloc_pd;
2132         ibdev->ib_dev.dealloc_pd        = mlx4_ib_dealloc_pd;
2133         ibdev->ib_dev.create_ah         = mlx4_ib_create_ah;
2134         ibdev->ib_dev.query_ah          = mlx4_ib_query_ah;
2135         ibdev->ib_dev.destroy_ah        = mlx4_ib_destroy_ah;
2136         ibdev->ib_dev.create_srq        = mlx4_ib_create_srq;
2137         ibdev->ib_dev.modify_srq        = mlx4_ib_modify_srq;
2138         ibdev->ib_dev.query_srq         = mlx4_ib_query_srq;
2139         ibdev->ib_dev.destroy_srq       = mlx4_ib_destroy_srq;
2140         ibdev->ib_dev.post_srq_recv     = mlx4_ib_post_srq_recv;
2141         ibdev->ib_dev.create_qp         = mlx4_ib_create_qp;
2142         ibdev->ib_dev.modify_qp         = mlx4_ib_modify_qp;
2143         ibdev->ib_dev.query_qp          = mlx4_ib_query_qp;
2144         ibdev->ib_dev.destroy_qp        = mlx4_ib_destroy_qp;
2145         ibdev->ib_dev.post_send         = mlx4_ib_post_send;
2146         ibdev->ib_dev.post_recv         = mlx4_ib_post_recv;
2147         ibdev->ib_dev.create_cq         = mlx4_ib_create_cq;
2148         ibdev->ib_dev.modify_cq         = mlx4_ib_modify_cq;
2149         ibdev->ib_dev.resize_cq         = mlx4_ib_resize_cq;
2150         ibdev->ib_dev.destroy_cq        = mlx4_ib_destroy_cq;
2151         ibdev->ib_dev.poll_cq           = mlx4_ib_poll_cq;
2152         ibdev->ib_dev.req_notify_cq     = mlx4_ib_arm_cq;
2153         ibdev->ib_dev.get_dma_mr        = mlx4_ib_get_dma_mr;
2154         ibdev->ib_dev.reg_user_mr       = mlx4_ib_reg_user_mr;
2155         ibdev->ib_dev.rereg_user_mr     = mlx4_ib_rereg_user_mr;
2156         ibdev->ib_dev.dereg_mr          = mlx4_ib_dereg_mr;
2157         ibdev->ib_dev.alloc_fast_reg_mr = mlx4_ib_alloc_fast_reg_mr;
2158         ibdev->ib_dev.alloc_fast_reg_page_list = mlx4_ib_alloc_fast_reg_page_list;
2159         ibdev->ib_dev.free_fast_reg_page_list  = mlx4_ib_free_fast_reg_page_list;
2160         ibdev->ib_dev.attach_mcast      = mlx4_ib_mcg_attach;
2161         ibdev->ib_dev.detach_mcast      = mlx4_ib_mcg_detach;
2162         ibdev->ib_dev.process_mad       = mlx4_ib_process_mad;
2163
2164         if (!mlx4_is_slave(ibdev->dev)) {
2165                 ibdev->ib_dev.alloc_fmr         = mlx4_ib_fmr_alloc;
2166                 ibdev->ib_dev.map_phys_fmr      = mlx4_ib_map_phys_fmr;
2167                 ibdev->ib_dev.unmap_fmr         = mlx4_ib_unmap_fmr;
2168                 ibdev->ib_dev.dealloc_fmr       = mlx4_ib_fmr_dealloc;
2169         }
2170
2171         if (dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW ||
2172             dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN) {
2173                 ibdev->ib_dev.alloc_mw = mlx4_ib_alloc_mw;
2174                 ibdev->ib_dev.bind_mw = mlx4_ib_bind_mw;
2175                 ibdev->ib_dev.dealloc_mw = mlx4_ib_dealloc_mw;
2176
2177                 ibdev->ib_dev.uverbs_cmd_mask |=
2178                         (1ull << IB_USER_VERBS_CMD_ALLOC_MW) |
2179                         (1ull << IB_USER_VERBS_CMD_DEALLOC_MW);
2180         }
2181
2182         if (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) {
2183                 ibdev->ib_dev.alloc_xrcd = mlx4_ib_alloc_xrcd;
2184                 ibdev->ib_dev.dealloc_xrcd = mlx4_ib_dealloc_xrcd;
2185                 ibdev->ib_dev.uverbs_cmd_mask |=
2186                         (1ull << IB_USER_VERBS_CMD_OPEN_XRCD) |
2187                         (1ull << IB_USER_VERBS_CMD_CLOSE_XRCD);
2188         }
2189
2190         if (check_flow_steering_support(dev)) {
2191                 ibdev->steering_support = MLX4_STEERING_MODE_DEVICE_MANAGED;
2192                 ibdev->ib_dev.create_flow       = mlx4_ib_create_flow;
2193                 ibdev->ib_dev.destroy_flow      = mlx4_ib_destroy_flow;
2194
2195                 ibdev->ib_dev.uverbs_ex_cmd_mask        |=
2196                         (1ull << IB_USER_VERBS_EX_CMD_CREATE_FLOW) |
2197                         (1ull << IB_USER_VERBS_EX_CMD_DESTROY_FLOW);
2198         }
2199
2200         mlx4_ib_alloc_eqs(dev, ibdev);
2201
2202         spin_lock_init(&iboe->lock);
2203
2204         if (init_node_data(ibdev))
2205                 goto err_map;
2206
2207         for (i = 0; i < ibdev->num_ports; ++i) {
2208                 mutex_init(&ibdev->qp1_proxy_lock[i]);
2209                 if (mlx4_ib_port_link_layer(&ibdev->ib_dev, i + 1) ==
2210                                                 IB_LINK_LAYER_ETHERNET) {
2211                         err = mlx4_counter_alloc(ibdev->dev, &ibdev->counters[i]);
2212                         if (err)
2213                                 ibdev->counters[i] = -1;
2214                 } else {
2215                         ibdev->counters[i] = -1;
2216                 }
2217         }
2218
2219         mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
2220                 ib_num_ports++;
2221
2222         spin_lock_init(&ibdev->sm_lock);
2223         mutex_init(&ibdev->cap_mask_mutex);
2224
2225         if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED &&
2226             ib_num_ports) {
2227                 ibdev->steer_qpn_count = MLX4_IB_UC_MAX_NUM_QPS;
2228                 err = mlx4_qp_reserve_range(dev, ibdev->steer_qpn_count,
2229                                             MLX4_IB_UC_STEER_QPN_ALIGN,
2230                                             &ibdev->steer_qpn_base, 0);
2231                 if (err)
2232                         goto err_counter;
2233
2234                 ibdev->ib_uc_qpns_bitmap =
2235                         kmalloc(BITS_TO_LONGS(ibdev->steer_qpn_count) *
2236                                 sizeof(long),
2237                                 GFP_KERNEL);
2238                 if (!ibdev->ib_uc_qpns_bitmap) {
2239                         dev_err(&dev->pdev->dev, "bit map alloc failed\n");
2240                         goto err_steer_qp_release;
2241                 }
2242
2243                 bitmap_zero(ibdev->ib_uc_qpns_bitmap, ibdev->steer_qpn_count);
2244
2245                 err = mlx4_FLOW_STEERING_IB_UC_QP_RANGE(
2246                                 dev, ibdev->steer_qpn_base,
2247                                 ibdev->steer_qpn_base +
2248                                 ibdev->steer_qpn_count - 1);
2249                 if (err)
2250                         goto err_steer_free_bitmap;
2251         }
2252
2253         for (j = 1; j <= ibdev->dev->caps.num_ports; j++)
2254                 atomic64_set(&iboe->mac[j - 1], ibdev->dev->caps.def_mac[j]);
2255
2256         if (ib_register_device(&ibdev->ib_dev, NULL))
2257                 goto err_steer_free_bitmap;
2258
2259         if (mlx4_ib_mad_init(ibdev))
2260                 goto err_reg;
2261
2262         if (mlx4_ib_init_sriov(ibdev))
2263                 goto err_mad;
2264
2265         if (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE) {
2266                 if (!iboe->nb.notifier_call) {
2267                         iboe->nb.notifier_call = mlx4_ib_netdev_event;
2268                         err = register_netdevice_notifier(&iboe->nb);
2269                         if (err) {
2270                                 iboe->nb.notifier_call = NULL;
2271                                 goto err_notif;
2272                         }
2273                 }
2274                 if (!iboe->nb_inet.notifier_call) {
2275                         iboe->nb_inet.notifier_call = mlx4_ib_inet_event;
2276                         err = register_inetaddr_notifier(&iboe->nb_inet);
2277                         if (err) {
2278                                 iboe->nb_inet.notifier_call = NULL;
2279                                 goto err_notif;
2280                         }
2281                 }
2282 #if IS_ENABLED(CONFIG_IPV6)
2283                 if (!iboe->nb_inet6.notifier_call) {
2284                         iboe->nb_inet6.notifier_call = mlx4_ib_inet6_event;
2285                         err = register_inet6addr_notifier(&iboe->nb_inet6);
2286                         if (err) {
2287                                 iboe->nb_inet6.notifier_call = NULL;
2288                                 goto err_notif;
2289                         }
2290                 }
2291 #endif
2292                 if (mlx4_ib_init_gid_table(ibdev))
2293                         goto err_notif;
2294         }
2295
2296         for (j = 0; j < ARRAY_SIZE(mlx4_class_attributes); ++j) {
2297                 if (device_create_file(&ibdev->ib_dev.dev,
2298                                        mlx4_class_attributes[j]))
2299                         goto err_notif;
2300         }
2301
2302         ibdev->ib_active = true;
2303
2304         if (mlx4_is_mfunc(ibdev->dev))
2305                 init_pkeys(ibdev);
2306
2307         /* create paravirt contexts for any VFs which are active */
2308         if (mlx4_is_master(ibdev->dev)) {
2309                 for (j = 0; j < MLX4_MFUNC_MAX; j++) {
2310                         if (j == mlx4_master_func_num(ibdev->dev))
2311                                 continue;
2312                         if (mlx4_is_slave_active(ibdev->dev, j))
2313                                 do_slave_init(ibdev, j, 1);
2314                 }
2315         }
2316         return ibdev;
2317
2318 err_notif:
2319         if (ibdev->iboe.nb.notifier_call) {
2320                 if (unregister_netdevice_notifier(&ibdev->iboe.nb))
2321                         pr_warn("failure unregistering notifier\n");
2322                 ibdev->iboe.nb.notifier_call = NULL;
2323         }
2324         if (ibdev->iboe.nb_inet.notifier_call) {
2325                 if (unregister_inetaddr_notifier(&ibdev->iboe.nb_inet))
2326                         pr_warn("failure unregistering notifier\n");
2327                 ibdev->iboe.nb_inet.notifier_call = NULL;
2328         }
2329 #if IS_ENABLED(CONFIG_IPV6)
2330         if (ibdev->iboe.nb_inet6.notifier_call) {
2331                 if (unregister_inet6addr_notifier(&ibdev->iboe.nb_inet6))
2332                         pr_warn("failure unregistering notifier\n");
2333                 ibdev->iboe.nb_inet6.notifier_call = NULL;
2334         }
2335 #endif
2336         flush_workqueue(wq);
2337
2338         mlx4_ib_close_sriov(ibdev);
2339
2340 err_mad:
2341         mlx4_ib_mad_cleanup(ibdev);
2342
2343 err_reg:
2344         ib_unregister_device(&ibdev->ib_dev);
2345
2346 err_steer_free_bitmap:
2347         kfree(ibdev->ib_uc_qpns_bitmap);
2348
2349 err_steer_qp_release:
2350         if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED)
2351                 mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
2352                                       ibdev->steer_qpn_count);
2353 err_counter:
        /* i was clobbered by mlx4_foreach_port() above; walk all ports */
2354         for (i = 0; i < ibdev->num_ports; i++)
2355                 if (ibdev->counters[i] != -1)
2356                         mlx4_counter_free(ibdev->dev, ibdev->counters[i]);
2357
2358 err_map:
2359         iounmap(ibdev->uar_map);
2360
2361 err_uar:
2362         mlx4_uar_free(dev, &ibdev->priv_uar);
2363
2364 err_pd:
2365         mlx4_pd_free(dev, ibdev->priv_pdn);
2366
2367 err_dealloc:
2368         ib_dealloc_device(&ibdev->ib_dev);
2369
2370         return NULL;
2371 }
2372
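/*
 * Hand out QPNs from the range reserved at probe time for flow
 * steering; allocations are power-of-two sized regions within the
 * steer_qpn bitmap.
 */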
2373 int mlx4_ib_steer_qp_alloc(struct mlx4_ib_dev *dev, int count, int *qpn)
2374 {
2375         int offset;
2376
2377         WARN_ON(!dev->ib_uc_qpns_bitmap);
2378
2379         offset = bitmap_find_free_region(dev->ib_uc_qpns_bitmap,
2380                                          dev->steer_qpn_count,
2381                                          get_count_order(count));
2382         if (offset < 0)
2383                 return offset;
2384
2385         *qpn = dev->steer_qpn_base + offset;
2386         return 0;
2387 }
2388
2389 void mlx4_ib_steer_qp_free(struct mlx4_ib_dev *dev, u32 qpn, int count)
2390 {
2391         if (!qpn ||
2392             dev->steering_support != MLX4_STEERING_MODE_DEVICE_MANAGED)
2393                 return;
2394
2395         BUG_ON(qpn < dev->steer_qpn_base);
2396
2397         bitmap_release_region(dev->ib_uc_qpns_bitmap,
2398                               qpn - dev->steer_qpn_base,
2399                               get_count_order(count));
2400 }
2401
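/*
 * Attach or detach the catch-all steering rule for a UC QP under
 * device-managed steering: a flow attribute with a single all-zero
 * IB L2 spec steers the port's IB traffic to the QP, and reg_id
 * remembers the rule for later teardown.
 */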
2402 int mlx4_ib_steer_qp_reg(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
2403                          int is_attach)
2404 {
2405         int err;
2406         size_t flow_size;
2407         struct ib_flow_attr *flow = NULL;
2408         struct ib_flow_spec_ib *ib_spec;
2409
2410         if (is_attach) {
2411                 flow_size = sizeof(struct ib_flow_attr) +
2412                             sizeof(struct ib_flow_spec_ib);
2413                 flow = kzalloc(flow_size, GFP_KERNEL);
2414                 if (!flow)
2415                         return -ENOMEM;
2416                 flow->port = mqp->port;
2417                 flow->num_of_specs = 1;
2418                 flow->size = flow_size;
2419                 ib_spec = (struct ib_flow_spec_ib *)(flow + 1);
2420                 ib_spec->type = IB_FLOW_SPEC_IB;
2421                 ib_spec->size = sizeof(struct ib_flow_spec_ib);
2422                 /* Add an empty rule for IB L2 */
2423                 memset(&ib_spec->mask, 0, sizeof(ib_spec->mask));
2424
2425                 err = __mlx4_ib_create_flow(&mqp->ibqp, flow,
2426                                             IB_FLOW_DOMAIN_NIC,
2427                                             MLX4_FS_REGULAR,
2428                                             &mqp->reg_id);
2429         } else {
2430                 err = __mlx4_ib_destroy_flow(mdev->dev, mqp->reg_id);
2431         }
2432         kfree(flow);
2433         return err;
2434 }
2435
2436 static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
2437 {
2438         struct mlx4_ib_dev *ibdev = ibdev_ptr;
2439         int p;
2440
2441         ibdev->ib_active = false;
2442         flush_workqueue(wq);
2443
2444         mlx4_ib_close_sriov(ibdev);
2445         mlx4_ib_mad_cleanup(ibdev);
2446         ib_unregister_device(&ibdev->ib_dev);
2447         if (ibdev->iboe.nb.notifier_call) {
2448                 if (unregister_netdevice_notifier(&ibdev->iboe.nb))
2449                         pr_warn("failure unregistering notifier\n");
2450                 ibdev->iboe.nb.notifier_call = NULL;
2451         }
2452
2453         if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED) {
2454                 mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
2455                                       ibdev->steer_qpn_count);
2456                 kfree(ibdev->ib_uc_qpns_bitmap);
2457         }
2458
2459         if (ibdev->iboe.nb_inet.notifier_call) {
2460                 if (unregister_inetaddr_notifier(&ibdev->iboe.nb_inet))
2461                         pr_warn("failure unregistering notifier\n");
2462                 ibdev->iboe.nb_inet.notifier_call = NULL;
2463         }
2464 #if IS_ENABLED(CONFIG_IPV6)
2465         if (ibdev->iboe.nb_inet6.notifier_call) {
2466                 if (unregister_inet6addr_notifier(&ibdev->iboe.nb_inet6))
2467                         pr_warn("failure unregistering notifier\n");
2468                 ibdev->iboe.nb_inet6.notifier_call = NULL;
2469         }
2470 #endif
2471
2472         iounmap(ibdev->uar_map);
2473         for (p = 0; p < ibdev->num_ports; ++p)
2474                 if (ibdev->counters[p] != -1)
2475                         mlx4_counter_free(ibdev->dev, ibdev->counters[p]);
2476         mlx4_foreach_port(p, dev, MLX4_PORT_TYPE_IB)
2477                 mlx4_CLOSE_PORT(dev, p);
2478
2479         mlx4_ib_free_eqs(dev, ibdev);
2480
2481         mlx4_uar_free(dev, &ibdev->priv_uar);
2482         mlx4_pd_free(dev, ibdev->priv_pdn);
2483         ib_dealloc_device(&ibdev->ib_dev);
2484 }
2485
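/*
 * Bring up or tear down the tunnel (paravirt) QPs for one slave by
 * queueing a demux work item per active port; skipped once SR-IOV
 * teardown has started.
 */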
2486 static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init)
2487 {
2488         struct mlx4_ib_demux_work **dm = NULL;
2489         struct mlx4_dev *dev = ibdev->dev;
2490         int i;
2491         unsigned long flags;
2492         struct mlx4_active_ports actv_ports;
2493         unsigned int ports;
2494         unsigned int first_port;
2495
2496         if (!mlx4_is_master(dev))
2497                 return;
2498
2499         actv_ports = mlx4_get_active_ports(dev, slave);
2500         ports = bitmap_weight(actv_ports.ports, dev->caps.num_ports);
2501         first_port = find_first_bit(actv_ports.ports, dev->caps.num_ports);
2502
2503         dm = kcalloc(ports, sizeof(*dm), GFP_ATOMIC);
2504         if (!dm) {
2505                 pr_err("failed to allocate memory for tunneling qp update\n");
2506                 goto out;
2507         }
2508
2509         for (i = 0; i < ports; i++) {
2510                 dm[i] = kmalloc(sizeof (struct mlx4_ib_demux_work), GFP_ATOMIC);
2511                 if (!dm[i]) {
2512                         pr_err("failed to allocate memory for tunneling qp update work struct\n");
2513                         while (--i >= 0)
2514                                 kfree(dm[i]);
2517                         goto out;
2518                 }
2519         }
2520         /* initialize or tear down tunnel QPs for the slave */
2521         for (i = 0; i < ports; i++) {
2522                 INIT_WORK(&dm[i]->work, mlx4_ib_tunnels_update_work);
2523                 dm[i]->port = first_port + i + 1;
2524                 dm[i]->slave = slave;
2525                 dm[i]->do_init = do_init;
2526                 dm[i]->dev = ibdev;
2527                 spin_lock_irqsave(&ibdev->sriov.going_down_lock, flags);
2528                 if (!ibdev->sriov.is_going_down)
2529                         queue_work(ibdev->sriov.demux[i].ud_wq, &dm[i]->work);
2530                 spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags);
2531         }
2532 out:
2533         kfree(dm);
2534         return;
2535 }
2536
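/*
 * Translate mlx4 core events into IB events.  @param is overloaded:
 * a port number for port up/down, a slave id for slave init/shutdown,
 * and an eqe pointer for port management changes, which are queued on
 * the master (it must issue GEN_EQE) but handled inline on slaves.
 */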
2537 static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
2538                           enum mlx4_dev_event event, unsigned long param)
2539 {
2540         struct ib_event ibev;
2541         struct mlx4_ib_dev *ibdev = to_mdev((struct ib_device *) ibdev_ptr);
2542         struct mlx4_eqe *eqe = NULL;
2543         struct ib_event_work *ew;
2544         int p = 0;
2545
2546         if (event == MLX4_DEV_EVENT_PORT_MGMT_CHANGE)
2547                 eqe = (struct mlx4_eqe *)param;
2548         else
2549                 p = (int) param;
2550
2551         switch (event) {
2552         case MLX4_DEV_EVENT_PORT_UP:
2553                 if (p > ibdev->num_ports)
2554                         return;
2555                 if (mlx4_is_master(dev) &&
2556                     rdma_port_get_link_layer(&ibdev->ib_dev, p) ==
2557                         IB_LINK_LAYER_INFINIBAND) {
2558                         mlx4_ib_invalidate_all_guid_record(ibdev, p);
2559                 }
2560                 ibev.event = IB_EVENT_PORT_ACTIVE;
2561                 break;
2562
2563         case MLX4_DEV_EVENT_PORT_DOWN:
2564                 if (p > ibdev->num_ports)
2565                         return;
2566                 ibev.event = IB_EVENT_PORT_ERR;
2567                 break;
2568
2569         case MLX4_DEV_EVENT_CATASTROPHIC_ERROR:
2570                 ibdev->ib_active = false;
2571                 ibev.event = IB_EVENT_DEVICE_FATAL;
2572                 break;
2573
2574         case MLX4_DEV_EVENT_PORT_MGMT_CHANGE:
2575                 ew = kmalloc(sizeof *ew, GFP_ATOMIC);
2576                 if (!ew) {
2577                         pr_err("failed to allocate memory for events work\n");
2578                         break;
2579                 }
2580
2581                 INIT_WORK(&ew->work, handle_port_mgmt_change_event);
2582                 memcpy(&ew->ib_eqe, eqe, sizeof *eqe);
2583                 ew->ib_dev = ibdev;
2584                 /* need to queue only for port owner, which uses GEN_EQE */
2585                 if (mlx4_is_master(dev))
2586                         queue_work(wq, &ew->work);
2587                 else
2588                         handle_port_mgmt_change_event(&ew->work);
2589                 return;
2590
2591         case MLX4_DEV_EVENT_SLAVE_INIT:
2592                 /* here, p is the slave id */
2593                 do_slave_init(ibdev, p, 1);
2594                 return;
2595
2596         case MLX4_DEV_EVENT_SLAVE_SHUTDOWN:
2597                 /* here, p is the slave id */
2598                 do_slave_init(ibdev, p, 0);
2599                 return;
2600
2601         default:
2602                 return;
2603         }
2604
2605         ibev.device           = ibdev_ptr;
2606         ibev.element.port_num = (u8) p;
2607
2608         ib_dispatch_event(&ibev);
2609 }
2610
2611 static struct mlx4_interface mlx4_ib_interface = {
2612         .add            = mlx4_ib_add,
2613         .remove         = mlx4_ib_remove,
2614         .event          = mlx4_ib_event,
2615         .protocol       = MLX4_PROT_IB_IPV6
2616 };
2617
2618 static int __init mlx4_ib_init(void)
2619 {
2620         int err;
2621
2622         wq = create_singlethread_workqueue("mlx4_ib");
2623         if (!wq)
2624                 return -ENOMEM;
2625
2626         err = mlx4_ib_mcg_init();
2627         if (err)
2628                 goto clean_wq;
2629
2630         err = mlx4_register_interface(&mlx4_ib_interface);
2631         if (err)
2632                 goto clean_mcg;
2633
2634         return 0;
2635
2636 clean_mcg:
2637         mlx4_ib_mcg_destroy();
2638
2639 clean_wq:
2640         destroy_workqueue(wq);
2641         return err;
2642 }
2643
2644 static void __exit mlx4_ib_cleanup(void)
2645 {
2646         mlx4_unregister_interface(&mlx4_ib_interface);
2647         mlx4_ib_mcg_destroy();
2648         destroy_workqueue(wq);
2649 }
2650
2651 module_init(mlx4_ib_init);
2652 module_exit(mlx4_ib_cleanup);