2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
4 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
35 #include <linux/etherdevice.h>
36 #include <linux/mlx4/cmd.h>
37 #include <linux/module.h>
38 #include <linux/cache.h>
44 MLX4_COMMAND_INTERFACE_MIN_REV = 2,
45 MLX4_COMMAND_INTERFACE_MAX_REV = 3,
46 MLX4_COMMAND_INTERFACE_NEW_PORT_CMDS = 3,
49 extern void __buggy_use_of_MLX4_GET(void);
50 extern void __buggy_use_of_MLX4_PUT(void);
52 static bool enable_qos;
53 module_param(enable_qos, bool, 0444);
54 MODULE_PARM_DESC(enable_qos, "Enable Quality of Service support in the HCA (default: off)");
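/*
 * Firmware command mailboxes are laid out big-endian.  MLX4_GET and MLX4_PUT
 * copy a 1-, 2-, 4- or 8-byte field between a CPU-order variable and a byte
 * offset inside a mailbox, choosing the endianness conversion from the size
 * of the variable; any other size falls through to the never-defined
 * __buggy_use_of_MLX4_*() helpers and is caught as a link-time error.
 */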
56 #define MLX4_GET(dest, source, offset) \
58 void *__p = (char *) (source) + (offset); \
59 switch (sizeof (dest)) { \
60 case 1: (dest) = *(u8 *) __p; break; \
61 case 2: (dest) = be16_to_cpup(__p); break; \
62 case 4: (dest) = be32_to_cpup(__p); break; \
63 case 8: (dest) = be64_to_cpup(__p); break; \
64 default: __buggy_use_of_MLX4_GET(); \
68 #define MLX4_PUT(dest, source, offset) \
70 void *__d = ((char *) (dest) + (offset)); \
71 switch (sizeof(source)) { \
72 case 1: *(u8 *) __d = (source); break; \
73 case 2: *(__be16 *) __d = cpu_to_be16(source); break; \
74 case 4: *(__be32 *) __d = cpu_to_be32(source); break; \
75 case 8: *(__be64 *) __d = cpu_to_be64(source); break; \
76 default: __buggy_use_of_MLX4_PUT(); \
80 static void dump_dev_cap_flags(struct mlx4_dev *dev, u64 flags)
82 static const char *fname[] = {
83 [ 0] = "RC transport",
84 [ 1] = "UC transport",
85 [ 2] = "UD transport",
86 [ 3] = "XRC transport",
87 [ 4] = "reliable multicast",
88 [ 5] = "FCoIB support",
90 [ 7] = "IPoIB checksum offload",
91 [ 8] = "P_Key violation counter",
92 [ 9] = "Q_Key violation counter",
94 [12] = "Dual Port Different Protocol (DPDP) support",
95 [15] = "Big LSO headers",
98 [18] = "Atomic ops support",
99 [19] = "Raw multicast support",
100 [20] = "Address vector port checking support",
101 [21] = "UD multicast support",
102 [24] = "Demand paging support",
103 [25] = "Router support",
104 [30] = "IBoE support",
105 [32] = "Unicast loopback support",
106 [34] = "FCS header control",
107 [38] = "Wake On LAN support",
108 [40] = "UDP RSS support",
109 [41] = "Unicast VEP steering support",
110 [42] = "Multicast VEP steering support",
111 [48] = "Counters support",
112 [53] = "Port ETS Scheduler support",
113 [55] = "Port link type sensing support",
114 [59] = "Port management change event support",
115 [61] = "64 byte EQE support",
116 [62] = "64 byte CQE support",
120 mlx4_dbg(dev, "DEV_CAP flags:\n");
121 for (i = 0; i < ARRAY_SIZE(fname); ++i)
122 if (fname[i] && (flags & (1LL << i)))
123 mlx4_dbg(dev, " %s\n", fname[i]);
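/* Unlike the 64-bit DEV_CAP flags word above, the flags2 bitmap is assembled
 * by the driver from several separate QUERY_DEV_CAP fields (see
 * mlx4_QUERY_DEV_CAP below); this table only names the bits for debug output. */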
126 static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags)
128 static const char * const fname[] = {
130 [1] = "RSS Toeplitz Hash Function support",
131 [2] = "RSS XOR Hash Function support",
132 [3] = "Device managed flow steering support",
133 [4] = "Automatic MAC reassignment support",
134 [5] = "Time stamping support",
135 [6] = "VST (control vlan insertion/stripping) support",
136 [7] = "FSM (MAC anti-spoofing) support",
137 [8] = "Dynamic QP updates support",
138 [9] = "Device managed flow steering IPoIB support",
139 [10] = "TCP/IP offloads/flow-steering for VXLAN support",
140 [11] = "MAD DEMUX (Secure-Host) support",
141 [12] = "Large cache line (>64B) CQE stride support",
142 [13] = "Large cache line (>64B) EQE stride support",
143 [14] = "Ethernet protocol control support",
144 [15] = "Ethernet Backplane autoneg support",
145 [16] = "CONFIG DEV support",
146 [17] = "Asymmetric EQs support"
150 for (i = 0; i < ARRAY_SIZE(fname); ++i)
151 if (fname[i] && (flags & (1LL << i)))
152 mlx4_dbg(dev, " %s\n", fname[i]);
155 int mlx4_MOD_STAT_CFG(struct mlx4_dev *dev, struct mlx4_mod_stat_cfg *cfg)
157 struct mlx4_cmd_mailbox *mailbox;
161 #define MOD_STAT_CFG_IN_SIZE 0x100
163 #define MOD_STAT_CFG_PG_SZ_M_OFFSET 0x002
164 #define MOD_STAT_CFG_PG_SZ_OFFSET 0x003
166 mailbox = mlx4_alloc_cmd_mailbox(dev);
167 if (IS_ERR(mailbox))
168 return PTR_ERR(mailbox);
169 inbox = mailbox->buf;
171 MLX4_PUT(inbox, cfg->log_pg_sz, MOD_STAT_CFG_PG_SZ_OFFSET);
172 MLX4_PUT(inbox, cfg->log_pg_sz_m, MOD_STAT_CFG_PG_SZ_M_OFFSET);
174 err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_MOD_STAT_CFG,
175 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
177 mlx4_free_cmd_mailbox(dev, mailbox);
181 int mlx4_QUERY_FUNC(struct mlx4_dev *dev, struct mlx4_func *func, int slave)
183 struct mlx4_cmd_mailbox *mailbox;
190 #define QUERY_FUNC_BUS_OFFSET 0x00
191 #define QUERY_FUNC_DEVICE_OFFSET 0x01
192 #define QUERY_FUNC_FUNCTION_OFFSET 0x01
193 #define QUERY_FUNC_PHYSICAL_FUNCTION_OFFSET 0x03
194 #define QUERY_FUNC_RSVD_EQS_OFFSET 0x04
195 #define QUERY_FUNC_MAX_EQ_OFFSET 0x06
196 #define QUERY_FUNC_RSVD_UARS_OFFSET 0x0b
198 mailbox = mlx4_alloc_cmd_mailbox(dev);
199 if (IS_ERR(mailbox))
200 return PTR_ERR(mailbox);
201 outbox = mailbox->buf;
205 err = mlx4_cmd_box(dev, 0, mailbox->dma, in_modifier, 0,
207 MLX4_CMD_TIME_CLASS_A,
212 MLX4_GET(field, outbox, QUERY_FUNC_BUS_OFFSET);
213 func->bus = field & 0xf;
214 MLX4_GET(field, outbox, QUERY_FUNC_DEVICE_OFFSET);
215 func->device = field & 0xf1;
216 MLX4_GET(field, outbox, QUERY_FUNC_FUNCTION_OFFSET);
217 func->function = field & 0x7;
218 MLX4_GET(field, outbox, QUERY_FUNC_PHYSICAL_FUNCTION_OFFSET);
219 func->physical_function = field & 0xf;
220 MLX4_GET(field16, outbox, QUERY_FUNC_RSVD_EQS_OFFSET);
221 func->rsvd_eqs = field16 & 0xffff;
222 MLX4_GET(field16, outbox, QUERY_FUNC_MAX_EQ_OFFSET);
223 func->max_eq = field16 & 0xffff;
224 MLX4_GET(field, outbox, QUERY_FUNC_RSVD_UARS_OFFSET);
225 func->rsvd_uars = field & 0x0f;
227 mlx4_dbg(dev, "Bus: %d, Device: %d, Function: %d, Physical function: %d, Max EQs: %d, Reserved EQs: %d, Reserved UARs: %d\n",
228 func->bus, func->device, func->function, func->physical_function,
229 func->max_eq, func->rsvd_eqs, func->rsvd_uars);
232 mlx4_free_cmd_mailbox(dev, mailbox);
236 int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
237 struct mlx4_vhcr *vhcr,
238 struct mlx4_cmd_mailbox *inbox,
239 struct mlx4_cmd_mailbox *outbox,
240 struct mlx4_cmd_info *cmd)
242 struct mlx4_priv *priv = mlx4_priv(dev);
244 u32 size, proxy_qp, qkey;
246 struct mlx4_func func;
248 #define QUERY_FUNC_CAP_FLAGS_OFFSET 0x0
249 #define QUERY_FUNC_CAP_NUM_PORTS_OFFSET 0x1
250 #define QUERY_FUNC_CAP_PF_BHVR_OFFSET 0x4
251 #define QUERY_FUNC_CAP_FMR_OFFSET 0x8
252 #define QUERY_FUNC_CAP_QP_QUOTA_OFFSET_DEP 0x10
253 #define QUERY_FUNC_CAP_CQ_QUOTA_OFFSET_DEP 0x14
254 #define QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET_DEP 0x18
255 #define QUERY_FUNC_CAP_MPT_QUOTA_OFFSET_DEP 0x20
256 #define QUERY_FUNC_CAP_MTT_QUOTA_OFFSET_DEP 0x24
257 #define QUERY_FUNC_CAP_MCG_QUOTA_OFFSET_DEP 0x28
258 #define QUERY_FUNC_CAP_MAX_EQ_OFFSET 0x2c
259 #define QUERY_FUNC_CAP_RESERVED_EQ_OFFSET 0x30
261 #define QUERY_FUNC_CAP_QP_QUOTA_OFFSET 0x50
262 #define QUERY_FUNC_CAP_CQ_QUOTA_OFFSET 0x54
263 #define QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET 0x58
264 #define QUERY_FUNC_CAP_MPT_QUOTA_OFFSET 0x60
265 #define QUERY_FUNC_CAP_MTT_QUOTA_OFFSET 0x64
266 #define QUERY_FUNC_CAP_MCG_QUOTA_OFFSET 0x68
268 #define QUERY_FUNC_CAP_FMR_FLAG 0x80
269 #define QUERY_FUNC_CAP_FLAG_RDMA 0x40
270 #define QUERY_FUNC_CAP_FLAG_ETH 0x80
271 #define QUERY_FUNC_CAP_FLAG_QUOTAS 0x10
273 /* when opcode modifier = 1 */
274 #define QUERY_FUNC_CAP_PHYS_PORT_OFFSET 0x3
275 #define QUERY_FUNC_CAP_PRIV_VF_QKEY_OFFSET 0x4
276 #define QUERY_FUNC_CAP_FLAGS0_OFFSET 0x8
277 #define QUERY_FUNC_CAP_FLAGS1_OFFSET 0xc
279 #define QUERY_FUNC_CAP_QP0_TUNNEL 0x10
280 #define QUERY_FUNC_CAP_QP0_PROXY 0x14
281 #define QUERY_FUNC_CAP_QP1_TUNNEL 0x18
282 #define QUERY_FUNC_CAP_QP1_PROXY 0x1c
283 #define QUERY_FUNC_CAP_PHYS_PORT_ID 0x28
285 #define QUERY_FUNC_CAP_FLAGS1_FORCE_MAC 0x40
286 #define QUERY_FUNC_CAP_FLAGS1_FORCE_VLAN 0x80
287 #define QUERY_FUNC_CAP_FLAGS1_NIC_INFO 0x10
288 #define QUERY_FUNC_CAP_VF_ENABLE_QP0 0x08
290 #define QUERY_FUNC_CAP_FLAGS0_FORCE_PHY_WQE_GID 0x80
291 #define QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS (1 << 31)
293 if (vhcr->op_modifier == 1) {
294 struct mlx4_active_ports actv_ports =
295 mlx4_get_active_ports(dev, slave);
296 int converted_port = mlx4_slave_convert_port(
297 dev, slave, vhcr->in_modifier);
299 if (converted_port < 0)
300 return -EINVAL;
302 vhcr->in_modifier = converted_port;
303 /* phys-port = logical-port */
304 field = vhcr->in_modifier -
305 find_first_bit(actv_ports.ports, dev->caps.num_ports);
306 MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_PHYS_PORT_OFFSET);
308 port = vhcr->in_modifier;
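/* Each function is assigned a block of eight proxy and eight tunnel
 * special QPs; the first pair of a block serves QP0 (one entry per port)
 * and the next pair serves QP1, which is why the per-port index below is
 * 'port - 1' within the slave's block. */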
309 proxy_qp = dev->phys_caps.base_proxy_sqpn + 8 * slave + port - 1;
311 /* Set the nic_info bit to mark support for the new fields */
312 field = QUERY_FUNC_CAP_FLAGS1_NIC_INFO;
314 if (mlx4_vf_smi_enabled(dev, slave, port) &&
315 !mlx4_get_parav_qkey(dev, proxy_qp, &qkey)) {
316 field |= QUERY_FUNC_CAP_VF_ENABLE_QP0;
317 MLX4_PUT(outbox->buf, qkey,
318 QUERY_FUNC_CAP_PRIV_VF_QKEY_OFFSET);
320 MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS1_OFFSET);
322 /* size is now the QP number */
323 size = dev->phys_caps.base_tunnel_sqpn + 8 * slave + port - 1;
324 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP0_TUNNEL);
327 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP1_TUNNEL);
329 MLX4_PUT(outbox->buf, proxy_qp, QUERY_FUNC_CAP_QP0_PROXY);
331 MLX4_PUT(outbox->buf, proxy_qp, QUERY_FUNC_CAP_QP1_PROXY);
333 MLX4_PUT(outbox->buf, dev->caps.phys_port_id[vhcr->in_modifier],
334 QUERY_FUNC_CAP_PHYS_PORT_ID);
336 } else if (vhcr->op_modifier == 0) {
337 struct mlx4_active_ports actv_ports =
338 mlx4_get_active_ports(dev, slave);
339 /* enable rdma and ethernet interfaces, and new quota locations */
340 field = (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA |
341 QUERY_FUNC_CAP_FLAG_QUOTAS);
342 MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS_OFFSET);
345 bitmap_weight(actv_ports.ports, dev->caps.num_ports),
346 dev->caps.num_ports);
347 MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_NUM_PORTS_OFFSET);
349 size = dev->caps.function_caps; /* set PF behaviours */
350 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_PF_BHVR_OFFSET);
352 field = 0; /* protected FMR support not available as yet */
353 MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FMR_OFFSET);
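	/* Resource quotas are reported twice: the per-slave quotas from the
	 * resource tracker go to the new offsets (advertised by the QUOTAS
	 * flag above), while the deprecated *_DEP offsets keep reporting the
	 * global resource counts for older guest drivers. */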
355 size = priv->mfunc.master.res_tracker.res_alloc[RES_QP].quota[slave];
356 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_QUOTA_OFFSET);
357 size = dev->caps.num_qps;
358 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_QUOTA_OFFSET_DEP);
360 size = priv->mfunc.master.res_tracker.res_alloc[RES_SRQ].quota[slave];
361 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET);
362 size = dev->caps.num_srqs;
363 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET_DEP);
365 size = priv->mfunc.master.res_tracker.res_alloc[RES_CQ].quota[slave];
366 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET);
367 size = dev->caps.num_cqs;
368 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET_DEP);
370 if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) ||
371 mlx4_QUERY_FUNC(dev, &func, slave)) {
372 size = vhcr->in_modifier &
373 QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS ?
375 rounddown_pow_of_two(dev->caps.num_eqs);
376 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MAX_EQ_OFFSET);
377 size = dev->caps.reserved_eqs;
378 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET);
380 size = vhcr->in_modifier &
381 QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS ?
383 rounddown_pow_of_two(func.max_eq);
384 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MAX_EQ_OFFSET);
385 size = func.rsvd_eqs;
386 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET);
389 size = priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[slave];
390 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET);
391 size = dev->caps.num_mpts;
392 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET_DEP);
394 size = priv->mfunc.master.res_tracker.res_alloc[RES_MTT].quota[slave];
395 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET);
396 size = dev->caps.num_mtts;
397 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET_DEP);
399 size = dev->caps.num_mgms + dev->caps.num_amgms;
400 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET);
401 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET_DEP);
409 int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u8 gen_or_port,
410 struct mlx4_func_cap *func_cap)
412 struct mlx4_cmd_mailbox *mailbox;
414 u8 field, op_modifier;
416 int err = 0, quotas = 0;
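	/* For the general query (op_modifier 0) the in_modifier carries the
	 * SUPPORTS_NON_POWER_OF_2_NUM_EQS flag, telling the PF it may report
	 * exact EQ counts instead of rounding them down to a power of two. */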
419 op_modifier = !!gen_or_port; /* 0 = general, 1 = logical port */
420 in_modifier = op_modifier ? gen_or_port :
421 QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS;
423 mailbox = mlx4_alloc_cmd_mailbox(dev);
424 if (IS_ERR(mailbox))
425 return PTR_ERR(mailbox);
427 err = mlx4_cmd_box(dev, 0, mailbox->dma, in_modifier, op_modifier,
428 MLX4_CMD_QUERY_FUNC_CAP,
429 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
433 outbox = mailbox->buf;
436 MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS_OFFSET);
437 if (!(field & (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA))) {
438 mlx4_err(dev, "The host supports neither eth nor rdma interfaces\n");
439 err = -EPROTONOSUPPORT;
442 func_cap->flags = field;
443 quotas = !!(func_cap->flags & QUERY_FUNC_CAP_FLAG_QUOTAS);
445 MLX4_GET(field, outbox, QUERY_FUNC_CAP_NUM_PORTS_OFFSET);
446 func_cap->num_ports = field;
448 MLX4_GET(size, outbox, QUERY_FUNC_CAP_PF_BHVR_OFFSET);
449 func_cap->pf_context_behaviour = size;
452 MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_QUOTA_OFFSET);
453 func_cap->qp_quota = size & 0xFFFFFF;
455 MLX4_GET(size, outbox, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET);
456 func_cap->srq_quota = size & 0xFFFFFF;
458 MLX4_GET(size, outbox, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET);
459 func_cap->cq_quota = size & 0xFFFFFF;
461 MLX4_GET(size, outbox, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET);
462 func_cap->mpt_quota = size & 0xFFFFFF;
464 MLX4_GET(size, outbox, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET);
465 func_cap->mtt_quota = size & 0xFFFFFF;
467 MLX4_GET(size, outbox, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET);
468 func_cap->mcg_quota = size & 0xFFFFFF;
471 MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_QUOTA_OFFSET_DEP);
472 func_cap->qp_quota = size & 0xFFFFFF;
474 MLX4_GET(size, outbox, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET_DEP);
475 func_cap->srq_quota = size & 0xFFFFFF;
477 MLX4_GET(size, outbox, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET_DEP);
478 func_cap->cq_quota = size & 0xFFFFFF;
480 MLX4_GET(size, outbox, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET_DEP);
481 func_cap->mpt_quota = size & 0xFFFFFF;
483 MLX4_GET(size, outbox, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET_DEP);
484 func_cap->mtt_quota = size & 0xFFFFFF;
486 MLX4_GET(size, outbox, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET_DEP);
487 func_cap->mcg_quota = size & 0xFFFFFF;
489 MLX4_GET(size, outbox, QUERY_FUNC_CAP_MAX_EQ_OFFSET);
490 func_cap->max_eq = size & 0xFFFFFF;
492 MLX4_GET(size, outbox, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET);
493 func_cap->reserved_eq = size & 0xFFFFFF;
498 /* logical port query */
499 if (gen_or_port > dev->caps.num_ports) {
504 MLX4_GET(func_cap->flags1, outbox, QUERY_FUNC_CAP_FLAGS1_OFFSET);
505 if (dev->caps.port_type[gen_or_port] == MLX4_PORT_TYPE_ETH) {
506 if (func_cap->flags1 & QUERY_FUNC_CAP_FLAGS1_FORCE_VLAN) {
507 mlx4_err(dev, "VLAN is enforced on this port\n");
508 err = -EPROTONOSUPPORT;
512 if (func_cap->flags1 & QUERY_FUNC_CAP_FLAGS1_FORCE_MAC) {
513 mlx4_err(dev, "Force mac is enabled on this port\n");
514 err = -EPROTONOSUPPORT;
517 } else if (dev->caps.port_type[gen_or_port] == MLX4_PORT_TYPE_IB) {
518 MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS0_OFFSET);
519 if (field & QUERY_FUNC_CAP_FLAGS0_FORCE_PHY_WQE_GID) {
520 mlx4_err(dev, "phy_wqe_gid is enforced on this ib port\n");
521 err = -EPROTONOSUPPORT;
526 MLX4_GET(field, outbox, QUERY_FUNC_CAP_PHYS_PORT_OFFSET);
527 func_cap->physical_port = field;
528 if (func_cap->physical_port != gen_or_port) {
533 if (func_cap->flags1 & QUERY_FUNC_CAP_VF_ENABLE_QP0) {
534 MLX4_GET(qkey, outbox, QUERY_FUNC_CAP_PRIV_VF_QKEY_OFFSET);
535 func_cap->qp0_qkey = qkey;
537 func_cap->qp0_qkey = 0;
540 MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP0_TUNNEL);
541 func_cap->qp0_tunnel_qpn = size & 0xFFFFFF;
543 MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP0_PROXY);
544 func_cap->qp0_proxy_qpn = size & 0xFFFFFF;
546 MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP1_TUNNEL);
547 func_cap->qp1_tunnel_qpn = size & 0xFFFFFF;
549 MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP1_PROXY);
550 func_cap->qp1_proxy_qpn = size & 0xFFFFFF;
552 if (func_cap->flags1 & QUERY_FUNC_CAP_FLAGS1_NIC_INFO)
553 MLX4_GET(func_cap->phys_port_id, outbox,
554 QUERY_FUNC_CAP_PHYS_PORT_ID);
556 /* All other resources are allocated by the master, but we still report
557 * 'num' and 'reserved' capabilities as follows:
558 * - num remains the maximum resource index
559 * - 'num - reserved' is the total number of objects available to this function,
560 * but the resource indices it is given may still fall below 'reserved'
561 * TODO: set per-resource quotas */
564 mlx4_free_cmd_mailbox(dev, mailbox);
569 int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
571 struct mlx4_cmd_mailbox *mailbox;
574 u32 field32, flags, ext_flags;
580 #define QUERY_DEV_CAP_OUT_SIZE 0x100
581 #define QUERY_DEV_CAP_MAX_SRQ_SZ_OFFSET 0x10
582 #define QUERY_DEV_CAP_MAX_QP_SZ_OFFSET 0x11
583 #define QUERY_DEV_CAP_RSVD_QP_OFFSET 0x12
584 #define QUERY_DEV_CAP_MAX_QP_OFFSET 0x13
585 #define QUERY_DEV_CAP_RSVD_SRQ_OFFSET 0x14
586 #define QUERY_DEV_CAP_MAX_SRQ_OFFSET 0x15
587 #define QUERY_DEV_CAP_RSVD_EEC_OFFSET 0x16
588 #define QUERY_DEV_CAP_MAX_EEC_OFFSET 0x17
589 #define QUERY_DEV_CAP_MAX_CQ_SZ_OFFSET 0x19
590 #define QUERY_DEV_CAP_RSVD_CQ_OFFSET 0x1a
591 #define QUERY_DEV_CAP_MAX_CQ_OFFSET 0x1b
592 #define QUERY_DEV_CAP_MAX_MPT_OFFSET 0x1d
593 #define QUERY_DEV_CAP_RSVD_EQ_OFFSET 0x1e
594 #define QUERY_DEV_CAP_MAX_EQ_OFFSET 0x1f
595 #define QUERY_DEV_CAP_RSVD_MTT_OFFSET 0x20
596 #define QUERY_DEV_CAP_MAX_MRW_SZ_OFFSET 0x21
597 #define QUERY_DEV_CAP_RSVD_MRW_OFFSET 0x22
598 #define QUERY_DEV_CAP_MAX_MTT_SEG_OFFSET 0x23
599 #define QUERY_DEV_CAP_NUM_SYS_EQ_OFFSET 0x26
600 #define QUERY_DEV_CAP_MAX_AV_OFFSET 0x27
601 #define QUERY_DEV_CAP_MAX_REQ_QP_OFFSET 0x29
602 #define QUERY_DEV_CAP_MAX_RES_QP_OFFSET 0x2b
603 #define QUERY_DEV_CAP_MAX_GSO_OFFSET 0x2d
604 #define QUERY_DEV_CAP_RSS_OFFSET 0x2e
605 #define QUERY_DEV_CAP_MAX_RDMA_OFFSET 0x2f
606 #define QUERY_DEV_CAP_RSZ_SRQ_OFFSET 0x33
607 #define QUERY_DEV_CAP_ACK_DELAY_OFFSET 0x35
608 #define QUERY_DEV_CAP_MTU_WIDTH_OFFSET 0x36
609 #define QUERY_DEV_CAP_VL_PORT_OFFSET 0x37
610 #define QUERY_DEV_CAP_MAX_MSG_SZ_OFFSET 0x38
611 #define QUERY_DEV_CAP_MAX_GID_OFFSET 0x3b
612 #define QUERY_DEV_CAP_RATE_SUPPORT_OFFSET 0x3c
613 #define QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET 0x3e
614 #define QUERY_DEV_CAP_MAX_PKEY_OFFSET 0x3f
615 #define QUERY_DEV_CAP_EXT_FLAGS_OFFSET 0x40
616 #define QUERY_DEV_CAP_FLAGS_OFFSET 0x44
617 #define QUERY_DEV_CAP_RSVD_UAR_OFFSET 0x48
618 #define QUERY_DEV_CAP_UAR_SZ_OFFSET 0x49
619 #define QUERY_DEV_CAP_PAGE_SZ_OFFSET 0x4b
620 #define QUERY_DEV_CAP_BF_OFFSET 0x4c
621 #define QUERY_DEV_CAP_LOG_BF_REG_SZ_OFFSET 0x4d
622 #define QUERY_DEV_CAP_LOG_MAX_BF_REGS_PER_PAGE_OFFSET 0x4e
623 #define QUERY_DEV_CAP_LOG_MAX_BF_PAGES_OFFSET 0x4f
624 #define QUERY_DEV_CAP_MAX_SG_SQ_OFFSET 0x51
625 #define QUERY_DEV_CAP_MAX_DESC_SZ_SQ_OFFSET 0x52
626 #define QUERY_DEV_CAP_MAX_SG_RQ_OFFSET 0x55
627 #define QUERY_DEV_CAP_MAX_DESC_SZ_RQ_OFFSET 0x56
628 #define QUERY_DEV_CAP_MAX_QP_MCG_OFFSET 0x61
629 #define QUERY_DEV_CAP_RSVD_MCG_OFFSET 0x62
630 #define QUERY_DEV_CAP_MAX_MCG_OFFSET 0x63
631 #define QUERY_DEV_CAP_RSVD_PD_OFFSET 0x64
632 #define QUERY_DEV_CAP_MAX_PD_OFFSET 0x65
633 #define QUERY_DEV_CAP_RSVD_XRC_OFFSET 0x66
634 #define QUERY_DEV_CAP_MAX_XRC_OFFSET 0x67
635 #define QUERY_DEV_CAP_MAX_COUNTERS_OFFSET 0x68
636 #define QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET 0x70
637 #define QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET 0x74
638 #define QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET 0x76
639 #define QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET 0x77
640 #define QUERY_DEV_CAP_CQ_EQ_CACHE_LINE_STRIDE 0x7a
641 #define QUERY_DEV_CAP_ETH_PROT_CTRL_OFFSET 0x7a
642 #define QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET 0x80
643 #define QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET 0x82
644 #define QUERY_DEV_CAP_AUX_ENTRY_SZ_OFFSET 0x84
645 #define QUERY_DEV_CAP_ALTC_ENTRY_SZ_OFFSET 0x86
646 #define QUERY_DEV_CAP_EQC_ENTRY_SZ_OFFSET 0x88
647 #define QUERY_DEV_CAP_CQC_ENTRY_SZ_OFFSET 0x8a
648 #define QUERY_DEV_CAP_SRQ_ENTRY_SZ_OFFSET 0x8c
649 #define QUERY_DEV_CAP_C_MPT_ENTRY_SZ_OFFSET 0x8e
650 #define QUERY_DEV_CAP_MTT_ENTRY_SZ_OFFSET 0x90
651 #define QUERY_DEV_CAP_D_MPT_ENTRY_SZ_OFFSET 0x92
652 #define QUERY_DEV_CAP_BMME_FLAGS_OFFSET 0x94
653 #define QUERY_DEV_CAP_CONFIG_DEV_OFFSET 0x94
654 #define QUERY_DEV_CAP_RSVD_LKEY_OFFSET 0x98
655 #define QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET 0xa0
656 #define QUERY_DEV_CAP_ETH_BACKPL_OFFSET 0x9c
657 #define QUERY_DEV_CAP_FW_REASSIGN_MAC 0x9d
658 #define QUERY_DEV_CAP_VXLAN 0x9e
659 #define QUERY_DEV_CAP_MAD_DEMUX_OFFSET 0xb0
662 mailbox = mlx4_alloc_cmd_mailbox(dev);
663 if (IS_ERR(mailbox))
664 return PTR_ERR(mailbox);
665 outbox = mailbox->buf;
667 err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP,
668 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
672 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_QP_OFFSET);
673 dev_cap->reserved_qps = 1 << (field & 0xf);
674 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_OFFSET);
675 dev_cap->max_qps = 1 << (field & 0x1f);
676 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_SRQ_OFFSET);
677 dev_cap->reserved_srqs = 1 << (field >> 4);
678 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SRQ_OFFSET);
679 dev_cap->max_srqs = 1 << (field & 0x1f);
680 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_CQ_SZ_OFFSET);
681 dev_cap->max_cq_sz = 1 << field;
682 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_CQ_OFFSET);
683 dev_cap->reserved_cqs = 1 << (field & 0xf);
684 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_CQ_OFFSET);
685 dev_cap->max_cqs = 1 << (field & 0x1f);
686 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MPT_OFFSET);
687 dev_cap->max_mpts = 1 << (field & 0x3f);
688 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_EQ_OFFSET);
689 dev_cap->reserved_eqs = 1 << (field & 0xf);
690 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_EQ_OFFSET);
691 dev_cap->max_eqs = 1 << (field & 0xf);
692 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MTT_OFFSET);
693 dev_cap->reserved_mtts = 1 << (field >> 4);
694 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MRW_SZ_OFFSET);
695 dev_cap->max_mrw_sz = 1 << field;
696 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MRW_OFFSET);
697 dev_cap->reserved_mrws = 1 << (field & 0xf);
698 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MTT_SEG_OFFSET);
699 dev_cap->max_mtt_seg = 1 << (field & 0x3f);
700 MLX4_GET(size, outbox, QUERY_DEV_CAP_NUM_SYS_EQ_OFFSET);
701 dev_cap->num_sys_eqs = size & 0xfff;
702 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_REQ_QP_OFFSET);
703 dev_cap->max_requester_per_qp = 1 << (field & 0x3f);
704 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_RES_QP_OFFSET);
705 dev_cap->max_responder_per_qp = 1 << (field & 0x3f);
706 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_GSO_OFFSET);
709 dev_cap->max_gso_sz = 0;
711 dev_cap->max_gso_sz = 1 << field;
713 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSS_OFFSET);
715 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RSS_XOR;
717 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RSS_TOP;
720 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RSS;
721 dev_cap->max_rss_tbl_sz = 1 << field;
723 dev_cap->max_rss_tbl_sz = 0;
724 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_RDMA_OFFSET);
725 dev_cap->max_rdma_global = 1 << (field & 0x3f);
726 MLX4_GET(field, outbox, QUERY_DEV_CAP_ACK_DELAY_OFFSET);
727 dev_cap->local_ca_ack_delay = field & 0x1f;
728 MLX4_GET(field, outbox, QUERY_DEV_CAP_VL_PORT_OFFSET);
729 dev_cap->num_ports = field & 0xf;
730 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MSG_SZ_OFFSET);
731 dev_cap->max_msg_sz = 1 << (field & 0x1f);
732 MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET);
734 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_FS_EN;
735 dev_cap->fs_log_max_ucast_qp_range_size = field & 0x1f;
736 MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET);
738 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_DMFS_IPOIB;
739 MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET);
740 dev_cap->fs_max_num_qp_per_entry = field;
741 MLX4_GET(stat_rate, outbox, QUERY_DEV_CAP_RATE_SUPPORT_OFFSET);
742 dev_cap->stat_rate_support = stat_rate;
743 MLX4_GET(field, outbox, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET);
745 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_TS;
746 MLX4_GET(ext_flags, outbox, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
747 MLX4_GET(flags, outbox, QUERY_DEV_CAP_FLAGS_OFFSET);
748 dev_cap->flags = flags | (u64)ext_flags << 32;
749 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_UAR_OFFSET);
750 dev_cap->reserved_uars = field >> 4;
751 MLX4_GET(field, outbox, QUERY_DEV_CAP_UAR_SZ_OFFSET);
752 dev_cap->uar_size = 1 << ((field & 0x3f) + 20);
753 MLX4_GET(field, outbox, QUERY_DEV_CAP_PAGE_SZ_OFFSET);
754 dev_cap->min_page_sz = 1 << field;
756 MLX4_GET(field, outbox, QUERY_DEV_CAP_BF_OFFSET);
758 MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_BF_REG_SZ_OFFSET);
759 dev_cap->bf_reg_size = 1 << (field & 0x1f);
760 MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_MAX_BF_REGS_PER_PAGE_OFFSET);
761 if ((1 << (field & 0x3f)) > (PAGE_SIZE / dev_cap->bf_reg_size))
763 dev_cap->bf_regs_per_page = 1 << (field & 0x3f);
764 mlx4_dbg(dev, "BlueFlame available (reg size %d, regs/page %d)\n",
765 dev_cap->bf_reg_size, dev_cap->bf_regs_per_page);
767 dev_cap->bf_reg_size = 0;
768 mlx4_dbg(dev, "BlueFlame not available\n");
771 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SG_SQ_OFFSET);
772 dev_cap->max_sq_sg = field;
773 MLX4_GET(size, outbox, QUERY_DEV_CAP_MAX_DESC_SZ_SQ_OFFSET);
774 dev_cap->max_sq_desc_sz = size;
776 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_MCG_OFFSET);
777 dev_cap->max_qp_per_mcg = 1 << field;
778 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MCG_OFFSET);
779 dev_cap->reserved_mgms = field & 0xf;
780 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MCG_OFFSET);
781 dev_cap->max_mcgs = 1 << field;
782 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_PD_OFFSET);
783 dev_cap->reserved_pds = field >> 4;
784 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_PD_OFFSET);
785 dev_cap->max_pds = 1 << (field & 0x3f);
786 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_XRC_OFFSET);
787 dev_cap->reserved_xrcds = field >> 4;
788 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_XRC_OFFSET);
789 dev_cap->max_xrcds = 1 << (field & 0x1f);
791 MLX4_GET(size, outbox, QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET);
792 dev_cap->rdmarc_entry_sz = size;
793 MLX4_GET(size, outbox, QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET);
794 dev_cap->qpc_entry_sz = size;
795 MLX4_GET(size, outbox, QUERY_DEV_CAP_AUX_ENTRY_SZ_OFFSET);
796 dev_cap->aux_entry_sz = size;
797 MLX4_GET(size, outbox, QUERY_DEV_CAP_ALTC_ENTRY_SZ_OFFSET);
798 dev_cap->altc_entry_sz = size;
799 MLX4_GET(size, outbox, QUERY_DEV_CAP_EQC_ENTRY_SZ_OFFSET);
800 dev_cap->eqc_entry_sz = size;
801 MLX4_GET(size, outbox, QUERY_DEV_CAP_CQC_ENTRY_SZ_OFFSET);
802 dev_cap->cqc_entry_sz = size;
803 MLX4_GET(size, outbox, QUERY_DEV_CAP_SRQ_ENTRY_SZ_OFFSET);
804 dev_cap->srq_entry_sz = size;
805 MLX4_GET(size, outbox, QUERY_DEV_CAP_C_MPT_ENTRY_SZ_OFFSET);
806 dev_cap->cmpt_entry_sz = size;
807 MLX4_GET(size, outbox, QUERY_DEV_CAP_MTT_ENTRY_SZ_OFFSET);
808 dev_cap->mtt_entry_sz = size;
809 MLX4_GET(size, outbox, QUERY_DEV_CAP_D_MPT_ENTRY_SZ_OFFSET);
810 dev_cap->dmpt_entry_sz = size;
812 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SRQ_SZ_OFFSET);
813 dev_cap->max_srq_sz = 1 << field;
814 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_SZ_OFFSET);
815 dev_cap->max_qp_sz = 1 << field;
816 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSZ_SRQ_OFFSET);
817 dev_cap->resize_srq = field & 1;
818 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SG_RQ_OFFSET);
819 dev_cap->max_rq_sg = field;
820 MLX4_GET(size, outbox, QUERY_DEV_CAP_MAX_DESC_SZ_RQ_OFFSET);
821 dev_cap->max_rq_desc_sz = size;
822 MLX4_GET(field, outbox, QUERY_DEV_CAP_CQ_EQ_CACHE_LINE_STRIDE);
823 if (field & (1 << 5))
824 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_ETH_PROT_CTRL;
825 if (field & (1 << 6))
826 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_CQE_STRIDE;
827 if (field & (1 << 7))
828 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_EQE_STRIDE;
829 MLX4_GET(dev_cap->bmme_flags, outbox,
830 QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
831 MLX4_GET(field, outbox, QUERY_DEV_CAP_CONFIG_DEV_OFFSET);
833 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_CONFIG_DEV;
834 MLX4_GET(dev_cap->reserved_lkey, outbox,
835 QUERY_DEV_CAP_RSVD_LKEY_OFFSET);
836 MLX4_GET(field32, outbox, QUERY_DEV_CAP_ETH_BACKPL_OFFSET);
837 if (field32 & (1 << 0))
838 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_ETH_BACKPL_AN_REP;
839 MLX4_GET(field, outbox, QUERY_DEV_CAP_FW_REASSIGN_MAC);
841 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_REASSIGN_MAC_EN;
842 MLX4_GET(field, outbox, QUERY_DEV_CAP_VXLAN);
844 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS;
845 MLX4_GET(dev_cap->max_icm_sz, outbox,
846 QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET);
847 if (dev_cap->flags & MLX4_DEV_CAP_FLAG_COUNTERS)
848 MLX4_GET(dev_cap->max_counters, outbox,
849 QUERY_DEV_CAP_MAX_COUNTERS_OFFSET);
851 MLX4_GET(field32, outbox,
852 QUERY_DEV_CAP_MAD_DEMUX_OFFSET);
853 if (field32 & (1 << 0))
854 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_MAD_DEMUX;
856 MLX4_GET(field32, outbox, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET);
857 if (field32 & (1 << 16))
858 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_UPDATE_QP;
859 if (field32 & (1 << 26))
860 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_VLAN_CONTROL;
861 if (field32 & (1 << 20))
862 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_FSM;
864 if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
865 for (i = 1; i <= dev_cap->num_ports; ++i) {
866 MLX4_GET(field, outbox, QUERY_DEV_CAP_VL_PORT_OFFSET);
867 dev_cap->max_vl[i] = field >> 4;
868 MLX4_GET(field, outbox, QUERY_DEV_CAP_MTU_WIDTH_OFFSET);
869 dev_cap->ib_mtu[i] = field >> 4;
870 dev_cap->max_port_width[i] = field & 0xf;
871 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_GID_OFFSET);
872 dev_cap->max_gids[i] = 1 << (field & 0xf);
873 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_PKEY_OFFSET);
874 dev_cap->max_pkeys[i] = 1 << (field & 0xf);
877 #define QUERY_PORT_SUPPORTED_TYPE_OFFSET 0x00
878 #define QUERY_PORT_MTU_OFFSET 0x01
879 #define QUERY_PORT_ETH_MTU_OFFSET 0x02
880 #define QUERY_PORT_WIDTH_OFFSET 0x06
881 #define QUERY_PORT_MAX_GID_PKEY_OFFSET 0x07
882 #define QUERY_PORT_MAX_MACVLAN_OFFSET 0x0a
883 #define QUERY_PORT_MAX_VL_OFFSET 0x0b
884 #define QUERY_PORT_MAC_OFFSET 0x10
885 #define QUERY_PORT_TRANS_VENDOR_OFFSET 0x18
886 #define QUERY_PORT_WAVELENGTH_OFFSET 0x1c
887 #define QUERY_PORT_TRANS_CODE_OFFSET 0x20
889 for (i = 1; i <= dev_cap->num_ports; ++i) {
890 err = mlx4_cmd_box(dev, 0, mailbox->dma, i, 0, MLX4_CMD_QUERY_PORT,
891 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
895 MLX4_GET(field, outbox, QUERY_PORT_SUPPORTED_TYPE_OFFSET);
896 dev_cap->supported_port_types[i] = field & 3;
897 dev_cap->suggested_type[i] = (field >> 3) & 1;
898 dev_cap->default_sense[i] = (field >> 4) & 1;
899 MLX4_GET(field, outbox, QUERY_PORT_MTU_OFFSET);
900 dev_cap->ib_mtu[i] = field & 0xf;
901 MLX4_GET(field, outbox, QUERY_PORT_WIDTH_OFFSET);
902 dev_cap->max_port_width[i] = field & 0xf;
903 MLX4_GET(field, outbox, QUERY_PORT_MAX_GID_PKEY_OFFSET);
904 dev_cap->max_gids[i] = 1 << (field >> 4);
905 dev_cap->max_pkeys[i] = 1 << (field & 0xf);
906 MLX4_GET(field, outbox, QUERY_PORT_MAX_VL_OFFSET);
907 dev_cap->max_vl[i] = field & 0xf;
908 MLX4_GET(field, outbox, QUERY_PORT_MAX_MACVLAN_OFFSET);
909 dev_cap->log_max_macs[i] = field & 0xf;
910 dev_cap->log_max_vlans[i] = field >> 4;
911 MLX4_GET(dev_cap->eth_mtu[i], outbox, QUERY_PORT_ETH_MTU_OFFSET);
912 MLX4_GET(dev_cap->def_mac[i], outbox, QUERY_PORT_MAC_OFFSET);
913 MLX4_GET(field32, outbox, QUERY_PORT_TRANS_VENDOR_OFFSET);
914 dev_cap->trans_type[i] = field32 >> 24;
915 dev_cap->vendor_oui[i] = field32 & 0xffffff;
916 MLX4_GET(dev_cap->wavelength[i], outbox, QUERY_PORT_WAVELENGTH_OFFSET);
917 MLX4_GET(dev_cap->trans_code[i], outbox, QUERY_PORT_TRANS_CODE_OFFSET);
921 mlx4_dbg(dev, "Base MM extensions: flags %08x, rsvd L_Key %08x\n",
922 dev_cap->bmme_flags, dev_cap->reserved_lkey);
925 * Each UAR has 4 EQ doorbells; so if a UAR is reserved, then
926 * we can't use any EQs whose doorbell falls on that page,
927 * even if the EQ itself isn't reserved.
929 if (dev_cap->num_sys_eqs == 0)
930 dev_cap->reserved_eqs = max(dev_cap->reserved_uars * 4,
931 dev_cap->reserved_eqs);
933 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_SYS_EQS;
935 mlx4_dbg(dev, "Max ICM size %lld MB\n",
936 (unsigned long long) dev_cap->max_icm_sz >> 20);
937 mlx4_dbg(dev, "Max QPs: %d, reserved QPs: %d, entry size: %d\n",
938 dev_cap->max_qps, dev_cap->reserved_qps, dev_cap->qpc_entry_sz);
939 mlx4_dbg(dev, "Max SRQs: %d, reserved SRQs: %d, entry size: %d\n",
940 dev_cap->max_srqs, dev_cap->reserved_srqs, dev_cap->srq_entry_sz);
941 mlx4_dbg(dev, "Max CQs: %d, reserved CQs: %d, entry size: %d\n",
942 dev_cap->max_cqs, dev_cap->reserved_cqs, dev_cap->cqc_entry_sz);
943 mlx4_dbg(dev, "Num sys EQs: %d, max EQs: %d, reserved EQs: %d, entry size: %d\n",
944 dev_cap->num_sys_eqs, dev_cap->max_eqs, dev_cap->reserved_eqs,
945 dev_cap->eqc_entry_sz);
946 mlx4_dbg(dev, "reserved MPTs: %d, reserved MTTs: %d\n",
947 dev_cap->reserved_mrws, dev_cap->reserved_mtts);
948 mlx4_dbg(dev, "Max PDs: %d, reserved PDs: %d, reserved UARs: %d\n",
949 dev_cap->max_pds, dev_cap->reserved_pds, dev_cap->reserved_uars);
950 mlx4_dbg(dev, "Max QP/MCG: %d, reserved MGMs: %d\n",
951 dev_cap->max_qp_per_mcg, dev_cap->reserved_mgms);
952 mlx4_dbg(dev, "Max CQEs: %d, max WQEs: %d, max SRQ WQEs: %d\n",
953 dev_cap->max_cq_sz, dev_cap->max_qp_sz, dev_cap->max_srq_sz);
954 mlx4_dbg(dev, "Local CA ACK delay: %d, max MTU: %d, port width cap: %d\n",
955 dev_cap->local_ca_ack_delay, 128 << dev_cap->ib_mtu[1],
956 dev_cap->max_port_width[1]);
957 mlx4_dbg(dev, "Max SQ desc size: %d, max SQ S/G: %d\n",
958 dev_cap->max_sq_desc_sz, dev_cap->max_sq_sg);
959 mlx4_dbg(dev, "Max RQ desc size: %d, max RQ S/G: %d\n",
960 dev_cap->max_rq_desc_sz, dev_cap->max_rq_sg);
961 mlx4_dbg(dev, "Max GSO size: %d\n", dev_cap->max_gso_sz);
962 mlx4_dbg(dev, "Max counters: %d\n", dev_cap->max_counters);
963 mlx4_dbg(dev, "Max RSS Table size: %d\n", dev_cap->max_rss_tbl_sz);
965 dump_dev_cap_flags(dev, dev_cap->flags);
966 dump_dev_cap_flags2(dev, dev_cap->flags2);
969 mlx4_free_cmd_mailbox(dev, mailbox);
973 int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave,
974 struct mlx4_vhcr *vhcr,
975 struct mlx4_cmd_mailbox *inbox,
976 struct mlx4_cmd_mailbox *outbox,
977 struct mlx4_cmd_info *cmd)
986 struct mlx4_active_ports actv_ports;
988 err = mlx4_cmd_box(dev, 0, outbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP,
989 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
993 /* add port mng change event capability and disable mw type 1
994 * unconditionally to slaves
996 MLX4_GET(flags, outbox->buf, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
997 flags |= MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV;
998 flags &= ~MLX4_DEV_CAP_FLAG_MEM_WINDOW;
999 actv_ports = mlx4_get_active_ports(dev, slave);
1000 first_port = find_first_bit(actv_ports.ports, dev->caps.num_ports);
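	/* Remap the per-port WOL capability bits from the physical port
	 * numbering into the slave's contiguous port numbering, and clear the
	 * bits of any ports this slave does not own. */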
1001 for (slave_port = 0, real_port = first_port;
1002 real_port < first_port +
1003 bitmap_weight(actv_ports.ports, dev->caps.num_ports);
1004 ++real_port, ++slave_port) {
1005 if (flags & (MLX4_DEV_CAP_FLAG_WOL_PORT1 << real_port))
1006 flags |= MLX4_DEV_CAP_FLAG_WOL_PORT1 << slave_port;
1008 flags &= ~(MLX4_DEV_CAP_FLAG_WOL_PORT1 << slave_port);
1010 for (; slave_port < dev->caps.num_ports; ++slave_port)
1011 flags &= ~(MLX4_DEV_CAP_FLAG_WOL_PORT1 << slave_port);
1012 MLX4_PUT(outbox->buf, flags, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
1014 MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_VL_PORT_OFFSET);
1016 field |= bitmap_weight(actv_ports.ports, dev->caps.num_ports) & 0x0F;
1017 MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_VL_PORT_OFFSET);
1019 /* For guests, disable timestamp */
1020 MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET);
1022 MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET);
1024 /* For guests, disable vxlan tunneling */
1025 MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_VXLAN);
1027 MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_VXLAN);
1029 /* For guests, report Blueflame disabled */
1030 MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_BF_OFFSET);
1032 MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_BF_OFFSET);
1034 /* For guests, disable mw type 2 */
1035 MLX4_GET(bmme_flags, outbox->buf, QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
1036 bmme_flags &= ~MLX4_BMME_FLAG_TYPE_2_WIN;
1037 MLX4_PUT(outbox->buf, bmme_flags, QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
1039 /* turn off device-managed steering capability if not enabled */
1040 if (dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED) {
1041 MLX4_GET(field, outbox->buf,
1042 QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET);
1044 MLX4_PUT(outbox->buf, field,
1045 QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET);
1048 /* turn off ipoib managed steering for guests */
1049 MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET);
1051 MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET);
1056 int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave,
1057 struct mlx4_vhcr *vhcr,
1058 struct mlx4_cmd_mailbox *inbox,
1059 struct mlx4_cmd_mailbox *outbox,
1060 struct mlx4_cmd_info *cmd)
1062 struct mlx4_priv *priv = mlx4_priv(dev);
1067 int admin_link_state;
1068 int port = mlx4_slave_convert_port(dev, slave,
1069 vhcr->in_modifier & 0xFF);
1071 #define MLX4_VF_PORT_NO_LINK_SENSE_MASK 0xE0
1072 #define MLX4_PORT_LINK_UP_MASK 0x80
1073 #define QUERY_PORT_CUR_MAX_PKEY_OFFSET 0x0c
1074 #define QUERY_PORT_CUR_MAX_GID_OFFSET 0x0e
1079 /* Protect against untrusted guests: enforce that this is the
1080 * QUERY_PORT general query.
1082 if (vhcr->op_modifier || vhcr->in_modifier & ~0xFF)
1085 vhcr->in_modifier = port;
1087 err = mlx4_cmd_box(dev, 0, outbox->dma, vhcr->in_modifier, 0,
1088 MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
1091 if (!err && dev->caps.function != slave) {
1092 def_mac = priv->mfunc.master.vf_oper[slave].vport[vhcr->in_modifier].state.mac;
1093 MLX4_PUT(outbox->buf, def_mac, QUERY_PORT_MAC_OFFSET);
1095 /* get port type - currently only eth is enabled */
1096 MLX4_GET(port_type, outbox->buf,
1097 QUERY_PORT_SUPPORTED_TYPE_OFFSET);
1099 /* No link sensing allowed */
1100 port_type &= MLX4_VF_PORT_NO_LINK_SENSE_MASK;
1101 /* set port type to currently operating port type */
1102 port_type |= (dev->caps.port_type[vhcr->in_modifier] & 0x3);
1104 admin_link_state = priv->mfunc.master.vf_oper[slave].vport[vhcr->in_modifier].state.link_state;
1105 if (IFLA_VF_LINK_STATE_ENABLE == admin_link_state)
1106 port_type |= MLX4_PORT_LINK_UP_MASK;
1107 else if (IFLA_VF_LINK_STATE_DISABLE == admin_link_state)
1108 port_type &= ~MLX4_PORT_LINK_UP_MASK;
1110 MLX4_PUT(outbox->buf, port_type,
1111 QUERY_PORT_SUPPORTED_TYPE_OFFSET);
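	/* Report the GID and P_Key table sizes as seen by this slave rather
	 * than the physical table sizes. */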
1113 if (dev->caps.port_type[vhcr->in_modifier] == MLX4_PORT_TYPE_ETH)
1114 short_field = mlx4_get_slave_num_gids(dev, slave, port);
1116 short_field = 1; /* slave max gids */
1117 MLX4_PUT(outbox->buf, short_field,
1118 QUERY_PORT_CUR_MAX_GID_OFFSET);
1120 short_field = dev->caps.pkey_table_len[vhcr->in_modifier];
1121 MLX4_PUT(outbox->buf, short_field,
1122 QUERY_PORT_CUR_MAX_PKEY_OFFSET);
1128 int mlx4_get_slave_pkey_gid_tbl_len(struct mlx4_dev *dev, u8 port,
1129 int *gid_tbl_len, int *pkey_tbl_len)
1131 struct mlx4_cmd_mailbox *mailbox;
1136 mailbox = mlx4_alloc_cmd_mailbox(dev);
1137 if (IS_ERR(mailbox))
1138 return PTR_ERR(mailbox);
1140 err = mlx4_cmd_box(dev, 0, mailbox->dma, port, 0,
1141 MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
1146 outbox = mailbox->buf;
1148 MLX4_GET(field, outbox, QUERY_PORT_CUR_MAX_GID_OFFSET);
1149 *gid_tbl_len = field;
1151 MLX4_GET(field, outbox, QUERY_PORT_CUR_MAX_PKEY_OFFSET);
1152 *pkey_tbl_len = field;
1155 mlx4_free_cmd_mailbox(dev, mailbox);
1158 EXPORT_SYMBOL(mlx4_get_slave_pkey_gid_tbl_len);
1160 int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt)
1162 struct mlx4_cmd_mailbox *mailbox;
1163 struct mlx4_icm_iter iter;
1171 mailbox = mlx4_alloc_cmd_mailbox(dev);
1172 if (IS_ERR(mailbox))
1173 return PTR_ERR(mailbox);
1174 pages = mailbox->buf;
1176 for (mlx4_icm_first(icm, &iter);
1177 !mlx4_icm_last(&iter);
1178 mlx4_icm_next(&iter)) {
1180 * We have to pass pages that are aligned to their
1181 * size, so find the least significant 1 in the
1182 * address or size and use that as our log2 size.
1184 lg = ffs(mlx4_icm_addr(&iter) | mlx4_icm_size(&iter)) - 1;
1185 if (lg < MLX4_ICM_PAGE_SHIFT) {
1186 mlx4_warn(dev, "Got FW area not aligned to %d (%llx/%lx)\n",
1188 (unsigned long long) mlx4_icm_addr(&iter),
1189 mlx4_icm_size(&iter));
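		/* Emit one (virtual address, physical address | log2 chunk
		 * size) pair per ICM page and post the command whenever the
		 * mailbox fills up (MLX4_MAILBOX_SIZE / 16 entries). */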
1194 for (i = 0; i < mlx4_icm_size(&iter) >> lg; ++i) {
1196 pages[nent * 2] = cpu_to_be64(virt);
1200 pages[nent * 2 + 1] =
1201 cpu_to_be64((mlx4_icm_addr(&iter) + (i << lg)) |
1202 (lg - MLX4_ICM_PAGE_SHIFT));
1203 ts += 1 << (lg - 10);
1206 if (++nent == MLX4_MAILBOX_SIZE / 16) {
1207 err = mlx4_cmd(dev, mailbox->dma, nent, 0, op,
1208 MLX4_CMD_TIME_CLASS_B,
1218 err = mlx4_cmd(dev, mailbox->dma, nent, 0, op,
1219 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
1224 case MLX4_CMD_MAP_FA:
1225 mlx4_dbg(dev, "Mapped %d chunks/%d KB for FW\n", tc, ts);
1227 case MLX4_CMD_MAP_ICM_AUX:
1228 mlx4_dbg(dev, "Mapped %d chunks/%d KB for ICM aux\n", tc, ts);
1230 case MLX4_CMD_MAP_ICM:
1231 mlx4_dbg(dev, "Mapped %d chunks/%d KB at %llx for ICM\n",
1232 tc, ts, (unsigned long long) virt - (ts << 10));
1237 mlx4_free_cmd_mailbox(dev, mailbox);
1241 int mlx4_MAP_FA(struct mlx4_dev *dev, struct mlx4_icm *icm)
1243 return mlx4_map_cmd(dev, MLX4_CMD_MAP_FA, icm, -1);
1246 int mlx4_UNMAP_FA(struct mlx4_dev *dev)
1248 return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_UNMAP_FA,
1249 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
1253 int mlx4_RUN_FW(struct mlx4_dev *dev)
1255 return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_RUN_FW,
1256 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
1259 int mlx4_QUERY_FW(struct mlx4_dev *dev)
1261 struct mlx4_fw *fw = &mlx4_priv(dev)->fw;
1262 struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
1263 struct mlx4_cmd_mailbox *mailbox;
1270 #define QUERY_FW_OUT_SIZE 0x100
1271 #define QUERY_FW_VER_OFFSET 0x00
1272 #define QUERY_FW_PPF_ID 0x09
1273 #define QUERY_FW_CMD_IF_REV_OFFSET 0x0a
1274 #define QUERY_FW_MAX_CMD_OFFSET 0x0f
1275 #define QUERY_FW_ERR_START_OFFSET 0x30
1276 #define QUERY_FW_ERR_SIZE_OFFSET 0x38
1277 #define QUERY_FW_ERR_BAR_OFFSET 0x3c
1279 #define QUERY_FW_SIZE_OFFSET 0x00
1280 #define QUERY_FW_CLR_INT_BASE_OFFSET 0x20
1281 #define QUERY_FW_CLR_INT_BAR_OFFSET 0x28
1283 #define QUERY_FW_COMM_BASE_OFFSET 0x40
1284 #define QUERY_FW_COMM_BAR_OFFSET 0x48
1286 #define QUERY_FW_CLOCK_OFFSET 0x50
1287 #define QUERY_FW_CLOCK_BAR 0x58
1289 mailbox = mlx4_alloc_cmd_mailbox(dev);
1290 if (IS_ERR(mailbox))
1291 return PTR_ERR(mailbox);
1292 outbox = mailbox->buf;
1294 err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_FW,
1295 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
1299 MLX4_GET(fw_ver, outbox, QUERY_FW_VER_OFFSET);
1301 * FW subminor version is at more significant bits than minor
1302 * version, so swap here.
1304 dev->caps.fw_ver = (fw_ver & 0xffff00000000ull) |
1305 ((fw_ver & 0xffff0000ull) >> 16) |
1306 ((fw_ver & 0x0000ffffull) << 16);
1308 MLX4_GET(lg, outbox, QUERY_FW_PPF_ID);
1309 dev->caps.function = lg;
1311 if (mlx4_is_slave(dev))
1315 MLX4_GET(cmd_if_rev, outbox, QUERY_FW_CMD_IF_REV_OFFSET);
1316 if (cmd_if_rev < MLX4_COMMAND_INTERFACE_MIN_REV ||
1317 cmd_if_rev > MLX4_COMMAND_INTERFACE_MAX_REV) {
1318 mlx4_err(dev, "Installed FW has unsupported command interface revision %d\n",
1320 mlx4_err(dev, "(Installed FW version is %d.%d.%03d)\n",
1321 (int) (dev->caps.fw_ver >> 32),
1322 (int) (dev->caps.fw_ver >> 16) & 0xffff,
1323 (int) dev->caps.fw_ver & 0xffff);
1324 mlx4_err(dev, "This driver version supports only revisions %d to %d\n",
1325 MLX4_COMMAND_INTERFACE_MIN_REV, MLX4_COMMAND_INTERFACE_MAX_REV);
1330 if (cmd_if_rev < MLX4_COMMAND_INTERFACE_NEW_PORT_CMDS)
1331 dev->flags |= MLX4_FLAG_OLD_PORT_CMDS;
1333 MLX4_GET(lg, outbox, QUERY_FW_MAX_CMD_OFFSET);
1334 cmd->max_cmds = 1 << lg;
1336 mlx4_dbg(dev, "FW version %d.%d.%03d (cmd intf rev %d), max commands %d\n",
1337 (int) (dev->caps.fw_ver >> 32),
1338 (int) (dev->caps.fw_ver >> 16) & 0xffff,
1339 (int) dev->caps.fw_ver & 0xffff,
1340 cmd_if_rev, cmd->max_cmds);
1342 MLX4_GET(fw->catas_offset, outbox, QUERY_FW_ERR_START_OFFSET);
1343 MLX4_GET(fw->catas_size, outbox, QUERY_FW_ERR_SIZE_OFFSET);
1344 MLX4_GET(fw->catas_bar, outbox, QUERY_FW_ERR_BAR_OFFSET);
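	/* The BAR fields below appear to hold a 64-bit BAR index in bits 31:6;
	 * multiplying by two maps that index to the PCI resource number, since
	 * each 64-bit BAR spans two 32-bit BAR slots. */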
1345 fw->catas_bar = (fw->catas_bar >> 6) * 2;
1347 mlx4_dbg(dev, "Catastrophic error buffer at 0x%llx, size 0x%x, BAR %d\n",
1348 (unsigned long long) fw->catas_offset, fw->catas_size, fw->catas_bar);
1350 MLX4_GET(fw->fw_pages, outbox, QUERY_FW_SIZE_OFFSET);
1351 MLX4_GET(fw->clr_int_base, outbox, QUERY_FW_CLR_INT_BASE_OFFSET);
1352 MLX4_GET(fw->clr_int_bar, outbox, QUERY_FW_CLR_INT_BAR_OFFSET);
1353 fw->clr_int_bar = (fw->clr_int_bar >> 6) * 2;
1355 MLX4_GET(fw->comm_base, outbox, QUERY_FW_COMM_BASE_OFFSET);
1356 MLX4_GET(fw->comm_bar, outbox, QUERY_FW_COMM_BAR_OFFSET);
1357 fw->comm_bar = (fw->comm_bar >> 6) * 2;
1358 mlx4_dbg(dev, "Communication vector bar:%d offset:0x%llx\n",
1359 fw->comm_bar, fw->comm_base);
1360 mlx4_dbg(dev, "FW size %d KB\n", fw->fw_pages >> 2);
1362 MLX4_GET(fw->clock_offset, outbox, QUERY_FW_CLOCK_OFFSET);
1363 MLX4_GET(fw->clock_bar, outbox, QUERY_FW_CLOCK_BAR);
1364 fw->clock_bar = (fw->clock_bar >> 6) * 2;
1365 mlx4_dbg(dev, "Internal clock bar:%d offset:0x%llx\n",
1366 fw->clock_bar, fw->clock_offset);
1369 * Round up number of system pages needed in case
1370 * MLX4_ICM_PAGE_SIZE < PAGE_SIZE.
1373 ALIGN(fw->fw_pages, PAGE_SIZE / MLX4_ICM_PAGE_SIZE) >>
1374 (PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT);
1376 mlx4_dbg(dev, "Clear int @ %llx, BAR %d\n",
1377 (unsigned long long) fw->clr_int_base, fw->clr_int_bar);
1380 mlx4_free_cmd_mailbox(dev, mailbox);
1384 int mlx4_QUERY_FW_wrapper(struct mlx4_dev *dev, int slave,
1385 struct mlx4_vhcr *vhcr,
1386 struct mlx4_cmd_mailbox *inbox,
1387 struct mlx4_cmd_mailbox *outbox,
1388 struct mlx4_cmd_info *cmd)
1393 outbuf = outbox->buf;
1394 err = mlx4_cmd_box(dev, 0, outbox->dma, 0, 0, MLX4_CMD_QUERY_FW,
1395 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
1399 /* for slaves, set pci PPF ID to invalid and zero out everything
1400 * else except FW version */
1401 outbuf[0] = outbuf[1] = 0;
1402 memset(&outbuf[8], 0, QUERY_FW_OUT_SIZE - 8);
1403 outbuf[QUERY_FW_PPF_ID] = MLX4_INVALID_SLAVE_ID;
1408 static void get_board_id(void *vsd, char *board_id)
1412 #define VSD_OFFSET_SIG1 0x00
1413 #define VSD_OFFSET_SIG2 0xde
1414 #define VSD_OFFSET_MLX_BOARD_ID 0xd0
1415 #define VSD_OFFSET_TS_BOARD_ID 0x20
1417 #define VSD_SIGNATURE_TOPSPIN 0x5ad
1419 memset(board_id, 0, MLX4_BOARD_ID_LEN);
1421 if (be16_to_cpup(vsd + VSD_OFFSET_SIG1) == VSD_SIGNATURE_TOPSPIN &&
1422 be16_to_cpup(vsd + VSD_OFFSET_SIG2) == VSD_SIGNATURE_TOPSPIN) {
1423 strlcpy(board_id, vsd + VSD_OFFSET_TS_BOARD_ID, MLX4_BOARD_ID_LEN);
1426 * The board ID is a string but the firmware byte
1427 * swaps each 4-byte word before passing it back to
1428 * us. Therefore we need to swab it before printing.
1430 for (i = 0; i < 4; ++i)
1431 ((u32 *) board_id)[i] =
1432 swab32(*(u32 *) (vsd + VSD_OFFSET_MLX_BOARD_ID + i * 4));
1436 int mlx4_QUERY_ADAPTER(struct mlx4_dev *dev, struct mlx4_adapter *adapter)
1438 struct mlx4_cmd_mailbox *mailbox;
1442 #define QUERY_ADAPTER_OUT_SIZE 0x100
1443 #define QUERY_ADAPTER_INTA_PIN_OFFSET 0x10
1444 #define QUERY_ADAPTER_VSD_OFFSET 0x20
1446 mailbox = mlx4_alloc_cmd_mailbox(dev);
1447 if (IS_ERR(mailbox))
1448 return PTR_ERR(mailbox);
1449 outbox = mailbox->buf;
1451 err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_ADAPTER,
1452 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
1456 MLX4_GET(adapter->inta_pin, outbox, QUERY_ADAPTER_INTA_PIN_OFFSET);
1458 get_board_id(outbox + QUERY_ADAPTER_VSD_OFFSET / 4,
1462 mlx4_free_cmd_mailbox(dev, mailbox);
1466 int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
1468 struct mlx4_cmd_mailbox *mailbox;
1472 #define INIT_HCA_IN_SIZE 0x200
1473 #define INIT_HCA_VERSION_OFFSET 0x000
1474 #define INIT_HCA_VERSION 2
1475 #define INIT_HCA_VXLAN_OFFSET 0x0c
1476 #define INIT_HCA_CACHELINE_SZ_OFFSET 0x0e
1477 #define INIT_HCA_FLAGS_OFFSET 0x014
1478 #define INIT_HCA_QPC_OFFSET 0x020
1479 #define INIT_HCA_QPC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x10)
1480 #define INIT_HCA_LOG_QP_OFFSET (INIT_HCA_QPC_OFFSET + 0x17)
1481 #define INIT_HCA_SRQC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x28)
1482 #define INIT_HCA_LOG_SRQ_OFFSET (INIT_HCA_QPC_OFFSET + 0x2f)
1483 #define INIT_HCA_CQC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x30)
1484 #define INIT_HCA_LOG_CQ_OFFSET (INIT_HCA_QPC_OFFSET + 0x37)
1485 #define INIT_HCA_EQE_CQE_OFFSETS (INIT_HCA_QPC_OFFSET + 0x38)
1486 #define INIT_HCA_EQE_CQE_STRIDE_OFFSET (INIT_HCA_QPC_OFFSET + 0x3b)
1487 #define INIT_HCA_ALTC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x40)
1488 #define INIT_HCA_AUXC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x50)
1489 #define INIT_HCA_EQC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x60)
1490 #define INIT_HCA_LOG_EQ_OFFSET (INIT_HCA_QPC_OFFSET + 0x67)
1491 #define INIT_HCA_NUM_SYS_EQS_OFFSET (INIT_HCA_QPC_OFFSET + 0x6a)
1492 #define INIT_HCA_RDMARC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x70)
1493 #define INIT_HCA_LOG_RD_OFFSET (INIT_HCA_QPC_OFFSET + 0x77)
1494 #define INIT_HCA_MCAST_OFFSET 0x0c0
1495 #define INIT_HCA_MC_BASE_OFFSET (INIT_HCA_MCAST_OFFSET + 0x00)
1496 #define INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x12)
1497 #define INIT_HCA_LOG_MC_HASH_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x16)
1498 #define INIT_HCA_UC_STEERING_OFFSET (INIT_HCA_MCAST_OFFSET + 0x18)
1499 #define INIT_HCA_LOG_MC_TABLE_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x1b)
1500 #define INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN 0x6
1501 #define INIT_HCA_FS_PARAM_OFFSET 0x1d0
1502 #define INIT_HCA_FS_BASE_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x00)
1503 #define INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x12)
1504 #define INIT_HCA_FS_LOG_TABLE_SZ_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x1b)
1505 #define INIT_HCA_FS_ETH_BITS_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x21)
1506 #define INIT_HCA_FS_ETH_NUM_ADDRS_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x22)
1507 #define INIT_HCA_FS_IB_BITS_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x25)
1508 #define INIT_HCA_FS_IB_NUM_ADDRS_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x26)
1509 #define INIT_HCA_TPT_OFFSET 0x0f0
1510 #define INIT_HCA_DMPT_BASE_OFFSET (INIT_HCA_TPT_OFFSET + 0x00)
1511 #define INIT_HCA_TPT_MW_OFFSET (INIT_HCA_TPT_OFFSET + 0x08)
1512 #define INIT_HCA_LOG_MPT_SZ_OFFSET (INIT_HCA_TPT_OFFSET + 0x0b)
1513 #define INIT_HCA_MTT_BASE_OFFSET (INIT_HCA_TPT_OFFSET + 0x10)
1514 #define INIT_HCA_CMPT_BASE_OFFSET (INIT_HCA_TPT_OFFSET + 0x18)
1515 #define INIT_HCA_UAR_OFFSET 0x120
1516 #define INIT_HCA_LOG_UAR_SZ_OFFSET (INIT_HCA_UAR_OFFSET + 0x0a)
1517 #define INIT_HCA_UAR_PAGE_SZ_OFFSET (INIT_HCA_UAR_OFFSET + 0x0b)
1519 mailbox = mlx4_alloc_cmd_mailbox(dev);
1520 if (IS_ERR(mailbox))
1521 return PTR_ERR(mailbox);
1522 inbox = mailbox->buf;
1524 *((u8 *) mailbox->buf + INIT_HCA_VERSION_OFFSET) = INIT_HCA_VERSION;
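	/* The cache line size is encoded for the firmware as log2(bytes) - 4
	 * (i.e. in units of 16 bytes), placed in bits 7:5 of the byte at
	 * INIT_HCA_CACHELINE_SZ_OFFSET. */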
1526 *((u8 *) mailbox->buf + INIT_HCA_CACHELINE_SZ_OFFSET) =
1527 (ilog2(cache_line_size()) - 4) << 5;
1529 #if defined(__LITTLE_ENDIAN)
1530 *(inbox + INIT_HCA_FLAGS_OFFSET / 4) &= ~cpu_to_be32(1 << 1);
1531 #elif defined(__BIG_ENDIAN)
1532 *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 1);
533 #else
534 #error Host endianness not defined
535 #endif
1536 /* Check port for UD address vector: */
1537 *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1);
1539 /* Enable IPoIB checksumming if we can: */
1540 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM)
1541 *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 3);
1543 /* Enable QoS support if module parameter set */
1545 *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 2);
1547 /* enable counters */
1548 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS)
1549 *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 4);
1551 /* CX3 is capable of extending CQEs/EQEs from 32 to 64 bytes */
1552 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_64B_EQE) {
1553 *(inbox + INIT_HCA_EQE_CQE_OFFSETS / 4) |= cpu_to_be32(1 << 29);
1554 dev->caps.eqe_size = 64;
1555 dev->caps.eqe_factor = 1;
1557 dev->caps.eqe_size = 32;
1558 dev->caps.eqe_factor = 0;
1561 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_64B_CQE) {
1562 *(inbox + INIT_HCA_EQE_CQE_OFFSETS / 4) |= cpu_to_be32(1 << 30);
1563 dev->caps.cqe_size = 64;
1564 dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE;
1566 dev->caps.cqe_size = 32;
1569 /* CX3 is capable of extending CQEs/EQEs to strides larger than 64B */
1570 if ((dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_EQE_STRIDE) &&
1571 (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_CQE_STRIDE)) {
1572 dev->caps.eqe_size = cache_line_size();
1573 dev->caps.cqe_size = cache_line_size();
1574 dev->caps.eqe_factor = 0;
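		/* EQE and CQE strides were both set to the cache line size
		 * above, so the same encoded value (log2(stride) - 5) is
		 * written to both nibbles of the stride byte. */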
1575 MLX4_PUT(inbox, (u8)((ilog2(dev->caps.eqe_size) - 5) << 4 |
1576 (ilog2(dev->caps.eqe_size) - 5)),
1577 INIT_HCA_EQE_CQE_STRIDE_OFFSET);
1579 /* Userspace still needs to know that it should support CQEs larger than 32B */
1580 dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE;
1583 /* QPC/EEC/CQC/EQC/RDMARC attributes */
1585 MLX4_PUT(inbox, param->qpc_base, INIT_HCA_QPC_BASE_OFFSET);
1586 MLX4_PUT(inbox, param->log_num_qps, INIT_HCA_LOG_QP_OFFSET);
1587 MLX4_PUT(inbox, param->srqc_base, INIT_HCA_SRQC_BASE_OFFSET);
1588 MLX4_PUT(inbox, param->log_num_srqs, INIT_HCA_LOG_SRQ_OFFSET);
1589 MLX4_PUT(inbox, param->cqc_base, INIT_HCA_CQC_BASE_OFFSET);
1590 MLX4_PUT(inbox, param->log_num_cqs, INIT_HCA_LOG_CQ_OFFSET);
1591 MLX4_PUT(inbox, param->altc_base, INIT_HCA_ALTC_BASE_OFFSET);
1592 MLX4_PUT(inbox, param->auxc_base, INIT_HCA_AUXC_BASE_OFFSET);
1593 MLX4_PUT(inbox, param->eqc_base, INIT_HCA_EQC_BASE_OFFSET);
1594 MLX4_PUT(inbox, param->log_num_eqs, INIT_HCA_LOG_EQ_OFFSET);
1595 MLX4_PUT(inbox, param->num_sys_eqs, INIT_HCA_NUM_SYS_EQS_OFFSET);
1596 MLX4_PUT(inbox, param->rdmarc_base, INIT_HCA_RDMARC_BASE_OFFSET);
1597 MLX4_PUT(inbox, param->log_rd_per_qp, INIT_HCA_LOG_RD_OFFSET);
1599 /* steering attributes */
1600 if (dev->caps.steering_mode ==
1601 MLX4_STEERING_MODE_DEVICE_MANAGED) {
1602 *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |=
1604 INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN);
1606 MLX4_PUT(inbox, param->mc_base, INIT_HCA_FS_BASE_OFFSET);
1607 MLX4_PUT(inbox, param->log_mc_entry_sz,
1608 INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET);
1609 MLX4_PUT(inbox, param->log_mc_table_sz,
1610 INIT_HCA_FS_LOG_TABLE_SZ_OFFSET);
1611 /* Enable Ethernet flow steering
1612 * with udp unicast and tcp unicast
1614 MLX4_PUT(inbox, (u8) (MLX4_FS_UDP_UC_EN | MLX4_FS_TCP_UC_EN),
1615 INIT_HCA_FS_ETH_BITS_OFFSET);
1616 MLX4_PUT(inbox, (u16) MLX4_FS_NUM_OF_L2_ADDR,
1617 INIT_HCA_FS_ETH_NUM_ADDRS_OFFSET);
1618 /* Enable IPoIB flow steering
1619 * with udp unicast and tcp unicast
1621 MLX4_PUT(inbox, (u8) (MLX4_FS_UDP_UC_EN | MLX4_FS_TCP_UC_EN),
1622 INIT_HCA_FS_IB_BITS_OFFSET);
1623 MLX4_PUT(inbox, (u16) MLX4_FS_NUM_OF_L2_ADDR,
1624 INIT_HCA_FS_IB_NUM_ADDRS_OFFSET);
1626 MLX4_PUT(inbox, param->mc_base, INIT_HCA_MC_BASE_OFFSET);
1627 MLX4_PUT(inbox, param->log_mc_entry_sz,
1628 INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
1629 MLX4_PUT(inbox, param->log_mc_hash_sz,
1630 INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
1631 MLX4_PUT(inbox, param->log_mc_table_sz,
1632 INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
1633 if (dev->caps.steering_mode == MLX4_STEERING_MODE_B0)
1634 MLX4_PUT(inbox, (u8) (1 << 3),
1635 INIT_HCA_UC_STEERING_OFFSET);
1638 /* TPT attributes */
1640 MLX4_PUT(inbox, param->dmpt_base, INIT_HCA_DMPT_BASE_OFFSET);
1641 MLX4_PUT(inbox, param->mw_enabled, INIT_HCA_TPT_MW_OFFSET);
1642 MLX4_PUT(inbox, param->log_mpt_sz, INIT_HCA_LOG_MPT_SZ_OFFSET);
1643 MLX4_PUT(inbox, param->mtt_base, INIT_HCA_MTT_BASE_OFFSET);
1644 MLX4_PUT(inbox, param->cmpt_base, INIT_HCA_CMPT_BASE_OFFSET);
1646 /* UAR attributes */
1648 MLX4_PUT(inbox, param->uar_page_sz, INIT_HCA_UAR_PAGE_SZ_OFFSET);
1649 MLX4_PUT(inbox, param->log_uar_sz, INIT_HCA_LOG_UAR_SZ_OFFSET);
1651 /* set parser VXLAN attributes */
1652 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS) {
1653 u8 parser_params = 0;
1654 MLX4_PUT(inbox, parser_params, INIT_HCA_VXLAN_OFFSET);
1657 err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_INIT_HCA, 10000,
1658 MLX4_CMD_NATIVE);
1660 if (err)
1661 mlx4_err(dev, "INIT_HCA returns %d\n", err);
1663 mlx4_free_cmd_mailbox(dev, mailbox);
1664 return err;
1665 }
1667 int mlx4_QUERY_HCA(struct mlx4_dev *dev,
1668 struct mlx4_init_hca_param *param)
1669 {
1670 struct mlx4_cmd_mailbox *mailbox;
1671 __be32 *outbox;
1672 u32 dword_field;
1673 int err;
1674 u8 byte_field;
1676 #define QUERY_HCA_GLOBAL_CAPS_OFFSET 0x04
1677 #define QUERY_HCA_CORE_CLOCK_OFFSET 0x0c
1679 mailbox = mlx4_alloc_cmd_mailbox(dev);
1680 if (IS_ERR(mailbox))
1681 return PTR_ERR(mailbox);
1682 outbox = mailbox->buf;
1684 err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0,
1685 MLX4_CMD_QUERY_HCA,
1686 MLX4_CMD_TIME_CLASS_B,
1687 !mlx4_is_slave(dev));
1688 if (err)
1689 goto out;
1691 MLX4_GET(param->global_caps, outbox, QUERY_HCA_GLOBAL_CAPS_OFFSET);
1692 MLX4_GET(param->hca_core_clock, outbox, QUERY_HCA_CORE_CLOCK_OFFSET);
1694 /* QPC/EEC/CQC/EQC/RDMARC attributes */
1696 MLX4_GET(param->qpc_base, outbox, INIT_HCA_QPC_BASE_OFFSET);
1697 MLX4_GET(param->log_num_qps, outbox, INIT_HCA_LOG_QP_OFFSET);
1698 MLX4_GET(param->srqc_base, outbox, INIT_HCA_SRQC_BASE_OFFSET);
1699 MLX4_GET(param->log_num_srqs, outbox, INIT_HCA_LOG_SRQ_OFFSET);
1700 MLX4_GET(param->cqc_base, outbox, INIT_HCA_CQC_BASE_OFFSET);
1701 MLX4_GET(param->log_num_cqs, outbox, INIT_HCA_LOG_CQ_OFFSET);
1702 MLX4_GET(param->altc_base, outbox, INIT_HCA_ALTC_BASE_OFFSET);
1703 MLX4_GET(param->auxc_base, outbox, INIT_HCA_AUXC_BASE_OFFSET);
1704 MLX4_GET(param->eqc_base, outbox, INIT_HCA_EQC_BASE_OFFSET);
1705 MLX4_GET(param->log_num_eqs, outbox, INIT_HCA_LOG_EQ_OFFSET);
1706 MLX4_GET(param->num_sys_eqs, outbox, INIT_HCA_NUM_SYS_EQS_OFFSET);
1707 MLX4_GET(param->rdmarc_base, outbox, INIT_HCA_RDMARC_BASE_OFFSET);
1708 MLX4_GET(param->log_rd_per_qp, outbox, INIT_HCA_LOG_RD_OFFSET);
1710 MLX4_GET(dword_field, outbox, INIT_HCA_FLAGS_OFFSET);
1711 if (dword_field & (1 << INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN)) {
1712 param->steering_mode = MLX4_STEERING_MODE_DEVICE_MANAGED;
1713 } else {
1714 MLX4_GET(byte_field, outbox, INIT_HCA_UC_STEERING_OFFSET);
1715 if (byte_field & 0x8)
1716 param->steering_mode = MLX4_STEERING_MODE_B0;
1717 else
1718 param->steering_mode = MLX4_STEERING_MODE_A0;
1719 }
1720 /* steering attributes */
1721 if (param->steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
1722 MLX4_GET(param->mc_base, outbox, INIT_HCA_FS_BASE_OFFSET);
1723 MLX4_GET(param->log_mc_entry_sz, outbox,
1724 INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET);
1725 MLX4_GET(param->log_mc_table_sz, outbox,
1726 INIT_HCA_FS_LOG_TABLE_SZ_OFFSET);
1727 } else {
1728 MLX4_GET(param->mc_base, outbox, INIT_HCA_MC_BASE_OFFSET);
1729 MLX4_GET(param->log_mc_entry_sz, outbox,
1730 INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
1731 MLX4_GET(param->log_mc_hash_sz, outbox,
1732 INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
1733 MLX4_GET(param->log_mc_table_sz, outbox,
1734 INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
1737 /* CX3 is capable of extending CQEs/EQEs from 32 to 64 bytes */
1738 MLX4_GET(byte_field, outbox, INIT_HCA_EQE_CQE_OFFSETS);
1739 if (byte_field & 0x20) /* 64-bytes eqe enabled */
1740 param->dev_cap_enabled |= MLX4_DEV_CAP_64B_EQE_ENABLED;
1741 if (byte_field & 0x40) /* 64-bytes cqe enabled */
1742 param->dev_cap_enabled |= MLX4_DEV_CAP_64B_CQE_ENABLED;
1744 /* CX3 is capable of extending CQEs/EQEs to strides larger than 64B */
1745 MLX4_GET(byte_field, outbox, INIT_HCA_EQE_CQE_STRIDE_OFFSET);
1746 if (byte_field) {
1747 param->dev_cap_enabled |= MLX4_DEV_CAP_EQE_STRIDE_ENABLED;
1748 param->dev_cap_enabled |= MLX4_DEV_CAP_CQE_STRIDE_ENABLED;
1749 param->cqe_size = 1 << ((byte_field &
1750 MLX4_CQE_SIZE_MASK_STRIDE) + 5);
1751 param->eqe_size = 1 << (((byte_field &
1752 MLX4_EQE_SIZE_MASK_STRIDE) >> 4) + 5);
1755 /* TPT attributes */
1757 MLX4_GET(param->dmpt_base, outbox, INIT_HCA_DMPT_BASE_OFFSET);
1758 MLX4_GET(param->mw_enabled, outbox, INIT_HCA_TPT_MW_OFFSET);
1759 MLX4_GET(param->log_mpt_sz, outbox, INIT_HCA_LOG_MPT_SZ_OFFSET);
1760 MLX4_GET(param->mtt_base, outbox, INIT_HCA_MTT_BASE_OFFSET);
1761 MLX4_GET(param->cmpt_base, outbox, INIT_HCA_CMPT_BASE_OFFSET);
1763 /* UAR attributes */
1765 MLX4_GET(param->uar_page_sz, outbox, INIT_HCA_UAR_PAGE_SZ_OFFSET);
1766 MLX4_GET(param->log_uar_sz, outbox, INIT_HCA_LOG_UAR_SZ_OFFSET);
1768 out:
1769 mlx4_free_cmd_mailbox(dev, mailbox);
1771 return err;
1772 }
1774 /* for IB-type ports only in SRIOV mode. Checks that both proxy QP0
1775 * and real QP0 are active, so that the paravirtualized QP0 is ready
1776 * to serve */
1777 static int check_qp0_state(struct mlx4_dev *dev, int function, int port)
1779 struct mlx4_priv *priv = mlx4_priv(dev);
1780 /* irrelevant if not infiniband */
1781 if (priv->mfunc.master.qp0_state[port].proxy_qp0_active &&
1782 priv->mfunc.master.qp0_state[port].qp0_active)
1783 return 1;
1784 return 0;
1785 }
1787 int mlx4_INIT_PORT_wrapper(struct mlx4_dev *dev, int slave,
1788 struct mlx4_vhcr *vhcr,
1789 struct mlx4_cmd_mailbox *inbox,
1790 struct mlx4_cmd_mailbox *outbox,
1791 struct mlx4_cmd_info *cmd)
1793 struct mlx4_priv *priv = mlx4_priv(dev);
1794 int port = mlx4_slave_convert_port(dev, slave, vhcr->in_modifier);
1795 int err;
1797 if (port < 0)
1798 return -EINVAL;
1800 if (priv->mfunc.master.slave_state[slave].init_port_mask & (1 << port))
1801 return 0;
1803 if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB) {
1804 /* Enable port only if it was previously disabled */
1805 if (!priv->mfunc.master.init_port_ref[port]) {
1806 err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT,
1807 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
1808 if (err)
1809 return err;
1810 }
1811 priv->mfunc.master.slave_state[slave].init_port_mask |= (1 << port);
1812 } else {
1813 if (slave == mlx4_master_func_num(dev)) {
1814 if (check_qp0_state(dev, slave, port) &&
1815 !priv->mfunc.master.qp0_state[port].port_active) {
1816 err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT,
1817 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
1820 priv->mfunc.master.qp0_state[port].port_active = 1;
1821 priv->mfunc.master.slave_state[slave].init_port_mask |= (1 << port);
1824 priv->mfunc.master.slave_state[slave].init_port_mask |= (1 << port);
1826 ++priv->mfunc.master.init_port_ref[port];
1830 int mlx4_INIT_PORT(struct mlx4_dev *dev, int port)
1831 {
1832 struct mlx4_cmd_mailbox *mailbox;
1833 u32 *inbox;
1834 int err;
1835 u32 flags;
1836 u16 field;
1838 if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
1839 #define INIT_PORT_IN_SIZE 256
1840 #define INIT_PORT_FLAGS_OFFSET 0x00
1841 #define INIT_PORT_FLAG_SIG (1 << 18)
1842 #define INIT_PORT_FLAG_NG (1 << 17)
1843 #define INIT_PORT_FLAG_G0 (1 << 16)
1844 #define INIT_PORT_VL_SHIFT 4
1845 #define INIT_PORT_PORT_WIDTH_SHIFT 8
1846 #define INIT_PORT_MTU_OFFSET 0x04
1847 #define INIT_PORT_MAX_GID_OFFSET 0x06
1848 #define INIT_PORT_MAX_PKEY_OFFSET 0x0a
1849 #define INIT_PORT_GUID0_OFFSET 0x10
1850 #define INIT_PORT_NODE_GUID_OFFSET 0x18
1851 #define INIT_PORT_SI_GUID_OFFSET 0x20
1853 mailbox = mlx4_alloc_cmd_mailbox(dev);
1854 if (IS_ERR(mailbox))
1855 return PTR_ERR(mailbox);
1856 inbox = mailbox->buf;
1858 flags = 0;
1859 flags |= (dev->caps.vl_cap[port] & 0xf) << INIT_PORT_VL_SHIFT;
1860 flags |= (dev->caps.port_width_cap[port] & 0xf) << INIT_PORT_PORT_WIDTH_SHIFT;
1861 MLX4_PUT(inbox, flags, INIT_PORT_FLAGS_OFFSET);
1863 field = 128 << dev->caps.ib_mtu_cap[port];
1864 MLX4_PUT(inbox, field, INIT_PORT_MTU_OFFSET);
1865 field = dev->caps.gid_table_len[port];
1866 MLX4_PUT(inbox, field, INIT_PORT_MAX_GID_OFFSET);
1867 field = dev->caps.pkey_table_len[port];
1868 MLX4_PUT(inbox, field, INIT_PORT_MAX_PKEY_OFFSET);
1870 err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_INIT_PORT,
1871 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
1873 mlx4_free_cmd_mailbox(dev, mailbox);
1874 } else
1875 err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT,
1876 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
1878 return err;
1879 }
1880 EXPORT_SYMBOL_GPL(mlx4_INIT_PORT);
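/*
 * Illustrative sketch (editor's addition, not part of the upstream driver):
 * how a caller might pair mlx4_INIT_PORT() with mlx4_CLOSE_PORT() defined
 * further below. The helper name and the placeholder in the middle are
 * assumptions for illustration only; the real caller is the port bring-up
 * path in main.c, and both commands are declared in the driver's headers.
 */
static int __maybe_unused mlx4_example_port_cycle(struct mlx4_dev *dev, int port)
{
	int err;

	err = mlx4_INIT_PORT(dev, port);	/* post INIT_PORT for this port */
	if (err)
		return err;

	/* ... bring up queues, register the netdev, etc. ... */

	return mlx4_CLOSE_PORT(dev, port);	/* tear the port down again */
}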
1882 int mlx4_CLOSE_PORT_wrapper(struct mlx4_dev *dev, int slave,
1883 struct mlx4_vhcr *vhcr,
1884 struct mlx4_cmd_mailbox *inbox,
1885 struct mlx4_cmd_mailbox *outbox,
1886 struct mlx4_cmd_info *cmd)
1888 struct mlx4_priv *priv = mlx4_priv(dev);
1889 int port = mlx4_slave_convert_port(dev, slave, vhcr->in_modifier);
1895 if (!(priv->mfunc.master.slave_state[slave].init_port_mask &
1896 (1 << port)))
1897 return 0;
1899 if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB) {
1900 if (priv->mfunc.master.init_port_ref[port] == 1) {
1901 err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT,
1902 1000, MLX4_CMD_NATIVE);
1903 if (err)
1904 return err;
1905 }
1906 priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port);
1907 } else {
1908 /* infiniband port */
1909 if (slave == mlx4_master_func_num(dev)) {
1910 if (!priv->mfunc.master.qp0_state[port].qp0_active &&
1911 priv->mfunc.master.qp0_state[port].port_active) {
1912 err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT,
1913 1000, MLX4_CMD_NATIVE);
1916 priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port);
1917 priv->mfunc.master.qp0_state[port].port_active = 0;
1920 priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port);
1922 --priv->mfunc.master.init_port_ref[port];
1926 int mlx4_CLOSE_PORT(struct mlx4_dev *dev, int port)
1928 return mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT, 1000,
1929 MLX4_CMD_WRAPPED);
1930 }
1931 EXPORT_SYMBOL_GPL(mlx4_CLOSE_PORT);
1933 int mlx4_CLOSE_HCA(struct mlx4_dev *dev, int panic)
1935 return mlx4_cmd(dev, 0, 0, panic, MLX4_CMD_CLOSE_HCA, 1000,
1936 MLX4_CMD_NATIVE);
1937 }
1939 struct mlx4_config_dev {
1940 __be32 update_flags;
1942 __be16 vxlan_udp_dport;
1950 #define MLX4_VXLAN_UDP_DPORT (1 << 0)
1952 static int mlx4_CONFIG_DEV_set(struct mlx4_dev *dev, struct mlx4_config_dev *config_dev)
1955 struct mlx4_cmd_mailbox *mailbox;
1957 mailbox = mlx4_alloc_cmd_mailbox(dev);
1958 if (IS_ERR(mailbox))
1959 return PTR_ERR(mailbox);
1961 memcpy(mailbox->buf, config_dev, sizeof(*config_dev));
1963 err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_CONFIG_DEV,
1964 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
1966 mlx4_free_cmd_mailbox(dev, mailbox);
1970 static int mlx4_CONFIG_DEV_get(struct mlx4_dev *dev, struct mlx4_config_dev *config_dev)
1973 struct mlx4_cmd_mailbox *mailbox;
1975 mailbox = mlx4_alloc_cmd_mailbox(dev);
1976 if (IS_ERR(mailbox))
1977 return PTR_ERR(mailbox);
1979 err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 1, MLX4_CMD_CONFIG_DEV,
1980 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
1981 if (err)
1982 goto out;
1983 memcpy(config_dev, mailbox->buf, sizeof(*config_dev));
1985 mlx4_free_cmd_mailbox(dev, mailbox);
1989 /* Conversion between the HW values and the actual functionality.
1990 * The HW value is represented by the array index,
1991 * and the functionality is determined by the flags.
1992 */
1993 static const u8 config_dev_csum_flags[] = {
1994 [0] = 0,
1995 [1] = MLX4_RX_CSUM_MODE_VAL_NON_TCP_UDP,
1996 [2] = MLX4_RX_CSUM_MODE_VAL_NON_TCP_UDP |
1997 MLX4_RX_CSUM_MODE_L4,
1998 [3] = MLX4_RX_CSUM_MODE_L4 |
1999 MLX4_RX_CSUM_MODE_IP_OK_IP_NON_TCP_UDP |
2000 MLX4_RX_CSUM_MODE_MULTI_VLAN
2003 int mlx4_config_dev_retrieval(struct mlx4_dev *dev,
2004 struct mlx4_config_dev_params *params)
2006 struct mlx4_config_dev config_dev;
2010 #define CONFIG_DEV_RX_CSUM_MODE_MASK 0x7
2011 #define CONFIG_DEV_RX_CSUM_MODE_PORT1_BIT_OFFSET 0
2012 #define CONFIG_DEV_RX_CSUM_MODE_PORT2_BIT_OFFSET 4
2014 if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_CONFIG_DEV))
2017 err = mlx4_CONFIG_DEV_get(dev, &config_dev);
2018 if (err)
2019 return err;
2021 csum_mask = (config_dev.rx_checksum_val >> CONFIG_DEV_RX_CSUM_MODE_PORT1_BIT_OFFSET) &
2022 CONFIG_DEV_RX_CSUM_MODE_MASK;
2024 if (csum_mask >= sizeof(config_dev_csum_flags)/sizeof(config_dev_csum_flags[0]))
2025 return -EINVAL;
2026 params->rx_csum_flags_port_1 = config_dev_csum_flags[csum_mask];
2028 csum_mask = (config_dev.rx_checksum_val >> CONFIG_DEV_RX_CSUM_MODE_PORT2_BIT_OFFSET) &
2029 CONFIG_DEV_RX_CSUM_MODE_MASK;
2031 if (csum_mask >= sizeof(config_dev_csum_flags)/sizeof(config_dev_csum_flags[0]))
2032 return -EINVAL;
2033 params->rx_csum_flags_port_2 = config_dev_csum_flags[csum_mask];
2035 params->vxlan_udp_dport = be16_to_cpu(config_dev.vxlan_udp_dport);
2037 return 0;
2038 }
2039 EXPORT_SYMBOL_GPL(mlx4_config_dev_retrieval);
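/*
 * Illustrative sketch (editor's addition): querying the per-port RX checksum
 * mode through mlx4_config_dev_retrieval() above. The helper name and the
 * way the result is consumed are assumptions; in practice mlx4_en uses this
 * to decide which checksum modes it may advertise.
 */
static int __maybe_unused mlx4_example_query_rx_csum(struct mlx4_dev *dev)
{
	struct mlx4_config_dev_params params;
	int err;

	err = mlx4_config_dev_retrieval(dev, &params);
	if (err)
		return err;	/* e.g. CONFIG_DEV not supported on this HCA */

	mlx4_dbg(dev, "port1 rx_csum_flags 0x%x, port2 rx_csum_flags 0x%x\n",
		 params.rx_csum_flags_port_1, params.rx_csum_flags_port_2);
	return 0;
}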
2041 int mlx4_config_vxlan_port(struct mlx4_dev *dev, __be16 udp_port)
2043 struct mlx4_config_dev config_dev;
2045 memset(&config_dev, 0, sizeof(config_dev));
2046 config_dev.update_flags = cpu_to_be32(MLX4_VXLAN_UDP_DPORT);
2047 config_dev.vxlan_udp_dport = udp_port;
2049 return mlx4_CONFIG_DEV_set(dev, &config_dev);
2051 EXPORT_SYMBOL_GPL(mlx4_config_vxlan_port);
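/*
 * Illustrative sketch (editor's addition): programming the IANA-assigned
 * VXLAN UDP port (4789) via mlx4_config_vxlan_port() above. In the real
 * driver the port number comes from the stack's UDP tunnel callbacks rather
 * than a hard-coded constant; this helper exists only for illustration.
 */
static int __maybe_unused mlx4_example_set_vxlan_port(struct mlx4_dev *dev)
{
	/* mlx4_config_vxlan_port() expects the port in big-endian form */
	return mlx4_config_vxlan_port(dev, cpu_to_be16(4789));
}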
2054 int mlx4_SET_ICM_SIZE(struct mlx4_dev *dev, u64 icm_size, u64 *aux_pages)
2056 int ret = mlx4_cmd_imm(dev, icm_size, aux_pages, 0, 0,
2057 MLX4_CMD_SET_ICM_SIZE,
2058 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
2060 if (ret)
2061 return ret;
2063 * Round up number of system pages needed in case
2064 * MLX4_ICM_PAGE_SIZE < PAGE_SIZE.
2066 *aux_pages = ALIGN(*aux_pages, PAGE_SIZE / MLX4_ICM_PAGE_SIZE) >>
2067 (PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT);
2069 return 0;
2070 }
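/*
 * Worked example for the rounding above (editor's addition): with 64 KiB
 * kernel pages (PAGE_SHIFT = 16) and the usual 4 KiB ICM pages
 * (MLX4_ICM_PAGE_SHIFT = 12), PAGE_SIZE / MLX4_ICM_PAGE_SIZE = 16, so a
 * firmware answer of 100 ICM pages becomes ALIGN(100, 16) = 112, and
 * 112 >> (16 - 12) = 7 system pages of auxiliary ICM space.
 */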
2072 int mlx4_NOP(struct mlx4_dev *dev)
2074 /* Input modifier of 0x1f means "finish as soon as possible." */
2075 return mlx4_cmd(dev, 0, 0x1f, 0, MLX4_CMD_NOP, 100, MLX4_CMD_NATIVE);
2078 int mlx4_get_phys_port_id(struct mlx4_dev *dev)
2082 struct mlx4_cmd_mailbox *mailbox;
2084 u32 guid_hi, guid_lo;
2086 #define MOD_STAT_CFG_PORT_OFFSET 8
2087 #define MOD_STAT_CFG_GUID_H 0x14
2088 #define MOD_STAT_CFG_GUID_L 0x1c
2090 mailbox = mlx4_alloc_cmd_mailbox(dev);
2091 if (IS_ERR(mailbox))
2092 return PTR_ERR(mailbox);
2093 outbox = mailbox->buf;
2095 for (port = 1; port <= dev->caps.num_ports; port++) {
2096 in_mod = port << MOD_STAT_CFG_PORT_OFFSET;
2097 err = mlx4_cmd_box(dev, 0, mailbox->dma, in_mod, 0x2,
2098 MLX4_CMD_MOD_STAT_CFG, MLX4_CMD_TIME_CLASS_A,
2101 mlx4_err(dev, "Failed to get port %d uplink GUID\n",
2102 port);
2105 MLX4_GET(guid_hi, outbox, MOD_STAT_CFG_GUID_H);
2106 MLX4_GET(guid_lo, outbox, MOD_STAT_CFG_GUID_L);
2107 dev->caps.phys_port_id[port] = (u64)guid_lo |
2111 mlx4_free_cmd_mailbox(dev, mailbox);
2115 #define MLX4_WOL_SETUP_MODE (5 << 28)
2116 int mlx4_wol_read(struct mlx4_dev *dev, u64 *config, int port)
2118 u32 in_mod = MLX4_WOL_SETUP_MODE | port << 8;
2120 return mlx4_cmd_imm(dev, 0, config, in_mod, 0x3,
2121 MLX4_CMD_MOD_STAT_CFG, MLX4_CMD_TIME_CLASS_A,
2122 MLX4_CMD_NATIVE);
2123 }
2124 EXPORT_SYMBOL_GPL(mlx4_wol_read);
2126 int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port)
2128 u32 in_mod = MLX4_WOL_SETUP_MODE | port << 8;
2130 return mlx4_cmd(dev, config, in_mod, 0x1, MLX4_CMD_MOD_STAT_CFG,
2131 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
2133 EXPORT_SYMBOL_GPL(mlx4_wol_write);
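/*
 * Illustrative sketch (editor's addition): a read-modify-write of the WoL
 * configuration word using mlx4_wol_read()/mlx4_wol_write() above.
 * MLX4_EXAMPLE_WOL_MAGIC_BIT is a made-up name and value; the real bit
 * definitions live with the callers (the mlx4_en ethtool code), not here.
 */
#define MLX4_EXAMPLE_WOL_MAGIC_BIT (1ULL << 62)	/* hypothetical flag */

static int __maybe_unused mlx4_example_enable_wol(struct mlx4_dev *dev, int port)
{
	u64 config;
	int err;

	err = mlx4_wol_read(dev, &config, port);
	if (err)
		return err;

	config |= MLX4_EXAMPLE_WOL_MAGIC_BIT;
	return mlx4_wol_write(dev, config, port);
}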
2140 void mlx4_opreq_action(struct work_struct *work)
2142 struct mlx4_priv *priv = container_of(work, struct mlx4_priv,
2143 opreq_task);
2144 struct mlx4_dev *dev = &priv->dev;
2145 int num_tasks = atomic_read(&priv->opreq_count);
2146 struct mlx4_cmd_mailbox *mailbox;
2147 struct mlx4_mgm *mgm;
2159 #define GET_OP_REQ_MODIFIER_OFFSET 0x08
2160 #define GET_OP_REQ_TOKEN_OFFSET 0x14
2161 #define GET_OP_REQ_TYPE_OFFSET 0x1a
2162 #define GET_OP_REQ_DATA_OFFSET 0x20
2164 mailbox = mlx4_alloc_cmd_mailbox(dev);
2165 if (IS_ERR(mailbox)) {
2166 mlx4_err(dev, "Failed to allocate mailbox for GET_OP_REQ\n");
2167 return;
2168 }
2169 outbox = mailbox->buf;
2171 while (num_tasks) {
2172 err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0,
2173 MLX4_CMD_GET_OP_REQ, MLX4_CMD_TIME_CLASS_A,
2176 mlx4_err(dev, "Failed to retrieve required operation: %d\n",
2180 MLX4_GET(modifier, outbox, GET_OP_REQ_MODIFIER_OFFSET);
2181 MLX4_GET(token, outbox, GET_OP_REQ_TOKEN_OFFSET);
2182 MLX4_GET(type, outbox, GET_OP_REQ_TYPE_OFFSET);
2187 if (dev->caps.steering_mode ==
2188 MLX4_STEERING_MODE_DEVICE_MANAGED) {
2189 mlx4_warn(dev, "ADD MCG operation is not supported in DEVICE_MANAGED steering mode\n");
2193 mgm = (struct mlx4_mgm *)((u8 *)(outbox) +
2194 GET_OP_REQ_DATA_OFFSET);
2195 num_qps = be32_to_cpu(mgm->members_count) &
2197 rem_mcg = ((u8 *)(&mgm->members_count))[0] & 1;
2198 prot = ((u8 *)(&mgm->members_count))[0] >> 6;
2200 for (i = 0; i < num_qps; i++) {
2201 qp.qpn = be32_to_cpu(mgm->qp[i]);
2203 err = mlx4_multicast_detach(dev, &qp,
2207 err = mlx4_multicast_attach(dev, &qp,
2217 mlx4_warn(dev, "Bad type for required operation\n");
2221 err = mlx4_cmd(dev, 0, ((u32) err |
2222 (__force u32)cpu_to_be32(token) << 16),
2223 1, MLX4_CMD_GET_OP_REQ, MLX4_CMD_TIME_CLASS_A,
2226 mlx4_err(dev, "Failed to acknowledge required request: %d\n",
2230 memset(outbox, 0, 0xffc);
2231 num_tasks = atomic_dec_return(&priv->opreq_count);
2235 mlx4_free_cmd_mailbox(dev, mailbox);
2238 static int mlx4_check_smp_firewall_active(struct mlx4_dev *dev,
2239 struct mlx4_cmd_mailbox *mailbox)
2241 #define MLX4_CMD_MAD_DEMUX_SET_ATTR_OFFSET 0x10
2242 #define MLX4_CMD_MAD_DEMUX_GETRESP_ATTR_OFFSET 0x20
2243 #define MLX4_CMD_MAD_DEMUX_TRAP_ATTR_OFFSET 0x40
2244 #define MLX4_CMD_MAD_DEMUX_TRAP_REPRESS_ATTR_OFFSET 0x70
2246 u32 set_attr_mask, getresp_attr_mask;
2247 u32 trap_attr_mask, traprepress_attr_mask;
2249 MLX4_GET(set_attr_mask, mailbox->buf,
2250 MLX4_CMD_MAD_DEMUX_SET_ATTR_OFFSET);
2251 mlx4_dbg(dev, "SMP firewall set_attribute_mask = 0x%x\n",
2252 set_attr_mask);
2254 MLX4_GET(getresp_attr_mask, mailbox->buf,
2255 MLX4_CMD_MAD_DEMUX_GETRESP_ATTR_OFFSET);
2256 mlx4_dbg(dev, "SMP firewall getresp_attribute_mask = 0x%x\n",
2257 getresp_attr_mask);
2259 MLX4_GET(trap_attr_mask, mailbox->buf,
2260 MLX4_CMD_MAD_DEMUX_TRAP_ATTR_OFFSET);
2261 mlx4_dbg(dev, "SMP firewall trap_attribute_mask = 0x%x\n",
2262 trap_attr_mask);
2264 MLX4_GET(traprepress_attr_mask, mailbox->buf,
2265 MLX4_CMD_MAD_DEMUX_TRAP_REPRESS_ATTR_OFFSET);
2266 mlx4_dbg(dev, "SMP firewall traprepress_attribute_mask = 0x%x\n",
2267 traprepress_attr_mask);
2269 if (set_attr_mask && getresp_attr_mask && trap_attr_mask &&
2270 traprepress_attr_mask)
2271 return 1;
2273 return 0;
2274 }
2276 int mlx4_config_mad_demux(struct mlx4_dev *dev)
2278 struct mlx4_cmd_mailbox *mailbox;
2279 int secure_host_active;
2282 /* Check if mad_demux is supported */
2283 if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_MAD_DEMUX))
2284 return 0;
2286 mailbox = mlx4_alloc_cmd_mailbox(dev);
2287 if (IS_ERR(mailbox)) {
2288 mlx4_warn(dev, "Failed to allocate mailbox for cmd MAD_DEMUX\n");
2289 return -ENOMEM;
2290 }
2292 /* Query mad_demux to find out which MADs are handled by internal sma */
2293 err = mlx4_cmd_box(dev, 0, mailbox->dma, 0x01 /* subn mgmt class */,
2294 MLX4_CMD_MAD_DEMUX_QUERY_RESTR, MLX4_CMD_MAD_DEMUX,
2295 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
2296 if (err) {
2297 mlx4_warn(dev, "MLX4_CMD_MAD_DEMUX: query restrictions failed (%d)\n",
2298 err);
2299 goto out;
2300 }
2302 secure_host_active = mlx4_check_smp_firewall_active(dev, mailbox);
2304 /* Config mad_demux to handle all MADs returned by the query above */
2305 err = mlx4_cmd(dev, mailbox->dma, 0x01 /* subn mgmt class */,
2306 MLX4_CMD_MAD_DEMUX_CONFIG, MLX4_CMD_MAD_DEMUX,
2307 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
2308 if (err) {
2309 mlx4_warn(dev, "MLX4_CMD_MAD_DEMUX: configure failed (%d)\n", err);
2310 goto out;
2311 }
2313 if (secure_host_active)
2314 mlx4_warn(dev, "HCA operating in secure-host mode. SMP firewall activated.\n");
2315 out:
2316 mlx4_free_cmd_mailbox(dev, mailbox);
2317 return err;
2318 }
2320 /* Access Reg commands */
2321 enum mlx4_access_reg_masks {
2322 MLX4_ACCESS_REG_STATUS_MASK = 0x7f,
2323 MLX4_ACCESS_REG_METHOD_MASK = 0x7f,
2324 MLX4_ACCESS_REG_LEN_MASK = 0x7ff
2327 struct mlx4_access_reg {
2337 #define MLX4_ACCESS_REG_HEADER_SIZE (20)
2338 u8 reg_data[MLX4_MAILBOX_SIZE-MLX4_ACCESS_REG_HEADER_SIZE];
2339 } __attribute__((__packed__));
2342 * mlx4_ACCESS_REG - Generic access reg command.
2344 * @reg_id: register ID to access.
2345 * @method: Access method Read/Write.
2346 * @reg_len: register length to Read/Write in bytes.
2347 * @reg_data: reg_data pointer to Read/Write From/To.
2349 * Access ConnectX registers FW command.
2350 * Returns 0 on success and copies outbox mlx4_access_reg data
2351 * field into reg_data or a negative error code.
2353 static int mlx4_ACCESS_REG(struct mlx4_dev *dev, u16 reg_id,
2354 enum mlx4_access_reg_method method,
2355 u16 reg_len, void *reg_data)
2357 struct mlx4_cmd_mailbox *inbox, *outbox;
2358 struct mlx4_access_reg *inbuf, *outbuf;
2361 inbox = mlx4_alloc_cmd_mailbox(dev);
2362 if (IS_ERR(inbox))
2363 return PTR_ERR(inbox);
2365 outbox = mlx4_alloc_cmd_mailbox(dev);
2366 if (IS_ERR(outbox)) {
2367 mlx4_free_cmd_mailbox(dev, inbox);
2368 return PTR_ERR(outbox);
2369 }
2371 inbuf = inbox->buf;
2372 outbuf = outbox->buf;
2374 inbuf->constant1 = cpu_to_be16(0x1<<11 | 0x4);
2375 inbuf->constant2 = 0x1;
2376 inbuf->reg_id = cpu_to_be16(reg_id);
2377 inbuf->method = method & MLX4_ACCESS_REG_METHOD_MASK;
2379 reg_len = min(reg_len, (u16)(sizeof(inbuf->reg_data)));
2380 inbuf->len_const =
2381 cpu_to_be16(((reg_len/4 + 1) & MLX4_ACCESS_REG_LEN_MASK) |
2382 ((0x3) << 12));
2384 memcpy(inbuf->reg_data, reg_data, reg_len);
2385 err = mlx4_cmd_box(dev, inbox->dma, outbox->dma, 0, 0,
2386 MLX4_CMD_ACCESS_REG, MLX4_CMD_TIME_CLASS_C,
2387 MLX4_CMD_WRAPPED);
2388 if (err)
2389 goto out;
2391 if (outbuf->status & MLX4_ACCESS_REG_STATUS_MASK) {
2392 err = outbuf->status & MLX4_ACCESS_REG_STATUS_MASK;
2394 "MLX4_CMD_ACCESS_REG(%x) returned REG status (%x)\n",
2399 memcpy(reg_data, outbuf->reg_data, reg_len);
2400 out:
2401 mlx4_free_cmd_mailbox(dev, inbox);
2402 mlx4_free_cmd_mailbox(dev, outbox);
2403 return err;
2404 }
2406 /* ConnectX registers IDs */
2408 MLX4_REG_ID_PTYS = 0x5004,
2412 * mlx4_ACCESS_PTYS_REG - Access PTYS (Port Type and Speed)
2413 * register.
2414 * @dev: mlx4_dev.
2415 * @method: Access method Read/Write.
2416 * @ptys_reg: PTYS register data pointer.
2418 * Access ConnectX PTYS register, to Read/Write Port Type/Speed
2419 * configurations.
2420 * Returns 0 on success or a negative error code.
2422 int mlx4_ACCESS_PTYS_REG(struct mlx4_dev *dev,
2423 enum mlx4_access_reg_method method,
2424 struct mlx4_ptys_reg *ptys_reg)
2426 return mlx4_ACCESS_REG(dev, MLX4_REG_ID_PTYS,
2427 method, sizeof(*ptys_reg), ptys_reg);
2429 EXPORT_SYMBOL_GPL(mlx4_ACCESS_PTYS_REG);
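/*
 * Illustrative sketch (editor's addition): querying PTYS for one port via
 * the wrapper above. MLX4_ACCESS_REG_QUERY is assumed to be the read method
 * of enum mlx4_access_reg_method; only local_port needs to be filled in
 * before the query, and the helper name is made up for this example.
 */
static int __maybe_unused mlx4_example_query_ptys(struct mlx4_dev *dev, u8 port)
{
	struct mlx4_ptys_reg ptys_reg;

	memset(&ptys_reg, 0, sizeof(ptys_reg));
	ptys_reg.local_port = port;	/* port whose type/speed we query */

	return mlx4_ACCESS_PTYS_REG(dev, MLX4_ACCESS_REG_QUERY, &ptys_reg);
}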
2431 int mlx4_ACCESS_REG_wrapper(struct mlx4_dev *dev, int slave,
2432 struct mlx4_vhcr *vhcr,
2433 struct mlx4_cmd_mailbox *inbox,
2434 struct mlx4_cmd_mailbox *outbox,
2435 struct mlx4_cmd_info *cmd)
2437 struct mlx4_access_reg *inbuf = inbox->buf;
2438 u8 method = inbuf->method & MLX4_ACCESS_REG_METHOD_MASK;
2439 u16 reg_id = be16_to_cpu(inbuf->reg_id);
2441 if (slave != mlx4_master_func_num(dev) &&
2442 method == MLX4_ACCESS_REG_WRITE)
2443 return -EPERM;
2445 if (reg_id == MLX4_REG_ID_PTYS) {
2446 struct mlx4_ptys_reg *ptys_reg =
2447 (struct mlx4_ptys_reg *)inbuf->reg_data;
2449 ptys_reg->local_port =
2450 mlx4_slave_convert_port(dev, slave,
2451 ptys_reg->local_port);
2454 return mlx4_cmd_box(dev, inbox->dma, outbox->dma, vhcr->in_modifier,
2455 0, MLX4_CMD_ACCESS_REG, MLX4_CMD_TIME_CLASS_C,