karo-tx-linux.git: drivers/net/ethernet/mellanox/mlx4/cmd.c (blob 44b8f7715ade5952ac26bebd0df72cbfdad6392e)
1 /*
2  * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
3  * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
4  * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc.  All rights reserved.
5  *
6  * This software is available to you under a choice of one of two
7  * licenses.  You may choose to be licensed under the terms of the GNU
8  * General Public License (GPL) Version 2, available from the file
9  * COPYING in the main directory of this source tree, or the
10  * OpenIB.org BSD license below:
11  *
12  *     Redistribution and use in source and binary forms, with or
13  *     without modification, are permitted provided that the following
14  *     conditions are met:
15  *
16  *      - Redistributions of source code must retain the above
17  *        copyright notice, this list of conditions and the following
18  *        disclaimer.
19  *
20  *      - Redistributions in binary form must reproduce the above
21  *        copyright notice, this list of conditions and the following
22  *        disclaimer in the documentation and/or other materials
23  *        provided with the distribution.
24  *
25  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32  * SOFTWARE.
33  */
34
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/export.h>
38 #include <linux/pci.h>
39 #include <linux/errno.h>
40
41 #include <linux/mlx4/cmd.h>
42 #include <linux/mlx4/device.h>
43 #include <linux/semaphore.h>
44 #include <rdma/ib_smi.h>
45 #include <linux/delay.h>
46
47 #include <asm/io.h>
48
49 #include "mlx4.h"
50 #include "fw.h"
51 #include "fw_qos.h"
52 #include "mlx4_stats.h"
53
54 #define CMD_POLL_TOKEN 0xffff
55 #define INBOX_MASK      0xffffffffffffff00ULL
56
57 #define CMD_CHAN_VER 1
58 #define CMD_CHAN_IF_REV 1
59
60 enum {
61         /* command completed successfully: */
62         CMD_STAT_OK             = 0x00,
63         /* Internal error (such as a bus error) occurred while processing command: */
64         CMD_STAT_INTERNAL_ERR   = 0x01,
65         /* Operation/command not supported or opcode modifier not supported: */
66         CMD_STAT_BAD_OP         = 0x02,
67         /* Parameter not supported or parameter out of range: */
68         CMD_STAT_BAD_PARAM      = 0x03,
69         /* System not enabled or bad system state: */
70         CMD_STAT_BAD_SYS_STATE  = 0x04,
72         /* Attempt to access reserved or unallocated resource: */
72         CMD_STAT_BAD_RESOURCE   = 0x05,
73         /* Requested resource is currently executing a command, or is otherwise busy: */
74         CMD_STAT_RESOURCE_BUSY  = 0x06,
75         /* Required capability exceeds device limits: */
76         CMD_STAT_EXCEED_LIM     = 0x08,
77         /* Resource is not in the appropriate state or ownership: */
78         CMD_STAT_BAD_RES_STATE  = 0x09,
79         /* Index out of range: */
80         CMD_STAT_BAD_INDEX      = 0x0a,
81         /* FW image corrupted: */
82         CMD_STAT_BAD_NVMEM      = 0x0b,
83         /* Error in ICM mapping (e.g. not enough auxiliary ICM pages to execute command): */
84         CMD_STAT_ICM_ERROR      = 0x0c,
85         /* Attempt to modify a QP/EE which is not in the presumed state: */
86         CMD_STAT_BAD_QP_STATE   = 0x10,
87         /* Bad segment parameters (Address/Size): */
88         CMD_STAT_BAD_SEG_PARAM  = 0x20,
90         /* Memory Region has Memory Windows bound to it: */
90         CMD_STAT_REG_BOUND      = 0x21,
91         /* HCA local attached memory not present: */
92         CMD_STAT_LAM_NOT_PRE    = 0x22,
93         /* Bad management packet (silently discarded): */
94         CMD_STAT_BAD_PKT        = 0x30,
95         /* More outstanding CQEs in CQ than new CQ size: */
96         CMD_STAT_BAD_SIZE       = 0x40,
97         /* Multi Function device support required: */
98         CMD_STAT_MULTI_FUNC_REQ = 0x50,
99 };
100
101 enum {
102         HCR_IN_PARAM_OFFSET     = 0x00,
103         HCR_IN_MODIFIER_OFFSET  = 0x08,
104         HCR_OUT_PARAM_OFFSET    = 0x0c,
105         HCR_TOKEN_OFFSET        = 0x14,
106         HCR_STATUS_OFFSET       = 0x18,
107
108         HCR_OPMOD_SHIFT         = 12,
109         HCR_T_BIT               = 21,
110         HCR_E_BIT               = 22,
111         HCR_GO_BIT              = 23
112 };
113
114 enum {
115         GO_BIT_TIMEOUT_MSECS    = 10000
116 };
117
118 enum mlx4_vlan_transition {
119         MLX4_VLAN_TRANSITION_VST_VST = 0,
120         MLX4_VLAN_TRANSITION_VST_VGT = 1,
121         MLX4_VLAN_TRANSITION_VGT_VST = 2,
122         MLX4_VLAN_TRANSITION_VGT_VGT = 3,
123 };
124
125
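/*
 * Per-command context used when commands are executed in event mode:
 * the completion is signalled from mlx4_cmd_event() and carries the
 * FW status and immediate output parameter back to the waiter.
 */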
126 struct mlx4_cmd_context {
127         struct completion       done;
128         int                     result;
129         int                     next;
130         u64                     out_param;
131         u16                     token;
132         u8                      fw_status;
133 };
134
135 static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
136                                     struct mlx4_vhcr_cmd *in_vhcr);
137
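/* Translate a FW command status code into a negative errno value. */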
138 static int mlx4_status_to_errno(u8 status)
139 {
140         static const int trans_table[] = {
141                 [CMD_STAT_INTERNAL_ERR]   = -EIO,
142                 [CMD_STAT_BAD_OP]         = -EPERM,
143                 [CMD_STAT_BAD_PARAM]      = -EINVAL,
144                 [CMD_STAT_BAD_SYS_STATE]  = -ENXIO,
145                 [CMD_STAT_BAD_RESOURCE]   = -EBADF,
146                 [CMD_STAT_RESOURCE_BUSY]  = -EBUSY,
147                 [CMD_STAT_EXCEED_LIM]     = -ENOMEM,
148                 [CMD_STAT_BAD_RES_STATE]  = -EBADF,
149                 [CMD_STAT_BAD_INDEX]      = -EBADF,
150                 [CMD_STAT_BAD_NVMEM]      = -EFAULT,
151                 [CMD_STAT_ICM_ERROR]      = -ENFILE,
152                 [CMD_STAT_BAD_QP_STATE]   = -EINVAL,
153                 [CMD_STAT_BAD_SEG_PARAM]  = -EFAULT,
154                 [CMD_STAT_REG_BOUND]      = -EBUSY,
155                 [CMD_STAT_LAM_NOT_PRE]    = -EAGAIN,
156                 [CMD_STAT_BAD_PKT]        = -EINVAL,
157                 [CMD_STAT_BAD_SIZE]       = -ENOMEM,
158                 [CMD_STAT_MULTI_FUNC_REQ] = -EACCES,
159         };
160
161         if (status >= ARRAY_SIZE(trans_table) ||
162             (status != CMD_STAT_OK && trans_table[status] == 0))
163                 return -EIO;
164
165         return trans_table[status];
166 }
167
168 static u8 mlx4_errno_to_status(int errno)
169 {
170         switch (errno) {
171         case -EPERM:
172                 return CMD_STAT_BAD_OP;
173         case -EINVAL:
174                 return CMD_STAT_BAD_PARAM;
175         case -ENXIO:
176                 return CMD_STAT_BAD_SYS_STATE;
177         case -EBUSY:
178                 return CMD_STAT_RESOURCE_BUSY;
179         case -ENOMEM:
180                 return CMD_STAT_EXCEED_LIM;
181         case -ENFILE:
182                 return CMD_STAT_ICM_ERROR;
183         default:
184                 return CMD_STAT_INTERNAL_ERR;
185         }
186 }
187
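/*
 * Return value to use for a command when the device is in internal-error
 * state: cleanup/teardown commands are reported as successful so that the
 * shutdown flow can proceed; everything else fails with -EIO.
 */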
188 static int mlx4_internal_err_ret_value(struct mlx4_dev *dev, u16 op,
189                                        u8 op_modifier)
190 {
191         switch (op) {
192         case MLX4_CMD_UNMAP_ICM:
193         case MLX4_CMD_UNMAP_ICM_AUX:
194         case MLX4_CMD_UNMAP_FA:
195         case MLX4_CMD_2RST_QP:
196         case MLX4_CMD_HW2SW_EQ:
197         case MLX4_CMD_HW2SW_CQ:
198         case MLX4_CMD_HW2SW_SRQ:
199         case MLX4_CMD_HW2SW_MPT:
200         case MLX4_CMD_CLOSE_HCA:
201         case MLX4_QP_FLOW_STEERING_DETACH:
202         case MLX4_CMD_FREE_RES:
203         case MLX4_CMD_CLOSE_PORT:
204                 return CMD_STAT_OK;
205
206         case MLX4_CMD_QP_ATTACH:
207         /* In the detach case, return success */
208                 if (op_modifier == 0)
209                         return CMD_STAT_OK;
210                 return mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
211
212         default:
213                 return mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
214         }
215 }
216
217 static int mlx4_closing_cmd_fatal_error(u16 op, u8 fw_status)
218 {
219         /* Any error during the closing commands below is considered fatal */
220         if (op == MLX4_CMD_CLOSE_HCA ||
221             op == MLX4_CMD_HW2SW_EQ ||
222             op == MLX4_CMD_HW2SW_CQ ||
223             op == MLX4_CMD_2RST_QP ||
224             op == MLX4_CMD_HW2SW_SRQ ||
225             op == MLX4_CMD_SYNC_TPT ||
226             op == MLX4_CMD_UNMAP_ICM ||
227             op == MLX4_CMD_UNMAP_ICM_AUX ||
228             op == MLX4_CMD_UNMAP_FA)
229                 return 1;
230         /* Error on MLX4_CMD_HW2SW_MPT is fatal except when the fw status
231          * equals CMD_STAT_REG_BOUND.  This status indicates that the memory
232          * region has memory windows bound to it, which may result from
233          * invalid user-space usage and is not fatal.
234          */
235         if (op == MLX4_CMD_HW2SW_MPT && fw_status != CMD_STAT_REG_BOUND)
236                 return 1;
237         return 0;
238 }
239
240 static int mlx4_cmd_reset_flow(struct mlx4_dev *dev, u16 op, u8 op_modifier,
241                                int err)
242 {
243         /* Only if the reset flow is really active is the return code based
244          * on the command; otherwise the current error code is returned.
245          */
246         if (mlx4_internal_err_reset) {
247                 mlx4_enter_error_state(dev->persist);
248                 err = mlx4_internal_err_ret_value(dev, op, op_modifier);
249         }
250
251         return err;
252 }
253
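/*
 * Returns nonzero while the previously posted comm-channel command is still
 * pending, i.e. the toggle bit in 'slave_read' written back by the master
 * does not yet match our own comm_toggle.
 */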
254 static int comm_pending(struct mlx4_dev *dev)
255 {
256         struct mlx4_priv *priv = mlx4_priv(dev);
257         u32 status = readl(&priv->mfunc.comm->slave_read);
258
259         return (swab32(status) >> 31) != priv->cmd.comm_toggle;
260 }
261
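/* Post a command on the slave->master communication channel by writing the
 * command, parameter and flipped toggle bit to 'slave_write'.
 */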
262 static int mlx4_comm_cmd_post(struct mlx4_dev *dev, u8 cmd, u16 param)
263 {
264         struct mlx4_priv *priv = mlx4_priv(dev);
265         u32 val;
266
267         /* To avoid writing to unknown addresses after the device state was
268          * changed to internal error and the function was reset,
269          * check the INTERNAL_ERROR flag which is updated under
270          * device_state_mutex lock.
271          */
272         mutex_lock(&dev->persist->device_state_mutex);
273
274         if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
275                 mutex_unlock(&dev->persist->device_state_mutex);
276                 return -EIO;
277         }
278
279         priv->cmd.comm_toggle ^= 1;
280         val = param | (cmd << 16) | (priv->cmd.comm_toggle << 31);
281         __raw_writel((__force u32) cpu_to_be32(val),
282                      &priv->mfunc.comm->slave_write);
283         mmiowb();
284         mutex_unlock(&dev->persist->device_state_mutex);
285         return 0;
286 }
287
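/* Execute a comm-channel command in polling mode: post it and busy-wait
 * (with cond_resched) until the master acknowledges it or the timeout
 * expires.
 */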
288 static int mlx4_comm_cmd_poll(struct mlx4_dev *dev, u8 cmd, u16 param,
289                        unsigned long timeout)
290 {
291         struct mlx4_priv *priv = mlx4_priv(dev);
292         unsigned long end;
293         int err = 0;
294         int ret_from_pending = 0;
295
296         /* First, verify that the master reports correct status */
297         if (comm_pending(dev)) {
298                 mlx4_warn(dev, "Communication channel is not idle - my toggle is %d (cmd:0x%x)\n",
299                           priv->cmd.comm_toggle, cmd);
300                 return -EAGAIN;
301         }
302
303         /* Write command */
304         down(&priv->cmd.poll_sem);
305         if (mlx4_comm_cmd_post(dev, cmd, param)) {
306                 /* mlx4_comm_cmd_post returns an error only when the device
307                  * state is INTERNAL_ERROR.
308                  */
309                 err = mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
310                 goto out;
311         }
312
313         end = msecs_to_jiffies(timeout) + jiffies;
314         while (comm_pending(dev) && time_before(jiffies, end))
315                 cond_resched();
316         ret_from_pending = comm_pending(dev);
317         if (ret_from_pending) {
318                 /* Check if the slave is trying to boot in the middle of the
319                  * FLR process.  The only non-zero result in the RESET command
320                  * is MLX4_DELAY_RESET_SLAVE. */
321                 if ((MLX4_COMM_CMD_RESET == cmd)) {
322                         err = MLX4_DELAY_RESET_SLAVE;
323                         goto out;
324                 } else {
325                         mlx4_warn(dev, "Communication channel command 0x%x timed out\n",
326                                   cmd);
327                         err = mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
328                 }
329         }
330
331         if (err)
332                 mlx4_enter_error_state(dev->persist);
333 out:
334         up(&priv->cmd.poll_sem);
335         return err;
336 }
337
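/* Execute a comm-channel command in event mode: post it and sleep on a
 * completion until the master signals that the command has finished or the
 * timeout expires.
 */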
338 static int mlx4_comm_cmd_wait(struct mlx4_dev *dev, u8 vhcr_cmd,
339                               u16 param, u16 op, unsigned long timeout)
340 {
341         struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
342         struct mlx4_cmd_context *context;
343         unsigned long end;
344         int err = 0;
345
346         down(&cmd->event_sem);
347
348         spin_lock(&cmd->context_lock);
349         BUG_ON(cmd->free_head < 0);
350         context = &cmd->context[cmd->free_head];
351         context->token += cmd->token_mask + 1;
352         cmd->free_head = context->next;
353         spin_unlock(&cmd->context_lock);
354
355         reinit_completion(&context->done);
356
357         if (mlx4_comm_cmd_post(dev, vhcr_cmd, param)) {
358                 /* mlx4_comm_cmd_post returns an error only when the device
359                  * state is INTERNAL_ERROR.
360                  */
361                 err = mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
362                 goto out;
363         }
364
365         if (!wait_for_completion_timeout(&context->done,
366                                          msecs_to_jiffies(timeout))) {
367                 mlx4_warn(dev, "communication channel command 0x%x (op=0x%x) timed out\n",
368                           vhcr_cmd, op);
369                 goto out_reset;
370         }
371
372         err = context->result;
373         if (err && context->fw_status != CMD_STAT_MULTI_FUNC_REQ) {
374                 mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
375                          vhcr_cmd, context->fw_status);
376                 if (mlx4_closing_cmd_fatal_error(op, context->fw_status))
377                         goto out_reset;
378         }
379
380         /* Wait for the comm channel to become ready.
381          * This is necessary to prevent a race when switching between event
382          * and polling mode.
383          * This section is skipped if the device is in FATAL_ERROR state;
384          * in that state, no commands are sent via the comm channel until
385          * the device has returned from reset.
386          */
387         if (!(dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)) {
388                 end = msecs_to_jiffies(timeout) + jiffies;
389                 while (comm_pending(dev) && time_before(jiffies, end))
390                         cond_resched();
391         }
392         goto out;
393
394 out_reset:
395         err = mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
396         mlx4_enter_error_state(dev->persist);
397 out:
398         spin_lock(&cmd->context_lock);
399         context->next = cmd->free_head;
400         cmd->free_head = context - cmd->context;
401         spin_unlock(&cmd->context_lock);
402
403         up(&cmd->event_sem);
404         return err;
405 }
406
407 int mlx4_comm_cmd(struct mlx4_dev *dev, u8 cmd, u16 param,
408                   u16 op, unsigned long timeout)
409 {
410         if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)
411                 return mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
412
413         if (mlx4_priv(dev)->cmd.use_events)
414                 return mlx4_comm_cmd_wait(dev, cmd, param, op, timeout);
415         return mlx4_comm_cmd_poll(dev, cmd, param, timeout);
416 }
417
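/*
 * Returns nonzero while the HCR is still owned by the hardware: either the
 * 'go' bit is still set or the toggle bit indicates that the previous
 * command has not yet completed.
 */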
418 static int cmd_pending(struct mlx4_dev *dev)
419 {
420         u32 status;
421
422         if (pci_channel_offline(dev->persist->pdev))
423                 return -EIO;
424
425         status = readl(mlx4_priv(dev)->cmd.hcr + HCR_STATUS_OFFSET);
426
427         return (status & swab32(1 << HCR_GO_BIT)) ||
428                 (mlx4_priv(dev)->cmd.toggle ==
429                  !!(status & swab32(1 << HCR_T_BIT)));
430 }
431
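/*
 * Post a command to the HCR: wait for the previous command to be accepted,
 * write the input/output parameters, modifier and token as six 32-bit
 * words, then write the opcode word with the 'go' bit set.
 */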
432 static int mlx4_cmd_post(struct mlx4_dev *dev, u64 in_param, u64 out_param,
433                          u32 in_modifier, u8 op_modifier, u16 op, u16 token,
434                          int event)
435 {
436         struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
437         u32 __iomem *hcr = cmd->hcr;
438         int ret = -EIO;
439         unsigned long end;
440
441         mutex_lock(&dev->persist->device_state_mutex);
442         /* To avoid writing to unknown addresses after the device state was
443          * changed to internal error and the chip was reset,
444          * check the INTERNAL_ERROR flag which is updated under
445          * device_state_mutex lock.
446          */
447         if (pci_channel_offline(dev->persist->pdev) ||
448             (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)) {
449                 /*
450                  * Device is going through error recovery
451                  * and cannot accept commands.
452                  */
453                 goto out;
454         }
455
456         end = jiffies;
457         if (event)
458                 end += msecs_to_jiffies(GO_BIT_TIMEOUT_MSECS);
459
460         while (cmd_pending(dev)) {
461                 if (pci_channel_offline(dev->persist->pdev)) {
462                         /*
463                          * Device is going through error recovery
464                          * and cannot accept commands.
465                          */
466                         goto out;
467                 }
468
469                 if (time_after_eq(jiffies, end)) {
470                         mlx4_err(dev, "%s:cmd_pending failed\n", __func__);
471                         goto out;
472                 }
473                 cond_resched();
474         }
475
476         /*
477          * We use writel (instead of something like memcpy_toio)
478          * because writes of less than 32 bits to the HCR don't work
479          * (and some architectures such as ia64 implement memcpy_toio
480          * in terms of writeb).
481          */
482         __raw_writel((__force u32) cpu_to_be32(in_param >> 32),           hcr + 0);
483         __raw_writel((__force u32) cpu_to_be32(in_param & 0xfffffffful),  hcr + 1);
484         __raw_writel((__force u32) cpu_to_be32(in_modifier),              hcr + 2);
485         __raw_writel((__force u32) cpu_to_be32(out_param >> 32),          hcr + 3);
486         __raw_writel((__force u32) cpu_to_be32(out_param & 0xfffffffful), hcr + 4);
487         __raw_writel((__force u32) cpu_to_be32(token << 16),              hcr + 5);
488
489         /* __raw_writel may not order writes. */
490         wmb();
491
492         __raw_writel((__force u32) cpu_to_be32((1 << HCR_GO_BIT)                |
493                                                (cmd->toggle << HCR_T_BIT)       |
494                                                (event ? (1 << HCR_E_BIT) : 0)   |
495                                                (op_modifier << HCR_OPMOD_SHIFT) |
496                                                op), hcr + 6);
497
498         /*
499          * Make sure that our HCR writes don't get mixed in with
500          * writes from another CPU starting a FW command.
501          */
502         mmiowb();
503
504         cmd->toggle = cmd->toggle ^ 1;
505
506         ret = 0;
507
508 out:
509         if (ret)
510                 mlx4_warn(dev, "Could not post command 0x%x: ret=%d, in_param=0x%llx, in_mod=0x%x, op_mod=0x%x\n",
511                           op, ret, in_param, in_modifier, op_modifier);
512         mutex_unlock(&dev->persist->device_state_mutex);
513
514         return ret;
515 }
516
517 static int mlx4_slave_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
518                           int out_is_imm, u32 in_modifier, u8 op_modifier,
519                           u16 op, unsigned long timeout)
520 {
521         struct mlx4_priv *priv = mlx4_priv(dev);
522         struct mlx4_vhcr_cmd *vhcr = priv->mfunc.vhcr;
523         int ret;
524
525         mutex_lock(&priv->cmd.slave_cmd_mutex);
526
527         vhcr->in_param = cpu_to_be64(in_param);
528         vhcr->out_param = out_param ? cpu_to_be64(*out_param) : 0;
529         vhcr->in_modifier = cpu_to_be32(in_modifier);
530         vhcr->opcode = cpu_to_be16((((u16) op_modifier) << 12) | (op & 0xfff));
531         vhcr->token = cpu_to_be16(CMD_POLL_TOKEN);
532         vhcr->status = 0;
533         vhcr->flags = !!(priv->cmd.use_events) << 6;
534
535         if (mlx4_is_master(dev)) {
536                 ret = mlx4_master_process_vhcr(dev, dev->caps.function, vhcr);
537                 if (!ret) {
538                         if (out_is_imm) {
539                                 if (out_param)
540                                         *out_param =
541                                                 be64_to_cpu(vhcr->out_param);
542                                 else {
543                                         mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
544                                                  op);
545                                         vhcr->status = CMD_STAT_BAD_PARAM;
546                                 }
547                         }
548                         ret = mlx4_status_to_errno(vhcr->status);
549                 }
550                 if (ret &&
551                     dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)
552                         ret = mlx4_internal_err_ret_value(dev, op, op_modifier);
553         } else {
554                 ret = mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_POST, 0, op,
555                                     MLX4_COMM_TIME + timeout);
556                 if (!ret) {
557                         if (out_is_imm) {
558                                 if (out_param)
559                                         *out_param =
560                                                 be64_to_cpu(vhcr->out_param);
561                                 else {
562                                         mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
563                                                  op);
564                                         vhcr->status = CMD_STAT_BAD_PARAM;
565                                 }
566                         }
567                         ret = mlx4_status_to_errno(vhcr->status);
568                 } else {
569                         if (dev->persist->state &
570                             MLX4_DEVICE_STATE_INTERNAL_ERROR)
571                                 ret = mlx4_internal_err_ret_value(dev, op,
572                                                                   op_modifier);
573                         else
574                                 mlx4_err(dev, "failed execution of VHCR_POST command opcode 0x%x\n", op);
575                 }
576         }
577
578         mutex_unlock(&priv->cmd.slave_cmd_mutex);
579         return ret;
580 }
581
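/* Execute a command in polling mode: post it to the HCR and spin on the
 * 'go' bit, then read the status and immediate output directly from the
 * HCR registers.
 */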
582 static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
583                          int out_is_imm, u32 in_modifier, u8 op_modifier,
584                          u16 op, unsigned long timeout)
585 {
586         struct mlx4_priv *priv = mlx4_priv(dev);
587         void __iomem *hcr = priv->cmd.hcr;
588         int err = 0;
589         unsigned long end;
590         u32 stat;
591
592         down(&priv->cmd.poll_sem);
593
594         if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
595                 /*
596                  * Device is going through error recovery
597                  * and cannot accept commands.
598                  */
599                 err = mlx4_internal_err_ret_value(dev, op, op_modifier);
600                 goto out;
601         }
602
603         if (out_is_imm && !out_param) {
604                 mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
605                          op);
606                 err = -EINVAL;
607                 goto out;
608         }
609
610         err = mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
611                             in_modifier, op_modifier, op, CMD_POLL_TOKEN, 0);
612         if (err)
613                 goto out_reset;
614
615         end = msecs_to_jiffies(timeout) + jiffies;
616         while (cmd_pending(dev) && time_before(jiffies, end)) {
617                 if (pci_channel_offline(dev->persist->pdev)) {
618                         /*
619                          * Device is going through error recovery
620                          * and cannot accept commands.
621                          */
622                         err = -EIO;
623                         goto out_reset;
624                 }
625
626                 if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
627                         err = mlx4_internal_err_ret_value(dev, op, op_modifier);
628                         goto out;
629                 }
630
631                 cond_resched();
632         }
633
634         if (cmd_pending(dev)) {
635                 mlx4_warn(dev, "command 0x%x timed out (go bit not cleared)\n",
636                           op);
637                 err = -EIO;
638                 goto out_reset;
639         }
640
641         if (out_is_imm)
642                 *out_param =
643                         (u64) be32_to_cpu((__force __be32)
644                                           __raw_readl(hcr + HCR_OUT_PARAM_OFFSET)) << 32 |
645                         (u64) be32_to_cpu((__force __be32)
646                                           __raw_readl(hcr + HCR_OUT_PARAM_OFFSET + 4));
647         stat = be32_to_cpu((__force __be32)
648                            __raw_readl(hcr + HCR_STATUS_OFFSET)) >> 24;
649         err = mlx4_status_to_errno(stat);
650         if (err) {
651                 mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
652                          op, stat);
653                 if (mlx4_closing_cmd_fatal_error(op, stat))
654                         goto out_reset;
655                 goto out;
656         }
657
658 out_reset:
659         if (err)
660                 err = mlx4_cmd_reset_flow(dev, op, op_modifier, err);
661 out:
662         up(&priv->cmd.poll_sem);
663         return err;
664 }
665
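/* Completion handler, called from the command-interface EQ: look up the
 * waiting context by token and complete it with the FW status and
 * immediate output parameter.
 */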
666 void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param)
667 {
668         struct mlx4_priv *priv = mlx4_priv(dev);
669         struct mlx4_cmd_context *context =
670                 &priv->cmd.context[token & priv->cmd.token_mask];
671
672         /* previously timed out command completing at long last */
673         if (token != context->token)
674                 return;
675
676         context->fw_status = status;
677         context->result    = mlx4_status_to_errno(status);
678         context->out_param = out_param;
679
680         complete(&context->done);
681 }
682
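/* Execute a command in event (interrupt-driven) mode: grab a free context,
 * post the command with that context's token and sleep until
 * mlx4_cmd_event() completes it or the timeout expires.
 */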
683 static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
684                          int out_is_imm, u32 in_modifier, u8 op_modifier,
685                          u16 op, unsigned long timeout)
686 {
687         struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
688         struct mlx4_cmd_context *context;
689         int err = 0;
690
691         down(&cmd->event_sem);
692
693         spin_lock(&cmd->context_lock);
694         BUG_ON(cmd->free_head < 0);
695         context = &cmd->context[cmd->free_head];
696         context->token += cmd->token_mask + 1;
697         cmd->free_head = context->next;
698         spin_unlock(&cmd->context_lock);
699
700         if (out_is_imm && !out_param) {
701                 mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
702                          op);
703                 err = -EINVAL;
704                 goto out;
705         }
706
707         reinit_completion(&context->done);
708
709         err = mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
710                             in_modifier, op_modifier, op, context->token, 1);
711         if (err)
712                 goto out_reset;
713
714         if (!wait_for_completion_timeout(&context->done,
715                                          msecs_to_jiffies(timeout))) {
716                 mlx4_warn(dev, "command 0x%x timed out (go bit not cleared)\n",
717                           op);
718                 if (op == MLX4_CMD_NOP) {
719                         err = -EBUSY;
720                         goto out;
721                 } else {
722                         err = -EIO;
723                         goto out_reset;
724                 }
725         }
726
727         err = context->result;
728         if (err) {
729                 /* Since we do not want to have this error message always
730                  * displayed at driver start when there are ConnectX-2 HCAs
731                  * on the host, we demote the error message for this specific
732                  * command/input_mod/opcode_mod/fw-status combination to debug.
733                  */
734                 if (op == MLX4_CMD_SET_PORT &&
735                     (in_modifier == 1 || in_modifier == 2) &&
736                     op_modifier == MLX4_SET_PORT_IB_OPCODE &&
737                     context->fw_status == CMD_STAT_BAD_SIZE)
738                         mlx4_dbg(dev, "command 0x%x failed: fw status = 0x%x\n",
739                                  op, context->fw_status);
740                 else
741                         mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
742                                  op, context->fw_status);
743                 if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)
744                         err = mlx4_internal_err_ret_value(dev, op, op_modifier);
745                 else if (mlx4_closing_cmd_fatal_error(op, context->fw_status))
746                         goto out_reset;
747
748                 goto out;
749         }
750
751         if (out_is_imm)
752                 *out_param = context->out_param;
753
754 out_reset:
755         if (err)
756                 err = mlx4_cmd_reset_flow(dev, op, op_modifier, err);
757 out:
758         spin_lock(&cmd->context_lock);
759         context->next = cmd->free_head;
760         cmd->free_head = context - cmd->context;
761         spin_unlock(&cmd->context_lock);
762
763         up(&cmd->event_sem);
764         return err;
765 }
766
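/*
 * Common entry point for all FW commands: native functions (or the PF when
 * 'native' is requested) access the HCR directly in either event or polling
 * mode; otherwise the command goes through the virtual HCR (and, for VFs,
 * the communication channel) so that it can be wrapped on the PF.
 */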
767 int __mlx4_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
768                int out_is_imm, u32 in_modifier, u8 op_modifier,
769                u16 op, unsigned long timeout, int native)
770 {
771         if (pci_channel_offline(dev->persist->pdev))
772                 return mlx4_cmd_reset_flow(dev, op, op_modifier, -EIO);
773
774         if (!mlx4_is_mfunc(dev) || (native && mlx4_is_master(dev))) {
775                 if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)
776                         return mlx4_internal_err_ret_value(dev, op,
777                                                           op_modifier);
778                 if (mlx4_priv(dev)->cmd.use_events)
779                         return mlx4_cmd_wait(dev, in_param, out_param,
780                                              out_is_imm, in_modifier,
781                                              op_modifier, op, timeout);
782                 else
783                         return mlx4_cmd_poll(dev, in_param, out_param,
784                                              out_is_imm, in_modifier,
785                                              op_modifier, op, timeout);
786         }
787         return mlx4_slave_cmd(dev, in_param, out_param, out_is_imm,
788                               in_modifier, op_modifier, op, timeout);
789 }
790 EXPORT_SYMBOL_GPL(__mlx4_cmd);
791
792
793 int mlx4_ARM_COMM_CHANNEL(struct mlx4_dev *dev)
794 {
795         return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_ARM_COMM_CHANNEL,
796                         MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
797 }
798
799 static int mlx4_ACCESS_MEM(struct mlx4_dev *dev, u64 master_addr,
800                            int slave, u64 slave_addr,
801                            int size, int is_read)
802 {
803         u64 in_param;
804         u64 out_param;
805
806         if ((slave_addr & 0xfff) | (master_addr & 0xfff) |
807             (slave & ~0x7f) | (size & 0xff)) {
808                 mlx4_err(dev, "Bad access mem params - slave_addr:0x%llx master_addr:0x%llx slave_id:%d size:%d\n",
809                          slave_addr, master_addr, slave, size);
810                 return -EINVAL;
811         }
812
813         if (is_read) {
814                 in_param = (u64) slave | slave_addr;
815                 out_param = (u64) dev->caps.function | master_addr;
816         } else {
817                 in_param = (u64) dev->caps.function | master_addr;
818                 out_param = (u64) slave | slave_addr;
819         }
820
821         return mlx4_cmd_imm(dev, in_param, &out_param, size, 0,
822                             MLX4_CMD_ACCESS_MEM,
823                             MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
824 }
825
826 static int query_pkey_block(struct mlx4_dev *dev, u8 port, u16 index, u16 *pkey,
827                                struct mlx4_cmd_mailbox *inbox,
828                                struct mlx4_cmd_mailbox *outbox)
829 {
830         struct ib_smp *in_mad = (struct ib_smp *)(inbox->buf);
831         struct ib_smp *out_mad = (struct ib_smp *)(outbox->buf);
832         int err;
833         int i;
834
835         if (index & 0x1f)
836                 return -EINVAL;
837
838         in_mad->attr_mod = cpu_to_be32(index / 32);
839
840         err = mlx4_cmd_box(dev, inbox->dma, outbox->dma, port, 3,
841                            MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
842                            MLX4_CMD_NATIVE);
843         if (err)
844                 return err;
845
846         for (i = 0; i < 32; ++i)
847                 pkey[i] = be16_to_cpu(((__be16 *) out_mad->data)[i]);
848
849         return err;
850 }
851
852 static int get_full_pkey_table(struct mlx4_dev *dev, u8 port, u16 *table,
853                                struct mlx4_cmd_mailbox *inbox,
854                                struct mlx4_cmd_mailbox *outbox)
855 {
856         int i;
857         int err;
858
859         for (i = 0; i < dev->caps.pkey_table_len[port]; i += 32) {
860                 err = query_pkey_block(dev, port, i, table + i, inbox, outbox);
861                 if (err)
862                         return err;
863         }
864
865         return 0;
866 }
867 #define PORT_CAPABILITY_LOCATION_IN_SMP 20
868 #define PORT_STATE_OFFSET 32
869
870 static enum ib_port_state vf_port_state(struct mlx4_dev *dev, int port, int vf)
871 {
872         if (mlx4_get_slave_port_state(dev, vf, port) == SLAVE_PORT_UP)
873                 return IB_PORT_ACTIVE;
874         else
875                 return IB_PORT_DOWN;
876 }
877
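/*
 * Wrapper for MAD_IFC commands issued by slaves: paravirtualizes the P_Key
 * table, PortInfo capability mask and port state, GUIDs and node GUID in
 * the responses, and rejects privileged MADs from unprivileged VFs.
 */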
878 static int mlx4_MAD_IFC_wrapper(struct mlx4_dev *dev, int slave,
879                                 struct mlx4_vhcr *vhcr,
880                                 struct mlx4_cmd_mailbox *inbox,
881                                 struct mlx4_cmd_mailbox *outbox,
882                                 struct mlx4_cmd_info *cmd)
883 {
884         struct ib_smp *smp = inbox->buf;
885         u32 index;
886         u8 port, slave_port;
887         u8 opcode_modifier;
888         u16 *table;
889         int err;
890         int vidx, pidx;
891         int network_view;
892         struct mlx4_priv *priv = mlx4_priv(dev);
893         struct ib_smp *outsmp = outbox->buf;
894         __be16 *outtab = (__be16 *)(outsmp->data);
895         __be32 slave_cap_mask;
896         __be64 slave_node_guid;
897
898         slave_port = vhcr->in_modifier;
899         port = mlx4_slave_convert_port(dev, slave, slave_port);
900
901         /* network-view bit is for driver use only, and should not be passed to FW */
902         opcode_modifier = vhcr->op_modifier & ~0x8; /* clear netw view bit */
903         network_view = !!(vhcr->op_modifier & 0x8);
904
905         if (smp->base_version == 1 &&
906             smp->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED &&
907             smp->class_version == 1) {
908                 /* host view is paravirtualized */
909                 if (!network_view && smp->method == IB_MGMT_METHOD_GET) {
910                         if (smp->attr_id == IB_SMP_ATTR_PKEY_TABLE) {
911                                 index = be32_to_cpu(smp->attr_mod);
912                                 if (port < 1 || port > dev->caps.num_ports)
913                                         return -EINVAL;
914                                 table = kcalloc((dev->caps.pkey_table_len[port] / 32) + 1,
915                                                 sizeof(*table) * 32, GFP_KERNEL);
916
917                                 if (!table)
918                                         return -ENOMEM;
919                                 /* need to get the full pkey table because the paravirtualized
920                                  * pkeys may be scattered among several pkey blocks.
921                                  */
922                                 err = get_full_pkey_table(dev, port, table, inbox, outbox);
923                                 if (!err) {
924                                         for (vidx = index * 32; vidx < (index + 1) * 32; ++vidx) {
925                                                 pidx = priv->virt2phys_pkey[slave][port - 1][vidx];
926                                                 outtab[vidx % 32] = cpu_to_be16(table[pidx]);
927                                         }
928                                 }
929                                 kfree(table);
930                                 return err;
931                         }
932                         if (smp->attr_id == IB_SMP_ATTR_PORT_INFO) {
933                                 /* get the slave-specific caps: */
934                                 /* do the command */
935                                 smp->attr_mod = cpu_to_be32(port);
936                                 err = mlx4_cmd_box(dev, inbox->dma, outbox->dma,
937                                             port, opcode_modifier,
938                                             vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
939                                 /* modify the response for slaves */
940                                 if (!err && slave != mlx4_master_func_num(dev)) {
941                                         u8 *state = outsmp->data + PORT_STATE_OFFSET;
942
943                                         *state = (*state & 0xf0) | vf_port_state(dev, port, slave);
944                                         slave_cap_mask = priv->mfunc.master.slave_state[slave].ib_cap_mask[port];
945                                         memcpy(outsmp->data + PORT_CAPABILITY_LOCATION_IN_SMP, &slave_cap_mask, 4);
946                                 }
947                                 return err;
948                         }
949                         if (smp->attr_id == IB_SMP_ATTR_GUID_INFO) {
950                                 __be64 guid = mlx4_get_admin_guid(dev, slave,
951                                                                   port);
952
953                                 /* set the PF admin guid to the FW/HW burned
954                                  * GUID, if it wasn't yet set
955                                  */
956                                 if (slave == 0 && guid == 0) {
957                                         smp->attr_mod = 0;
958                                         err = mlx4_cmd_box(dev,
959                                                            inbox->dma,
960                                                            outbox->dma,
961                                                            vhcr->in_modifier,
962                                                            opcode_modifier,
963                                                            vhcr->op,
964                                                            MLX4_CMD_TIME_CLASS_C,
965                                                            MLX4_CMD_NATIVE);
966                                         if (err)
967                                                 return err;
968                                         mlx4_set_admin_guid(dev,
969                                                             *(__be64 *)outsmp->
970                                                             data, slave, port);
971                                 } else {
972                                         memcpy(outsmp->data, &guid, 8);
973                                 }
974
975                                 /* clean all other gids */
976                                 memset(outsmp->data + 8, 0, 56);
977                                 return 0;
978                         }
979                         if (smp->attr_id == IB_SMP_ATTR_NODE_INFO) {
980                                 err = mlx4_cmd_box(dev, inbox->dma, outbox->dma,
981                                              port, opcode_modifier,
982                                              vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
983                                 if (!err) {
984                                         slave_node_guid =  mlx4_get_slave_node_guid(dev, slave);
985                                         memcpy(outsmp->data + 12, &slave_node_guid, 8);
986                                 }
987                                 return err;
988                         }
989                 }
990         }
991
992         /* Non-privileged VFs are only allowed "host" view LID-routed 'Get' MADs.
993          * These are the MADs used by ib verbs (such as ib_query_gids).
994          */
995         if (slave != mlx4_master_func_num(dev) &&
996             !mlx4_vf_smi_enabled(dev, slave, port)) {
997                 if (!(smp->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED &&
998                       smp->method == IB_MGMT_METHOD_GET) || network_view) {
999                         mlx4_err(dev, "Unprivileged slave %d is trying to execute a Subnet MGMT MAD, class 0x%x, method 0x%x, view=%s for attr 0x%x. Rejecting\n",
1000                                  slave, smp->mgmt_class, smp->method,
1001                                  network_view ? "Network" : "Host",
1002                                  be16_to_cpu(smp->attr_id));
1003                         return -EPERM;
1004                 }
1005         }
1006
1007         return mlx4_cmd_box(dev, inbox->dma, outbox->dma,
1008                                     vhcr->in_modifier, opcode_modifier,
1009                                     vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
1010 }
1011
1012 static int mlx4_CMD_EPERM_wrapper(struct mlx4_dev *dev, int slave,
1013                      struct mlx4_vhcr *vhcr,
1014                      struct mlx4_cmd_mailbox *inbox,
1015                      struct mlx4_cmd_mailbox *outbox,
1016                      struct mlx4_cmd_info *cmd)
1017 {
1018         return -EPERM;
1019 }
1020
1021 int mlx4_DMA_wrapper(struct mlx4_dev *dev, int slave,
1022                      struct mlx4_vhcr *vhcr,
1023                      struct mlx4_cmd_mailbox *inbox,
1024                      struct mlx4_cmd_mailbox *outbox,
1025                      struct mlx4_cmd_info *cmd)
1026 {
1027         u64 in_param;
1028         u64 out_param;
1029         int err;
1030
1031         in_param = cmd->has_inbox ? (u64) inbox->dma : vhcr->in_param;
1032         out_param = cmd->has_outbox ? (u64) outbox->dma : vhcr->out_param;
1033         if (cmd->encode_slave_id) {
1034                 in_param &= 0xffffffffffffff00ll;
1035                 in_param |= slave;
1036         }
1037
1038         err = __mlx4_cmd(dev, in_param, &out_param, cmd->out_is_imm,
1039                          vhcr->in_modifier, vhcr->op_modifier, vhcr->op,
1040                          MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
1041
1042         if (cmd->out_is_imm)
1043                 vhcr->out_param = out_param;
1044
1045         return err;
1046 }
1047
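/*
 * Table of FW commands that the PF is willing to process on behalf of
 * slaves, describing the mailboxes each command uses and the wrapper
 * (if any) that enforces resource ownership and paravirtualization.
 */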
1048 static struct mlx4_cmd_info cmd_info[] = {
1049         {
1050                 .opcode = MLX4_CMD_QUERY_FW,
1051                 .has_inbox = false,
1052                 .has_outbox = true,
1053                 .out_is_imm = false,
1054                 .encode_slave_id = false,
1055                 .verify = NULL,
1056                 .wrapper = mlx4_QUERY_FW_wrapper
1057         },
1058         {
1059                 .opcode = MLX4_CMD_QUERY_HCA,
1060                 .has_inbox = false,
1061                 .has_outbox = true,
1062                 .out_is_imm = false,
1063                 .encode_slave_id = false,
1064                 .verify = NULL,
1065                 .wrapper = NULL
1066         },
1067         {
1068                 .opcode = MLX4_CMD_QUERY_DEV_CAP,
1069                 .has_inbox = false,
1070                 .has_outbox = true,
1071                 .out_is_imm = false,
1072                 .encode_slave_id = false,
1073                 .verify = NULL,
1074                 .wrapper = mlx4_QUERY_DEV_CAP_wrapper
1075         },
1076         {
1077                 .opcode = MLX4_CMD_QUERY_FUNC_CAP,
1078                 .has_inbox = false,
1079                 .has_outbox = true,
1080                 .out_is_imm = false,
1081                 .encode_slave_id = false,
1082                 .verify = NULL,
1083                 .wrapper = mlx4_QUERY_FUNC_CAP_wrapper
1084         },
1085         {
1086                 .opcode = MLX4_CMD_QUERY_ADAPTER,
1087                 .has_inbox = false,
1088                 .has_outbox = true,
1089                 .out_is_imm = false,
1090                 .encode_slave_id = false,
1091                 .verify = NULL,
1092                 .wrapper = NULL
1093         },
1094         {
1095                 .opcode = MLX4_CMD_INIT_PORT,
1096                 .has_inbox = false,
1097                 .has_outbox = false,
1098                 .out_is_imm = false,
1099                 .encode_slave_id = false,
1100                 .verify = NULL,
1101                 .wrapper = mlx4_INIT_PORT_wrapper
1102         },
1103         {
1104                 .opcode = MLX4_CMD_CLOSE_PORT,
1105                 .has_inbox = false,
1106                 .has_outbox = false,
1107                 .out_is_imm  = false,
1108                 .encode_slave_id = false,
1109                 .verify = NULL,
1110                 .wrapper = mlx4_CLOSE_PORT_wrapper
1111         },
1112         {
1113                 .opcode = MLX4_CMD_QUERY_PORT,
1114                 .has_inbox = false,
1115                 .has_outbox = true,
1116                 .out_is_imm = false,
1117                 .encode_slave_id = false,
1118                 .verify = NULL,
1119                 .wrapper = mlx4_QUERY_PORT_wrapper
1120         },
1121         {
1122                 .opcode = MLX4_CMD_SET_PORT,
1123                 .has_inbox = true,
1124                 .has_outbox = false,
1125                 .out_is_imm = false,
1126                 .encode_slave_id = false,
1127                 .verify = NULL,
1128                 .wrapper = mlx4_SET_PORT_wrapper
1129         },
1130         {
1131                 .opcode = MLX4_CMD_MAP_EQ,
1132                 .has_inbox = false,
1133                 .has_outbox = false,
1134                 .out_is_imm = false,
1135                 .encode_slave_id = false,
1136                 .verify = NULL,
1137                 .wrapper = mlx4_MAP_EQ_wrapper
1138         },
1139         {
1140                 .opcode = MLX4_CMD_SW2HW_EQ,
1141                 .has_inbox = true,
1142                 .has_outbox = false,
1143                 .out_is_imm = false,
1144                 .encode_slave_id = true,
1145                 .verify = NULL,
1146                 .wrapper = mlx4_SW2HW_EQ_wrapper
1147         },
1148         {
1149                 .opcode = MLX4_CMD_HW_HEALTH_CHECK,
1150                 .has_inbox = false,
1151                 .has_outbox = false,
1152                 .out_is_imm = false,
1153                 .encode_slave_id = false,
1154                 .verify = NULL,
1155                 .wrapper = NULL
1156         },
1157         {
1158                 .opcode = MLX4_CMD_NOP,
1159                 .has_inbox = false,
1160                 .has_outbox = false,
1161                 .out_is_imm = false,
1162                 .encode_slave_id = false,
1163                 .verify = NULL,
1164                 .wrapper = NULL
1165         },
1166         {
1167                 .opcode = MLX4_CMD_CONFIG_DEV,
1168                 .has_inbox = false,
1169                 .has_outbox = true,
1170                 .out_is_imm = false,
1171                 .encode_slave_id = false,
1172                 .verify = NULL,
1173                 .wrapper = mlx4_CONFIG_DEV_wrapper
1174         },
1175         {
1176                 .opcode = MLX4_CMD_ALLOC_RES,
1177                 .has_inbox = false,
1178                 .has_outbox = false,
1179                 .out_is_imm = true,
1180                 .encode_slave_id = false,
1181                 .verify = NULL,
1182                 .wrapper = mlx4_ALLOC_RES_wrapper
1183         },
1184         {
1185                 .opcode = MLX4_CMD_FREE_RES,
1186                 .has_inbox = false,
1187                 .has_outbox = false,
1188                 .out_is_imm = false,
1189                 .encode_slave_id = false,
1190                 .verify = NULL,
1191                 .wrapper = mlx4_FREE_RES_wrapper
1192         },
1193         {
1194                 .opcode = MLX4_CMD_SW2HW_MPT,
1195                 .has_inbox = true,
1196                 .has_outbox = false,
1197                 .out_is_imm = false,
1198                 .encode_slave_id = true,
1199                 .verify = NULL,
1200                 .wrapper = mlx4_SW2HW_MPT_wrapper
1201         },
1202         {
1203                 .opcode = MLX4_CMD_QUERY_MPT,
1204                 .has_inbox = false,
1205                 .has_outbox = true,
1206                 .out_is_imm = false,
1207                 .encode_slave_id = false,
1208                 .verify = NULL,
1209                 .wrapper = mlx4_QUERY_MPT_wrapper
1210         },
1211         {
1212                 .opcode = MLX4_CMD_HW2SW_MPT,
1213                 .has_inbox = false,
1214                 .has_outbox = false,
1215                 .out_is_imm = false,
1216                 .encode_slave_id = false,
1217                 .verify = NULL,
1218                 .wrapper = mlx4_HW2SW_MPT_wrapper
1219         },
1220         {
1221                 .opcode = MLX4_CMD_READ_MTT,
1222                 .has_inbox = false,
1223                 .has_outbox = true,
1224                 .out_is_imm = false,
1225                 .encode_slave_id = false,
1226                 .verify = NULL,
1227                 .wrapper = NULL
1228         },
1229         {
1230                 .opcode = MLX4_CMD_WRITE_MTT,
1231                 .has_inbox = true,
1232                 .has_outbox = false,
1233                 .out_is_imm = false,
1234                 .encode_slave_id = false,
1235                 .verify = NULL,
1236                 .wrapper = mlx4_WRITE_MTT_wrapper
1237         },
1238         {
1239                 .opcode = MLX4_CMD_SYNC_TPT,
1240                 .has_inbox = true,
1241                 .has_outbox = false,
1242                 .out_is_imm = false,
1243                 .encode_slave_id = false,
1244                 .verify = NULL,
1245                 .wrapper = NULL
1246         },
1247         {
1248                 .opcode = MLX4_CMD_HW2SW_EQ,
1249                 .has_inbox = false,
1250                 .has_outbox = false,
1251                 .out_is_imm = false,
1252                 .encode_slave_id = true,
1253                 .verify = NULL,
1254                 .wrapper = mlx4_HW2SW_EQ_wrapper
1255         },
1256         {
1257                 .opcode = MLX4_CMD_QUERY_EQ,
1258                 .has_inbox = false,
1259                 .has_outbox = true,
1260                 .out_is_imm = false,
1261                 .encode_slave_id = true,
1262                 .verify = NULL,
1263                 .wrapper = mlx4_QUERY_EQ_wrapper
1264         },
1265         {
1266                 .opcode = MLX4_CMD_SW2HW_CQ,
1267                 .has_inbox = true,
1268                 .has_outbox = false,
1269                 .out_is_imm = false,
1270                 .encode_slave_id = true,
1271                 .verify = NULL,
1272                 .wrapper = mlx4_SW2HW_CQ_wrapper
1273         },
1274         {
1275                 .opcode = MLX4_CMD_HW2SW_CQ,
1276                 .has_inbox = false,
1277                 .has_outbox = false,
1278                 .out_is_imm = false,
1279                 .encode_slave_id = false,
1280                 .verify = NULL,
1281                 .wrapper = mlx4_HW2SW_CQ_wrapper
1282         },
1283         {
1284                 .opcode = MLX4_CMD_QUERY_CQ,
1285                 .has_inbox = false,
1286                 .has_outbox = true,
1287                 .out_is_imm = false,
1288                 .encode_slave_id = false,
1289                 .verify = NULL,
1290                 .wrapper = mlx4_QUERY_CQ_wrapper
1291         },
1292         {
1293                 .opcode = MLX4_CMD_MODIFY_CQ,
1294                 .has_inbox = true,
1295                 .has_outbox = false,
1296                 .out_is_imm = true,
1297                 .encode_slave_id = false,
1298                 .verify = NULL,
1299                 .wrapper = mlx4_MODIFY_CQ_wrapper
1300         },
1301         {
1302                 .opcode = MLX4_CMD_SW2HW_SRQ,
1303                 .has_inbox = true,
1304                 .has_outbox = false,
1305                 .out_is_imm = false,
1306                 .encode_slave_id = true,
1307                 .verify = NULL,
1308                 .wrapper = mlx4_SW2HW_SRQ_wrapper
1309         },
1310         {
1311                 .opcode = MLX4_CMD_HW2SW_SRQ,
1312                 .has_inbox = false,
1313                 .has_outbox = false,
1314                 .out_is_imm = false,
1315                 .encode_slave_id = false,
1316                 .verify = NULL,
1317                 .wrapper = mlx4_HW2SW_SRQ_wrapper
1318         },
1319         {
1320                 .opcode = MLX4_CMD_QUERY_SRQ,
1321                 .has_inbox = false,
1322                 .has_outbox = true,
1323                 .out_is_imm = false,
1324                 .encode_slave_id = false,
1325                 .verify = NULL,
1326                 .wrapper = mlx4_QUERY_SRQ_wrapper
1327         },
1328         {
1329                 .opcode = MLX4_CMD_ARM_SRQ,
1330                 .has_inbox = false,
1331                 .has_outbox = false,
1332                 .out_is_imm = false,
1333                 .encode_slave_id = false,
1334                 .verify = NULL,
1335                 .wrapper = mlx4_ARM_SRQ_wrapper
1336         },
1337         {
1338                 .opcode = MLX4_CMD_RST2INIT_QP,
1339                 .has_inbox = true,
1340                 .has_outbox = false,
1341                 .out_is_imm = false,
1342                 .encode_slave_id = true,
1343                 .verify = NULL,
1344                 .wrapper = mlx4_RST2INIT_QP_wrapper
1345         },
1346         {
1347                 .opcode = MLX4_CMD_INIT2INIT_QP,
1348                 .has_inbox = true,
1349                 .has_outbox = false,
1350                 .out_is_imm = false,
1351                 .encode_slave_id = false,
1352                 .verify = NULL,
1353                 .wrapper = mlx4_INIT2INIT_QP_wrapper
1354         },
1355         {
1356                 .opcode = MLX4_CMD_INIT2RTR_QP,
1357                 .has_inbox = true,
1358                 .has_outbox = false,
1359                 .out_is_imm = false,
1360                 .encode_slave_id = false,
1361                 .verify = NULL,
1362                 .wrapper = mlx4_INIT2RTR_QP_wrapper
1363         },
1364         {
1365                 .opcode = MLX4_CMD_RTR2RTS_QP,
1366                 .has_inbox = true,
1367                 .has_outbox = false,
1368                 .out_is_imm = false,
1369                 .encode_slave_id = false,
1370                 .verify = NULL,
1371                 .wrapper = mlx4_RTR2RTS_QP_wrapper
1372         },
1373         {
1374                 .opcode = MLX4_CMD_RTS2RTS_QP,
1375                 .has_inbox = true,
1376                 .has_outbox = false,
1377                 .out_is_imm = false,
1378                 .encode_slave_id = false,
1379                 .verify = NULL,
1380                 .wrapper = mlx4_RTS2RTS_QP_wrapper
1381         },
1382         {
1383                 .opcode = MLX4_CMD_SQERR2RTS_QP,
1384                 .has_inbox = true,
1385                 .has_outbox = false,
1386                 .out_is_imm = false,
1387                 .encode_slave_id = false,
1388                 .verify = NULL,
1389                 .wrapper = mlx4_SQERR2RTS_QP_wrapper
1390         },
1391         {
1392                 .opcode = MLX4_CMD_2ERR_QP,
1393                 .has_inbox = false,
1394                 .has_outbox = false,
1395                 .out_is_imm = false,
1396                 .encode_slave_id = false,
1397                 .verify = NULL,
1398                 .wrapper = mlx4_GEN_QP_wrapper
1399         },
1400         {
1401                 .opcode = MLX4_CMD_RTS2SQD_QP,
1402                 .has_inbox = false,
1403                 .has_outbox = false,
1404                 .out_is_imm = false,
1405                 .encode_slave_id = false,
1406                 .verify = NULL,
1407                 .wrapper = mlx4_GEN_QP_wrapper
1408         },
1409         {
1410                 .opcode = MLX4_CMD_SQD2SQD_QP,
1411                 .has_inbox = true,
1412                 .has_outbox = false,
1413                 .out_is_imm = false,
1414                 .encode_slave_id = false,
1415                 .verify = NULL,
1416                 .wrapper = mlx4_SQD2SQD_QP_wrapper
1417         },
1418         {
1419                 .opcode = MLX4_CMD_SQD2RTS_QP,
1420                 .has_inbox = true,
1421                 .has_outbox = false,
1422                 .out_is_imm = false,
1423                 .encode_slave_id = false,
1424                 .verify = NULL,
1425                 .wrapper = mlx4_SQD2RTS_QP_wrapper
1426         },
1427         {
1428                 .opcode = MLX4_CMD_2RST_QP,
1429                 .has_inbox = false,
1430                 .has_outbox = false,
1431                 .out_is_imm = false,
1432                 .encode_slave_id = false,
1433                 .verify = NULL,
1434                 .wrapper = mlx4_2RST_QP_wrapper
1435         },
1436         {
1437                 .opcode = MLX4_CMD_QUERY_QP,
1438                 .has_inbox = false,
1439                 .has_outbox = true,
1440                 .out_is_imm = false,
1441                 .encode_slave_id = false,
1442                 .verify = NULL,
1443                 .wrapper = mlx4_GEN_QP_wrapper
1444         },
1445         {
1446                 .opcode = MLX4_CMD_SUSPEND_QP,
1447                 .has_inbox = false,
1448                 .has_outbox = false,
1449                 .out_is_imm = false,
1450                 .encode_slave_id = false,
1451                 .verify = NULL,
1452                 .wrapper = mlx4_GEN_QP_wrapper
1453         },
1454         {
1455                 .opcode = MLX4_CMD_UNSUSPEND_QP,
1456                 .has_inbox = false,
1457                 .has_outbox = false,
1458                 .out_is_imm = false,
1459                 .encode_slave_id = false,
1460                 .verify = NULL,
1461                 .wrapper = mlx4_GEN_QP_wrapper
1462         },
1463         {
1464                 .opcode = MLX4_CMD_UPDATE_QP,
1465                 .has_inbox = true,
1466                 .has_outbox = false,
1467                 .out_is_imm = false,
1468                 .encode_slave_id = false,
1469                 .verify = NULL,
1470                 .wrapper = mlx4_UPDATE_QP_wrapper
1471         },
1472         {
1473                 .opcode = MLX4_CMD_GET_OP_REQ,
1474                 .has_inbox = false,
1475                 .has_outbox = false,
1476                 .out_is_imm = false,
1477                 .encode_slave_id = false,
1478                 .verify = NULL,
1479                 .wrapper = mlx4_CMD_EPERM_wrapper,
1480         },
1481         {
1482                 .opcode = MLX4_CMD_ALLOCATE_VPP,
1483                 .has_inbox = false,
1484                 .has_outbox = true,
1485                 .out_is_imm = false,
1486                 .encode_slave_id = false,
1487                 .verify = NULL,
1488                 .wrapper = mlx4_CMD_EPERM_wrapper,
1489         },
1490         {
1491                 .opcode = MLX4_CMD_SET_VPORT_QOS,
1492                 .has_inbox = false,
1493                 .has_outbox = true,
1494                 .out_is_imm = false,
1495                 .encode_slave_id = false,
1496                 .verify = NULL,
1497                 .wrapper = mlx4_CMD_EPERM_wrapper,
1498         },
1499         {
1500                 .opcode = MLX4_CMD_CONF_SPECIAL_QP,
1501                 .has_inbox = false,
1502                 .has_outbox = false,
1503                 .out_is_imm = false,
1504                 .encode_slave_id = false,
1505                 .verify = NULL, /* XXX verify: only demux can do this */
1506                 .wrapper = NULL
1507         },
1508         {
1509                 .opcode = MLX4_CMD_MAD_IFC,
1510                 .has_inbox = true,
1511                 .has_outbox = true,
1512                 .out_is_imm = false,
1513                 .encode_slave_id = false,
1514                 .verify = NULL,
1515                 .wrapper = mlx4_MAD_IFC_wrapper
1516         },
1517         {
1518                 .opcode = MLX4_CMD_MAD_DEMUX,
1519                 .has_inbox = false,
1520                 .has_outbox = false,
1521                 .out_is_imm = false,
1522                 .encode_slave_id = false,
1523                 .verify = NULL,
1524                 .wrapper = mlx4_CMD_EPERM_wrapper
1525         },
1526         {
1527                 .opcode = MLX4_CMD_QUERY_IF_STAT,
1528                 .has_inbox = false,
1529                 .has_outbox = true,
1530                 .out_is_imm = false,
1531                 .encode_slave_id = false,
1532                 .verify = NULL,
1533                 .wrapper = mlx4_QUERY_IF_STAT_wrapper
1534         },
1535         {
1536                 .opcode = MLX4_CMD_ACCESS_REG,
1537                 .has_inbox = true,
1538                 .has_outbox = true,
1539                 .out_is_imm = false,
1540                 .encode_slave_id = false,
1541                 .verify = NULL,
1542                 .wrapper = mlx4_ACCESS_REG_wrapper,
1543         },
1544         {
1545                 .opcode = MLX4_CMD_CONGESTION_CTRL_OPCODE,
1546                 .has_inbox = false,
1547                 .has_outbox = false,
1548                 .out_is_imm = false,
1549                 .encode_slave_id = false,
1550                 .verify = NULL,
1551                 .wrapper = mlx4_CMD_EPERM_wrapper,
1552         },
1553         /* Native multicast commands are not available for guests */
1554         {
1555                 .opcode = MLX4_CMD_QP_ATTACH,
1556                 .has_inbox = true,
1557                 .has_outbox = false,
1558                 .out_is_imm = false,
1559                 .encode_slave_id = false,
1560                 .verify = NULL,
1561                 .wrapper = mlx4_QP_ATTACH_wrapper
1562         },
1563         {
1564                 .opcode = MLX4_CMD_PROMISC,
1565                 .has_inbox = false,
1566                 .has_outbox = false,
1567                 .out_is_imm = false,
1568                 .encode_slave_id = false,
1569                 .verify = NULL,
1570                 .wrapper = mlx4_PROMISC_wrapper
1571         },
1572         /* Ethernet specific commands */
1573         {
1574                 .opcode = MLX4_CMD_SET_VLAN_FLTR,
1575                 .has_inbox = true,
1576                 .has_outbox = false,
1577                 .out_is_imm = false,
1578                 .encode_slave_id = false,
1579                 .verify = NULL,
1580                 .wrapper = mlx4_SET_VLAN_FLTR_wrapper
1581         },
1582         {
1583                 .opcode = MLX4_CMD_SET_MCAST_FLTR,
1584                 .has_inbox = false,
1585                 .has_outbox = false,
1586                 .out_is_imm = false,
1587                 .encode_slave_id = false,
1588                 .verify = NULL,
1589                 .wrapper = mlx4_SET_MCAST_FLTR_wrapper
1590         },
1591         {
1592                 .opcode = MLX4_CMD_DUMP_ETH_STATS,
1593                 .has_inbox = false,
1594                 .has_outbox = true,
1595                 .out_is_imm = false,
1596                 .encode_slave_id = false,
1597                 .verify = NULL,
1598                 .wrapper = mlx4_DUMP_ETH_STATS_wrapper
1599         },
1600         {
1601                 .opcode = MLX4_CMD_INFORM_FLR_DONE,
1602                 .has_inbox = false,
1603                 .has_outbox = false,
1604                 .out_is_imm = false,
1605                 .encode_slave_id = false,
1606                 .verify = NULL,
1607                 .wrapper = NULL
1608         },
1609         /* flow steering commands */
1610         {
1611                 .opcode = MLX4_QP_FLOW_STEERING_ATTACH,
1612                 .has_inbox = true,
1613                 .has_outbox = false,
1614                 .out_is_imm = true,
1615                 .encode_slave_id = false,
1616                 .verify = NULL,
1617                 .wrapper = mlx4_QP_FLOW_STEERING_ATTACH_wrapper
1618         },
1619         {
1620                 .opcode = MLX4_QP_FLOW_STEERING_DETACH,
1621                 .has_inbox = false,
1622                 .has_outbox = false,
1623                 .out_is_imm = false,
1624                 .encode_slave_id = false,
1625                 .verify = NULL,
1626                 .wrapper = mlx4_QP_FLOW_STEERING_DETACH_wrapper
1627         },
1628         {
1629                 .opcode = MLX4_FLOW_STEERING_IB_UC_QP_RANGE,
1630                 .has_inbox = false,
1631                 .has_outbox = false,
1632                 .out_is_imm = false,
1633                 .encode_slave_id = false,
1634                 .verify = NULL,
1635                 .wrapper = mlx4_CMD_EPERM_wrapper
1636         },
1637         {
1638                 .opcode = MLX4_CMD_VIRT_PORT_MAP,
1639                 .has_inbox = false,
1640                 .has_outbox = false,
1641                 .out_is_imm = false,
1642                 .encode_slave_id = false,
1643                 .verify = NULL,
1644                 .wrapper = mlx4_CMD_EPERM_wrapper
1645         },
1646 };
1647
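/* Process a command that a slave posted through its virtual HCR: DMA the
 * vHCR (and inbox, if any) in from the slave, look the opcode up in
 * cmd_info[], run the verifier and wrapper (or pass the command straight to
 * firmware), then DMA the outbox and completion status back to the slave.
 */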
1648 static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
1649                                     struct mlx4_vhcr_cmd *in_vhcr)
1650 {
1651         struct mlx4_priv *priv = mlx4_priv(dev);
1652         struct mlx4_cmd_info *cmd = NULL;
1653         struct mlx4_vhcr_cmd *vhcr_cmd = in_vhcr ? in_vhcr : priv->mfunc.vhcr;
1654         struct mlx4_vhcr *vhcr;
1655         struct mlx4_cmd_mailbox *inbox = NULL;
1656         struct mlx4_cmd_mailbox *outbox = NULL;
1657         u64 in_param;
1658         u64 out_param;
1659         int ret = 0;
1660         int i;
1661         int err = 0;
1662
1663         /* Create sw representation of Virtual HCR */
1664         vhcr = kzalloc(sizeof(struct mlx4_vhcr), GFP_KERNEL);
1665         if (!vhcr)
1666                 return -ENOMEM;
1667
1668         /* DMA in the vHCR */
1669         if (!in_vhcr) {
1670                 ret = mlx4_ACCESS_MEM(dev, priv->mfunc.vhcr_dma, slave,
1671                                       priv->mfunc.master.slave_state[slave].vhcr_dma,
1672                                       ALIGN(sizeof(struct mlx4_vhcr_cmd),
1673                                             MLX4_ACCESS_MEM_ALIGN), 1);
1674                 if (ret) {
1675                         if (!(dev->persist->state &
1676                             MLX4_DEVICE_STATE_INTERNAL_ERROR))
1677                                 mlx4_err(dev, "%s: Failed reading vhcr ret: 0x%x\n",
1678                                          __func__, ret);
1679                         kfree(vhcr);
1680                         return ret;
1681                 }
1682         }
1683
1684         /* Fill SW VHCR fields */
1685         vhcr->in_param = be64_to_cpu(vhcr_cmd->in_param);
1686         vhcr->out_param = be64_to_cpu(vhcr_cmd->out_param);
1687         vhcr->in_modifier = be32_to_cpu(vhcr_cmd->in_modifier);
1688         vhcr->token = be16_to_cpu(vhcr_cmd->token);
1689         vhcr->op = be16_to_cpu(vhcr_cmd->opcode) & 0xfff;
1690         vhcr->op_modifier = (u8) (be16_to_cpu(vhcr_cmd->opcode) >> 12);
1691         vhcr->e_bit = vhcr_cmd->flags & (1 << 6);
1692
1693         /* Lookup command */
1694         for (i = 0; i < ARRAY_SIZE(cmd_info); ++i) {
1695                 if (vhcr->op == cmd_info[i].opcode) {
1696                         cmd = &cmd_info[i];
1697                         break;
1698                 }
1699         }
1700         if (!cmd) {
1701                 mlx4_err(dev, "Unknown command:0x%x accepted from slave:%d\n",
1702                          vhcr->op, slave);
1703                 vhcr_cmd->status = CMD_STAT_BAD_PARAM;
1704                 goto out_status;
1705         }
1706
1707         /* Read inbox */
1708         if (cmd->has_inbox) {
1709                 vhcr->in_param &= INBOX_MASK;
1710                 inbox = mlx4_alloc_cmd_mailbox(dev);
1711                 if (IS_ERR(inbox)) {
1712                         vhcr_cmd->status = CMD_STAT_BAD_SIZE;
1713                         inbox = NULL;
1714                         goto out_status;
1715                 }
1716
1717                 ret = mlx4_ACCESS_MEM(dev, inbox->dma, slave,
1718                                       vhcr->in_param,
1719                                       MLX4_MAILBOX_SIZE, 1);
1720                 if (ret) {
1721                         if (!(dev->persist->state &
1722                             MLX4_DEVICE_STATE_INTERNAL_ERROR))
1723                                 mlx4_err(dev, "%s: Failed reading inbox (cmd:0x%x)\n",
1724                                          __func__, cmd->opcode);
1725                         vhcr_cmd->status = CMD_STAT_INTERNAL_ERR;
1726                         goto out_status;
1727                 }
1728         }
1729
1730         /* Apply permission and bound checks if applicable */
1731         if (cmd->verify && cmd->verify(dev, slave, vhcr, inbox)) {
1732                 mlx4_warn(dev, "Command:0x%x from slave: %d failed protection checks for resource_id:%d\n",
1733                           vhcr->op, slave, vhcr->in_modifier);
1734                 vhcr_cmd->status = CMD_STAT_BAD_OP;
1735                 goto out_status;
1736         }
1737
1738         /* Allocate outbox */
1739         if (cmd->has_outbox) {
1740                 outbox = mlx4_alloc_cmd_mailbox(dev);
1741                 if (IS_ERR(outbox)) {
1742                         vhcr_cmd->status = CMD_STAT_BAD_SIZE;
1743                         outbox = NULL;
1744                         goto out_status;
1745                 }
1746         }
1747
1748         /* Execute the command! */
1749         if (cmd->wrapper) {
1750                 err = cmd->wrapper(dev, slave, vhcr, inbox, outbox,
1751                                    cmd);
1752                 if (cmd->out_is_imm)
1753                         vhcr_cmd->out_param = cpu_to_be64(vhcr->out_param);
1754         } else {
1755                 in_param = cmd->has_inbox ? (u64) inbox->dma :
1756                         vhcr->in_param;
1757                 out_param = cmd->has_outbox ? (u64) outbox->dma :
1758                         vhcr->out_param;
1759                 err = __mlx4_cmd(dev, in_param, &out_param,
1760                                  cmd->out_is_imm, vhcr->in_modifier,
1761                                  vhcr->op_modifier, vhcr->op,
1762                                  MLX4_CMD_TIME_CLASS_A,
1763                                  MLX4_CMD_NATIVE);
1764
1765                 if (cmd->out_is_imm) {
1766                         vhcr->out_param = out_param;
1767                         vhcr_cmd->out_param = cpu_to_be64(vhcr->out_param);
1768                 }
1769         }
1770
1771         if (err) {
1772                 if (!(dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR))
1773                         mlx4_warn(dev, "vhcr command:0x%x slave:%d failed with error:%d, status %d\n",
1774                                   vhcr->op, slave, vhcr->errno, err);
1775                 vhcr_cmd->status = mlx4_errno_to_status(err);
1776                 goto out_status;
1777         }
1778
1779
1780         /* Write outbox if command completed successfully */
1781         if (cmd->has_outbox && !vhcr_cmd->status) {
1782                 ret = mlx4_ACCESS_MEM(dev, outbox->dma, slave,
1783                                       vhcr->out_param,
1784                                       MLX4_MAILBOX_SIZE, MLX4_CMD_WRAPPED);
1785                 if (ret) {
1786                         /* If we failed to write back the outbox after the
1787                          * command was successfully executed, we must fail this
1788                          * slave, as it is now in an undefined state */
1789                         if (!(dev->persist->state &
1790                             MLX4_DEVICE_STATE_INTERNAL_ERROR))
1791                                 mlx4_err(dev, "%s:Failed writing outbox\n", __func__);
1792                         goto out;
1793                 }
1794         }
1795
1796 out_status:
1797         /* DMA back vhcr result */
1798         if (!in_vhcr) {
1799                 ret = mlx4_ACCESS_MEM(dev, priv->mfunc.vhcr_dma, slave,
1800                                       priv->mfunc.master.slave_state[slave].vhcr_dma,
1801                                       ALIGN(sizeof(struct mlx4_vhcr),
1802                                             MLX4_ACCESS_MEM_ALIGN),
1803                                       MLX4_CMD_WRAPPED);
1804                 if (ret)
1805                         mlx4_err(dev, "%s:Failed writing vhcr result\n",
1806                                  __func__);
1807                 else if (vhcr->e_bit &&
1808                          mlx4_GEN_EQE(dev, slave, &priv->mfunc.master.cmd_eqe))
1809                                 mlx4_warn(dev, "Failed to generate command completion eqe for slave %d\n",
1810                                           slave);
1811         }
1812
1813 out:
1814         kfree(vhcr);
1815         mlx4_free_cmd_mailbox(dev, inbox);
1816         mlx4_free_cmd_mailbox(dev, outbox);
1817         return ret;
1818 }
1819
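/* Apply a VF's updated admin VLAN/QoS/link-state settings immediately:
 * register the new VLAN if one is needed, update the operational vport
 * state, and queue a work item that updates the VF's QPs via UPDATE_QP.
 */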
1820 static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv,
1821                                             int slave, int port)
1822 {
1823         struct mlx4_vport_oper_state *vp_oper;
1824         struct mlx4_vport_state *vp_admin;
1825         struct mlx4_vf_immed_vlan_work *work;
1826         struct mlx4_dev *dev = &(priv->dev);
1827         int err;
1828         int admin_vlan_ix = NO_INDX;
1829
1830         vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
1831         vp_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
1832
1833         if (vp_oper->state.default_vlan == vp_admin->default_vlan &&
1834             vp_oper->state.default_qos == vp_admin->default_qos &&
1835             vp_oper->state.link_state == vp_admin->link_state &&
1836             vp_oper->state.qos_vport == vp_admin->qos_vport)
1837                 return 0;
1838
1839         if (!(priv->mfunc.master.slave_state[slave].active &&
1840               dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP)) {
1841                 /* even if the UPDATE_QP command isn't supported, we still want
1842                  * to set this VF link according to the admin directive
1843                  */
1844                 vp_oper->state.link_state = vp_admin->link_state;
1845                 return -1;
1846         }
1847
1848         mlx4_dbg(dev, "updating immediately admin params slave %d port %d\n",
1849                  slave, port);
1850         mlx4_dbg(dev, "vlan %d QoS %d link down %d\n",
1851                  vp_admin->default_vlan, vp_admin->default_qos,
1852                  vp_admin->link_state);
1853
1854         work = kzalloc(sizeof(*work), GFP_KERNEL);
1855         if (!work)
1856                 return -ENOMEM;
1857
1858         if (vp_oper->state.default_vlan != vp_admin->default_vlan) {
1859                 if (MLX4_VGT != vp_admin->default_vlan) {
1860                         err = __mlx4_register_vlan(&priv->dev, port,
1861                                                    vp_admin->default_vlan,
1862                                                    &admin_vlan_ix);
1863                         if (err) {
1864                                 kfree(work);
1865                                 mlx4_warn(&priv->dev,
1866                                           "No vlan resources slave %d, port %d\n",
1867                                           slave, port);
1868                                 return err;
1869                         }
1870                 } else {
1871                         admin_vlan_ix = NO_INDX;
1872                 }
1873                 work->flags |= MLX4_VF_IMMED_VLAN_FLAG_VLAN;
1874                 mlx4_dbg(&priv->dev,
1875                          "alloc vlan %d idx  %d slave %d port %d\n",
1876                          (int)(vp_admin->default_vlan),
1877                          admin_vlan_ix, slave, port);
1878         }
1879
1880         /* save original vlan ix and vlan id */
1881         work->orig_vlan_id = vp_oper->state.default_vlan;
1882         work->orig_vlan_ix = vp_oper->vlan_idx;
1883
1884         /* handle new qos */
1885         if (vp_oper->state.default_qos != vp_admin->default_qos)
1886                 work->flags |= MLX4_VF_IMMED_VLAN_FLAG_QOS;
1887
1888         if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN)
1889                 vp_oper->vlan_idx = admin_vlan_ix;
1890
1891         vp_oper->state.default_vlan = vp_admin->default_vlan;
1892         vp_oper->state.default_qos = vp_admin->default_qos;
1893         vp_oper->state.link_state = vp_admin->link_state;
1894         vp_oper->state.qos_vport = vp_admin->qos_vport;
1895
1896         if (vp_admin->link_state == IFLA_VF_LINK_STATE_DISABLE)
1897                 work->flags |= MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE;
1898
1899         /* iterate over QPs owned by this slave, using UPDATE_QP */
1900         work->port = port;
1901         work->slave = slave;
1902         work->qos = vp_oper->state.default_qos;
1903         work->qos_vport = vp_oper->state.qos_vport;
1904         work->vlan_id = vp_oper->state.default_vlan;
1905         work->vlan_ix = vp_oper->vlan_idx;
1906         work->priv = priv;
1907         INIT_WORK(&work->work, mlx4_vf_immed_vlan_work_handler);
1908         queue_work(priv->mfunc.master.comm_wq, &work->work);
1909
1910         return 0;
1911 }
1912
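/* Reset the port's QoS priority bitmap so that only the default priority
 * is enabled.
 */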
1913 static void mlx4_set_default_port_qos(struct mlx4_dev *dev, int port)
1914 {
1915         struct mlx4_qos_manager *port_qos_ctl;
1916         struct mlx4_priv *priv = mlx4_priv(dev);
1917
1918         port_qos_ctl = &priv->mfunc.master.qos_ctl[port];
1919         bitmap_zero(port_qos_ctl->priority_bm, MLX4_NUM_UP);
1920
1921         /* Enable only default prio at PF init routine */
1922         set_bit(MLX4_DEFAULT_QOS_PRIO, port_qos_ctl->priority_bm);
1923 }
1924
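/* Query how many VPPs are available on the port, split them evenly among
 * the priorities enabled in the port's QoS bitmap, and program that
 * allocation in firmware.
 */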
1925 static void mlx4_allocate_port_vpps(struct mlx4_dev *dev, int port)
1926 {
1927         int i;
1928         int err;
1929         int num_vfs;
1930         u16 available_vpp;
1931         u8 vpp_param[MLX4_NUM_UP];
1932         struct mlx4_qos_manager *port_qos;
1933         struct mlx4_priv *priv = mlx4_priv(dev);
1934
1935         err = mlx4_ALLOCATE_VPP_get(dev, port, &available_vpp, vpp_param);
1936         if (err) {
1937                 mlx4_info(dev, "Failed to query available VPPs\n");
1938                 return;
1939         }
1940
1941         port_qos = &priv->mfunc.master.qos_ctl[port];
1942         num_vfs = (available_vpp /
1943                    bitmap_weight(port_qos->priority_bm, MLX4_NUM_UP));
1944
1945         for (i = 0; i < MLX4_NUM_UP; i++) {
1946                 if (test_bit(i, port_qos->priority_bm))
1947                         vpp_param[i] = num_vfs;
1948         }
1949
1950         err = mlx4_ALLOCATE_VPP_set(dev, port, vpp_param);
1951         if (err) {
1952                 mlx4_info(dev, "Failed allocating VPPs\n");
1953                 return;
1954         }
1955
1956         /* Query actual allocated VPP, just to make sure */
1957         err = mlx4_ALLOCATE_VPP_get(dev, port, &available_vpp, vpp_param);
1958         if (err) {
1959                 mlx4_info(dev, "Failed to query available VPPs\n");
1960                 return;
1961         }
1962
1963         port_qos->num_of_qos_vfs = num_vfs;
1964         mlx4_dbg(dev, "Port %d Available VPPs %d\n", port, available_vpp);
1965
1966         for (i = 0; i < MLX4_NUM_UP; i++)
1967                 mlx4_dbg(dev, "Port %d UP %d Allocated %d VPPs\n", port, i,
1968                          vpp_param[i]);
1969 }
1970
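/* Copy the admin vport state into the operational state for every port the
 * slave may use, registering the admin VLAN and, when spoof checking is
 * enabled, the admin MAC.
 */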
1971 static int mlx4_master_activate_admin_state(struct mlx4_priv *priv, int slave)
1972 {
1973         int port, err;
1974         struct mlx4_vport_state *vp_admin;
1975         struct mlx4_vport_oper_state *vp_oper;
1976         struct mlx4_active_ports actv_ports = mlx4_get_active_ports(
1977                         &priv->dev, slave);
1978         int min_port = find_first_bit(actv_ports.ports,
1979                                       priv->dev.caps.num_ports) + 1;
1980         int max_port = min_port - 1 +
1981                 bitmap_weight(actv_ports.ports, priv->dev.caps.num_ports);
1982
1983         for (port = min_port; port <= max_port; port++) {
1984                 if (!test_bit(port - 1, actv_ports.ports))
1985                         continue;
1986                 priv->mfunc.master.vf_oper[slave].smi_enabled[port] =
1987                         priv->mfunc.master.vf_admin[slave].enable_smi[port];
1988                 vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
1989                 vp_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
1990                 vp_oper->state = *vp_admin;
1991                 if (MLX4_VGT != vp_admin->default_vlan) {
1992                         err = __mlx4_register_vlan(&priv->dev, port,
1993                                                    vp_admin->default_vlan, &(vp_oper->vlan_idx));
1994                         if (err) {
1995                                 vp_oper->vlan_idx = NO_INDX;
1996                                 mlx4_warn(&priv->dev,
1997                                           "No vlan resources slave %d, port %d\n",
1998                                           slave, port);
1999                                 return err;
2000                         }
2001                         mlx4_dbg(&priv->dev, "alloc vlan %d idx  %d slave %d port %d\n",
2002                                  (int)(vp_oper->state.default_vlan),
2003                                  vp_oper->vlan_idx, slave, port);
2004                 }
2005                 if (vp_admin->spoofchk) {
2006                         vp_oper->mac_idx = __mlx4_register_mac(&priv->dev,
2007                                                                port,
2008                                                                vp_admin->mac);
2009                         if (0 > vp_oper->mac_idx) {
2010                                 err = vp_oper->mac_idx;
2011                                 vp_oper->mac_idx = NO_INDX;
2012                                 mlx4_warn(&priv->dev,
2013                                           "No mac resources slave %d, port %d\n",
2014                                           slave, port);
2015                                 return err;
2016                         }
2017                         mlx4_dbg(&priv->dev, "alloc mac %llx idx  %d slave %d port %d\n",
2018                                  vp_oper->state.mac, vp_oper->mac_idx, slave, port);
2019                 }
2020         }
2021         return 0;
2022 }
2023
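/* Release the per-port resources held on behalf of the slave: disable SMI
 * and unregister any VLAN and MAC that were registered for it.
 */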
2024 static void mlx4_master_deactivate_admin_state(struct mlx4_priv *priv, int slave)
2025 {
2026         int port;
2027         struct mlx4_vport_oper_state *vp_oper;
2028         struct mlx4_active_ports actv_ports = mlx4_get_active_ports(
2029                         &priv->dev, slave);
2030         int min_port = find_first_bit(actv_ports.ports,
2031                                       priv->dev.caps.num_ports) + 1;
2032         int max_port = min_port - 1 +
2033                 bitmap_weight(actv_ports.ports, priv->dev.caps.num_ports);
2034
2035
2036         for (port = min_port; port <= max_port; port++) {
2037                 if (!test_bit(port - 1, actv_ports.ports))
2038                         continue;
2039                 priv->mfunc.master.vf_oper[slave].smi_enabled[port] =
2040                         MLX4_VF_SMI_DISABLED;
2041                 vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
2042                 if (NO_INDX != vp_oper->vlan_idx) {
2043                         __mlx4_unregister_vlan(&priv->dev,
2044                                                port, vp_oper->state.default_vlan);
2045                         vp_oper->vlan_idx = NO_INDX;
2046                 }
2047                 if (NO_INDX != vp_oper->mac_idx) {
2048                         __mlx4_unregister_mac(&priv->dev, port, vp_oper->state.mac);
2049                         vp_oper->mac_idx = NO_INDX;
2050                 }
2051         }
2052         return;
2053 }
2054
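/* Handle one command received from a slave over the comm channel: track the
 * VHCR address handshake (VHCR0..VHCR_EN), process posted commands, and
 * acknowledge by writing the toggled reply to slave_read.  Protocol
 * violations cause the slave to be reset.
 */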
2055 static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
2056                                u16 param, u8 toggle)
2057 {
2058         struct mlx4_priv *priv = mlx4_priv(dev);
2059         struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
2060         u32 reply;
2061         u8 is_going_down = 0;
2062         int i;
2063         unsigned long flags;
2064
2065         slave_state[slave].comm_toggle ^= 1;
2066         reply = (u32) slave_state[slave].comm_toggle << 31;
2067         if (toggle != slave_state[slave].comm_toggle) {
2068                 mlx4_warn(dev, "Incorrect toggle %d from slave %d. *** MASTER STATE COMPROMISED ***\n",
2069                           toggle, slave);
2070                 goto reset_slave;
2071         }
2072         if (cmd == MLX4_COMM_CMD_RESET) {
2073                 mlx4_warn(dev, "Received reset from slave:%d\n", slave);
2074                 slave_state[slave].active = false;
2075                 slave_state[slave].old_vlan_api = false;
2076                 mlx4_master_deactivate_admin_state(priv, slave);
2077                 for (i = 0; i < MLX4_EVENT_TYPES_NUM; ++i) {
2078                                 slave_state[slave].event_eq[i].eqn = -1;
2079                                 slave_state[slave].event_eq[i].token = 0;
2080                 }
2081                 /* Check if we are in the middle of the FLR process;
2082                  * if so, return "retry" status to the slave. */
2083                 if (MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd)
2084                         goto inform_slave_state;
2085
2086                 mlx4_dispatch_event(dev, MLX4_DEV_EVENT_SLAVE_SHUTDOWN, slave);
2087
2088                 /* write the version in the event field */
2089                 reply |= mlx4_comm_get_version();
2090
2091                 goto reset_slave;
2092         }
2093         /* Command from slave in the middle of FLR */
2094         if (cmd != MLX4_COMM_CMD_RESET &&
2095             MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd) {
2096                 mlx4_warn(dev, "slave:%d is trying to run cmd(0x%x) in the middle of FLR\n",
2097                           slave, cmd);
2098                 return;
2099         }
2100
2101         switch (cmd) {
2102         case MLX4_COMM_CMD_VHCR0:
2103                 if (slave_state[slave].last_cmd != MLX4_COMM_CMD_RESET)
2104                         goto reset_slave;
2105                 slave_state[slave].vhcr_dma = ((u64) param) << 48;
2106                 priv->mfunc.master.slave_state[slave].cookie = 0;
2107                 break;
2108         case MLX4_COMM_CMD_VHCR1:
2109                 if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR0)
2110                         goto reset_slave;
2111                 slave_state[slave].vhcr_dma |= ((u64) param) << 32;
2112                 break;
2113         case MLX4_COMM_CMD_VHCR2:
2114                 if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR1)
2115                         goto reset_slave;
2116                 slave_state[slave].vhcr_dma |= ((u64) param) << 16;
2117                 break;
2118         case MLX4_COMM_CMD_VHCR_EN:
2119                 if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR2)
2120                         goto reset_slave;
2121                 slave_state[slave].vhcr_dma |= param;
2122                 if (mlx4_master_activate_admin_state(priv, slave))
2123                                 goto reset_slave;
2124                 slave_state[slave].active = true;
2125                 mlx4_dispatch_event(dev, MLX4_DEV_EVENT_SLAVE_INIT, slave);
2126                 break;
2127         case MLX4_COMM_CMD_VHCR_POST:
2128                 if ((slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR_EN) &&
2129                     (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR_POST)) {
2130                         mlx4_warn(dev, "slave:%d is out of sync, cmd=0x%x, last command=0x%x, reset is needed\n",
2131                                   slave, cmd, slave_state[slave].last_cmd);
2132                         goto reset_slave;
2133                 }
2134
2135                 mutex_lock(&priv->cmd.slave_cmd_mutex);
2136                 if (mlx4_master_process_vhcr(dev, slave, NULL)) {
2137                         mlx4_err(dev, "Failed processing vhcr for slave:%d, resetting slave\n",
2138                                  slave);
2139                         mutex_unlock(&priv->cmd.slave_cmd_mutex);
2140                         goto reset_slave;
2141                 }
2142                 mutex_unlock(&priv->cmd.slave_cmd_mutex);
2143                 break;
2144         default:
2145                 mlx4_warn(dev, "Bad comm cmd:%d from slave:%d\n", cmd, slave);
2146                 goto reset_slave;
2147         }
2148         spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags);
2149         if (!slave_state[slave].is_slave_going_down)
2150                 slave_state[slave].last_cmd = cmd;
2151         else
2152                 is_going_down = 1;
2153         spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags);
2154         if (is_going_down) {
2155                 mlx4_warn(dev, "Slave is going down, aborting command(%d) executing from slave:%d\n",
2156                           cmd, slave);
2157                 return;
2158         }
2159         __raw_writel((__force u32) cpu_to_be32(reply),
2160                      &priv->mfunc.comm[slave].slave_read);
2161         mmiowb();
2162
2163         return;
2164
2165 reset_slave:
2166         /* cleanup any slave resources */
2167         if (dev->persist->interface_state & MLX4_INTERFACE_STATE_UP)
2168                 mlx4_delete_all_resources_for_slave(dev, slave);
2169
2170         if (cmd != MLX4_COMM_CMD_RESET) {
2171                 mlx4_warn(dev, "Turn on internal error to force reset, slave=%d, cmd=0x%x\n",
2172                           slave, cmd);
2173                 /* Turn on internal error to let the slave reset itself immediately;
2174                  * otherwise it might not reset until the command timeout expires
2175                  */
2176                 reply |= ((u32)COMM_CHAN_EVENT_INTERNAL_ERR);
2177         }
2178
2179         spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags);
2180         if (!slave_state[slave].is_slave_going_down)
2181                 slave_state[slave].last_cmd = MLX4_COMM_CMD_RESET;
2182         spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags);
2183         /* With the slave in the middle of FLR, there is no need to clean resources again. */
2184 inform_slave_state:
2185         memset(&slave_state[slave].event_eq, 0,
2186                sizeof(struct mlx4_slave_event_eq_info));
2187         __raw_writel((__force u32) cpu_to_be32(reply),
2188                      &priv->mfunc.comm[slave].slave_read);
2189         wmb();
2190 }
2191
2192 /* Master command processing: dispatch pending slave commands and re-arm the comm channel */
2193 void mlx4_master_comm_channel(struct work_struct *work)
2194 {
2195         struct mlx4_mfunc_master_ctx *master =
2196                 container_of(work,
2197                              struct mlx4_mfunc_master_ctx,
2198                              comm_work);
2199         struct mlx4_mfunc *mfunc =
2200                 container_of(master, struct mlx4_mfunc, master);
2201         struct mlx4_priv *priv =
2202                 container_of(mfunc, struct mlx4_priv, mfunc);
2203         struct mlx4_dev *dev = &priv->dev;
2204         __be32 *bit_vec;
2205         u32 comm_cmd;
2206         u32 vec;
2207         int i, j, slave;
2208         int toggle;
2209         int served = 0;
2210         int reported = 0;
2211         u32 slt;
2212
2213         bit_vec = master->comm_arm_bit_vector;
2214         for (i = 0; i < COMM_CHANNEL_BIT_ARRAY_SIZE; i++) {
2215                 vec = be32_to_cpu(bit_vec[i]);
2216                 for (j = 0; j < 32; j++) {
2217                         if (!(vec & (1 << j)))
2218                                 continue;
2219                         ++reported;
2220                         slave = (i * 32) + j;
2221                         comm_cmd = swab32(readl(
2222                                           &mfunc->comm[slave].slave_write));
2223                         slt = swab32(readl(&mfunc->comm[slave].slave_read))
2224                                      >> 31;
2225                         toggle = comm_cmd >> 31;
2226                         if (toggle != slt) {
2227                                 if (master->slave_state[slave].comm_toggle
2228                                     != slt) {
2229                                         pr_info("slave %d out of sync. read toggle %d, state toggle %d. Resynching.\n",
2230                                                 slave, slt,
2231                                                 master->slave_state[slave].comm_toggle);
2232                                         master->slave_state[slave].comm_toggle =
2233                                                 slt;
2234                                 }
2235                                 mlx4_master_do_cmd(dev, slave,
2236                                                    comm_cmd >> 16 & 0xff,
2237                                                    comm_cmd & 0xffff, toggle);
2238                                 ++served;
2239                         }
2240                 }
2241         }
2242
2243         if (reported && reported != served)
2244                 mlx4_warn(dev, "Got command event with bitmask from %d slaves but %d were served\n",
2245                           reported, served);
2246
2247         if (mlx4_ARM_COMM_CHANNEL(dev))
2248                 mlx4_warn(dev, "Failed to arm comm channel events\n");
2249 }
2250
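/* Slave side: synchronize the comm-channel toggle bit with the master
 * before issuing any command, recovering if a previous user left the
 * channel in an unsynced state.
 */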
2251 static int sync_toggles(struct mlx4_dev *dev)
2252 {
2253         struct mlx4_priv *priv = mlx4_priv(dev);
2254         u32 wr_toggle;
2255         u32 rd_toggle;
2256         unsigned long end;
2257
2258         wr_toggle = swab32(readl(&priv->mfunc.comm->slave_write));
2259         if (wr_toggle == 0xffffffff)
2260                 end = jiffies + msecs_to_jiffies(30000);
2261         else
2262                 end = jiffies + msecs_to_jiffies(5000);
2263
2264         while (time_before(jiffies, end)) {
2265                 rd_toggle = swab32(readl(&priv->mfunc.comm->slave_read));
2266                 if (wr_toggle == 0xffffffff || rd_toggle == 0xffffffff) {
2267                         /* PCI might be offline */
2268                         msleep(100);
2269                         wr_toggle = swab32(readl(&priv->mfunc.comm->
2270                                            slave_write));
2271                         continue;
2272                 }
2273
2274                 if (rd_toggle >> 31 == wr_toggle >> 31) {
2275                         priv->cmd.comm_toggle = rd_toggle >> 31;
2276                         return 0;
2277                 }
2278
2279                 cond_resched();
2280         }
2281
2282         /*
2283          * We could reach here if, for example, the previous VM using this
2284          * function misbehaved and left the channel in an unsynced state.  We
2285          * should fix this here and give this VM a chance to use a properly
2286          * synced channel.
2287          */
2288         mlx4_warn(dev, "recovering from previously mis-behaved VM\n");
2289         __raw_writel((__force u32) 0, &priv->mfunc.comm->slave_read);
2290         __raw_writel((__force u32) 0, &priv->mfunc.comm->slave_write);
2291         priv->cmd.comm_toggle = 0;
2292
2293         return 0;
2294 }
2295
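/* Set up the multi-function (SR-IOV) command infrastructure: map the comm
 * channel and, on the master, allocate the per-slave state, the comm work
 * queue and the resource tracker; on a slave, just sync the channel toggles.
 */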
2296 int mlx4_multi_func_init(struct mlx4_dev *dev)
2297 {
2298         struct mlx4_priv *priv = mlx4_priv(dev);
2299         struct mlx4_slave_state *s_state;
2300         int i, j, err, port;
2301
2302         if (mlx4_is_master(dev))
2303                 priv->mfunc.comm =
2304                 ioremap(pci_resource_start(dev->persist->pdev,
2305                                            priv->fw.comm_bar) +
2306                         priv->fw.comm_base, MLX4_COMM_PAGESIZE);
2307         else
2308                 priv->mfunc.comm =
2309                 ioremap(pci_resource_start(dev->persist->pdev, 2) +
2310                         MLX4_SLAVE_COMM_BASE, MLX4_COMM_PAGESIZE);
2311         if (!priv->mfunc.comm) {
2312                 mlx4_err(dev, "Couldn't map communication vector\n");
2313                 goto err_vhcr;
2314         }
2315
2316         if (mlx4_is_master(dev)) {
2317                 struct mlx4_vf_oper_state *vf_oper;
2318                 struct mlx4_vf_admin_state *vf_admin;
2319
2320                 priv->mfunc.master.slave_state =
2321                         kzalloc(dev->num_slaves *
2322                                 sizeof(struct mlx4_slave_state), GFP_KERNEL);
2323                 if (!priv->mfunc.master.slave_state)
2324                         goto err_comm;
2325
2326                 priv->mfunc.master.vf_admin =
2327                         kzalloc(dev->num_slaves *
2328                                 sizeof(struct mlx4_vf_admin_state), GFP_KERNEL);
2329                 if (!priv->mfunc.master.vf_admin)
2330                         goto err_comm_admin;
2331
2332                 priv->mfunc.master.vf_oper =
2333                         kzalloc(dev->num_slaves *
2334                                 sizeof(struct mlx4_vf_oper_state), GFP_KERNEL);
2335                 if (!priv->mfunc.master.vf_oper)
2336                         goto err_comm_oper;
2337
2338                 for (i = 0; i < dev->num_slaves; ++i) {
2339                         vf_admin = &priv->mfunc.master.vf_admin[i];
2340                         vf_oper = &priv->mfunc.master.vf_oper[i];
2341                         s_state = &priv->mfunc.master.slave_state[i];
2342                         s_state->last_cmd = MLX4_COMM_CMD_RESET;
2343                         mutex_init(&priv->mfunc.master.gen_eqe_mutex[i]);
2344                         for (j = 0; j < MLX4_EVENT_TYPES_NUM; ++j)
2345                                 s_state->event_eq[j].eqn = -1;
2346                         __raw_writel((__force u32) 0,
2347                                      &priv->mfunc.comm[i].slave_write);
2348                         __raw_writel((__force u32) 0,
2349                                      &priv->mfunc.comm[i].slave_read);
2350                         mmiowb();
2351                         for (port = 1; port <= MLX4_MAX_PORTS; port++) {
2352                                 struct mlx4_vport_state *admin_vport;
2353                                 struct mlx4_vport_state *oper_vport;
2354
2355                                 s_state->vlan_filter[port] =
2356                                         kzalloc(sizeof(struct mlx4_vlan_fltr),
2357                                                 GFP_KERNEL);
2358                                 if (!s_state->vlan_filter[port]) {
2359                                         while (--port)
2360                                                 kfree(s_state->vlan_filter[port]);
2361                                         goto err_slaves;
2362                                 }
2363
2364                                 admin_vport = &vf_admin->vport[port];
2365                                 oper_vport = &vf_oper->vport[port].state;
2366                                 INIT_LIST_HEAD(&s_state->mcast_filters[port]);
2367                                 admin_vport->default_vlan = MLX4_VGT;
2368                                 oper_vport->default_vlan = MLX4_VGT;
2369                                 admin_vport->qos_vport =
2370                                                 MLX4_VPP_DEFAULT_VPORT;
2371                                 oper_vport->qos_vport = MLX4_VPP_DEFAULT_VPORT;
2372                                 vf_oper->vport[port].vlan_idx = NO_INDX;
2373                                 vf_oper->vport[port].mac_idx = NO_INDX;
2374                                 mlx4_set_random_admin_guid(dev, i, port);
2375                         }
2376                         spin_lock_init(&s_state->lock);
2377                 }
2378
2379                 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QOS_VPP) {
2380                         for (port = 1; port <= dev->caps.num_ports; port++) {
2381                                 if (mlx4_is_eth(dev, port)) {
2382                                         mlx4_set_default_port_qos(dev, port);
2383                                         mlx4_allocate_port_vpps(dev, port);
2384                                 }
2385                         }
2386                 }
2387
2388                 memset(&priv->mfunc.master.cmd_eqe, 0, dev->caps.eqe_size);
2389                 priv->mfunc.master.cmd_eqe.type = MLX4_EVENT_TYPE_CMD;
2390                 INIT_WORK(&priv->mfunc.master.comm_work,
2391                           mlx4_master_comm_channel);
2392                 INIT_WORK(&priv->mfunc.master.slave_event_work,
2393                           mlx4_gen_slave_eqe);
2394                 INIT_WORK(&priv->mfunc.master.slave_flr_event_work,
2395                           mlx4_master_handle_slave_flr);
2396                 spin_lock_init(&priv->mfunc.master.slave_state_lock);
2397                 spin_lock_init(&priv->mfunc.master.slave_eq.event_lock);
2398                 priv->mfunc.master.comm_wq =
2399                         create_singlethread_workqueue("mlx4_comm");
2400                 if (!priv->mfunc.master.comm_wq)
2401                         goto err_slaves;
2402
2403                 if (mlx4_init_resource_tracker(dev))
2404                         goto err_thread;
2405
2406         } else {
2407                 err = sync_toggles(dev);
2408                 if (err) {
2409                         mlx4_err(dev, "Couldn't sync toggles\n");
2410                         goto err_comm;
2411                 }
2412         }
2413         return 0;
2414
2415 err_thread:
2416         flush_workqueue(priv->mfunc.master.comm_wq);
2417         destroy_workqueue(priv->mfunc.master.comm_wq);
2418 err_slaves:
2419         while (i--) {
2420                 for (port = 1; port <= MLX4_MAX_PORTS; port++)
2421                         kfree(priv->mfunc.master.slave_state[i].vlan_filter[port]);
2422         }
2423         kfree(priv->mfunc.master.vf_oper);
2424 err_comm_oper:
2425         kfree(priv->mfunc.master.vf_admin);
2426 err_comm_admin:
2427         kfree(priv->mfunc.master.slave_state);
2428 err_comm:
2429         iounmap(priv->mfunc.comm);
2430 err_vhcr:
2431         dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE,
2432                           priv->mfunc.vhcr,
2433                           priv->mfunc.vhcr_dma);
2434         priv->mfunc.vhcr = NULL;
2435         return -ENOMEM;
2436 }
2437
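/* Initialize the command interface: command state and semaphores, the HCR
 * mapping (non-slave only), the VHCR page (multi-function only) and the
 * mailbox DMA pool.  Whatever was set up here is undone by
 * mlx4_cmd_cleanup() on failure.
 */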
2438 int mlx4_cmd_init(struct mlx4_dev *dev)
2439 {
2440         struct mlx4_priv *priv = mlx4_priv(dev);
2441         int flags = 0;
2442
2443         if (!priv->cmd.initialized) {
2444                 mutex_init(&priv->cmd.slave_cmd_mutex);
2445                 sema_init(&priv->cmd.poll_sem, 1);
2446                 priv->cmd.use_events = 0;
2447                 priv->cmd.toggle     = 1;
2448                 priv->cmd.initialized = 1;
2449                 flags |= MLX4_CMD_CLEANUP_STRUCT;
2450         }
2451
2452         if (!mlx4_is_slave(dev) && !priv->cmd.hcr) {
2453                 priv->cmd.hcr = ioremap(pci_resource_start(dev->persist->pdev,
2454                                         0) + MLX4_HCR_BASE, MLX4_HCR_SIZE);
2455                 if (!priv->cmd.hcr) {
2456                         mlx4_err(dev, "Couldn't map command register\n");
2457                         goto err;
2458                 }
2459                 flags |= MLX4_CMD_CLEANUP_HCR;
2460         }
2461
2462         if (mlx4_is_mfunc(dev) && !priv->mfunc.vhcr) {
2463                 priv->mfunc.vhcr = dma_alloc_coherent(&dev->persist->pdev->dev,
2464                                                       PAGE_SIZE,
2465                                                       &priv->mfunc.vhcr_dma,
2466                                                       GFP_KERNEL);
2467                 if (!priv->mfunc.vhcr)
2468                         goto err;
2469
2470                 flags |= MLX4_CMD_CLEANUP_VHCR;
2471         }
2472
2473         if (!priv->cmd.pool) {
2474                 priv->cmd.pool = pci_pool_create("mlx4_cmd",
2475                                                  dev->persist->pdev,
2476                                                  MLX4_MAILBOX_SIZE,
2477                                                  MLX4_MAILBOX_SIZE, 0);
2478                 if (!priv->cmd.pool)
2479                         goto err;
2480
2481                 flags |= MLX4_CMD_CLEANUP_POOL;
2482         }
2483
2484         return 0;
2485
2486 err:
2487         mlx4_cmd_cleanup(dev, flags);
2488         return -ENOMEM;
2489 }
2490
2491 void mlx4_report_internal_err_comm_event(struct mlx4_dev *dev)
2492 {
2493         struct mlx4_priv *priv = mlx4_priv(dev);
2494         int slave;
2495         u32 slave_read;
2496
2497         /* Report an internal error event to all
2498          * communication channels.
2499          */
2500         for (slave = 0; slave < dev->num_slaves; slave++) {
2501                 slave_read = swab32(readl(&priv->mfunc.comm[slave].slave_read));
2502                 slave_read |= (u32)COMM_CHAN_EVENT_INTERNAL_ERR;
2503                 __raw_writel((__force u32)cpu_to_be32(slave_read),
2504                              &priv->mfunc.comm[slave].slave_read);
2505                 /* Make sure that our comm channel write doesn't
2506                  * get mixed in with writes from another CPU.
2507                  */
2508                 mmiowb();
2509         }
2510 }
2511
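/* Undo mlx4_multi_func_init(): on the master, flush and destroy the comm
 * work queue and free the per-slave state, then unmap the comm channel.
 */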
2512 void mlx4_multi_func_cleanup(struct mlx4_dev *dev)
2513 {
2514         struct mlx4_priv *priv = mlx4_priv(dev);
2515         int i, port;
2516
2517         if (mlx4_is_master(dev)) {
2518                 flush_workqueue(priv->mfunc.master.comm_wq);
2519                 destroy_workqueue(priv->mfunc.master.comm_wq);
2520                 for (i = 0; i < dev->num_slaves; i++) {
2521                         for (port = 1; port <= MLX4_MAX_PORTS; port++)
2522                                 kfree(priv->mfunc.master.slave_state[i].vlan_filter[port]);
2523                 }
2524                 kfree(priv->mfunc.master.slave_state);
2525                 kfree(priv->mfunc.master.vf_admin);
2526                 kfree(priv->mfunc.master.vf_oper);
2527                 dev->num_slaves = 0;
2528         }
2529
2530         iounmap(priv->mfunc.comm);
2531 }
2532
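/* Release the command-interface resources selected by cleanup_mask: the
 * mailbox pool, the HCR mapping, the VHCR page and/or the command state.
 */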
2533 void mlx4_cmd_cleanup(struct mlx4_dev *dev, int cleanup_mask)
2534 {
2535         struct mlx4_priv *priv = mlx4_priv(dev);
2536
2537         if (priv->cmd.pool && (cleanup_mask & MLX4_CMD_CLEANUP_POOL)) {
2538                 pci_pool_destroy(priv->cmd.pool);
2539                 priv->cmd.pool = NULL;
2540         }
2541
2542         if (!mlx4_is_slave(dev) && priv->cmd.hcr &&
2543             (cleanup_mask & MLX4_CMD_CLEANUP_HCR)) {
2544                 iounmap(priv->cmd.hcr);
2545                 priv->cmd.hcr = NULL;
2546         }
2547         if (mlx4_is_mfunc(dev) && priv->mfunc.vhcr &&
2548             (cleanup_mask & MLX4_CMD_CLEANUP_VHCR)) {
2549                 dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE,
2550                                   priv->mfunc.vhcr, priv->mfunc.vhcr_dma);
2551                 priv->mfunc.vhcr = NULL;
2552         }
2553         if (priv->cmd.initialized && (cleanup_mask & MLX4_CMD_CLEANUP_STRUCT))
2554                 priv->cmd.initialized = 0;
2555 }
2556
2557 /*
2558  * Switch to using events to issue FW commands (can only be called
2559  * after event queue for command events has been initialized).
2560  */
2561 int mlx4_cmd_use_events(struct mlx4_dev *dev)
2562 {
2563         struct mlx4_priv *priv = mlx4_priv(dev);
2564         int i;
2565         int err = 0;
2566
2567         priv->cmd.context = kmalloc(priv->cmd.max_cmds *
2568                                    sizeof (struct mlx4_cmd_context),
2569                                    GFP_KERNEL);
2570         if (!priv->cmd.context)
2571                 return -ENOMEM;
2572
2573         for (i = 0; i < priv->cmd.max_cmds; ++i) {
2574                 priv->cmd.context[i].token = i;
2575                 priv->cmd.context[i].next  = i + 1;
2576                 /* To support fatal error flow, initialize all
2577                  * cmd contexts to allow simulating completions
2578                  * with complete() at any time.
2579                  */
2580                 init_completion(&priv->cmd.context[i].done);
2581         }
2582
2583         priv->cmd.context[priv->cmd.max_cmds - 1].next = -1;
2584         priv->cmd.free_head = 0;
2585
2586         sema_init(&priv->cmd.event_sem, priv->cmd.max_cmds);
2587         spin_lock_init(&priv->cmd.context_lock);
2588
2589         for (priv->cmd.token_mask = 1;
2590              priv->cmd.token_mask < priv->cmd.max_cmds;
2591              priv->cmd.token_mask <<= 1)
2592                 ; /* nothing */
2593         --priv->cmd.token_mask;
2594
2595         down(&priv->cmd.poll_sem);
2596         priv->cmd.use_events = 1;
2597
2598         return err;
2599 }
2600
2601 /*
2602  * Switch back to polling (used when shutting down the device)
2603  */
2604 void mlx4_cmd_use_polling(struct mlx4_dev *dev)
2605 {
2606         struct mlx4_priv *priv = mlx4_priv(dev);
2607         int i;
2608
2609         priv->cmd.use_events = 0;
2610
2611         for (i = 0; i < priv->cmd.max_cmds; ++i)
2612                 down(&priv->cmd.event_sem);
2613
2614         kfree(priv->cmd.context);
2615
2616         up(&priv->cmd.poll_sem);
2617 }
2618
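/* Allocate a zeroed command mailbox buffer from the driver's DMA pool. */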
2619 struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev)
2620 {
2621         struct mlx4_cmd_mailbox *mailbox;
2622
2623         mailbox = kmalloc(sizeof *mailbox, GFP_KERNEL);
2624         if (!mailbox)
2625                 return ERR_PTR(-ENOMEM);
2626
2627         mailbox->buf = pci_pool_alloc(mlx4_priv(dev)->cmd.pool, GFP_KERNEL,
2628                                       &mailbox->dma);
2629         if (!mailbox->buf) {
2630                 kfree(mailbox);
2631                 return ERR_PTR(-ENOMEM);
2632         }
2633
2634         memset(mailbox->buf, 0, MLX4_MAILBOX_SIZE);
2635
2636         return mailbox;
2637 }
2638 EXPORT_SYMBOL_GPL(mlx4_alloc_cmd_mailbox);
2639
2640 void mlx4_free_cmd_mailbox(struct mlx4_dev *dev,
2641                            struct mlx4_cmd_mailbox *mailbox)
2642 {
2643         if (!mailbox)
2644                 return;
2645
2646         pci_pool_free(mlx4_priv(dev)->cmd.pool, mailbox->buf, mailbox->dma);
2647         kfree(mailbox);
2648 }
2649 EXPORT_SYMBOL_GPL(mlx4_free_cmd_mailbox);
2650
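     /* Comm-channel version word: interface revision in the high byte,
      * channel version in the low byte.
      */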
2651 u32 mlx4_comm_get_version(void)
2652 {
2653         return ((u32)CMD_CHAN_IF_REV << 8) | (u32)CMD_CHAN_VER;
2654 }
2655
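     /* Map a zero-based VF number to its slave index (VF n is slave n + 1;
      * slave 0 is the PF). Returns -EINVAL for an out-of-range VF.
      */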
2656 static int mlx4_get_slave_indx(struct mlx4_dev *dev, int vf)
2657 {
2658         if ((vf < 0) || (vf >= dev->persist->num_vfs)) {
2659                 mlx4_err(dev, "Bad vf number:%d (number of activated vf: %d)\n",
2660                          vf, dev->persist->num_vfs);
2661                 return -EINVAL;
2662         }
2663
2664         return vf+1;
2665 }
2666
2667 int mlx4_get_vf_indx(struct mlx4_dev *dev, int slave)
2668 {
2669         if (slave < 1 || slave > dev->persist->num_vfs) {
2670                 mlx4_err(dev,
2671                          "Bad slave number:%d (number of activated slaves: %lu)\n",
2672                          slave, dev->num_slaves);
2673                 return -EINVAL;
2674         }
2675         return slave - 1;
2676 }
2677
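     /* Fatal-error flow: complete every event-mode command context with an
      * internal-error status so that no caller stays blocked on the FW.
      */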
2678 void mlx4_cmd_wake_completions(struct mlx4_dev *dev)
2679 {
2680         struct mlx4_priv *priv = mlx4_priv(dev);
2681         struct mlx4_cmd_context *context;
2682         int i;
2683
2684         spin_lock(&priv->cmd.context_lock);
2685         if (priv->cmd.context) {
2686                 for (i = 0; i < priv->cmd.max_cmds; ++i) {
2687                         context = &priv->cmd.context[i];
2688                         context->fw_status = CMD_STAT_INTERNAL_ERR;
2689                         context->result    =
2690                                 mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
2691                         complete(&context->done);
2692                 }
2693         }
2694         spin_unlock(&priv->cmd.context_lock);
2695 }
2696
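     /* Return the bitmap of physical ports assigned to @slave. Slave 0 (the
      * PF) owns all ports; a VF gets the contiguous range recorded in
      * dev_vfs[].
      */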
2697 struct mlx4_active_ports mlx4_get_active_ports(struct mlx4_dev *dev, int slave)
2698 {
2699         struct mlx4_active_ports actv_ports;
2700         int vf;
2701
2702         bitmap_zero(actv_ports.ports, MLX4_MAX_PORTS);
2703
2704         if (slave == 0) {
2705                 bitmap_fill(actv_ports.ports, dev->caps.num_ports);
2706                 return actv_ports;
2707         }
2708
2709         vf = mlx4_get_vf_indx(dev, slave);
2710         if (vf < 0)
2711                 return actv_ports;
2712
2713         bitmap_set(actv_ports.ports, dev->dev_vfs[vf].min_port - 1,
2714                    min((int)dev->dev_vfs[vf].n_ports,
2715                    dev->caps.num_ports));
2716
2717         return actv_ports;
2718 }
2719 EXPORT_SYMBOL_GPL(mlx4_get_active_ports);
2720
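     /* Convert a slave-relative port number to the physical port number,
      * or return -EINVAL if the port is outside the slave's range.
      */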
2721 int mlx4_slave_convert_port(struct mlx4_dev *dev, int slave, int port)
2722 {
2723         unsigned n;
2724         struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);
2725         unsigned m = bitmap_weight(actv_ports.ports, dev->caps.num_ports);
2726
2727         if (port <= 0 || port > m)
2728                 return -EINVAL;
2729
2730         n = find_first_bit(actv_ports.ports, dev->caps.num_ports);
2731         if (port <= n)
2732                 port = n + 1;
2733
2734         return port;
2735 }
2736 EXPORT_SYMBOL_GPL(mlx4_slave_convert_port);
2737
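     /* Inverse of mlx4_slave_convert_port(): map a physical port back to the
      * slave-relative number, or return -1 if the slave does not own it.
      */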
2738 int mlx4_phys_to_slave_port(struct mlx4_dev *dev, int slave, int port)
2739 {
2740         struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);
2741         if (test_bit(port - 1, actv_ports.ports))
2742                 return port -
2743                         find_first_bit(actv_ports.ports, dev->caps.num_ports);
2744
2745         return -1;
2746 }
2747 EXPORT_SYMBOL_GPL(mlx4_phys_to_slave_port);
2748
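     /* Return the bitmap of slaves (PF and VFs) that have the given physical
      * port among their active ports.
      */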
2749 struct mlx4_slaves_pport mlx4_phys_to_slaves_pport(struct mlx4_dev *dev,
2750                                                    int port)
2751 {
2752         unsigned i;
2753         struct mlx4_slaves_pport slaves_pport;
2754
2755         bitmap_zero(slaves_pport.slaves, MLX4_MFUNC_MAX);
2756
2757         if (port <= 0 || port > dev->caps.num_ports)
2758                 return slaves_pport;
2759
2760         for (i = 0; i < dev->persist->num_vfs + 1; i++) {
2761                 struct mlx4_active_ports actv_ports =
2762                         mlx4_get_active_ports(dev, i);
2763                 if (test_bit(port - 1, actv_ports.ports))
2764                         set_bit(i, slaves_pport.slaves);
2765         }
2766
2767         return slaves_pport;
2768 }
2769 EXPORT_SYMBOL_GPL(mlx4_phys_to_slaves_pport);
2770
2771 struct mlx4_slaves_pport mlx4_phys_to_slaves_pport_actv(
2772                 struct mlx4_dev *dev,
2773                 const struct mlx4_active_ports *crit_ports)
2774 {
2775         unsigned i;
2776         struct mlx4_slaves_pport slaves_pport;
2777
2778         bitmap_zero(slaves_pport.slaves, MLX4_MFUNC_MAX);
2779
2780         for (i = 0; i < dev->persist->num_vfs + 1; i++) {
2781                 struct mlx4_active_ports actv_ports =
2782                         mlx4_get_active_ports(dev, i);
2783                 if (bitmap_equal(crit_ports->ports, actv_ports.ports,
2784                                  dev->caps.num_ports))
2785                         set_bit(i, slaves_pport.slaves);
2786         }
2787
2788         return slaves_pport;
2789 }
2790 EXPORT_SYMBOL_GPL(mlx4_phys_to_slaves_pport_actv);
2791
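     /* Clamp @port into the range of ports assigned to @slave, so callers
      * can pass a global port number and still address this slave's vport.
      */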
2792 static int mlx4_slaves_closest_port(struct mlx4_dev *dev, int slave, int port)
2793 {
2794         struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);
2795         int min_port = find_first_bit(actv_ports.ports, dev->caps.num_ports)
2796                         + 1;
2797         int max_port = min_port +
2798                 bitmap_weight(actv_ports.ports, dev->caps.num_ports);
2799
2800         if (port < min_port)
2801                 port = min_port;
2802         else if (port >= max_port)
2803                 port = max_port - 1;
2804
2805         return port;
2806 }
2807
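     /* Program per-priority VPP rate limiting for a VF's vport: start from
      * the defaults queried on vport 0, then enable max_tx_rate on every
      * priority the port's QoS manager allows for this VF.
      */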
2808 static int mlx4_set_vport_qos(struct mlx4_priv *priv, int slave, int port,
2809                               int max_tx_rate)
2810 {
2811         int i;
2812         int err;
2813         struct mlx4_qos_manager *port_qos;
2814         struct mlx4_dev *dev = &priv->dev;
2815         struct mlx4_vport_qos_param vpp_qos[MLX4_NUM_UP];
2816
2817         port_qos = &priv->mfunc.master.qos_ctl[port];
2818         memset(vpp_qos, 0, sizeof(struct mlx4_vport_qos_param) * MLX4_NUM_UP);
2819
2820         if (slave > port_qos->num_of_qos_vfs) {
2821                 mlx4_info(dev, "No available VPP resources for this VF\n");
2822                 return -EINVAL;
2823         }
2824
2825         /* Default QoS values must first be queried from Vport 0 */
2826         err = mlx4_SET_VPORT_QOS_get(dev, port, 0, vpp_qos);
2827         if (err) {
2828                 mlx4_info(dev, "Failed to query Vport 0 QoS values\n");
2829                 return err;
2830         }
2831
2832         for (i = 0; i < MLX4_NUM_UP; i++) {
2833                 if (test_bit(i, port_qos->priority_bm) && max_tx_rate) {
2834                         vpp_qos[i].max_avg_bw = max_tx_rate;
2835                         vpp_qos[i].enable = 1;
2836                 } else {
2837                         /* If the user supplied tx_rate == 0, no rate limit
2838                          * configuration is required, so we leave max_avg_bw
2839                          * at the value queried from Vport 0.
2840                          */
2841                         vpp_qos[i].enable = 0;
2842                 }
2843         }
2844
2845         err = mlx4_SET_VPORT_QOS_set(dev, port, slave, vpp_qos);
2846         if (err) {
2847                 mlx4_info(dev, "Failed to set Vport %d QoS values\n", slave);
2848                 return err;
2849         }
2850
2851         return 0;
2852 }
2853
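     /* True when the VF is in VST mode and its default QoS priority is one
      * on which VPP rate limiting can be enforced for this port.
      */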
2854 static bool mlx4_is_vf_vst_and_prio_qos(struct mlx4_dev *dev, int port,
2855                                         struct mlx4_vport_state *vf_admin)
2856 {
2857         struct mlx4_qos_manager *info;
2858         struct mlx4_priv *priv = mlx4_priv(dev);
2859
2860         if (!mlx4_is_master(dev) ||
2861             !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QOS_VPP))
2862                 return false;
2863
2864         info = &priv->mfunc.master.qos_ctl[port];
2865
2866         if (vf_admin->default_vlan != MLX4_VGT &&
2867             test_bit(vf_admin->default_qos, info->priority_bm))
2868                 return true;
2869
2870         return false;
2871 }
2872
2873 static bool mlx4_valid_vf_state_change(struct mlx4_dev *dev, int port,
2874                                        struct mlx4_vport_state *vf_admin,
2875                                        int vlan, int qos)
2876 {
2877         struct mlx4_vport_state dummy_admin = {0};
2878
2879         if (!mlx4_is_vf_vst_and_prio_qos(dev, port, vf_admin) ||
2880             !vf_admin->tx_rate)
2881                 return true;
2882
2883         dummy_admin.default_qos = qos;
2884         dummy_admin.default_vlan = vlan;
2885
2886         /* The VF wants to move to another VST state that is valid with the
2887          * current rate limit: either a different default vlan in VST or
2888          * another supported QoS priority. Otherwise we don't allow the
2889          * change while the TX rate is still configured.
2890          */
2891         if (mlx4_is_vf_vst_and_prio_qos(dev, port, &dummy_admin))
2892                 return true;
2893
2894         mlx4_info(dev, "Cannot change VF state to %s while rate is set\n",
2895                   (vlan == MLX4_VGT) ? "VGT" : "VST");
2896
2897         if (vlan != MLX4_VGT)
2898                 mlx4_info(dev, "VST priority %d not supported for QoS\n", qos);
2899
2900         mlx4_info(dev, "Please set rate to 0 prior to this VF state change\n");
2901
2902         return false;
2903 }
2904
2905 int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac)
2906 {
2907         struct mlx4_priv *priv = mlx4_priv(dev);
2908         struct mlx4_vport_state *s_info;
2909         int slave;
2910
2911         if (!mlx4_is_master(dev))
2912                 return -EPROTONOSUPPORT;
2913
2914         slave = mlx4_get_slave_indx(dev, vf);
2915         if (slave < 0)
2916                 return -EINVAL;
2917
2918         port = mlx4_slaves_closest_port(dev, slave, port);
2919         s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
2920         s_info->mac = mac;
2921         mlx4_info(dev, "default mac on vf %d port %d set to %llX will take effect only after vf restart\n",
2922                   vf, port, s_info->mac);
2923         return 0;
2924 }
2925 EXPORT_SYMBOL_GPL(mlx4_set_vf_mac);
2926
2928 int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos)
2929 {
2930         struct mlx4_priv *priv = mlx4_priv(dev);
2931         struct mlx4_vport_state *vf_admin;
2932         int slave;
2933
2934         if ((!mlx4_is_master(dev)) ||
2935             !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_VLAN_CONTROL))
2936                 return -EPROTONOSUPPORT;
2937
2938         if ((vlan > 4095) || (qos > 7))
2939                 return -EINVAL;
2940
2941         slave = mlx4_get_slave_indx(dev, vf);
2942         if (slave < 0)
2943                 return -EINVAL;
2944
2945         port = mlx4_slaves_closest_port(dev, slave, port);
2946         vf_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
2947
2948         if (!mlx4_valid_vf_state_change(dev, port, vf_admin, vlan, qos))
2949                 return -EPERM;
2950
2951         if ((0 == vlan) && (0 == qos))
2952                 vf_admin->default_vlan = MLX4_VGT;
2953         else
2954                 vf_admin->default_vlan = vlan;
2955         vf_admin->default_qos = qos;
2956
2957         /* If a rate was configured prior to VST, we saved it in
2958          * vf_admin->tx_rate and now, if the priority is supported, we
2959          * enforce the QoS. */
2960         if (mlx4_is_vf_vst_and_prio_qos(dev, port, vf_admin) &&
2961             vf_admin->tx_rate)
2962                 vf_admin->qos_vport = slave;
2963
2964         if (mlx4_master_immediate_activate_vlan_qos(priv, slave, port))
2965                 mlx4_info(dev,
2966                           "updating vf %d port %d config will take effect on next VF restart\n",
2967                           vf, port);
2968         return 0;
2969 }
2970 EXPORT_SYMBOL_GPL(mlx4_set_vf_vlan);
2971
2972 int mlx4_set_vf_rate(struct mlx4_dev *dev, int port, int vf, int min_tx_rate,
2973                      int max_tx_rate)
2974 {
2975         int err;
2976         int slave;
2977         struct mlx4_vport_state *vf_admin;
2978         struct mlx4_priv *priv = mlx4_priv(dev);
2979
2980         if (!mlx4_is_master(dev) ||
2981             !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QOS_VPP))
2982                 return -EPROTONOSUPPORT;
2983
2984         if (min_tx_rate) {
2985                 mlx4_info(dev, "Minimum BW share not supported\n");
2986                 return -EPROTONOSUPPORT;
2987         }
2988
2989         slave = mlx4_get_slave_indx(dev, vf);
2990         if (slave < 0)
2991                 return -EINVAL;
2992
2993         port = mlx4_slaves_closest_port(dev, slave, port);
2994         vf_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
2995
2996         err = mlx4_set_vport_qos(priv, slave, port, max_tx_rate);
2997         if (err) {
2998                 mlx4_info(dev, "vf %d failed to set rate %d\n", vf,
2999                           max_tx_rate);
3000                 return err;
3001         }
3002
3003         vf_admin->tx_rate = max_tx_rate;
3004         /* If the VF is not in a supported mode (VST with a supported
3005          * prio), we do not change the vport configuration for its QPs;
3006          * we only save the rate, so it will be enforced when the VF next
3007          * moves to a supported mode.
3008          */
3009         if (!mlx4_is_vf_vst_and_prio_qos(dev, port, vf_admin)) {
3010                 mlx4_info(dev,
3011                           "rate set for VF %d when not in valid state\n", vf);
3012
3013                 if (vf_admin->default_vlan != MLX4_VGT)
3014                         mlx4_info(dev, "VST priority not supported by QoS\n");
3015                 else
3016                         mlx4_info(dev, "VF in VGT mode (needs VST)\n");
3017
3018                 mlx4_info(dev,
3019                           "rate %d takes effect when VF moves to valid state\n",
3020                           max_tx_rate);
3021                 return 0;
3022         }
3023
3024         /* If the user sets rate 0, assign the default vport to its QPs */
3025         vf_admin->qos_vport = max_tx_rate ? slave : MLX4_VPP_DEFAULT_VPORT;
3026
3027         if (priv->mfunc.master.slave_state[slave].active &&
3028             dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP)
3029                 mlx4_master_immediate_activate_vlan_qos(priv, slave, port);
3030
3031         return 0;
3032 }
3033 EXPORT_SYMBOL_GPL(mlx4_set_vf_rate);
3034
3035 /* mlx4_get_slave_default_vlan -
3036  * Return true if the slave is in VST mode (has a default vlan).
3037  * If so, also return the vlan & qos through the pointers (if not NULL).
3038  */
3039 bool mlx4_get_slave_default_vlan(struct mlx4_dev *dev, int port, int slave,
3040                                  u16 *vlan, u8 *qos)
3041 {
3042         struct mlx4_vport_oper_state *vp_oper;
3043         struct mlx4_priv *priv;
3044
3045         priv = mlx4_priv(dev);
3046         port = mlx4_slaves_closest_port(dev, slave, port);
3047         vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
3048
3049         if (MLX4_VGT != vp_oper->state.default_vlan) {
3050                 if (vlan)
3051                         *vlan = vp_oper->state.default_vlan;
3052                 if (qos)
3053                         *qos = vp_oper->state.default_qos;
3054                 return true;
3055         }
3056         return false;
3057 }
3058 EXPORT_SYMBOL_GPL(mlx4_get_slave_default_vlan);
3059
3060 int mlx4_set_vf_spoofchk(struct mlx4_dev *dev, int port, int vf, bool setting)
3061 {
3062         struct mlx4_priv *priv = mlx4_priv(dev);
3063         struct mlx4_vport_state *s_info;
3064         int slave;
3065
3066         if ((!mlx4_is_master(dev)) ||
3067             !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FSM))
3068                 return -EPROTONOSUPPORT;
3069
3070         slave = mlx4_get_slave_indx(dev, vf);
3071         if (slave < 0)
3072                 return -EINVAL;
3073
3074         port = mlx4_slaves_closest_port(dev, slave, port);
3075         s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
3076         s_info->spoofchk = setting;
3077
3078         return 0;
3079 }
3080 EXPORT_SYMBOL_GPL(mlx4_set_vf_spoofchk);
3081
3082 int mlx4_get_vf_config(struct mlx4_dev *dev, int port, int vf, struct ifla_vf_info *ivf)
3083 {
3084         struct mlx4_priv *priv = mlx4_priv(dev);
3085         struct mlx4_vport_state *s_info;
3086         int slave;
3087
3088         if (!mlx4_is_master(dev))
3089                 return -EPROTONOSUPPORT;
3090
3091         slave = mlx4_get_slave_indx(dev, vf);
3092         if (slave < 0)
3093                 return -EINVAL;
3094
3095         s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
3096         ivf->vf = vf;
3097
3098         /* extract the MAC address bytes from the u64 representation */
3099         ivf->mac[0] = ((s_info->mac >> (5*8)) & 0xff);
3100         ivf->mac[1] = ((s_info->mac >> (4*8)) & 0xff);
3101         ivf->mac[2] = ((s_info->mac >> (3*8)) & 0xff);
3102         ivf->mac[3] = ((s_info->mac >> (2*8)) & 0xff);
3103         ivf->mac[4] = ((s_info->mac >> (1*8)) & 0xff);
3104         ivf->mac[5] = ((s_info->mac)  & 0xff);
3105
3106         ivf->vlan               = s_info->default_vlan;
3107         ivf->qos                = s_info->default_qos;
3108
3109         if (mlx4_is_vf_vst_and_prio_qos(dev, port, s_info))
3110                 ivf->max_tx_rate = s_info->tx_rate;
3111         else
3112                 ivf->max_tx_rate = 0;
3113
3114         ivf->min_tx_rate        = 0;
3115         ivf->spoofchk           = s_info->spoofchk;
3116         ivf->linkstate          = s_info->link_state;
3117
3118         return 0;
3119 }
3120 EXPORT_SYMBOL_GPL(mlx4_get_vf_config);
3121
3122 int mlx4_set_vf_link_state(struct mlx4_dev *dev, int port, int vf, int link_state)
3123 {
3124         struct mlx4_priv *priv = mlx4_priv(dev);
3125         struct mlx4_vport_state *s_info;
3126         int slave;
3127         u8 link_stat_event;
3128
3129         slave = mlx4_get_slave_indx(dev, vf);
3130         if (slave < 0)
3131                 return -EINVAL;
3132
3133         port = mlx4_slaves_closest_port(dev, slave, port);
3134         switch (link_state) {
3135         case IFLA_VF_LINK_STATE_AUTO:
3136                 /* get current link state */
3137                 if (!priv->sense.do_sense_port[port])
3138                         link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_ACTIVE;
3139                 else
3140                         link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_DOWN;
3141             break;
3142
3143         case IFLA_VF_LINK_STATE_ENABLE:
3144                 link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_ACTIVE;
3145             break;
3146
3147         case IFLA_VF_LINK_STATE_DISABLE:
3148                 link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_DOWN;
3149             break;
3150
3151         default:
3152                 mlx4_warn(dev, "unknown value for link_state %02x on slave %d port %d\n",
3153                           link_state, slave, port);
3154                 return -EINVAL;
3155         }
3156         s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
3157         s_info->link_state = link_state;
3158
3159         /* send event */
3160         mlx4_gen_port_state_change_eqe(dev, slave, port, link_stat_event);
3161
3162         if (mlx4_master_immediate_activate_vlan_qos(priv, slave, port))
3163                 mlx4_dbg(dev,
3164                          "updating vf %d port %d no link state HW enforcement\n",
3165                          vf, port);
3166         return 0;
3167 }
3168 EXPORT_SYMBOL_GPL(mlx4_set_vf_link_state);
3169
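     /* Read (and optionally reset) a HW counter via QUERY_IF_STAT and
      * accumulate the frame/byte totals into @counter_stats.
      */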
3170 int mlx4_get_counter_stats(struct mlx4_dev *dev, int counter_index,
3171                            struct mlx4_counter *counter_stats, int reset)
3172 {
3173         struct mlx4_cmd_mailbox *mailbox = NULL;
3174         struct mlx4_counter *tmp_counter;
3175         int err;
3176         u32 if_stat_in_mod;
3177
3178         if (!counter_stats)
3179                 return -EINVAL;
3180
3181         if (counter_index == MLX4_SINK_COUNTER_INDEX(dev))
3182                 return 0;
3183
3184         mailbox = mlx4_alloc_cmd_mailbox(dev);
3185         if (IS_ERR(mailbox))
3186                 return PTR_ERR(mailbox);
3187
3188         memset(mailbox->buf, 0, sizeof(struct mlx4_counter));
3189         if_stat_in_mod = counter_index;
3190         if (reset)
3191                 if_stat_in_mod |= MLX4_QUERY_IF_STAT_RESET;
3192         err = mlx4_cmd_box(dev, 0, mailbox->dma,
3193                            if_stat_in_mod, 0,
3194                            MLX4_CMD_QUERY_IF_STAT,
3195                            MLX4_CMD_TIME_CLASS_C,
3196                            MLX4_CMD_NATIVE);
3197         if (err) {
3198                 mlx4_dbg(dev, "%s: failed to read statistics for counter index %d\n",
3199                          __func__, counter_index);
3200                 goto if_stat_out;
3201         }
3202         tmp_counter = (struct mlx4_counter *)mailbox->buf;
3203         counter_stats->counter_mode = tmp_counter->counter_mode;
3204         if (counter_stats->counter_mode == 0) {
3205                 counter_stats->rx_frames =
3206                         cpu_to_be64(be64_to_cpu(counter_stats->rx_frames) +
3207                                     be64_to_cpu(tmp_counter->rx_frames));
3208                 counter_stats->tx_frames =
3209                         cpu_to_be64(be64_to_cpu(counter_stats->tx_frames) +
3210                                     be64_to_cpu(tmp_counter->tx_frames));
3211                 counter_stats->rx_bytes =
3212                         cpu_to_be64(be64_to_cpu(counter_stats->rx_bytes) +
3213                                     be64_to_cpu(tmp_counter->rx_bytes));
3214                 counter_stats->tx_bytes =
3215                         cpu_to_be64(be64_to_cpu(counter_stats->tx_bytes) +
3216                                     be64_to_cpu(tmp_counter->tx_bytes));
3217         }
3218
3219 if_stat_out:
3220         mlx4_free_cmd_mailbox(dev, mailbox);
3221
3222         return err;
3223 }
3224 EXPORT_SYMBOL_GPL(mlx4_get_counter_stats);
3225
3226 int mlx4_vf_smi_enabled(struct mlx4_dev *dev, int slave, int port)
3227 {
3228         struct mlx4_priv *priv = mlx4_priv(dev);
3229
3230         if (slave < 1 || slave >= dev->num_slaves ||
3231             port < 1 || port > MLX4_MAX_PORTS)
3232                 return 0;
3233
3234         return priv->mfunc.master.vf_oper[slave].smi_enabled[port] ==
3235                 MLX4_VF_SMI_ENABLED;
3236 }
3237 EXPORT_SYMBOL_GPL(mlx4_vf_smi_enabled);
3238
3239 int mlx4_vf_get_enable_smi_admin(struct mlx4_dev *dev, int slave, int port)
3240 {
3241         struct mlx4_priv *priv = mlx4_priv(dev);
3242
3243         if (slave == mlx4_master_func_num(dev))
3244                 return 1;
3245
3246         if (slave < 1 || slave >= dev->num_slaves ||
3247             port < 1 || port > MLX4_MAX_PORTS)
3248                 return 0;
3249
3250         return priv->mfunc.master.vf_admin[slave].enable_smi[port] ==
3251                 MLX4_VF_SMI_ENABLED;
3252 }
3253 EXPORT_SYMBOL_GPL(mlx4_vf_get_enable_smi_admin);
3254
3255 int mlx4_vf_set_enable_smi_admin(struct mlx4_dev *dev, int slave, int port,
3256                                  int enabled)
3257 {
3258         struct mlx4_priv *priv = mlx4_priv(dev);
3259         struct mlx4_active_ports actv_ports = mlx4_get_active_ports(
3260                         &priv->dev, slave);
3261         int min_port = find_first_bit(actv_ports.ports,
3262                                       priv->dev.caps.num_ports) + 1;
3263         int max_port = min_port - 1 +
3264                 bitmap_weight(actv_ports.ports, priv->dev.caps.num_ports);
3265
3266         if (slave == mlx4_master_func_num(dev))
3267                 return 0;
3268
3269         if (slave < 1 || slave >= dev->num_slaves ||
3270             port < 1 || port > MLX4_MAX_PORTS ||
3271             enabled < 0 || enabled > 1)
3272                 return -EINVAL;
3273
3274         if (min_port == max_port && dev->caps.num_ports > 1) {
3275                 mlx4_info(dev, "SMI access disallowed for single ported VFs\n");
3276                 return -EPROTONOSUPPORT;
3277         }
3278
3279         priv->mfunc.master.vf_admin[slave].enable_smi[port] = enabled;
3280         return 0;
3281 }
3282 EXPORT_SYMBOL_GPL(mlx4_vf_set_enable_smi_admin);