]> git.kernelconcepts.de Git - karo-tx-linux.git/blob - drivers/net/ethernet/qlogic/qed/qed_mcp.c
98dc913fd76d3f8c94363cc80b8b934f4f875eed
[karo-tx-linux.git] / drivers / net / ethernet / qlogic / qed / qed_mcp.c
1 /* QLogic qed NIC Driver
2  * Copyright (c) 2015 QLogic Corporation
3  *
4  * This software is available under the terms of the GNU General Public License
5  * (GPL) Version 2, available from the file COPYING in the main directory of
6  * this source tree.
7  */
8
9 #include <linux/types.h>
10 #include <asm/byteorder.h>
11 #include <linux/delay.h>
12 #include <linux/errno.h>
13 #include <linux/kernel.h>
14 #include <linux/slab.h>
15 #include <linux/spinlock.h>
16 #include <linux/string.h>
17 #include <linux/etherdevice.h>
18 #include "qed.h"
19 #include "qed_dcbx.h"
20 #include "qed_hsi.h"
21 #include "qed_hw.h"
22 #include "qed_mcp.h"
23 #include "qed_reg_addr.h"
24 #include "qed_sriov.h"
25
/* Polling interval when waiting for an MFW response, in microseconds. */
#define CHIP_MCP_RESP_ITER_US 10

#define QED_DRV_MB_MAX_RETRIES  (500 * 1000)    /* Account for 5 sec */
#define QED_MCP_RESET_RETRIES   (50 * 1000)     /* Account for 500 msec */

#define DRV_INNER_WR(_p_hwfn, _p_ptt, _ptr, _offset, _val)           \
	qed_wr(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset), \
	       _val)

#define DRV_INNER_RD(_p_hwfn, _p_ptt, _ptr, _offset) \
	qed_rd(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset))

/* Write/read a field of the driver mailbox in MFW shared memory.
 *
 * Bug fix: DRV_MB_WR previously expanded the literal token "p_hwfn" instead
 * of its _p_hwfn parameter; it only worked because every caller happened to
 * name its local variable p_hwfn.  Use the macro parameter so the macro is
 * hygienic.
 */
#define DRV_MB_WR(_p_hwfn, _p_ptt, _field, _val)   \
	DRV_INNER_WR(_p_hwfn, _p_ptt, drv_mb_addr, \
		     offsetof(struct public_drv_mb, _field), _val)

#define DRV_MB_RD(_p_hwfn, _p_ptt, _field)         \
	DRV_INNER_RD(_p_hwfn, _p_ptt, drv_mb_addr, \
		     offsetof(struct public_drv_mb, _field))

/* Driver version compatibility word sent with LOAD_REQ. */
#define PDA_COMP (((FW_MAJOR_VERSION) + (FW_MINOR_VERSION << 8)) << \
		  DRV_ID_PDA_COMP_VER_SHIFT)

/* Convert Mbit/sec to bytes/sec: (x << 17) == x * 1e6 / 8 (approx.). */
#define MCP_BYTES_PER_MBIT_SHIFT 17
50
51 bool qed_mcp_is_init(struct qed_hwfn *p_hwfn)
52 {
53         if (!p_hwfn->mcp_info || !p_hwfn->mcp_info->public_base)
54                 return false;
55         return true;
56 }
57
58 void qed_mcp_cmd_port_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
59 {
60         u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
61                                         PUBLIC_PORT);
62         u32 mfw_mb_offsize = qed_rd(p_hwfn, p_ptt, addr);
63
64         p_hwfn->mcp_info->port_addr = SECTION_ADDR(mfw_mb_offsize,
65                                                    MFW_PORT(p_hwfn));
66         DP_VERBOSE(p_hwfn, QED_MSG_SP,
67                    "port_addr = 0x%x, port_id 0x%02x\n",
68                    p_hwfn->mcp_info->port_addr, MFW_PORT(p_hwfn));
69 }
70
71 void qed_mcp_read_mb(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
72 {
73         u32 length = MFW_DRV_MSG_MAX_DWORDS(p_hwfn->mcp_info->mfw_mb_length);
74         u32 tmp, i;
75
76         if (!p_hwfn->mcp_info->public_base)
77                 return;
78
79         for (i = 0; i < length; i++) {
80                 tmp = qed_rd(p_hwfn, p_ptt,
81                              p_hwfn->mcp_info->mfw_mb_addr +
82                              (i << 2) + sizeof(u32));
83
84                 /* The MB data is actually BE; Need to force it to cpu */
85                 ((u32 *)p_hwfn->mcp_info->mfw_mb_cur)[i] =
86                         be32_to_cpu((__force __be32)tmp);
87         }
88 }
89
90 int qed_mcp_free(struct qed_hwfn *p_hwfn)
91 {
92         if (p_hwfn->mcp_info) {
93                 kfree(p_hwfn->mcp_info->mfw_mb_cur);
94                 kfree(p_hwfn->mcp_info->mfw_mb_shadow);
95         }
96         kfree(p_hwfn->mcp_info);
97
98         return 0;
99 }
100
/* Read the MFW shared-memory layout and cache the driver/MFW mailbox
 * addresses and current sequence numbers in p_hwfn->mcp_info.
 *
 * Returns 0 even when the MCP shmem is not initialized (public_base == 0);
 * callers detect that condition via qed_mcp_is_init().
 */
static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_mcp_info *p_info = p_hwfn->mcp_info;
	u32 drv_mb_offsize, mfw_mb_offsize;
	u32 mcp_pf_id = MCP_PF_ID(p_hwfn);

	p_info->public_base = qed_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR);
	if (!p_info->public_base)
		return 0;

	p_info->public_base |= GRCBASE_MCP;

	/* Calculate the driver and MFW mailbox address */
	drv_mb_offsize = qed_rd(p_hwfn, p_ptt,
				SECTION_OFFSIZE_ADDR(p_info->public_base,
						     PUBLIC_DRV_MB));
	p_info->drv_mb_addr = SECTION_ADDR(drv_mb_offsize, mcp_pf_id);
	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "drv_mb_offsiz = 0x%x, drv_mb_addr = 0x%x mcp_pf_id = 0x%x\n",
		   drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id);

	/* Set the MFW MB address */
	mfw_mb_offsize = qed_rd(p_hwfn, p_ptt,
				SECTION_OFFSIZE_ADDR(p_info->public_base,
						     PUBLIC_MFW_MB));
	p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id);
	/* First dword of the MFW mailbox section holds its length. */
	p_info->mfw_mb_length = (u16)qed_rd(p_hwfn, p_ptt, p_info->mfw_mb_addr);

	/* Get the current driver mailbox sequence before sending
	 * the first command
	 */
	p_info->drv_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
			     DRV_MSG_SEQ_NUMBER_MASK;

	/* Get current FW pulse sequence */
	p_info->drv_pulse_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_pulse_mb) &
				DRV_PULSE_SEQ_MASK;

	/* NOTE(review): the POR history register is read as 32 bits but
	 * truncated to u16 here, while qed_do_mcp_cmd() compares mcp_hist
	 * against a full 32-bit read - confirm the field width is intended.
	 */
	p_info->mcp_hist = (u16)qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);

	return 0;
}
143
144 int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
145 {
146         struct qed_mcp_info *p_info;
147         u32 size;
148
149         /* Allocate mcp_info structure */
150         p_hwfn->mcp_info = kzalloc(sizeof(*p_hwfn->mcp_info), GFP_KERNEL);
151         if (!p_hwfn->mcp_info)
152                 goto err;
153         p_info = p_hwfn->mcp_info;
154
155         if (qed_load_mcp_offsets(p_hwfn, p_ptt) != 0) {
156                 DP_NOTICE(p_hwfn, "MCP is not initialized\n");
157                 /* Do not free mcp_info here, since public_base indicate that
158                  * the MCP is not initialized
159                  */
160                 return 0;
161         }
162
163         size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32);
164         p_info->mfw_mb_cur = kzalloc(size, GFP_KERNEL);
165         p_info->mfw_mb_shadow = kzalloc(size, GFP_KERNEL);
166         if (!p_info->mfw_mb_shadow || !p_info->mfw_mb_addr)
167                 goto err;
168
169         /* Initialize the MFW spinlock */
170         spin_lock_init(&p_info->lock);
171
172         return 0;
173
174 err:
175         qed_mcp_free(p_hwfn);
176         return -ENOMEM;
177 }
178
/* Locks the MFW mailbox of a PF to ensure a single access.
 * The lock is achieved in most cases by holding a spinlock, causing other
 * threads to wait till a previous access is done.
 * In some cases (currently when a [UN]LOAD_REQ commands are sent), the single
 * access is achieved by setting a blocking flag, which will fail other
 * competing contexts to send their mailboxes.
 *
 * Return: 0 on success with either the spinlock held (most commands) or
 * block_mb_sending set and the spinlock released ([UN]LOAD_REQ); -EBUSY if a
 * [UN]LOAD_REQ is already in flight.  Must be paired with
 * qed_mcp_mb_unlock() using the same cmd.
 */
static int qed_mcp_mb_lock(struct qed_hwfn *p_hwfn, u32 cmd)
{
	spin_lock_bh(&p_hwfn->mcp_info->lock);

	/* The spinlock shouldn't be acquired when the mailbox command is
	 * [UN]LOAD_REQ, since the engine is locked by the MFW, and a parallel
	 * pending [UN]LOAD_REQ command of another PF together with a spinlock
	 * (i.e. interrupts are disabled) - can lead to a deadlock.
	 * It is assumed that for a single PF, no other mailbox commands can be
	 * sent from another context while sending LOAD_REQ, and that any
	 * parallel commands to UNLOAD_REQ can be cancelled.
	 */
	/* [UN]LOAD_DONE marks the end of the blocked window opened by the
	 * matching [UN]LOAD_REQ below.
	 */
	if (cmd == DRV_MSG_CODE_LOAD_DONE || cmd == DRV_MSG_CODE_UNLOAD_DONE)
		p_hwfn->mcp_info->block_mb_sending = false;

	if (p_hwfn->mcp_info->block_mb_sending) {
		DP_NOTICE(p_hwfn,
			  "Trying to send a MFW mailbox command [0x%x] in parallel to [UN]LOAD_REQ. Aborting.\n",
			  cmd);
		spin_unlock_bh(&p_hwfn->mcp_info->lock);
		return -EBUSY;
	}

	/* For [UN]LOAD_REQ: block other senders via the flag and drop the
	 * spinlock (see the deadlock rationale above).
	 */
	if (cmd == DRV_MSG_CODE_LOAD_REQ || cmd == DRV_MSG_CODE_UNLOAD_REQ) {
		p_hwfn->mcp_info->block_mb_sending = true;
		spin_unlock_bh(&p_hwfn->mcp_info->lock);
	}

	return 0;
}
216
217 static void qed_mcp_mb_unlock(struct qed_hwfn *p_hwfn, u32 cmd)
218 {
219         if (cmd != DRV_MSG_CODE_LOAD_REQ && cmd != DRV_MSG_CODE_UNLOAD_REQ)
220                 spin_unlock_bh(&p_hwfn->mcp_info->lock);
221 }
222
/* Request an MCP reset and wait for it to take effect, detected by a change
 * of the POR history register.
 *
 * Return: 0 on success, -EBUSY if the mailbox is blocked, -EAGAIN if the
 * MCP did not reset within QED_MCP_RESET_RETRIES polls.
 */
int qed_mcp_reset(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 seq = ++p_hwfn->mcp_info->drv_mb_seq;
	u8 delay = CHIP_MCP_RESP_ITER_US;
	u32 org_mcp_reset_seq, cnt = 0;
	int rc = 0;

	/* Ensure that only a single thread is accessing the mailbox at a
	 * certain time.
	 */
	rc = qed_mcp_mb_lock(p_hwfn, DRV_MSG_CODE_MCP_RESET);
	if (rc != 0)
		return rc;

	/* Set drv command along with the updated sequence */
	org_mcp_reset_seq = qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header,
		  (DRV_MSG_CODE_MCP_RESET | seq));

	do {
		/* Wait for MFW response */
		udelay(delay);
		/* Give the FW up to 500 msec (50 * 1000 * 10 usec) */
	} while ((org_mcp_reset_seq == qed_rd(p_hwfn, p_ptt,
					      MISCS_REG_GENERIC_POR_0)) &&
		 (cnt++ < QED_MCP_RESET_RETRIES));

	/* A POR-history change means the MCP went through reset. */
	if (org_mcp_reset_seq !=
	    qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "MCP was reset after %d usec\n", cnt * delay);
	} else {
		DP_ERR(p_hwfn, "Failed to reset MCP\n");
		rc = -EAGAIN;
	}

	qed_mcp_mb_unlock(p_hwfn, DRV_MSG_CODE_MCP_RESET);

	return rc;
}
263
264 static int qed_do_mcp_cmd(struct qed_hwfn *p_hwfn,
265                           struct qed_ptt *p_ptt,
266                           u32 cmd,
267                           u32 param,
268                           u32 *o_mcp_resp,
269                           u32 *o_mcp_param)
270 {
271         u8 delay = CHIP_MCP_RESP_ITER_US;
272         u32 seq, cnt = 1, actual_mb_seq;
273         int rc = 0;
274
275         /* Get actual driver mailbox sequence */
276         actual_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
277                         DRV_MSG_SEQ_NUMBER_MASK;
278
279         /* Use MCP history register to check if MCP reset occurred between
280          * init time and now.
281          */
282         if (p_hwfn->mcp_info->mcp_hist !=
283             qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
284                 DP_VERBOSE(p_hwfn, QED_MSG_SP, "Rereading MCP offsets\n");
285                 qed_load_mcp_offsets(p_hwfn, p_ptt);
286                 qed_mcp_cmd_port_init(p_hwfn, p_ptt);
287         }
288         seq = ++p_hwfn->mcp_info->drv_mb_seq;
289
290         /* Set drv param */
291         DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, param);
292
293         /* Set drv command along with the updated sequence */
294         DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (cmd | seq));
295
296         DP_VERBOSE(p_hwfn, QED_MSG_SP,
297                    "wrote command (%x) to MFW MB param 0x%08x\n",
298                    (cmd | seq), param);
299
300         do {
301                 /* Wait for MFW response */
302                 udelay(delay);
303                 *o_mcp_resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header);
304
305                 /* Give the FW up to 5 second (500*10ms) */
306         } while ((seq != (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) &&
307                  (cnt++ < QED_DRV_MB_MAX_RETRIES));
308
309         DP_VERBOSE(p_hwfn, QED_MSG_SP,
310                    "[after %d ms] read (%x) seq is (%x) from FW MB\n",
311                    cnt * delay, *o_mcp_resp, seq);
312
313         /* Is this a reply to our command? */
314         if (seq == (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) {
315                 *o_mcp_resp &= FW_MSG_CODE_MASK;
316                 /* Get the MCP param */
317                 *o_mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param);
318         } else {
319                 /* FW BUG! */
320                 DP_ERR(p_hwfn, "MFW failed to respond [cmd 0x%x param 0x%x]\n",
321                        cmd, param);
322                 *o_mcp_resp = 0;
323                 rc = -EAGAIN;
324         }
325         return rc;
326 }
327
328 static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
329                                  struct qed_ptt *p_ptt,
330                                  struct qed_mcp_mb_params *p_mb_params)
331 {
332         u32 union_data_addr;
333         int rc;
334
335         /* MCP not initialized */
336         if (!qed_mcp_is_init(p_hwfn)) {
337                 DP_NOTICE(p_hwfn, "MFW is not initialized!\n");
338                 return -EBUSY;
339         }
340
341         union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
342                           offsetof(struct public_drv_mb, union_data);
343
344         /* Ensure that only a single thread is accessing the mailbox at a
345          * certain time.
346          */
347         rc = qed_mcp_mb_lock(p_hwfn, p_mb_params->cmd);
348         if (rc)
349                 return rc;
350
351         if (p_mb_params->p_data_src != NULL)
352                 qed_memcpy_to(p_hwfn, p_ptt, union_data_addr,
353                               p_mb_params->p_data_src,
354                               sizeof(*p_mb_params->p_data_src));
355
356         rc = qed_do_mcp_cmd(p_hwfn, p_ptt, p_mb_params->cmd,
357                             p_mb_params->param, &p_mb_params->mcp_resp,
358                             &p_mb_params->mcp_param);
359
360         if (p_mb_params->p_data_dst != NULL)
361                 qed_memcpy_from(p_hwfn, p_ptt, p_mb_params->p_data_dst,
362                                 union_data_addr,
363                                 sizeof(*p_mb_params->p_data_dst));
364
365         qed_mcp_mb_unlock(p_hwfn, p_mb_params->cmd);
366
367         return rc;
368 }
369
370 int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
371                 struct qed_ptt *p_ptt,
372                 u32 cmd,
373                 u32 param,
374                 u32 *o_mcp_resp,
375                 u32 *o_mcp_param)
376 {
377         struct qed_mcp_mb_params mb_params;
378         int rc;
379
380         memset(&mb_params, 0, sizeof(mb_params));
381         mb_params.cmd = cmd;
382         mb_params.param = param;
383         rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
384         if (rc)
385                 return rc;
386
387         *o_mcp_resp = mb_params.mcp_resp;
388         *o_mcp_param = mb_params.mcp_param;
389
390         return 0;
391 }
392
/* Issue an NVM read command and copy the returned data to the caller.
 *
 * @o_txn_size: set to the transaction size reported by the MFW (taken from
 *              the response parameter).
 * @o_buf:      destination buffer for the returned data.
 *
 * NOTE(review): the memcpy below trusts the MFW-reported size with no bound
 * check; the caller must guarantee o_buf can hold up to
 * sizeof(union drv_union_data) bytes - confirm against all callers.
 *
 * Return: 0 on success, mailbox error code otherwise.
 */
int qed_mcp_nvm_rd_cmd(struct qed_hwfn *p_hwfn,
		       struct qed_ptt *p_ptt,
		       u32 cmd,
		       u32 param,
		       u32 *o_mcp_resp,
		       u32 *o_mcp_param, u32 *o_txn_size, u32 *o_buf)
{
	struct qed_mcp_mb_params mb_params;
	union drv_union_data union_data;
	int rc;

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = cmd;
	mb_params.param = param;
	mb_params.p_data_dst = &union_data;
	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc)
		return rc;

	*o_mcp_resp = mb_params.mcp_resp;
	*o_mcp_param = mb_params.mcp_param;

	/* The response parameter doubles as the transaction size. */
	*o_txn_size = *o_mcp_param;
	memcpy(o_buf, &union_data.raw_data, *o_txn_size);

	return 0;
}
420
421 int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
422                      struct qed_ptt *p_ptt, u32 *p_load_code)
423 {
424         struct qed_dev *cdev = p_hwfn->cdev;
425         struct qed_mcp_mb_params mb_params;
426         union drv_union_data union_data;
427         int rc;
428
429         memset(&mb_params, 0, sizeof(mb_params));
430         /* Load Request */
431         mb_params.cmd = DRV_MSG_CODE_LOAD_REQ;
432         mb_params.param = PDA_COMP | DRV_ID_MCP_HSI_VER_CURRENT |
433                           cdev->drv_type;
434         memcpy(&union_data.ver_str, cdev->ver_str, MCP_DRV_VER_STR_SIZE);
435         mb_params.p_data_src = &union_data;
436         rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
437
438         /* if mcp fails to respond we must abort */
439         if (rc) {
440                 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
441                 return rc;
442         }
443
444         *p_load_code = mb_params.mcp_resp;
445
446         /* If MFW refused (e.g. other port is in diagnostic mode) we
447          * must abort. This can happen in the following cases:
448          * - Other port is in diagnostic mode
449          * - Previously loaded function on the engine is not compliant with
450          *   the requester.
451          * - MFW cannot cope with the requester's DRV_MFW_HSI_VERSION.
452          *      -
453          */
454         if (!(*p_load_code) ||
455             ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_HSI) ||
456             ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_PDA) ||
457             ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG)) {
458                 DP_ERR(p_hwfn, "MCP refused load request, aborting\n");
459                 return -EBUSY;
460         }
461
462         return 0;
463 }
464
465 static void qed_mcp_handle_vf_flr(struct qed_hwfn *p_hwfn,
466                                   struct qed_ptt *p_ptt)
467 {
468         u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
469                                         PUBLIC_PATH);
470         u32 mfw_path_offsize = qed_rd(p_hwfn, p_ptt, addr);
471         u32 path_addr = SECTION_ADDR(mfw_path_offsize,
472                                      QED_PATH_ID(p_hwfn));
473         u32 disabled_vfs[VF_MAX_STATIC / 32];
474         int i;
475
476         DP_VERBOSE(p_hwfn,
477                    QED_MSG_SP,
478                    "Reading Disabled VF information from [offset %08x], path_addr %08x\n",
479                    mfw_path_offsize, path_addr);
480
481         for (i = 0; i < (VF_MAX_STATIC / 32); i++) {
482                 disabled_vfs[i] = qed_rd(p_hwfn, p_ptt,
483                                          path_addr +
484                                          offsetof(struct public_path,
485                                                   mcp_vf_disabled) +
486                                          sizeof(u32) * i);
487                 DP_VERBOSE(p_hwfn, (QED_MSG_SP | QED_MSG_IOV),
488                            "FLR-ed VFs [%08x,...,%08x] - %08x\n",
489                            i * 32, (i + 1) * 32 - 1, disabled_vfs[i]);
490         }
491
492         if (qed_iov_mark_vf_flr(p_hwfn, disabled_vfs))
493                 qed_schedule_iov(p_hwfn, QED_IOV_WQ_FLR_FLAG);
494 }
495
496 int qed_mcp_ack_vf_flr(struct qed_hwfn *p_hwfn,
497                        struct qed_ptt *p_ptt, u32 *vfs_to_ack)
498 {
499         u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
500                                         PUBLIC_FUNC);
501         u32 mfw_func_offsize = qed_rd(p_hwfn, p_ptt, addr);
502         u32 func_addr = SECTION_ADDR(mfw_func_offsize,
503                                      MCP_PF_ID(p_hwfn));
504         struct qed_mcp_mb_params mb_params;
505         union drv_union_data union_data;
506         int rc;
507         int i;
508
509         for (i = 0; i < (VF_MAX_STATIC / 32); i++)
510                 DP_VERBOSE(p_hwfn, (QED_MSG_SP | QED_MSG_IOV),
511                            "Acking VFs [%08x,...,%08x] - %08x\n",
512                            i * 32, (i + 1) * 32 - 1, vfs_to_ack[i]);
513
514         memset(&mb_params, 0, sizeof(mb_params));
515         mb_params.cmd = DRV_MSG_CODE_VF_DISABLED_DONE;
516         memcpy(&union_data.ack_vf_disabled, vfs_to_ack, VF_MAX_STATIC / 8);
517         mb_params.p_data_src = &union_data;
518         rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
519         if (rc) {
520                 DP_NOTICE(p_hwfn, "Failed to pass ACK for VF flr to MFW\n");
521                 return -EBUSY;
522         }
523
524         /* Clear the ACK bits */
525         for (i = 0; i < (VF_MAX_STATIC / 32); i++)
526                 qed_wr(p_hwfn, p_ptt,
527                        func_addr +
528                        offsetof(struct public_func, drv_ack_vf_disabled) +
529                        i * sizeof(u32), 0);
530
531         return rc;
532 }
533
534 static void qed_mcp_handle_transceiver_change(struct qed_hwfn *p_hwfn,
535                                               struct qed_ptt *p_ptt)
536 {
537         u32 transceiver_state;
538
539         transceiver_state = qed_rd(p_hwfn, p_ptt,
540                                    p_hwfn->mcp_info->port_addr +
541                                    offsetof(struct public_port,
542                                             transceiver_data));
543
544         DP_VERBOSE(p_hwfn,
545                    (NETIF_MSG_HW | QED_MSG_SP),
546                    "Received transceiver state update [0x%08x] from mfw [Addr 0x%x]\n",
547                    transceiver_state,
548                    (u32)(p_hwfn->mcp_info->port_addr +
549                           offsetof(struct public_port, transceiver_data)));
550
551         transceiver_state = GET_FIELD(transceiver_state,
552                                       ETH_TRANSCEIVER_STATE);
553
554         if (transceiver_state == ETH_TRANSCEIVER_STATE_PRESENT)
555                 DP_NOTICE(p_hwfn, "Transceiver is present.\n");
556         else
557                 DP_NOTICE(p_hwfn, "Transceiver is unplugged.\n");
558 }
559
/* Refresh the cached link state from the MFW port section and propagate it.
 *
 * @b_reset: when true, only clear the cached link state (used on link reset)
 *           and return without reading shmem or notifying upper layers.
 *
 * Decodes speed/duplex, autoneg, partner abilities and flow control from the
 * link_status dword, re-applies min/max bandwidth configuration, and finally
 * calls qed_link_update() to notify the protocol driver.
 */
static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
				       struct qed_ptt *p_ptt, bool b_reset)
{
	struct qed_mcp_link_state *p_link;
	u8 max_bw, min_bw;
	u32 status = 0;

	p_link = &p_hwfn->mcp_info->link_output;
	memset(p_link, 0, sizeof(*p_link));
	if (!b_reset) {
		status = qed_rd(p_hwfn, p_ptt,
				p_hwfn->mcp_info->port_addr +
				offsetof(struct public_port, link_status));
		DP_VERBOSE(p_hwfn, (NETIF_MSG_LINK | QED_MSG_SP),
			   "Received link update [0x%08x] from mfw [Addr 0x%x]\n",
			   status,
			   (u32)(p_hwfn->mcp_info->port_addr +
				 offsetof(struct public_port, link_status)));
	} else {
		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
			   "Resetting link indications\n");
		return;
	}

	/* Only report link-up once the driver itself has initialized the
	 * link (b_drv_link_init set in qed_mcp_set_link()).
	 */
	if (p_hwfn->b_drv_link_init)
		p_link->link_up = !!(status & LINK_STATUS_LINK_UP);
	else
		p_link->link_up = false;

	/* Translate the HW speed/duplex encoding to Mb/s. */
	p_link->full_duplex = true;
	switch ((status & LINK_STATUS_SPEED_AND_DUPLEX_MASK)) {
	case LINK_STATUS_SPEED_AND_DUPLEX_100G:
		p_link->speed = 100000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_50G:
		p_link->speed = 50000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_40G:
		p_link->speed = 40000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_25G:
		p_link->speed = 25000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_20G:
		p_link->speed = 20000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_10G:
		p_link->speed = 10000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_1000THD:
		p_link->full_duplex = false;
	/* Fall-through */
	case LINK_STATUS_SPEED_AND_DUPLEX_1000TFD:
		p_link->speed = 1000;
		break;
	default:
		p_link->speed = 0;
	}

	if (p_link->link_up && p_link->speed)
		p_link->line_speed = p_link->speed;
	else
		p_link->line_speed = 0;

	max_bw = p_hwfn->mcp_info->func_info.bandwidth_max;
	min_bw = p_hwfn->mcp_info->func_info.bandwidth_min;

	/* Max bandwidth configuration */
	__qed_configure_pf_max_bandwidth(p_hwfn, p_ptt, p_link, max_bw);

	/* Min bandwidth configuration */
	__qed_configure_pf_min_bandwidth(p_hwfn, p_ptt, p_link, min_bw);
	qed_configure_vp_wfq_on_link_change(p_hwfn->cdev, p_link->min_pf_rate);

	p_link->an = !!(status & LINK_STATUS_AUTO_NEGOTIATE_ENABLED);
	p_link->an_complete = !!(status &
				 LINK_STATUS_AUTO_NEGOTIATE_COMPLETE);
	p_link->parallel_detection = !!(status &
					LINK_STATUS_PARALLEL_DETECTION_USED);
	p_link->pfc_enabled = !!(status & LINK_STATUS_PFC_ENABLED);

	/* Accumulate the link partner's advertised speed capabilities. */
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_1G_FD : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_1G_HD : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_10G_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_10G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_20G_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_20G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_25G_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_25G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_40G_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_40G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_50G_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_50G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_100G_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_100G : 0;

	p_link->partner_tx_flow_ctrl_en =
		!!(status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED);
	p_link->partner_rx_flow_ctrl_en =
		!!(status & LINK_STATUS_RX_FLOW_CONTROL_ENABLED);

	switch (status & LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK) {
	case LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE:
		p_link->partner_adv_pause = QED_LINK_PARTNER_SYMMETRIC_PAUSE;
		break;
	case LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE:
		p_link->partner_adv_pause = QED_LINK_PARTNER_ASYMMETRIC_PAUSE;
		break;
	case LINK_STATUS_LINK_PARTNER_BOTH_PAUSE:
		p_link->partner_adv_pause = QED_LINK_PARTNER_BOTH_PAUSE;
		break;
	default:
		p_link->partner_adv_pause = 0;
	}

	p_link->sfp_tx_fault = !!(status & LINK_STATUS_SFP_TX_FAULT);

	/* Notify the protocol driver of the new link state. */
	qed_link_update(p_hwfn);
}
689
690 int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up)
691 {
692         struct qed_mcp_link_params *params = &p_hwfn->mcp_info->link_input;
693         struct qed_mcp_mb_params mb_params;
694         union drv_union_data union_data;
695         struct eth_phy_cfg *phy_cfg;
696         int rc = 0;
697         u32 cmd;
698
699         /* Set the shmem configuration according to params */
700         phy_cfg = &union_data.drv_phy_cfg;
701         memset(phy_cfg, 0, sizeof(*phy_cfg));
702         cmd = b_up ? DRV_MSG_CODE_INIT_PHY : DRV_MSG_CODE_LINK_RESET;
703         if (!params->speed.autoneg)
704                 phy_cfg->speed = params->speed.forced_speed;
705         phy_cfg->pause |= (params->pause.autoneg) ? ETH_PAUSE_AUTONEG : 0;
706         phy_cfg->pause |= (params->pause.forced_rx) ? ETH_PAUSE_RX : 0;
707         phy_cfg->pause |= (params->pause.forced_tx) ? ETH_PAUSE_TX : 0;
708         phy_cfg->adv_speed = params->speed.advertised_speeds;
709         phy_cfg->loopback_mode = params->loopback_mode;
710
711         p_hwfn->b_drv_link_init = b_up;
712
713         if (b_up) {
714                 DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
715                            "Configuring Link: Speed 0x%08x, Pause 0x%08x, adv_speed 0x%08x, loopback 0x%08x, features 0x%08x\n",
716                            phy_cfg->speed,
717                            phy_cfg->pause,
718                            phy_cfg->adv_speed,
719                            phy_cfg->loopback_mode,
720                            phy_cfg->feature_config_flags);
721         } else {
722                 DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
723                            "Resetting link\n");
724         }
725
726         memset(&mb_params, 0, sizeof(mb_params));
727         mb_params.cmd = cmd;
728         mb_params.p_data_src = &union_data;
729         rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
730
731         /* if mcp fails to respond we must abort */
732         if (rc) {
733                 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
734                 return rc;
735         }
736
737         /* Reset the link status if needed */
738         if (!b_up)
739                 qed_mcp_handle_link_change(p_hwfn, p_ptt, true);
740
741         return 0;
742 }
743
744 static void qed_mcp_send_protocol_stats(struct qed_hwfn *p_hwfn,
745                                         struct qed_ptt *p_ptt,
746                                         enum MFW_DRV_MSG_TYPE type)
747 {
748         enum qed_mcp_protocol_type stats_type;
749         union qed_mcp_protocol_stats stats;
750         struct qed_mcp_mb_params mb_params;
751         union drv_union_data union_data;
752         u32 hsi_param;
753
754         switch (type) {
755         case MFW_DRV_MSG_GET_LAN_STATS:
756                 stats_type = QED_MCP_LAN_STATS;
757                 hsi_param = DRV_MSG_CODE_STATS_TYPE_LAN;
758                 break;
759         case MFW_DRV_MSG_GET_FCOE_STATS:
760                 stats_type = QED_MCP_FCOE_STATS;
761                 hsi_param = DRV_MSG_CODE_STATS_TYPE_FCOE;
762                 break;
763         case MFW_DRV_MSG_GET_ISCSI_STATS:
764                 stats_type = QED_MCP_ISCSI_STATS;
765                 hsi_param = DRV_MSG_CODE_STATS_TYPE_ISCSI;
766                 break;
767         case MFW_DRV_MSG_GET_RDMA_STATS:
768                 stats_type = QED_MCP_RDMA_STATS;
769                 hsi_param = DRV_MSG_CODE_STATS_TYPE_RDMA;
770                 break;
771         default:
772                 DP_NOTICE(p_hwfn, "Invalid protocol type %d\n", type);
773                 return;
774         }
775
776         qed_get_protocol_stats(p_hwfn->cdev, stats_type, &stats);
777
778         memset(&mb_params, 0, sizeof(mb_params));
779         mb_params.cmd = DRV_MSG_CODE_GET_STATS;
780         mb_params.param = hsi_param;
781         memcpy(&union_data, &stats, sizeof(stats));
782         mb_params.p_data_src = &union_data;
783         qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
784 }
785
786 static void qed_read_pf_bandwidth(struct qed_hwfn *p_hwfn,
787                                   struct public_func *p_shmem_info)
788 {
789         struct qed_mcp_function_info *p_info;
790
791         p_info = &p_hwfn->mcp_info->func_info;
792
793         p_info->bandwidth_min = (p_shmem_info->config &
794                                  FUNC_MF_CFG_MIN_BW_MASK) >>
795                                         FUNC_MF_CFG_MIN_BW_SHIFT;
796         if (p_info->bandwidth_min < 1 || p_info->bandwidth_min > 100) {
797                 DP_INFO(p_hwfn,
798                         "bandwidth minimum out of bounds [%02x]. Set to 1\n",
799                         p_info->bandwidth_min);
800                 p_info->bandwidth_min = 1;
801         }
802
803         p_info->bandwidth_max = (p_shmem_info->config &
804                                  FUNC_MF_CFG_MAX_BW_MASK) >>
805                                         FUNC_MF_CFG_MAX_BW_SHIFT;
806         if (p_info->bandwidth_max < 1 || p_info->bandwidth_max > 100) {
807                 DP_INFO(p_hwfn,
808                         "bandwidth maximum out of bounds [%02x]. Set to 100\n",
809                         p_info->bandwidth_max);
810                 p_info->bandwidth_max = 100;
811         }
812 }
813
814 static u32 qed_mcp_get_shmem_func(struct qed_hwfn *p_hwfn,
815                                   struct qed_ptt *p_ptt,
816                                   struct public_func *p_data, int pfid)
817 {
818         u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
819                                         PUBLIC_FUNC);
820         u32 mfw_path_offsize = qed_rd(p_hwfn, p_ptt, addr);
821         u32 func_addr = SECTION_ADDR(mfw_path_offsize, pfid);
822         u32 i, size;
823
824         memset(p_data, 0, sizeof(*p_data));
825
826         size = min_t(u32, sizeof(*p_data), QED_SECTION_SIZE(mfw_path_offsize));
827         for (i = 0; i < size / sizeof(u32); i++)
828                 ((u32 *)p_data)[i] = qed_rd(p_hwfn, p_ptt,
829                                             func_addr + (i << 2));
830         return size;
831 }
832
833 static void qed_mcp_update_bw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
834 {
835         struct qed_mcp_function_info *p_info;
836         struct public_func shmem_info;
837         u32 resp = 0, param = 0;
838
839         qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
840
841         qed_read_pf_bandwidth(p_hwfn, &shmem_info);
842
843         p_info = &p_hwfn->mcp_info->func_info;
844
845         qed_configure_pf_min_bandwidth(p_hwfn->cdev, p_info->bandwidth_min);
846         qed_configure_pf_max_bandwidth(p_hwfn->cdev, p_info->bandwidth_max);
847
848         /* Acknowledge the MFW */
849         qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BW_UPDATE_ACK, 0, &resp,
850                     &param);
851 }
852
/* Process pending MFW->driver notifications.
 *
 * Reads the MFW mailbox, dispatches a handler for every message slot whose
 * value changed since the last shadow copy, acknowledges all slots back to
 * the MFW, and finally refreshes the shadow.
 *
 * Returns 0 on success, -EINVAL on an unknown message or a spurious
 * indication (no changed slot found).
 */
int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt)
{
	struct qed_mcp_info *info = p_hwfn->mcp_info;
	int rc = 0;
	bool found = false;
	u16 i;

	DP_VERBOSE(p_hwfn, QED_MSG_SP, "Received message from MFW\n");

	/* Read Messages from MFW */
	qed_mcp_read_mb(p_hwfn, p_ptt);

	/* Compare current messages to old ones */
	for (i = 0; i < info->mfw_mb_length; i++) {
		if (info->mfw_mb_cur[i] == info->mfw_mb_shadow[i])
			continue;

		found = true;

		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
			   "Msg [%d] - old CMD 0x%02x, new CMD 0x%02x\n",
			   i, info->mfw_mb_shadow[i], info->mfw_mb_cur[i]);

		/* The slot index identifies the message type */
		switch (i) {
		case MFW_DRV_MSG_LINK_CHANGE:
			qed_mcp_handle_link_change(p_hwfn, p_ptt, false);
			break;
		case MFW_DRV_MSG_VF_DISABLED:
			qed_mcp_handle_vf_flr(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_LLDP_DATA_UPDATED:
			qed_dcbx_mib_update_event(p_hwfn, p_ptt,
						  QED_DCBX_REMOTE_LLDP_MIB);
			break;
		case MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED:
			qed_dcbx_mib_update_event(p_hwfn, p_ptt,
						  QED_DCBX_REMOTE_MIB);
			break;
		case MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED:
			qed_dcbx_mib_update_event(p_hwfn, p_ptt,
						  QED_DCBX_OPERATIONAL_MIB);
			break;
		case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE:
			qed_mcp_handle_transceiver_change(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_GET_LAN_STATS:
		case MFW_DRV_MSG_GET_FCOE_STATS:
		case MFW_DRV_MSG_GET_ISCSI_STATS:
		case MFW_DRV_MSG_GET_RDMA_STATS:
			qed_mcp_send_protocol_stats(p_hwfn, p_ptt, i);
			break;
		case MFW_DRV_MSG_BW_UPDATE:
			qed_mcp_update_bw(p_hwfn, p_ptt);
			break;
		default:
			DP_NOTICE(p_hwfn, "Unimplemented MFW message %d\n", i);
			rc = -EINVAL;
		}
	}

	/* ACK everything */
	for (i = 0; i < MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length); i++) {
		__be32 val = cpu_to_be32(((u32 *)info->mfw_mb_cur)[i]);

		/* MFW expect answer in BE, so we force write in that format */
		/* The ack array lives past the length word (one u32) and the
		 * current-message dwords in the shmem mailbox layout.
		 */
		qed_wr(p_hwfn, p_ptt,
		       info->mfw_mb_addr + sizeof(u32) +
		       MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length) *
		       sizeof(u32) + i * sizeof(u32),
		       (__force u32)val);
	}

	if (!found) {
		DP_NOTICE(p_hwfn,
			  "Received an MFW message indication but no new message!\n");
		rc = -EINVAL;
	}

	/* Copy the new mfw messages into the shadow */
	memcpy(info->mfw_mb_shadow, info->mfw_mb_cur, info->mfw_mb_length);

	return rc;
}
937
938 int qed_mcp_get_mfw_ver(struct qed_hwfn *p_hwfn,
939                         struct qed_ptt *p_ptt,
940                         u32 *p_mfw_ver, u32 *p_running_bundle_id)
941 {
942         u32 global_offsize;
943
944         if (IS_VF(p_hwfn->cdev)) {
945                 if (p_hwfn->vf_iov_info) {
946                         struct pfvf_acquire_resp_tlv *p_resp;
947
948                         p_resp = &p_hwfn->vf_iov_info->acquire_resp;
949                         *p_mfw_ver = p_resp->pfdev_info.mfw_ver;
950                         return 0;
951                 } else {
952                         DP_VERBOSE(p_hwfn,
953                                    QED_MSG_IOV,
954                                    "VF requested MFW version prior to ACQUIRE\n");
955                         return -EINVAL;
956                 }
957         }
958
959         global_offsize = qed_rd(p_hwfn, p_ptt,
960                                 SECTION_OFFSIZE_ADDR(p_hwfn->
961                                                      mcp_info->public_base,
962                                                      PUBLIC_GLOBAL));
963         *p_mfw_ver =
964             qed_rd(p_hwfn, p_ptt,
965                    SECTION_ADDR(global_offsize,
966                                 0) + offsetof(struct public_global, mfw_ver));
967
968         if (p_running_bundle_id != NULL) {
969                 *p_running_bundle_id = qed_rd(p_hwfn, p_ptt,
970                                               SECTION_ADDR(global_offsize, 0) +
971                                               offsetof(struct public_global,
972                                                        running_bundle_id));
973         }
974
975         return 0;
976 }
977
978 int qed_mcp_get_media_type(struct qed_dev *cdev, u32 *p_media_type)
979 {
980         struct qed_hwfn *p_hwfn = &cdev->hwfns[0];
981         struct qed_ptt  *p_ptt;
982
983         if (IS_VF(cdev))
984                 return -EINVAL;
985
986         if (!qed_mcp_is_init(p_hwfn)) {
987                 DP_NOTICE(p_hwfn, "MFW is not initialized!\n");
988                 return -EBUSY;
989         }
990
991         *p_media_type = MEDIA_UNSPECIFIED;
992
993         p_ptt = qed_ptt_acquire(p_hwfn);
994         if (!p_ptt)
995                 return -EBUSY;
996
997         *p_media_type = qed_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
998                                offsetof(struct public_port, media_type));
999
1000         qed_ptt_release(p_hwfn, p_ptt);
1001
1002         return 0;
1003 }
1004
1005 static int
1006 qed_mcp_get_shmem_proto(struct qed_hwfn *p_hwfn,
1007                         struct public_func *p_info,
1008                         enum qed_pci_personality *p_proto)
1009 {
1010         int rc = 0;
1011
1012         switch (p_info->config & FUNC_MF_CFG_PROTOCOL_MASK) {
1013         case FUNC_MF_CFG_PROTOCOL_ETHERNET:
1014                 if (test_bit(QED_DEV_CAP_ROCE,
1015                              &p_hwfn->hw_info.device_capabilities))
1016                         *p_proto = QED_PCI_ETH_ROCE;
1017                 else
1018                         *p_proto = QED_PCI_ETH;
1019                 break;
1020         case FUNC_MF_CFG_PROTOCOL_ISCSI:
1021                 *p_proto = QED_PCI_ISCSI;
1022                 break;
1023         case FUNC_MF_CFG_PROTOCOL_ROCE:
1024                 DP_NOTICE(p_hwfn, "RoCE personality is not a valid value!\n");
1025                 rc = -EINVAL;
1026                 break;
1027         default:
1028                 rc = -EINVAL;
1029         }
1030
1031         return rc;
1032 }
1033
/* Populate the cached MCP function info from the PF's shmem public_func
 * section: pause policy, personality, bandwidth limits, MAC, FCoE WWNs,
 * outer VLAN tag and MTU.
 *
 * Returns 0 on success, -EINVAL if the personality field is unknown.
 */
int qed_mcp_fill_shmem_func_info(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt)
{
	struct qed_mcp_function_info *info;
	struct public_func shmem_info;

	qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
	info = &p_hwfn->mcp_info->func_info;

	info->pause_on_host = (shmem_info.config &
			       FUNC_MF_CFG_PAUSE_ON_HOST_RING) ? 1 : 0;

	if (qed_mcp_get_shmem_proto(p_hwfn, &shmem_info, &info->protocol)) {
		DP_ERR(p_hwfn, "Unknown personality %08x\n",
		       (u32)(shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK));
		return -EINVAL;
	}

	qed_read_pf_bandwidth(p_hwfn, &shmem_info);

	/* Reassemble the MAC from the two shmem words; byte 0 is the most
	 * significant byte of mac_upper.
	 */
	if (shmem_info.mac_upper || shmem_info.mac_lower) {
		info->mac[0] = (u8)(shmem_info.mac_upper >> 8);
		info->mac[1] = (u8)(shmem_info.mac_upper);
		info->mac[2] = (u8)(shmem_info.mac_lower >> 24);
		info->mac[3] = (u8)(shmem_info.mac_lower >> 16);
		info->mac[4] = (u8)(shmem_info.mac_lower >> 8);
		info->mac[5] = (u8)(shmem_info.mac_lower);
	} else {
		DP_NOTICE(p_hwfn, "MAC is 0 in shmem\n");
	}

	/* WWNs are split across two 32-bit shmem words; the "lower" word
	 * carries the high half of the 64-bit name.
	 */
	info->wwn_port = (u64)shmem_info.fcoe_wwn_port_name_upper |
			 (((u64)shmem_info.fcoe_wwn_port_name_lower) << 32);
	info->wwn_node = (u64)shmem_info.fcoe_wwn_node_name_upper |
			 (((u64)shmem_info.fcoe_wwn_node_name_lower) << 32);

	info->ovlan = (u16)(shmem_info.ovlan_stag & FUNC_MF_CFG_OV_STAG_MASK);

	info->mtu = (u16)shmem_info.mtu_size;

	DP_VERBOSE(p_hwfn, (QED_MSG_SP | NETIF_MSG_IFUP),
		   "Read configuration from shmem: pause_on_host %02x protocol %02x BW [%02x - %02x] MAC %02x:%02x:%02x:%02x:%02x:%02x wwn port %llx node %llx ovlan %04x\n",
		info->pause_on_host, info->protocol,
		info->bandwidth_min, info->bandwidth_max,
		info->mac[0], info->mac[1], info->mac[2],
		info->mac[3], info->mac[4], info->mac[5],
		info->wwn_port, info->wwn_node, info->ovlan);

	return 0;
}
1084
1085 struct qed_mcp_link_params
1086 *qed_mcp_get_link_params(struct qed_hwfn *p_hwfn)
1087 {
1088         if (!p_hwfn || !p_hwfn->mcp_info)
1089                 return NULL;
1090         return &p_hwfn->mcp_info->link_input;
1091 }
1092
1093 struct qed_mcp_link_state
1094 *qed_mcp_get_link_state(struct qed_hwfn *p_hwfn)
1095 {
1096         if (!p_hwfn || !p_hwfn->mcp_info)
1097                 return NULL;
1098         return &p_hwfn->mcp_info->link_output;
1099 }
1100
1101 struct qed_mcp_link_capabilities
1102 *qed_mcp_get_link_capabilities(struct qed_hwfn *p_hwfn)
1103 {
1104         if (!p_hwfn || !p_hwfn->mcp_info)
1105                 return NULL;
1106         return &p_hwfn->mcp_info->link_capabilities;
1107 }
1108
1109 int qed_mcp_drain(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
1110 {
1111         u32 resp = 0, param = 0;
1112         int rc;
1113
1114         rc = qed_mcp_cmd(p_hwfn, p_ptt,
1115                          DRV_MSG_CODE_NIG_DRAIN, 1000, &resp, &param);
1116
1117         /* Wait for the drain to complete before returning */
1118         msleep(1020);
1119
1120         return rc;
1121 }
1122
1123 int qed_mcp_get_flash_size(struct qed_hwfn *p_hwfn,
1124                            struct qed_ptt *p_ptt, u32 *p_flash_size)
1125 {
1126         u32 flash_size;
1127
1128         if (IS_VF(p_hwfn->cdev))
1129                 return -EINVAL;
1130
1131         flash_size = qed_rd(p_hwfn, p_ptt, MCP_REG_NVM_CFG4);
1132         flash_size = (flash_size & MCP_REG_NVM_CFG4_FLASH_SIZE) >>
1133                       MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT;
1134         flash_size = (1 << (flash_size + MCP_BYTES_PER_MBIT_SHIFT));
1135
1136         *p_flash_size = flash_size;
1137
1138         return 0;
1139 }
1140
1141 int qed_mcp_config_vf_msix(struct qed_hwfn *p_hwfn,
1142                            struct qed_ptt *p_ptt, u8 vf_id, u8 num)
1143 {
1144         u32 resp = 0, param = 0, rc_param = 0;
1145         int rc;
1146
1147         /* Only Leader can configure MSIX, and need to take CMT into account */
1148         if (!IS_LEAD_HWFN(p_hwfn))
1149                 return 0;
1150         num *= p_hwfn->cdev->num_hwfns;
1151
1152         param |= (vf_id << DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT) &
1153                  DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK;
1154         param |= (num << DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT) &
1155                  DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK;
1156
1157         rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_VF_MSIX, param,
1158                          &resp, &rc_param);
1159
1160         if (resp != FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE) {
1161                 DP_NOTICE(p_hwfn, "VF[%d]: MFW failed to set MSI-X\n", vf_id);
1162                 rc = -EINVAL;
1163         } else {
1164                 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
1165                            "Requested 0x%02x MSI-x interrupts from VF 0x%02x\n",
1166                            num, vf_id);
1167         }
1168
1169         return rc;
1170 }
1171
/* Report the driver's version (numeric + name string) to the MFW via the
 * SET_VERSION mailbox command.
 *
 * Returns 0 on success or the mailbox error code.
 */
int
qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn,
			 struct qed_ptt *p_ptt,
			 struct qed_mcp_drv_version *p_ver)
{
	struct drv_version_stc *p_drv_version;
	struct qed_mcp_mb_params mb_params;
	union drv_union_data union_data;
	__be32 val;
	u32 i;
	int rc;

	p_drv_version = &union_data.drv_version;
	p_drv_version->version = p_ver->version;

	/* The MFW expects the name string as big-endian dwords; swap each
	 * 4-byte chunk. NOTE(review): the loop copies MCP_DRV_VER_STR_SIZE - 4
	 * bytes — presumably reserving the last dword for NUL termination;
	 * confirm against the HSI definition of drv_version_stc.
	 */
	for (i = 0; i < (MCP_DRV_VER_STR_SIZE - 4) / sizeof(u32); i++) {
		val = cpu_to_be32(*((u32 *)&p_ver->name[i * sizeof(u32)]));
		*(__be32 *)&p_drv_version->name[i * sizeof(u32)] = val;
	}

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_SET_VERSION;
	mb_params.p_data_src = &union_data;
	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc)
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");

	return rc;
}
1201
1202 int qed_mcp_halt(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
1203 {
1204         u32 resp = 0, param = 0;
1205         int rc;
1206
1207         rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MCP_HALT, 0, &resp,
1208                          &param);
1209         if (rc)
1210                 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
1211
1212         return rc;
1213 }
1214
1215 int qed_mcp_resume(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
1216 {
1217         u32 value, cpu_mode;
1218
1219         qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_STATE, 0xffffffff);
1220
1221         value = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
1222         value &= ~MCP_REG_CPU_MODE_SOFT_HALT;
1223         qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, value);
1224         cpu_mode = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
1225
1226         return (cpu_mode & MCP_REG_CPU_MODE_SOFT_HALT) ? -EAGAIN : 0;
1227 }
1228
1229 int qed_mcp_ov_update_current_config(struct qed_hwfn *p_hwfn,
1230                                      struct qed_ptt *p_ptt,
1231                                      enum qed_ov_client client)
1232 {
1233         u32 resp = 0, param = 0;
1234         u32 drv_mb_param;
1235         int rc;
1236
1237         switch (client) {
1238         case QED_OV_CLIENT_DRV:
1239                 drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OS;
1240                 break;
1241         case QED_OV_CLIENT_USER:
1242                 drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OTHER;
1243                 break;
1244         case QED_OV_CLIENT_VENDOR_SPEC:
1245                 drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_VENDOR_SPEC;
1246                 break;
1247         default:
1248                 DP_NOTICE(p_hwfn, "Invalid client type %d\n", client);
1249                 return -EINVAL;
1250         }
1251
1252         rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_CURR_CFG,
1253                          drv_mb_param, &resp, &param);
1254         if (rc)
1255                 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
1256
1257         return rc;
1258 }
1259
1260 int qed_mcp_ov_update_driver_state(struct qed_hwfn *p_hwfn,
1261                                    struct qed_ptt *p_ptt,
1262                                    enum qed_ov_driver_state drv_state)
1263 {
1264         u32 resp = 0, param = 0;
1265         u32 drv_mb_param;
1266         int rc;
1267
1268         switch (drv_state) {
1269         case QED_OV_DRIVER_STATE_NOT_LOADED:
1270                 drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_NOT_LOADED;
1271                 break;
1272         case QED_OV_DRIVER_STATE_DISABLED:
1273                 drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_DISABLED;
1274                 break;
1275         case QED_OV_DRIVER_STATE_ACTIVE:
1276                 drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_ACTIVE;
1277                 break;
1278         default:
1279                 DP_NOTICE(p_hwfn, "Invalid driver state %d\n", drv_state);
1280                 return -EINVAL;
1281         }
1282
1283         rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE,
1284                          drv_mb_param, &resp, &param);
1285         if (rc)
1286                 DP_ERR(p_hwfn, "Failed to send driver state\n");
1287
1288         return rc;
1289 }
1290
1291 int qed_mcp_ov_update_mtu(struct qed_hwfn *p_hwfn,
1292                           struct qed_ptt *p_ptt, u16 mtu)
1293 {
1294         u32 resp = 0, param = 0;
1295         u32 drv_mb_param;
1296         int rc;
1297
1298         drv_mb_param = (u32)mtu << DRV_MB_PARAM_OV_MTU_SIZE_SHIFT;
1299         rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_MTU,
1300                          drv_mb_param, &resp, &param);
1301         if (rc)
1302                 DP_ERR(p_hwfn, "Failed to send mtu value, rc = %d\n", rc);
1303
1304         return rc;
1305 }
1306
1307 int qed_mcp_ov_update_mac(struct qed_hwfn *p_hwfn,
1308                           struct qed_ptt *p_ptt, u8 *mac)
1309 {
1310         struct qed_mcp_mb_params mb_params;
1311         union drv_union_data union_data;
1312         int rc;
1313
1314         memset(&mb_params, 0, sizeof(mb_params));
1315         mb_params.cmd = DRV_MSG_CODE_SET_VMAC;
1316         mb_params.param = DRV_MSG_CODE_VMAC_TYPE_MAC <<
1317                           DRV_MSG_CODE_VMAC_TYPE_SHIFT;
1318         mb_params.param |= MCP_PF_ID(p_hwfn);
1319         ether_addr_copy(&union_data.raw_data[0], mac);
1320         mb_params.p_data_src = &union_data;
1321         rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
1322         if (rc)
1323                 DP_ERR(p_hwfn, "Failed to send mac address, rc = %d\n", rc);
1324
1325         return rc;
1326 }
1327
1328 int qed_mcp_ov_update_wol(struct qed_hwfn *p_hwfn,
1329                           struct qed_ptt *p_ptt, enum qed_ov_wol wol)
1330 {
1331         u32 resp = 0, param = 0;
1332         u32 drv_mb_param;
1333         int rc;
1334
1335         switch (wol) {
1336         case QED_OV_WOL_DEFAULT:
1337                 drv_mb_param = DRV_MB_PARAM_WOL_DEFAULT;
1338                 break;
1339         case QED_OV_WOL_DISABLED:
1340                 drv_mb_param = DRV_MB_PARAM_WOL_DISABLED;
1341                 break;
1342         case QED_OV_WOL_ENABLED:
1343                 drv_mb_param = DRV_MB_PARAM_WOL_ENABLED;
1344                 break;
1345         default:
1346                 DP_ERR(p_hwfn, "Invalid wol state %d\n", wol);
1347                 return -EINVAL;
1348         }
1349
1350         rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_WOL,
1351                          drv_mb_param, &resp, &param);
1352         if (rc)
1353                 DP_ERR(p_hwfn, "Failed to send wol mode, rc = %d\n", rc);
1354
1355         return rc;
1356 }
1357
1358 int qed_mcp_ov_update_eswitch(struct qed_hwfn *p_hwfn,
1359                               struct qed_ptt *p_ptt,
1360                               enum qed_ov_eswitch eswitch)
1361 {
1362         u32 resp = 0, param = 0;
1363         u32 drv_mb_param;
1364         int rc;
1365
1366         switch (eswitch) {
1367         case QED_OV_ESWITCH_NONE:
1368                 drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_NONE;
1369                 break;
1370         case QED_OV_ESWITCH_VEB:
1371                 drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_VEB;
1372                 break;
1373         case QED_OV_ESWITCH_VEPA:
1374                 drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_VEPA;
1375                 break;
1376         default:
1377                 DP_ERR(p_hwfn, "Invalid eswitch mode %d\n", eswitch);
1378                 return -EINVAL;
1379         }
1380
1381         rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_ESWITCH_MODE,
1382                          drv_mb_param, &resp, &param);
1383         if (rc)
1384                 DP_ERR(p_hwfn, "Failed to send eswitch mode, rc = %d\n", rc);
1385
1386         return rc;
1387 }
1388
1389 int qed_mcp_set_led(struct qed_hwfn *p_hwfn,
1390                     struct qed_ptt *p_ptt, enum qed_led_mode mode)
1391 {
1392         u32 resp = 0, param = 0, drv_mb_param;
1393         int rc;
1394
1395         switch (mode) {
1396         case QED_LED_MODE_ON:
1397                 drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_ON;
1398                 break;
1399         case QED_LED_MODE_OFF:
1400                 drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OFF;
1401                 break;
1402         case QED_LED_MODE_RESTORE:
1403                 drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OPER;
1404                 break;
1405         default:
1406                 DP_NOTICE(p_hwfn, "Invalid LED mode %d\n", mode);
1407                 return -EINVAL;
1408         }
1409
1410         rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_LED_MODE,
1411                          drv_mb_param, &resp, &param);
1412
1413         return rc;
1414 }
1415
1416 int qed_mcp_mask_parities(struct qed_hwfn *p_hwfn,
1417                           struct qed_ptt *p_ptt, u32 mask_parities)
1418 {
1419         u32 resp = 0, param = 0;
1420         int rc;
1421
1422         rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MASK_PARITIES,
1423                          mask_parities, &resp, &param);
1424
1425         if (rc) {
1426                 DP_ERR(p_hwfn,
1427                        "MCP response failure for mask parities, aborting\n");
1428         } else if (resp != FW_MSG_CODE_OK) {
1429                 DP_ERR(p_hwfn,
1430                        "MCP did not acknowledge mask parity request. Old MFW?\n");
1431                 rc = -EINVAL;
1432         }
1433
1434         return rc;
1435 }
1436
1437 int qed_mcp_bist_register_test(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
1438 {
1439         u32 drv_mb_param = 0, rsp, param;
1440         int rc = 0;
1441
1442         drv_mb_param = (DRV_MB_PARAM_BIST_REGISTER_TEST <<
1443                         DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);
1444
1445         rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
1446                          drv_mb_param, &rsp, &param);
1447
1448         if (rc)
1449                 return rc;
1450
1451         if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
1452             (param != DRV_MB_PARAM_BIST_RC_PASSED))
1453                 rc = -EAGAIN;
1454
1455         return rc;
1456 }
1457
1458 int qed_mcp_bist_clock_test(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
1459 {
1460         u32 drv_mb_param, rsp, param;
1461         int rc = 0;
1462
1463         drv_mb_param = (DRV_MB_PARAM_BIST_CLOCK_TEST <<
1464                         DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);
1465
1466         rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
1467                          drv_mb_param, &rsp, &param);
1468
1469         if (rc)
1470                 return rc;
1471
1472         if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
1473             (param != DRV_MB_PARAM_BIST_RC_PASSED))
1474                 rc = -EAGAIN;
1475
1476         return rc;
1477 }