/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/etherdevice.h>
#include <linux/qed/qed_chain.h>
#include <linux/qed/qed_if.h>
#include "qed.h"
#include "qed_cxt.h"
#include "qed_dev_api.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_int.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"

/* API common to all protocols */
void qed_init_dp(struct qed_dev *cdev,
                 u32 dp_module, u8 dp_level)
{
        u32 i;

        cdev->dp_level = dp_level;
        cdev->dp_module = dp_module;
        for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

                p_hwfn->dp_level = dp_level;
                p_hwfn->dp_module = dp_module;
        }
}

void qed_init_struct(struct qed_dev *cdev)
{
        u8 i;

        for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

                p_hwfn->cdev = cdev;
                p_hwfn->my_id = i;
                p_hwfn->b_active = false;

                mutex_init(&p_hwfn->dmae_info.mutex);
        }

        /* hwfn 0 is always active */
        cdev->hwfns[0].b_active = true;

        /* set the default cache alignment to 128 */
        cdev->cache_shift = 7;
}

static void qed_qm_info_free(struct qed_hwfn *p_hwfn)
{
        struct qed_qm_info *qm_info = &p_hwfn->qm_info;

        kfree(qm_info->qm_pq_params);
        qm_info->qm_pq_params = NULL;
        kfree(qm_info->qm_vport_params);
        qm_info->qm_vport_params = NULL;
        kfree(qm_info->qm_port_params);
        qm_info->qm_port_params = NULL;
}

void qed_resc_free(struct qed_dev *cdev)
{
        int i;

        kfree(cdev->fw_data);
        cdev->fw_data = NULL;

        kfree(cdev->reset_stats);

        for_each_hwfn(cdev, i) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

                kfree(p_hwfn->p_tx_cids);
                p_hwfn->p_tx_cids = NULL;
                kfree(p_hwfn->p_rx_cids);
                p_hwfn->p_rx_cids = NULL;
        }

        for_each_hwfn(cdev, i) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

                qed_cxt_mngr_free(p_hwfn);
                qed_qm_info_free(p_hwfn);
                qed_spq_free(p_hwfn);
                qed_eq_free(p_hwfn, p_hwfn->p_eq);
                qed_consq_free(p_hwfn, p_hwfn->p_consq);
                qed_int_free(p_hwfn);
                qed_dmae_info_free(p_hwfn);
        }
}

static int qed_init_qm_info(struct qed_hwfn *p_hwfn)
{
        struct qed_qm_info *qm_info = &p_hwfn->qm_info;
        struct init_qm_port_params *p_qm_port;
        u8 num_vports, i, vport_id, num_ports;
        u16 num_pqs, multi_cos_tcs = 1;

        memset(qm_info, 0, sizeof(*qm_info));

        num_pqs = multi_cos_tcs + 1; /* The '1' is for pure-LB */
        num_vports = (u8)RESC_NUM(p_hwfn, QED_VPORT);

        /* Sanity check that the setup requests a legal number of resources */
        if (num_pqs > RESC_NUM(p_hwfn, QED_PQ)) {
                DP_ERR(p_hwfn,
                       "Need too many Physical queues - 0x%04x when only 0x%04x are available\n",
                       num_pqs, RESC_NUM(p_hwfn, QED_PQ));
                return -EINVAL;
        }

        /* PQs will be arranged as follows: first per-TC PQ, then pure-LB queue */
        qm_info->qm_pq_params = kzalloc(sizeof(*qm_info->qm_pq_params) *
                                        num_pqs, GFP_ATOMIC);
        if (!qm_info->qm_pq_params)
                goto alloc_err;

        qm_info->qm_vport_params = kzalloc(sizeof(*qm_info->qm_vport_params) *
                                           num_vports, GFP_ATOMIC);
        if (!qm_info->qm_vport_params)
                goto alloc_err;

        qm_info->qm_port_params = kzalloc(sizeof(*qm_info->qm_port_params) *
                                          MAX_NUM_PORTS, GFP_ATOMIC);
        if (!qm_info->qm_port_params)
                goto alloc_err;

        vport_id = (u8)RESC_START(p_hwfn, QED_VPORT);

        /* First init per-TC PQs */
        for (i = 0; i < multi_cos_tcs; i++) {
                struct init_qm_pq_params *params = &qm_info->qm_pq_params[i];

                params->vport_id = vport_id;
                params->tc_id = p_hwfn->hw_info.non_offload_tc;
                params->wrr_group = 1;
        }

        /* Then init pure-LB PQ */
        qm_info->pure_lb_pq = i;
        qm_info->qm_pq_params[i].vport_id = (u8)RESC_START(p_hwfn, QED_VPORT);
        qm_info->qm_pq_params[i].tc_id = PURE_LB_TC;
        qm_info->qm_pq_params[i].wrr_group = 1;
        i++;

        qm_info->offload_pq = 0;
        qm_info->num_pqs = num_pqs;
        qm_info->num_vports = num_vports;

        /* Initialize qm port parameters */
        num_ports = p_hwfn->cdev->num_ports_in_engines;
        for (i = 0; i < num_ports; i++) {
                p_qm_port = &qm_info->qm_port_params[i];
                p_qm_port->active = 1;
                p_qm_port->num_active_phys_tcs = 4;
                p_qm_port->num_pbf_cmd_lines = PBF_MAX_CMD_LINES / num_ports;
                p_qm_port->num_btb_blocks = BTB_MAX_BLOCKS / num_ports;
        }

        qm_info->max_phys_tcs_per_port = NUM_OF_PHYS_TCS;

        qm_info->start_pq = (u16)RESC_START(p_hwfn, QED_PQ);

        qm_info->start_vport = (u8)RESC_START(p_hwfn, QED_VPORT);

        qm_info->pf_wfq = 0;
        qm_info->pf_rl = 0;
        qm_info->vport_rl_en = 1;

        return 0;

alloc_err:
        DP_NOTICE(p_hwfn, "Failed to allocate memory for QM params\n");
        kfree(qm_info->qm_pq_params);
        kfree(qm_info->qm_vport_params);
        kfree(qm_info->qm_port_params);

        return -ENOMEM;
}

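/* Illustrative layout, not extra configuration: with the current default of
 * multi_cos_tcs == 1, qed_init_qm_info() above produces
 *	qm_pq_params[0] - per-TC PQ,  tc_id = non_offload_tc, wrr_group 1
 *	qm_pq_params[1] - pure-LB PQ, tc_id = PURE_LB_TC,     wrr_group 1
 * so num_pqs == 2, pure_lb_pq == 1 and offload_pq == 0.
 */
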
int qed_resc_alloc(struct qed_dev *cdev)
{
        struct qed_consq *p_consq;
        struct qed_eq *p_eq;
        int i, rc = 0;

        cdev->fw_data = kzalloc(sizeof(*cdev->fw_data), GFP_KERNEL);
        if (!cdev->fw_data)
                return -ENOMEM;

        /* Allocate Memory for the Queue->CID mapping */
        for_each_hwfn(cdev, i) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
                int tx_size = sizeof(struct qed_hw_cid_data) *
                                     RESC_NUM(p_hwfn, QED_L2_QUEUE);
                int rx_size = sizeof(struct qed_hw_cid_data) *
                                     RESC_NUM(p_hwfn, QED_L2_QUEUE);

                p_hwfn->p_tx_cids = kzalloc(tx_size, GFP_KERNEL);
                if (!p_hwfn->p_tx_cids) {
                        DP_NOTICE(p_hwfn,
                                  "Failed to allocate memory for Tx Cids\n");
                        rc = -ENOMEM;
                        goto alloc_err;
                }

                p_hwfn->p_rx_cids = kzalloc(rx_size, GFP_KERNEL);
                if (!p_hwfn->p_rx_cids) {
                        DP_NOTICE(p_hwfn,
                                  "Failed to allocate memory for Rx Cids\n");
                        rc = -ENOMEM;
                        goto alloc_err;
                }
        }

        for_each_hwfn(cdev, i) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

                /* First allocate the context manager structure */
                rc = qed_cxt_mngr_alloc(p_hwfn);
                if (rc)
                        goto alloc_err;

                /* Set the HW cid/tid numbers (in the context manager).
                 * Must be done prior to any further computations.
                 */
                rc = qed_cxt_set_pf_params(p_hwfn);
                if (rc)
                        goto alloc_err;

                /* Prepare and process QM requirements */
                rc = qed_init_qm_info(p_hwfn);
                if (rc)
                        goto alloc_err;

                /* Compute the ILT client partition */
                rc = qed_cxt_cfg_ilt_compute(p_hwfn);
                if (rc)
                        goto alloc_err;

                /* CID map / ILT shadow table / T2 -
                 * the table sizes are determined by the computations above.
                 */
                rc = qed_cxt_tables_alloc(p_hwfn);
                if (rc)
                        goto alloc_err;

                /* SPQ, must follow ILT because it initializes the SPQ context */
                rc = qed_spq_alloc(p_hwfn);
                if (rc)
                        goto alloc_err;

                /* SP status block allocation */
                p_hwfn->p_dpc_ptt = qed_get_reserved_ptt(p_hwfn,
                                                         RESERVED_PTT_DPC);

                rc = qed_int_alloc(p_hwfn, p_hwfn->p_main_ptt);
                if (rc)
                        goto alloc_err;

                /* EQ */
                p_eq = qed_eq_alloc(p_hwfn, 256);
                if (!p_eq) {
                        rc = -ENOMEM;
                        goto alloc_err;
                }
                p_hwfn->p_eq = p_eq;

                p_consq = qed_consq_alloc(p_hwfn);
                if (!p_consq) {
                        rc = -ENOMEM;
                        goto alloc_err;
                }
                p_hwfn->p_consq = p_consq;

                /* DMA info initialization */
                rc = qed_dmae_info_alloc(p_hwfn);
                if (rc) {
                        DP_NOTICE(p_hwfn,
                                  "Failed to allocate memory for dmae_info structure\n");
                        goto alloc_err;
                }
        }

        cdev->reset_stats = kzalloc(sizeof(*cdev->reset_stats), GFP_KERNEL);
        if (!cdev->reset_stats) {
                DP_NOTICE(cdev, "Failed to allocate reset statistics\n");
                rc = -ENOMEM;
                goto alloc_err;
        }

        return 0;

alloc_err:
        qed_resc_free(cdev);
        return rc;
}

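/* Design note: on any failure above, alloc_err tears everything down through
 * qed_resc_free(), which walks all hw-functions and frees whatever was
 * already allocated; the free helpers NULL their pointers (and kfree(NULL)
 * is a no-op), so a partially completed allocation is safe to unwind.
 */
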
void qed_resc_setup(struct qed_dev *cdev)
{
        int i;

        for_each_hwfn(cdev, i) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

                qed_cxt_mngr_setup(p_hwfn);
                qed_spq_setup(p_hwfn);
                qed_eq_setup(p_hwfn, p_hwfn->p_eq);
                qed_consq_setup(p_hwfn, p_hwfn->p_consq);

                /* Read shadow of current MFW mailbox */
                qed_mcp_read_mb(p_hwfn, p_hwfn->p_main_ptt);
                memcpy(p_hwfn->mcp_info->mfw_mb_shadow,
                       p_hwfn->mcp_info->mfw_mb_cur,
                       p_hwfn->mcp_info->mfw_mb_length);

                qed_int_setup(p_hwfn, p_hwfn->p_main_ptt);
        }
}

#define FINAL_CLEANUP_CMD_OFFSET        (0)
#define FINAL_CLEANUP_CMD               (0x1)
#define FINAL_CLEANUP_VALID_OFFSET      (6)
#define FINAL_CLEANUP_VFPF_ID_SHIFT     (7)
#define FINAL_CLEANUP_COMP              (0x2)
#define FINAL_CLEANUP_POLL_CNT          (100)
#define FINAL_CLEANUP_POLL_TIME         (10)
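
/* Worked example (hypothetical id, for illustration only): for id == 5 the
 * command word assembled below is
 *	(FINAL_CLEANUP_CMD << FINAL_CLEANUP_CMD_OFFSET)	-> 0x1 << 0
 *	| (1 << FINAL_CLEANUP_VALID_OFFSET)		-> 0x1 << 6
 *	| (id << FINAL_CLEANUP_VFPF_ID_SHIFT)		-> 0x5 << 7
 * which is 0x2c1, plus FINAL_CLEANUP_COMP in the SDM completion-type field.
 */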
int qed_final_cleanup(struct qed_hwfn *p_hwfn,
                      struct qed_ptt *p_ptt,
                      u16 id)
{
        u32 command = 0, addr, count = FINAL_CLEANUP_POLL_CNT;
        int rc = -EBUSY;

        addr = GTT_BAR0_MAP_REG_USDM_RAM + USTORM_FLR_FINAL_ACK_OFFSET;

        command |= FINAL_CLEANUP_CMD << FINAL_CLEANUP_CMD_OFFSET;
        command |= 1 << FINAL_CLEANUP_VALID_OFFSET;
        command |= id << FINAL_CLEANUP_VFPF_ID_SHIFT;
        command |= FINAL_CLEANUP_COMP << SDM_OP_GEN_COMP_TYPE_SHIFT;

        /* Make sure notification is not set before initiating final cleanup */
        if (REG_RD(p_hwfn, addr)) {
                DP_NOTICE(p_hwfn,
                          "Unexpected; Found final cleanup notification before initiating final cleanup\n");
                REG_WR(p_hwfn, addr, 0);
        }

        DP_VERBOSE(p_hwfn, QED_MSG_IOV,
                   "Sending final cleanup for PFVF[%d] [Command %08x]\n",
                   id, command);

        qed_wr(p_hwfn, p_ptt, XSDM_REG_OPERATION_GEN, command);

        /* Poll until completion */
        while (!REG_RD(p_hwfn, addr) && count--)
                msleep(FINAL_CLEANUP_POLL_TIME);

        if (REG_RD(p_hwfn, addr))
                rc = 0;
        else
                DP_NOTICE(p_hwfn,
                          "Failed to receive FW final cleanup notification\n");

        /* Cleanup afterwards */
        REG_WR(p_hwfn, addr, 0);

        return rc;
}

static void qed_calc_hw_mode(struct qed_hwfn *p_hwfn)
{
        int hw_mode = (1 << MODE_BB_A0);

        switch (p_hwfn->cdev->num_ports_in_engines) {
        case 1:
                hw_mode |= 1 << MODE_PORTS_PER_ENG_1;
                break;
        case 2:
                hw_mode |= 1 << MODE_PORTS_PER_ENG_2;
                break;
        case 4:
                hw_mode |= 1 << MODE_PORTS_PER_ENG_4;
                break;
        default:
                DP_NOTICE(p_hwfn, "num_ports_in_engines = %d not supported\n",
                          p_hwfn->cdev->num_ports_in_engines);
                return;
        }

        switch (p_hwfn->cdev->mf_mode) {
        case SF:
                hw_mode |= 1 << MODE_SF;
                break;
        case MF_OVLAN:
                hw_mode |= 1 << MODE_MF_SD;
                break;
        case MF_NPAR:
                hw_mode |= 1 << MODE_MF_SI;
                break;
        default:
                DP_NOTICE(p_hwfn, "Unsupported MF mode, init as SF\n");
                hw_mode |= 1 << MODE_SF;
        }

        hw_mode |= 1 << MODE_ASIC;

        p_hwfn->hw_info.hw_mode = hw_mode;
}

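/* Illustrative result: on a single-port engine running in the default SF
 * mode, the function above yields
 *	hw_mode = (1 << MODE_BB_A0) | (1 << MODE_PORTS_PER_ENG_1) |
 *		  (1 << MODE_SF) | (1 << MODE_ASIC)
 */
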
/* Init run time data for all PFs on an engine. */
static void qed_init_cau_rt_data(struct qed_dev *cdev)
{
        u32 offset = CAU_REG_SB_VAR_MEMORY_RT_OFFSET;
        int i, sb_id;

        for_each_hwfn(cdev, i) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
                struct qed_igu_info *p_igu_info;
                struct qed_igu_block *p_block;
                struct cau_sb_entry sb_entry;

                p_igu_info = p_hwfn->hw_info.p_igu_info;

                for (sb_id = 0; sb_id < QED_MAPPING_MEMORY_SIZE(cdev);
                     sb_id++) {
                        p_block = &p_igu_info->igu_map.igu_blocks[sb_id];
                        if (!p_block->is_pf)
                                continue;

                        qed_init_cau_sb_entry(p_hwfn, &sb_entry,
                                              p_block->function_id,
                                              0, 0);
                        STORE_RT_REG_AGG(p_hwfn, offset + sb_id * 2,
                                         sb_entry);
                }
        }
}

static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
                              struct qed_ptt *p_ptt,
                              int hw_mode)
{
        struct qed_qm_info *qm_info = &p_hwfn->qm_info;
        struct qed_qm_common_rt_init_params params;
        struct qed_dev *cdev = p_hwfn->cdev;
        int rc = 0;

        qed_init_cau_rt_data(cdev);

        /* Program GTT windows */
        qed_gtt_init(p_hwfn);

        if (p_hwfn->mcp_info) {
                if (p_hwfn->mcp_info->func_info.bandwidth_max)
                        qm_info->pf_rl_en = 1;
                if (p_hwfn->mcp_info->func_info.bandwidth_min)
                        qm_info->pf_wfq_en = 1;
        }

        memset(&params, 0, sizeof(params));
        params.max_ports_per_engine = p_hwfn->cdev->num_ports_in_engines;
        params.max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port;
        params.pf_rl_en = qm_info->pf_rl_en;
        params.pf_wfq_en = qm_info->pf_wfq_en;
        params.vport_rl_en = qm_info->vport_rl_en;
        params.vport_wfq_en = qm_info->vport_wfq_en;
        params.port_params = qm_info->qm_port_params;

        qed_qm_common_rt_init(p_hwfn, &params);

        qed_cxt_hw_init_common(p_hwfn);

        /* Close the gates from NIG to BRB/Storm; by default they are open,
         * but we close them to prevent NIG from passing data to reset blocks.
         * This should have been done in the ENGINE phase, but the init-tool
         * lacks proper port-pretend capabilities.
         */
        qed_wr(p_hwfn, p_ptt, NIG_REG_RX_BRB_OUT_EN, 0);
        qed_wr(p_hwfn, p_ptt, NIG_REG_STORM_OUT_EN, 0);
        qed_port_pretend(p_hwfn, p_ptt, p_hwfn->port_id ^ 1);
        qed_wr(p_hwfn, p_ptt, NIG_REG_RX_BRB_OUT_EN, 0);
        qed_wr(p_hwfn, p_ptt, NIG_REG_STORM_OUT_EN, 0);
        qed_port_unpretend(p_hwfn, p_ptt);

        rc = qed_init_run(p_hwfn, p_ptt, PHASE_ENGINE, ANY_PHASE_ID, hw_mode);
        if (rc != 0)
                return rc;

        qed_wr(p_hwfn, p_ptt, PSWRQ2_REG_L2P_VALIDATE_VFID, 0);
        qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_USE_CLIENTID_IN_TAG, 1);

        /* Disable relaxed ordering in the PCI config space */
        qed_wr(p_hwfn, p_ptt, 0x20b4,
               qed_rd(p_hwfn, p_ptt, 0x20b4) & ~0x10);

        return rc;
}

static int qed_hw_init_port(struct qed_hwfn *p_hwfn,
                            struct qed_ptt *p_ptt,
                            int hw_mode)
{
        return qed_init_run(p_hwfn, p_ptt, PHASE_PORT, p_hwfn->port_id,
                            hw_mode);
}

static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
                          struct qed_ptt *p_ptt,
                          int hw_mode,
                          bool b_hw_start,
                          enum qed_int_mode int_mode,
                          bool allow_npar_tx_switch)
{
        u8 rel_pf_id = p_hwfn->rel_pf_id;
        int rc = 0;

        if (p_hwfn->mcp_info) {
                struct qed_mcp_function_info *p_info;

                p_info = &p_hwfn->mcp_info->func_info;
                if (p_info->bandwidth_min)
                        p_hwfn->qm_info.pf_wfq = p_info->bandwidth_min;

                /* Update rate limit once we actually have a link */
                p_hwfn->qm_info.pf_rl = 100;
        }

        qed_cxt_hw_init_pf(p_hwfn);

        qed_int_igu_init_rt(p_hwfn);

        /* Set VLAN in NIG if needed */
        if (hw_mode & (1 << MODE_MF_SD)) {
                DP_VERBOSE(p_hwfn, NETIF_MSG_HW, "Configuring LLH_FUNC_TAG\n");
                STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET, 1);
                STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET,
                             p_hwfn->hw_info.ovlan);
        }

        /* Enable classification by MAC if needed */
        if (hw_mode & (1 << MODE_MF_SI)) {
                DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
                           "Configuring TAGMAC_CLS_TYPE\n");
                STORE_RT_REG(p_hwfn,
                             NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET, 1);
        }

        /* Protocol configuration */
        STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_TCP_RT_OFFSET, 0);
        STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_FCOE_RT_OFFSET, 0);
        STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_ROCE_RT_OFFSET, 0);

        /* Clean up the chip from a previous driver's leftovers, if any exist */
        rc = qed_final_cleanup(p_hwfn, p_ptt, rel_pf_id);
        if (rc != 0)
                return rc;

        /* PF Init sequence */
        rc = qed_init_run(p_hwfn, p_ptt, PHASE_PF, rel_pf_id, hw_mode);
        if (rc)
                return rc;

        /* QM_PF Init sequence (may be invoked separately e.g. for DCB) */
        rc = qed_init_run(p_hwfn, p_ptt, PHASE_QM_PF, rel_pf_id, hw_mode);
        if (rc)
                return rc;

        /* Pure runtime initializations - directly to the HW */
        qed_int_igu_init_pure_rt(p_hwfn, p_ptt, true, true);

        if (b_hw_start) {
                /* enable interrupts */
                qed_int_igu_enable(p_hwfn, p_ptt, int_mode);

                /* send function start command */
                rc = qed_sp_pf_start(p_hwfn, p_hwfn->cdev->mf_mode);
                if (rc)
                        DP_NOTICE(p_hwfn, "Function start ramrod failed\n");
        }
        return rc;
}

static int qed_change_pci_hwfn(struct qed_hwfn *p_hwfn,
                               struct qed_ptt *p_ptt,
                               u8 enable)
{
        u32 delay_idx = 0, val, set_val = enable ? 1 : 0;

        /* Change PF in PXP */
        qed_wr(p_hwfn, p_ptt,
               PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, set_val);

        /* wait until value is set - try for 1 second every 50us */
        for (delay_idx = 0; delay_idx < 20000; delay_idx++) {
                val = qed_rd(p_hwfn, p_ptt,
                             PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
                if (val == set_val)
                        break;

                usleep_range(50, 60);
        }

        if (val != set_val) {
                DP_NOTICE(p_hwfn,
                          "PFID_ENABLE_MASTER wasn't changed after a second\n");
                return -EAGAIN;
        }

        return 0;
}
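
/* Note on the polling budget above: 20000 iterations of usleep_range(50, 60)
 * amount to roughly 20000 * 50us = 1s (up to ~1.2s at the upper bound),
 * matching the "try for 1 second every 50us" comment.
 */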

static void qed_reset_mb_shadow(struct qed_hwfn *p_hwfn,
                                struct qed_ptt *p_main_ptt)
{
        /* Read shadow of current MFW mailbox */
        qed_mcp_read_mb(p_hwfn, p_main_ptt);
        memcpy(p_hwfn->mcp_info->mfw_mb_shadow,
               p_hwfn->mcp_info->mfw_mb_cur,
               p_hwfn->mcp_info->mfw_mb_length);
}

int qed_hw_init(struct qed_dev *cdev,
                bool b_hw_start,
                enum qed_int_mode int_mode,
                bool allow_npar_tx_switch,
                const u8 *bin_fw_data)
{
        struct qed_storm_stats *p_stat;
        u32 load_code, param, *p_address;
        int rc, mfw_rc, i;
        u8 fw_vport = 0;

        rc = qed_init_fw_data(cdev, bin_fw_data);
        if (rc != 0)
                return rc;

        for_each_hwfn(cdev, i) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

                rc = qed_fw_vport(p_hwfn, 0, &fw_vport);
                if (rc != 0)
                        return rc;

                /* Enable DMAE in PXP */
                rc = qed_change_pci_hwfn(p_hwfn, p_hwfn->p_main_ptt, true);

                qed_calc_hw_mode(p_hwfn);

                rc = qed_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt,
                                      &load_code);
                if (rc) {
                        DP_NOTICE(p_hwfn, "Failed sending LOAD_REQ command\n");
                        return rc;
                }

                qed_reset_mb_shadow(p_hwfn, p_hwfn->p_main_ptt);

                DP_VERBOSE(p_hwfn, QED_MSG_SP,
                           "Load request was sent. Resp:0x%x, Load code: 0x%x\n",
                           rc, load_code);

                p_hwfn->first_on_engine = (load_code ==
                                           FW_MSG_CODE_DRV_LOAD_ENGINE);

                switch (load_code) {
                case FW_MSG_CODE_DRV_LOAD_ENGINE:
                        rc = qed_hw_init_common(p_hwfn, p_hwfn->p_main_ptt,
                                                p_hwfn->hw_info.hw_mode);
                        if (rc)
                                break;
                /* Fall through */
                case FW_MSG_CODE_DRV_LOAD_PORT:
                        rc = qed_hw_init_port(p_hwfn, p_hwfn->p_main_ptt,
                                              p_hwfn->hw_info.hw_mode);
                        if (rc)
                                break;

                /* Fall through */
                case FW_MSG_CODE_DRV_LOAD_FUNCTION:
                        rc = qed_hw_init_pf(p_hwfn, p_hwfn->p_main_ptt,
                                            p_hwfn->hw_info.hw_mode,
                                            b_hw_start, int_mode,
                                            allow_npar_tx_switch);
                        break;
                default:
                        rc = -EINVAL;
                        break;
                }

                if (rc)
                        DP_NOTICE(p_hwfn,
                                  "init phase failed for loadcode 0x%x (rc %d)\n",
                                  load_code, rc);

                /* ACK mfw regardless of success or failure of initialization */
                mfw_rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
                                     DRV_MSG_CODE_LOAD_DONE,
                                     0, &load_code, &param);
                if (rc)
                        return rc;
                if (mfw_rc) {
                        DP_NOTICE(p_hwfn, "Failed sending LOAD_DONE command\n");
                        return mfw_rc;
                }

                p_hwfn->hw_init_done = true;

                /* init PF stats */
                p_stat = &p_hwfn->storm_stats;
                p_stat->mstats.address = BAR0_MAP_REG_MSDM_RAM +
                                         MSTORM_QUEUE_STAT_OFFSET(fw_vport);
                p_stat->mstats.len = sizeof(struct eth_mstorm_per_queue_stat);

                p_stat->ustats.address = BAR0_MAP_REG_USDM_RAM +
                                         USTORM_QUEUE_STAT_OFFSET(fw_vport);
                p_stat->ustats.len = sizeof(struct eth_ustorm_per_queue_stat);

                p_stat->pstats.address = BAR0_MAP_REG_PSDM_RAM +
                                         PSTORM_QUEUE_STAT_OFFSET(fw_vport);
                p_stat->pstats.len = sizeof(struct eth_pstorm_per_queue_stat);

                p_address = &p_stat->tstats.address;
                *p_address = BAR0_MAP_REG_TSDM_RAM +
                             TSTORM_PORT_STAT_OFFSET(MFW_PORT(p_hwfn));
                p_stat->tstats.len = sizeof(struct tstorm_per_port_stat);
        }

        return 0;
}

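/* For reference, the deliberate fall-through in the switch above means:
 *	FW_MSG_CODE_DRV_LOAD_ENGINE   - runs engine + port + PF init phases
 *	FW_MSG_CODE_DRV_LOAD_PORT     - runs port + PF init phases
 *	FW_MSG_CODE_DRV_LOAD_FUNCTION - runs only the PF init phase
 * i.e. the first function loaded on an engine/port performs the shared setup.
 */
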
#define QED_HW_STOP_RETRY_LIMIT (10)
int qed_hw_stop(struct qed_dev *cdev)
{
        int rc = 0, t_rc;
        int i, j;

        for_each_hwfn(cdev, j) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[j];
                struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;

                DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Stopping hw/fw\n");

                /* mark the hw as uninitialized... */
                p_hwfn->hw_init_done = false;

                rc = qed_sp_pf_stop(p_hwfn);
                if (rc)
                        return rc;

                qed_wr(p_hwfn, p_ptt,
                       NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1);

                qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0);
                qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0);
                qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0);
                qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
                qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0);

                qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x0);
                qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, 0x0);
                for (i = 0; i < QED_HW_STOP_RETRY_LIMIT; i++) {
                        if ((!qed_rd(p_hwfn, p_ptt,
                                     TM_REG_PF_SCAN_ACTIVE_CONN)) &&
                            (!qed_rd(p_hwfn, p_ptt,
                                     TM_REG_PF_SCAN_ACTIVE_TASK)))
                                break;

                        usleep_range(1000, 2000);
                }
                if (i == QED_HW_STOP_RETRY_LIMIT)
                        DP_NOTICE(p_hwfn,
                                  "Timers linear scans are not over [Connection %02x Tasks %02x]\n",
                                  (u8)qed_rd(p_hwfn, p_ptt,
                                             TM_REG_PF_SCAN_ACTIVE_CONN),
                                  (u8)qed_rd(p_hwfn, p_ptt,
                                             TM_REG_PF_SCAN_ACTIVE_TASK));

                /* Disable Attention Generation */
                qed_int_igu_disable_int(p_hwfn, p_ptt);

                qed_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0);
                qed_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0);

                qed_int_igu_init_pure_rt(p_hwfn, p_ptt, false, true);

                /* Need to wait 1ms to guarantee SBs are cleared */
                usleep_range(1000, 2000);
        }

        /* Disable DMAE in PXP - in CMT, this should only be done for
         * first hw-function, and only after all transactions have
         * stopped for all active hw-functions.
         */
        t_rc = qed_change_pci_hwfn(&cdev->hwfns[0],
                                   cdev->hwfns[0].p_main_ptt,
                                   false);
        if (t_rc != 0)
                rc = t_rc;

        return rc;
}

void qed_hw_stop_fastpath(struct qed_dev *cdev)
{
        int i, j;

        for_each_hwfn(cdev, j) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[j];
                struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;

                DP_VERBOSE(p_hwfn,
                           NETIF_MSG_IFDOWN,
                           "Shutting down the fastpath\n");

                qed_wr(p_hwfn, p_ptt,
                       NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1);

                qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0);
                qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0);
                qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0);
                qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
                qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0);

                qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x0);
                qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, 0x0);
                for (i = 0; i < QED_HW_STOP_RETRY_LIMIT; i++) {
                        if ((!qed_rd(p_hwfn, p_ptt,
                                     TM_REG_PF_SCAN_ACTIVE_CONN)) &&
                            (!qed_rd(p_hwfn, p_ptt,
                                     TM_REG_PF_SCAN_ACTIVE_TASK)))
                                break;

                        usleep_range(1000, 2000);
                }
                if (i == QED_HW_STOP_RETRY_LIMIT)
                        DP_NOTICE(p_hwfn,
                                  "Timers linear scans are not over [Connection %02x Tasks %02x]\n",
                                  (u8)qed_rd(p_hwfn, p_ptt,
                                             TM_REG_PF_SCAN_ACTIVE_CONN),
                                  (u8)qed_rd(p_hwfn, p_ptt,
                                             TM_REG_PF_SCAN_ACTIVE_TASK));

                qed_int_igu_init_pure_rt(p_hwfn, p_ptt, false, false);

                /* Need to wait 1ms to guarantee SBs are cleared */
                usleep_range(1000, 2000);
        }
}

void qed_hw_start_fastpath(struct qed_hwfn *p_hwfn)
{
        /* Re-open incoming traffic */
        qed_wr(p_hwfn, p_hwfn->p_main_ptt,
               NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x0);
}

static int qed_reg_assert(struct qed_hwfn *hwfn,
                          struct qed_ptt *ptt, u32 reg,
                          bool expected)
{
        u32 assert_val = qed_rd(hwfn, ptt, reg);

        if (assert_val != expected) {
                DP_NOTICE(hwfn, "Value 0x%08x at address 0x%x != expected 0x%08x\n",
                          assert_val, reg, expected);
                return -EINVAL;
        }

        return 0;
}

int qed_hw_reset(struct qed_dev *cdev)
{
        int rc = 0;
        u32 unload_resp, unload_param;
        int i;

        for_each_hwfn(cdev, i) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

                DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Resetting hw/fw\n");

                /* Check for incorrect states */
                qed_reg_assert(p_hwfn, p_hwfn->p_main_ptt,
                               QM_REG_USG_CNT_PF_TX, 0);
                qed_reg_assert(p_hwfn, p_hwfn->p_main_ptt,
                               QM_REG_USG_CNT_PF_OTHER, 0);

                /* Disable PF in HW blocks */
                qed_wr(p_hwfn, p_hwfn->p_main_ptt, DORQ_REG_PF_DB_ENABLE, 0);
                qed_wr(p_hwfn, p_hwfn->p_main_ptt, QM_REG_PF_EN, 0);
                qed_wr(p_hwfn, p_hwfn->p_main_ptt,
                       TCFC_REG_STRONG_ENABLE_PF, 0);
                qed_wr(p_hwfn, p_hwfn->p_main_ptt,
                       CCFC_REG_STRONG_ENABLE_PF, 0);

                /* Send unload command to MCP */
                rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
                                 DRV_MSG_CODE_UNLOAD_REQ,
                                 DRV_MB_PARAM_UNLOAD_WOL_MCP,
                                 &unload_resp, &unload_param);
                if (rc) {
                        DP_NOTICE(p_hwfn, "qed_hw_reset: UNLOAD_REQ failed\n");
                        unload_resp = FW_MSG_CODE_DRV_UNLOAD_ENGINE;
                }

                rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
                                 DRV_MSG_CODE_UNLOAD_DONE,
                                 0, &unload_resp, &unload_param);
                if (rc) {
                        DP_NOTICE(p_hwfn, "qed_hw_reset: UNLOAD_DONE failed\n");
                        return rc;
                }
        }

        return rc;
}

/* Free hwfn memory and resources acquired in hw_hwfn_prepare */
static void qed_hw_hwfn_free(struct qed_hwfn *p_hwfn)
{
        qed_ptt_pool_free(p_hwfn);
        kfree(p_hwfn->hw_info.p_igu_info);
}

/* Setup bar access */
static int qed_hw_hwfn_prepare(struct qed_hwfn *p_hwfn)
{
        int rc;

        /* Allocate PTT pool */
        rc = qed_ptt_pool_alloc(p_hwfn);
        if (rc)
                return rc;

        /* Allocate the main PTT */
        p_hwfn->p_main_ptt = qed_get_reserved_ptt(p_hwfn, RESERVED_PTT_MAIN);

        /* clear indirect access */
        qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_88_F0, 0);
        qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_8C_F0, 0);
        qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_90_F0, 0);
        qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_94_F0, 0);

        /* Clean previous errors if such exist */
        qed_wr(p_hwfn, p_hwfn->p_main_ptt,
               PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR,
               1 << p_hwfn->abs_pf_id);

        /* enable internal target-read */
        qed_wr(p_hwfn, p_hwfn->p_main_ptt,
               PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);

        return 0;
}

static void get_function_id(struct qed_hwfn *p_hwfn)
{
        /* ME Register */
        p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR);

        p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, PXP_PF_ME_CONCRETE_ADDR);

        p_hwfn->abs_pf_id = (p_hwfn->hw_info.concrete_fid >> 16) & 0xf;
        p_hwfn->rel_pf_id = GET_FIELD(p_hwfn->hw_info.concrete_fid,
                                      PXP_CONCRETE_FID_PFID);
        p_hwfn->port_id = GET_FIELD(p_hwfn->hw_info.concrete_fid,
                                    PXP_CONCRETE_FID_PORT);
}

static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
{
        u32 *feat_num = p_hwfn->hw_info.feat_num;
        int num_features = 1;

        feat_num[QED_PF_L2_QUE] = min_t(u32, RESC_NUM(p_hwfn, QED_SB) /
                                                num_features,
                                        RESC_NUM(p_hwfn, QED_L2_QUEUE));
        DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE,
                   "#PF_L2_QUEUES=%d #SBS=%d num_features=%d\n",
                   feat_num[QED_PF_L2_QUE], RESC_NUM(p_hwfn, QED_SB),
                   num_features);
}

static void qed_hw_get_resc(struct qed_hwfn *p_hwfn)
{
        u32 *resc_start = p_hwfn->hw_info.resc_start;
        u32 *resc_num = p_hwfn->hw_info.resc_num;
        int num_funcs, i;

        num_funcs = IS_MF(p_hwfn) ? MAX_NUM_PFS_BB
                                  : p_hwfn->cdev->num_ports_in_engines;

        resc_num[QED_SB] = min_t(u32,
                                 (MAX_SB_PER_PATH_BB / num_funcs),
                                 qed_int_get_num_sbs(p_hwfn, NULL));
        resc_num[QED_L2_QUEUE] = MAX_NUM_L2_QUEUES_BB / num_funcs;
        resc_num[QED_VPORT] = MAX_NUM_VPORTS_BB / num_funcs;
        resc_num[QED_RSS_ENG] = ETH_RSS_ENGINE_NUM_BB / num_funcs;
        resc_num[QED_PQ] = MAX_QM_TX_QUEUES_BB / num_funcs;
        resc_num[QED_RL] = 8;
        resc_num[QED_MAC] = ETH_NUM_MAC_FILTERS / num_funcs;
        resc_num[QED_VLAN] = (ETH_NUM_VLAN_FILTERS - 1 /*For vlan0*/) /
                             num_funcs;
        resc_num[QED_ILT] = 950;

        for (i = 0; i < QED_MAX_RESC; i++)
                resc_start[i] = resc_num[i] * p_hwfn->rel_pf_id;

        qed_hw_set_feat(p_hwfn);

        DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE,
                   "The numbers for each resource are:\n"
                   "SB = %d start = %d\n"
                   "L2_QUEUE = %d start = %d\n"
                   "VPORT = %d start = %d\n"
                   "PQ = %d start = %d\n"
                   "RL = %d start = %d\n"
                   "MAC = %d start = %d\n"
                   "VLAN = %d start = %d\n"
                   "ILT = %d start = %d\n",
                   p_hwfn->hw_info.resc_num[QED_SB],
                   p_hwfn->hw_info.resc_start[QED_SB],
                   p_hwfn->hw_info.resc_num[QED_L2_QUEUE],
                   p_hwfn->hw_info.resc_start[QED_L2_QUEUE],
                   p_hwfn->hw_info.resc_num[QED_VPORT],
                   p_hwfn->hw_info.resc_start[QED_VPORT],
                   p_hwfn->hw_info.resc_num[QED_PQ],
                   p_hwfn->hw_info.resc_start[QED_PQ],
                   p_hwfn->hw_info.resc_num[QED_RL],
                   p_hwfn->hw_info.resc_start[QED_RL],
                   p_hwfn->hw_info.resc_num[QED_MAC],
                   p_hwfn->hw_info.resc_start[QED_MAC],
                   p_hwfn->hw_info.resc_num[QED_VLAN],
                   p_hwfn->hw_info.resc_start[QED_VLAN],
                   p_hwfn->hw_info.resc_num[QED_ILT],
                   p_hwfn->hw_info.resc_start[QED_ILT]);
}

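/* Illustrative split (assuming two functions share the engine, i.e.
 * num_funcs == 2): each PF gets MAX_NUM_L2_QUEUES_BB / 2 L2 queues,
 * MAX_NUM_VPORTS_BB / 2 vports, and so on, and each slice starts at
 * resc_num[i] * rel_pf_id - so PF0 owns [0, resc_num) and PF1 owns
 * [resc_num, 2 * resc_num).
 */
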
static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn,
                               struct qed_ptt *p_ptt)
{
        u32 nvm_cfg1_offset, mf_mode, addr, generic_cont0, core_cfg;
        u32 port_cfg_addr, link_temp, val, nvm_cfg_addr;
        struct qed_mcp_link_params *link;

        /* Read global nvm_cfg address */
        nvm_cfg_addr = qed_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0);

        /* Verify MCP has initialized it */
        if (!nvm_cfg_addr) {
                DP_NOTICE(p_hwfn, "Shared memory not initialized\n");
                return -EINVAL;
        }

        /* Read nvm_cfg1 (Note this is just the offset, not the offsize (TBD)) */
        nvm_cfg1_offset = qed_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4);

        /* Read Vendor Id / Device Id */
        addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
               offsetof(struct nvm_cfg1, glob) +
               offsetof(struct nvm_cfg1_glob, pci_id);
        p_hwfn->hw_info.vendor_id = qed_rd(p_hwfn, p_ptt, addr) &
                                    NVM_CFG1_GLOB_VENDOR_ID_MASK;

        addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
               offsetof(struct nvm_cfg1, glob) +
               offsetof(struct nvm_cfg1_glob, core_cfg);

        core_cfg = qed_rd(p_hwfn, p_ptt, addr);

        switch ((core_cfg & NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK) >>
                NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET) {
        case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X40G:
                p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X40G;
                break;
        case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X50G:
                p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X50G;
                break;
        case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X100G:
                p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X100G;
                break;
        case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X10G_F:
                p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X10G_F;
                break;
        case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X10G_E:
                p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X10G_E;
                break;
        case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X20G:
                p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X20G;
                break;
        case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X40G:
                p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X40G;
                break;
        case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X25G:
                p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X25G;
                break;
        case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X25G:
                p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X25G;
                break;
        default:
                DP_NOTICE(p_hwfn, "Unknown port mode in 0x%08x\n",
                          core_cfg);
                break;
        }

        addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
               offsetof(struct nvm_cfg1, func[MCP_PF_ID(p_hwfn)]) +
               offsetof(struct nvm_cfg1_func, device_id);
        val = qed_rd(p_hwfn, p_ptt, addr);

        if (IS_MF(p_hwfn)) {
                p_hwfn->hw_info.device_id =
                        (val & NVM_CFG1_FUNC_MF_VENDOR_DEVICE_ID_MASK) >>
                        NVM_CFG1_FUNC_MF_VENDOR_DEVICE_ID_OFFSET;
        } else {
                p_hwfn->hw_info.device_id =
                        (val & NVM_CFG1_FUNC_VENDOR_DEVICE_ID_MASK) >>
                        NVM_CFG1_FUNC_VENDOR_DEVICE_ID_OFFSET;
        }

        /* Read default link configuration */
        link = &p_hwfn->mcp_info->link_input;
        port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
                        offsetof(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]);
        link_temp = qed_rd(p_hwfn, p_ptt,
                           port_cfg_addr +
                           offsetof(struct nvm_cfg1_port, speed_cap_mask));
        link->speed.advertised_speeds =
                link_temp & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK;

        p_hwfn->mcp_info->link_capabilities.speed_capabilities =
                                                link->speed.advertised_speeds;

        link_temp = qed_rd(p_hwfn, p_ptt,
                           port_cfg_addr +
                           offsetof(struct nvm_cfg1_port, link_settings));
        switch ((link_temp & NVM_CFG1_PORT_DRV_LINK_SPEED_MASK) >>
                NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET) {
        case NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG:
                link->speed.autoneg = true;
                break;
        case NVM_CFG1_PORT_DRV_LINK_SPEED_1G:
                link->speed.forced_speed = 1000;
                break;
        case NVM_CFG1_PORT_DRV_LINK_SPEED_10G:
                link->speed.forced_speed = 10000;
                break;
        case NVM_CFG1_PORT_DRV_LINK_SPEED_25G:
                link->speed.forced_speed = 25000;
                break;
        case NVM_CFG1_PORT_DRV_LINK_SPEED_40G:
                link->speed.forced_speed = 40000;
                break;
        case NVM_CFG1_PORT_DRV_LINK_SPEED_50G:
                link->speed.forced_speed = 50000;
                break;
        case NVM_CFG1_PORT_DRV_LINK_SPEED_100G:
                link->speed.forced_speed = 100000;
                break;
        default:
                DP_NOTICE(p_hwfn, "Unknown Speed in 0x%08x\n",
                          link_temp);
        }

        link_temp &= NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK;
        link_temp >>= NVM_CFG1_PORT_DRV_FLOW_CONTROL_OFFSET;
        link->pause.autoneg = !!(link_temp &
                                 NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG);
        link->pause.forced_rx = !!(link_temp &
                                   NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX);
        link->pause.forced_tx = !!(link_temp &
                                   NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX);
        link->loopback_mode = 0;

        DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
                   "Read default link: Speed 0x%08x, Adv. Speed 0x%08x, AN: 0x%02x, PAUSE AN: 0x%02x\n",
                   link->speed.forced_speed, link->speed.advertised_speeds,
                   link->speed.autoneg, link->pause.autoneg);

        /* Read Multi-function information from shmem */
        addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
               offsetof(struct nvm_cfg1, glob) +
               offsetof(struct nvm_cfg1_glob, generic_cont0);

        generic_cont0 = qed_rd(p_hwfn, p_ptt, addr);

        mf_mode = (generic_cont0 & NVM_CFG1_GLOB_MF_MODE_MASK) >>
                  NVM_CFG1_GLOB_MF_MODE_OFFSET;

        switch (mf_mode) {
        case NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED:
                p_hwfn->cdev->mf_mode = MF_OVLAN;
                break;
        case NVM_CFG1_GLOB_MF_MODE_NPAR1_0:
                p_hwfn->cdev->mf_mode = MF_NPAR;
                break;
        case NVM_CFG1_GLOB_MF_MODE_FORCED_SF:
                p_hwfn->cdev->mf_mode = SF;
                break;
        }
        DP_INFO(p_hwfn, "Multi function mode is %08x\n",
                p_hwfn->cdev->mf_mode);

        return qed_mcp_fill_shmem_func_info(p_hwfn, p_ptt);
}

static int
qed_get_hw_info(struct qed_hwfn *p_hwfn,
                struct qed_ptt *p_ptt,
                enum qed_pci_personality personality)
{
        u32 port_mode;
        int rc;

        /* Read the port mode */
        port_mode = qed_rd(p_hwfn, p_ptt,
                           CNIG_REG_NW_PORT_MODE_BB_B0);

        if (port_mode < 3) {
                p_hwfn->cdev->num_ports_in_engines = 1;
        } else if (port_mode <= 5) {
                p_hwfn->cdev->num_ports_in_engines = 2;
        } else {
                DP_NOTICE(p_hwfn, "PORT MODE: %d not supported\n",
                          port_mode);

                /* Default num_ports_in_engines to something */
                p_hwfn->cdev->num_ports_in_engines = 1;
        }

        qed_hw_get_nvm_info(p_hwfn, p_ptt);

        rc = qed_int_igu_read_cam(p_hwfn, p_ptt);
        if (rc)
                return rc;

        if (qed_mcp_is_init(p_hwfn))
                ether_addr_copy(p_hwfn->hw_info.hw_mac_addr,
                                p_hwfn->mcp_info->func_info.mac);
        else
                eth_random_addr(p_hwfn->hw_info.hw_mac_addr);

        if (qed_mcp_is_init(p_hwfn)) {
                if (p_hwfn->mcp_info->func_info.ovlan != QED_MCP_VLAN_UNSET)
                        p_hwfn->hw_info.ovlan =
                                p_hwfn->mcp_info->func_info.ovlan;

                qed_mcp_cmd_port_init(p_hwfn, p_ptt);
        }

        if (qed_mcp_is_init(p_hwfn)) {
                enum qed_pci_personality protocol;

                protocol = p_hwfn->mcp_info->func_info.protocol;
                p_hwfn->hw_info.personality = protocol;
        }

        qed_hw_get_resc(p_hwfn);

        return rc;
}

static void qed_get_dev_info(struct qed_dev *cdev)
{
        u32 tmp;

        cdev->chip_num = (u16)qed_rd(cdev->hwfns, cdev->hwfns[0].p_main_ptt,
                                     MISCS_REG_CHIP_NUM);
        cdev->chip_rev = (u16)qed_rd(cdev->hwfns, cdev->hwfns[0].p_main_ptt,
                                     MISCS_REG_CHIP_REV);
        MASK_FIELD(CHIP_REV, cdev->chip_rev);

        /* Learn number of HW-functions */
        tmp = qed_rd(cdev->hwfns, cdev->hwfns[0].p_main_ptt,
                     MISCS_REG_CMT_ENABLED_FOR_PAIR);

        if (tmp & (1 << cdev->hwfns[0].rel_pf_id)) {
                DP_NOTICE(cdev->hwfns, "device in CMT mode\n");
                cdev->num_hwfns = 2;
        } else {
                cdev->num_hwfns = 1;
        }

        cdev->chip_bond_id = qed_rd(cdev->hwfns, cdev->hwfns[0].p_main_ptt,
                                    MISCS_REG_CHIP_TEST_REG) >> 4;
        MASK_FIELD(CHIP_BOND_ID, cdev->chip_bond_id);
        cdev->chip_metal = (u16)qed_rd(cdev->hwfns, cdev->hwfns[0].p_main_ptt,
                                       MISCS_REG_CHIP_METAL);
        MASK_FIELD(CHIP_METAL, cdev->chip_metal);

        DP_INFO(cdev->hwfns,
                "Chip details - Num: %04x Rev: %04x Bond id: %04x Metal: %04x\n",
                cdev->chip_num, cdev->chip_rev,
                cdev->chip_bond_id, cdev->chip_metal);
}

static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn,
                                 void __iomem *p_regview,
                                 void __iomem *p_doorbells,
                                 enum qed_pci_personality personality)
{
        int rc = 0;

        /* Split PCI bars evenly between hwfns */
        p_hwfn->regview = p_regview;
        p_hwfn->doorbells = p_doorbells;

        /* Validate that chip access is feasible */
        if (REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR) == 0xffffffff) {
                DP_ERR(p_hwfn,
                       "Reading the ME register returns all Fs; Preventing further chip access\n");
                return -EINVAL;
        }

        get_function_id(p_hwfn);

        rc = qed_hw_hwfn_prepare(p_hwfn);
        if (rc) {
                DP_NOTICE(p_hwfn, "Failed to prepare hwfn's hw\n");
                goto err0;
        }

        /* First hwfn learns basic information, e.g., number of hwfns */
        if (!p_hwfn->my_id)
                qed_get_dev_info(p_hwfn->cdev);

        /* Initialize MCP structure */
        rc = qed_mcp_cmd_init(p_hwfn, p_hwfn->p_main_ptt);
        if (rc) {
                DP_NOTICE(p_hwfn, "Failed initializing mcp command\n");
                goto err1;
        }

        /* Read the device configuration information from the HW and SHMEM */
        rc = qed_get_hw_info(p_hwfn, p_hwfn->p_main_ptt, personality);
        if (rc) {
                DP_NOTICE(p_hwfn, "Failed to get HW information\n");
                goto err2;
        }

        /* Allocate the init RT array and initialize the init-ops engine */
        rc = qed_init_alloc(p_hwfn);
        if (rc) {
                DP_NOTICE(p_hwfn, "Failed to allocate the init array\n");
                goto err2;
        }

        return rc;
err2:
        qed_mcp_free(p_hwfn);
err1:
        qed_hw_hwfn_free(p_hwfn);
err0:
        return rc;
}

static u32 qed_hw_bar_size(struct qed_dev *cdev,
                           u8 bar_id)
{
        u32 size = pci_resource_len(cdev->pdev, (bar_id > 0) ? 2 : 0);

        return size / cdev->num_hwfns;
}

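/* Example: bar_id 0 maps to PCI BAR0 (the register view) and any other
 * bar_id to BAR2 (the doorbell bar); in CMT mode, where num_hwfns == 2,
 * each hw-function therefore receives half of the respective BAR.
 */
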
1390 int qed_hw_prepare(struct qed_dev *cdev,
1391                    int personality)
1392 {
1393         int rc, i;
1394
1395         /* Store the precompiled init data ptrs */
1396         qed_init_iro_array(cdev);
1397
1398         /* Initialize the first hwfn - will learn number of hwfns */
1399         rc = qed_hw_prepare_single(&cdev->hwfns[0], cdev->regview,
1400                                    cdev->doorbells, personality);
1401         if (rc)
1402                 return rc;
1403
1404         personality = cdev->hwfns[0].hw_info.personality;
1405
1406         /* Initialize the rest of the hwfns */
1407         for (i = 1; i < cdev->num_hwfns; i++) {
1408                 void __iomem *p_regview, *p_doorbell;
1409
1410                 p_regview =  cdev->regview +
1411                              i * qed_hw_bar_size(cdev, 0);
1412                 p_doorbell = cdev->doorbells +
1413                              i * qed_hw_bar_size(cdev, 1);
1414                 rc = qed_hw_prepare_single(&cdev->hwfns[i], p_regview,
1415                                            p_doorbell, personality);
1416                 if (rc) {
1417                         /* Cleanup previously initialized hwfns */
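                        /* --i first skips the hwfn that just failed, then
                         * unwinds hwfns i - 1 down to and including hwfn 0.
                         */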
1418                         while (--i >= 0) {
1419                                 qed_init_free(&cdev->hwfns[i]);
1420                                 qed_mcp_free(&cdev->hwfns[i]);
1421                                 qed_hw_hwfn_free(&cdev->hwfns[i]);
1422                         }
1423                         return rc;
1424                 }
1425         }
1426
1427         return 0;
1428 }
1429
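/* Releases, per hwfn, what qed_hw_prepare() set up: the init-ops arrays,
 * the hw sub-structures and the MCP info.
 */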
1430 void qed_hw_remove(struct qed_dev *cdev)
1431 {
1432         int i;
1433
1434         for_each_hwfn(cdev, i) {
1435                 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
1436
1437                 qed_init_free(p_hwfn);
1438                 qed_hw_hwfn_free(p_hwfn);
1439                 qed_mcp_free(p_hwfn);
1440         }
1441 }
1442
1443 int qed_chain_alloc(struct qed_dev *cdev,
1444                     enum qed_chain_use_mode intended_use,
1445                     enum qed_chain_mode mode,
1446                     u16 num_elems,
1447                     size_t elem_size,
1448                     struct qed_chain *p_chain)
1449 {
1450         dma_addr_t p_pbl_phys = 0;
1451         void *p_pbl_virt = NULL;
1452         dma_addr_t p_phys = 0;
1453         void *p_virt = NULL;
1454         u16 page_cnt = 0;
1455         size_t size;
1456
1457         if (mode == QED_CHAIN_MODE_SINGLE)
1458                 page_cnt = 1;
1459         else
1460                 page_cnt = QED_CHAIN_PAGE_CNT(num_elems, elem_size, mode);
1461
1462         size = page_cnt * QED_CHAIN_PAGE_SIZE;
1463         p_virt = dma_alloc_coherent(&cdev->pdev->dev,
1464                                     size, &p_phys, GFP_KERNEL);
1465         if (!p_virt) {
1466                 DP_NOTICE(cdev, "Failed to allocate chain mem\n");
1467                 goto nomem;
1468         }
1469
1470         if (mode == QED_CHAIN_MODE_PBL) {
1471                 size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
1472                 p_pbl_virt = dma_alloc_coherent(&cdev->pdev->dev,
1473                                                 size, &p_pbl_phys,
1474                                                 GFP_KERNEL);
1475                 if (!p_pbl_virt) {
1476                         DP_NOTICE(cdev, "Failed to allocate chain pbl mem\n");
1477                         goto nomem;
1478                 }
1479
1480                 qed_chain_pbl_init(p_chain, p_virt, p_phys, page_cnt,
1481                                    (u8)elem_size, intended_use,
1482                                    p_pbl_phys, p_pbl_virt);
1483         } else {
1484                 qed_chain_init(p_chain, p_virt, p_phys, page_cnt,
1485                                (u8)elem_size, intended_use, mode);
1486         }
1487
1488         return 0;
1489
1490 nomem:
1491         if (p_virt)
1492                 dma_free_coherent(&cdev->pdev->dev,
1493                                   page_cnt * QED_CHAIN_PAGE_SIZE, p_virt, p_phys);
1494         if (p_pbl_virt)
1495                 dma_free_coherent(&cdev->pdev->dev,
1496                                   page_cnt * QED_CHAIN_PBL_ENTRY_SIZE, p_pbl_virt, p_pbl_phys);
1497
1498         return -ENOMEM;
1499 }
1500
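/* Illustrative only: a minimal sketch of how qed_chain_alloc() and
 * qed_chain_free() pair up. The element size/count and the wrapper
 * function are hypothetical; QED_CHAIN_USE_TO_CONSUME_PRODUCE is assumed
 * to be one of the qed_chain_use_mode values.
 */
#if 0   /* example sketch, not built */
static int example_chain_setup(struct qed_dev *cdev,
                               struct qed_chain *p_chain)
{
        int rc;

        /* 256 elements of 8 bytes each, pages tracked through a PBL */
        rc = qed_chain_alloc(cdev, QED_CHAIN_USE_TO_CONSUME_PRODUCE,
                             QED_CHAIN_MODE_PBL, 256, 8, p_chain);
        if (rc)
                return rc;

        /* ... produce/consume elements on the chain ... */

        qed_chain_free(cdev, p_chain);
        return 0;
}
#endif
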
1501 void qed_chain_free(struct qed_dev *cdev,
1502                     struct qed_chain *p_chain)
1503 {
1504         size_t size;
1505
1506         if (!p_chain->p_virt_addr)
1507                 return;
1508
1509         if (p_chain->mode == QED_CHAIN_MODE_PBL) {
1510                 size = p_chain->page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
1511                 dma_free_coherent(&cdev->pdev->dev, size,
1512                                   p_chain->pbl.p_virt_table,
1513                                   p_chain->pbl.p_phys_table);
1514         }
1515
1516         size = p_chain->page_cnt * QED_CHAIN_PAGE_SIZE;
1517         dma_free_coherent(&cdev->pdev->dev, size,
1518                           p_chain->p_virt_addr,
1519                           p_chain->p_phys_addr);
1520 }
1521
1522 static void __qed_get_vport_stats(struct qed_dev *cdev,
1523                                   struct qed_eth_stats *stats)
1524 {
1525         int i, j;
1526
1527         memset(stats, 0, sizeof(*stats));
1528
1529         for_each_hwfn(cdev, i) {
1530                 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
1531                 struct eth_mstorm_per_queue_stat mstats;
1532                 struct eth_ustorm_per_queue_stat ustats;
1533                 struct eth_pstorm_per_queue_stat pstats;
1534                 struct tstorm_per_port_stat tstats;
1535                 struct port_stats port_stats;
1536                 struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
1537
1538                 if (!p_ptt) {
1539                         DP_ERR(p_hwfn, "Failed to acquire ptt\n");
1540                         continue;
1541                 }
1542
1543                 memset(&mstats, 0, sizeof(mstats));
1544                 qed_memcpy_from(p_hwfn, p_ptt, &mstats,
1545                                 p_hwfn->storm_stats.mstats.address,
1546                                 p_hwfn->storm_stats.mstats.len);
1547
1548                 memset(&ustats, 0, sizeof(ustats));
1549                 qed_memcpy_from(p_hwfn, p_ptt, &ustats,
1550                                 p_hwfn->storm_stats.ustats.address,
1551                                 p_hwfn->storm_stats.ustats.len);
1552
1553                 memset(&pstats, 0, sizeof(pstats));
1554                 qed_memcpy_from(p_hwfn, p_ptt, &pstats,
1555                                 p_hwfn->storm_stats.pstats.address,
1556                                 p_hwfn->storm_stats.pstats.len);
1557
1558                 memset(&tstats, 0, sizeof(tstats));
1559                 qed_memcpy_from(p_hwfn, p_ptt, &tstats,
1560                                 p_hwfn->storm_stats.tstats.address,
1561                                 p_hwfn->storm_stats.tstats.len);
1562
1563                 memset(&port_stats, 0, sizeof(port_stats));
1564
1565                 if (p_hwfn->mcp_info)
1566                         qed_memcpy_from(p_hwfn, p_ptt, &port_stats,
1567                                         p_hwfn->mcp_info->port_addr +
1568                                         offsetof(struct public_port, stats),
1569                                         sizeof(port_stats));
1570                 qed_ptt_release(p_hwfn, p_ptt);
1571
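                /* Storm stats are exported as {hi, lo} 32-bit register
                 * pairs; HILO_64_REGPAIR folds each pair into one u64
                 * before it is accumulated across hwfns.
                 */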
1572                 stats->no_buff_discards +=
1573                         HILO_64_REGPAIR(mstats.no_buff_discard);
1574                 stats->packet_too_big_discard +=
1575                         HILO_64_REGPAIR(mstats.packet_too_big_discard);
1576                 stats->ttl0_discard +=
1577                         HILO_64_REGPAIR(mstats.ttl0_discard);
1578                 stats->tpa_coalesced_pkts +=
1579                         HILO_64_REGPAIR(mstats.tpa_coalesced_pkts);
1580                 stats->tpa_coalesced_events +=
1581                         HILO_64_REGPAIR(mstats.tpa_coalesced_events);
1582                 stats->tpa_aborts_num +=
1583                         HILO_64_REGPAIR(mstats.tpa_aborts_num);
1584                 stats->tpa_coalesced_bytes +=
1585                         HILO_64_REGPAIR(mstats.tpa_coalesced_bytes);
1586
1587                 stats->rx_ucast_bytes +=
1588                         HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
1589                 stats->rx_mcast_bytes +=
1590                         HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
1591                 stats->rx_bcast_bytes +=
1592                         HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
1593                 stats->rx_ucast_pkts +=
1594                         HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
1595                 stats->rx_mcast_pkts +=
1596                         HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
1597                 stats->rx_bcast_pkts +=
1598                         HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
1599
1600                 stats->mftag_filter_discards +=
1601                         HILO_64_REGPAIR(tstats.mftag_filter_discard);
1602                 stats->mac_filter_discards +=
1603                         HILO_64_REGPAIR(tstats.eth_mac_filter_discard);
1604
1605                 stats->tx_ucast_bytes +=
1606                         HILO_64_REGPAIR(pstats.sent_ucast_bytes);
1607                 stats->tx_mcast_bytes +=
1608                         HILO_64_REGPAIR(pstats.sent_mcast_bytes);
1609                 stats->tx_bcast_bytes +=
1610                         HILO_64_REGPAIR(pstats.sent_bcast_bytes);
1611                 stats->tx_ucast_pkts +=
1612                         HILO_64_REGPAIR(pstats.sent_ucast_pkts);
1613                 stats->tx_mcast_pkts +=
1614                         HILO_64_REGPAIR(pstats.sent_mcast_pkts);
1615                 stats->tx_bcast_pkts +=
1616                         HILO_64_REGPAIR(pstats.sent_bcast_pkts);
1617                 stats->tx_err_drop_pkts +=
1618                         HILO_64_REGPAIR(pstats.error_drop_pkts);
1619                 stats->rx_64_byte_packets += port_stats.pmm.r64;
1620                 stats->rx_127_byte_packets += port_stats.pmm.r127;
1621                 stats->rx_255_byte_packets += port_stats.pmm.r255;
1622                 stats->rx_511_byte_packets += port_stats.pmm.r511;
1623                 stats->rx_1023_byte_packets += port_stats.pmm.r1023;
1624                 stats->rx_1518_byte_packets += port_stats.pmm.r1518;
1625                 stats->rx_1522_byte_packets += port_stats.pmm.r1522;
1626                 stats->rx_2047_byte_packets += port_stats.pmm.r2047;
1627                 stats->rx_4095_byte_packets += port_stats.pmm.r4095;
1628                 stats->rx_9216_byte_packets += port_stats.pmm.r9216;
1629                 stats->rx_16383_byte_packets += port_stats.pmm.r16383;
1630                 stats->rx_crc_errors += port_stats.pmm.rfcs;
1631                 stats->rx_mac_crtl_frames += port_stats.pmm.rxcf;
1632                 stats->rx_pause_frames += port_stats.pmm.rxpf;
1633                 stats->rx_pfc_frames += port_stats.pmm.rxpp;
1634                 stats->rx_align_errors += port_stats.pmm.raln;
1635                 stats->rx_carrier_errors += port_stats.pmm.rfcr;
1636                 stats->rx_oversize_packets += port_stats.pmm.rovr;
1637                 stats->rx_jabbers += port_stats.pmm.rjbr;
1638                 stats->rx_undersize_packets += port_stats.pmm.rund;
1639                 stats->rx_fragments += port_stats.pmm.rfrg;
1640                 stats->tx_64_byte_packets += port_stats.pmm.t64;
1641                 stats->tx_65_to_127_byte_packets += port_stats.pmm.t127;
1642                 stats->tx_128_to_255_byte_packets += port_stats.pmm.t255;
1643                 stats->tx_256_to_511_byte_packets += port_stats.pmm.t511;
1644                 stats->tx_512_to_1023_byte_packets += port_stats.pmm.t1023;
1645                 stats->tx_1024_to_1518_byte_packets += port_stats.pmm.t1518;
1646                 stats->tx_1519_to_2047_byte_packets += port_stats.pmm.t2047;
1647                 stats->tx_2048_to_4095_byte_packets += port_stats.pmm.t4095;
1648                 stats->tx_4096_to_9216_byte_packets += port_stats.pmm.t9216;
1649                 stats->tx_9217_to_16383_byte_packets += port_stats.pmm.t16383;
1650                 stats->tx_pause_frames += port_stats.pmm.txpf;
1651                 stats->tx_pfc_frames += port_stats.pmm.txpp;
1652                 stats->tx_lpi_entry_count += port_stats.pmm.tlpiec;
1653                 stats->tx_total_collisions += port_stats.pmm.tncl;
1654                 stats->rx_mac_bytes += port_stats.pmm.rbyte;
1655                 stats->rx_mac_uc_packets += port_stats.pmm.rxuca;
1656                 stats->rx_mac_mc_packets += port_stats.pmm.rxmca;
1657                 stats->rx_mac_bc_packets += port_stats.pmm.rxbca;
1658                 stats->rx_mac_frames_ok += port_stats.pmm.rxpok;
1659                 stats->tx_mac_bytes += port_stats.pmm.tbyte;
1660                 stats->tx_mac_uc_packets += port_stats.pmm.txuca;
1661                 stats->tx_mac_mc_packets += port_stats.pmm.txmca;
1662                 stats->tx_mac_bc_packets += port_stats.pmm.txbca;
1663                 stats->tx_mac_ctrl_frames += port_stats.pmm.txcf;
1664
1665                 for (j = 0; j < 8; j++) {
1666                         stats->brb_truncates += port_stats.brb.brb_truncate[j];
1667                         stats->brb_discards += port_stats.brb.brb_discard[j];
1668                 }
1669         }
1670 }
1671
1672 void qed_get_vport_stats(struct qed_dev *cdev,
1673                          struct qed_eth_stats *stats)
1674 {
1675         u32 i;
1676
1677         if (!cdev) {
1678                 memset(stats, 0, sizeof(*stats));
1679                 return;
1680         }
1681
1682         __qed_get_vport_stats(cdev, stats);
1683
1684         if (!cdev->reset_stats)
1685                 return;
1686
1687         /* Reduce the statistics baseline */
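        /* The cast below treats qed_eth_stats as a flat array of u64,
         * which assumes the struct contains nothing but u64 counters.
         */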
1688         for (i = 0; i < sizeof(struct qed_eth_stats) / sizeof(u64); i++)
1689                 ((u64 *)stats)[i] -= ((u64 *)cdev->reset_stats)[i];
1690 }
1691
1692 /* Zeroes the V-PORT specific portion of stats (port stats remain untouched) */
1693 void qed_reset_vport_stats(struct qed_dev *cdev)
1694 {
1695         int i;
1696
1697         for_each_hwfn(cdev, i) {
1698                 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
1699                 struct eth_mstorm_per_queue_stat mstats;
1700                 struct eth_ustorm_per_queue_stat ustats;
1701                 struct eth_pstorm_per_queue_stat pstats;
1702                 struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
1703
1704                 if (!p_ptt) {
1705                         DP_ERR(p_hwfn, "Failed to acquire ptt\n");
1706                         continue;
1707                 }
1708
1709                 memset(&mstats, 0, sizeof(mstats));
1710                 qed_memcpy_to(p_hwfn, p_ptt,
1711                               p_hwfn->storm_stats.mstats.address,
1712                               &mstats,
1713                               p_hwfn->storm_stats.mstats.len);
1714
1715                 memset(&ustats, 0, sizeof(ustats));
1716                 qed_memcpy_to(p_hwfn, p_ptt,
1717                               p_hwfn->storm_stats.ustats.address,
1718                               &ustats,
1719                               p_hwfn->storm_stats.ustats.len);
1720
1721                 memset(&pstats, 0, sizeof(pstats));
1722                 qed_memcpy_to(p_hwfn, p_ptt,
1723                               p_hwfn->storm_stats.pstats.address,
1724                               &pstats,
1725                               p_hwfn->storm_stats.pstats.len);
1726
1727                 qed_ptt_release(p_hwfn, p_ptt);
1728         }
1729
1730         /* PORT statistics are not necessarily reset, so we need to
1731          * read and create a baseline for future statistics.
1732          */
1733         if (!cdev->reset_stats)
1734                 DP_INFO(cdev, "Reset stats not allocated\n");
1735         else
1736                 __qed_get_vport_stats(cdev, cdev->reset_stats);
1737 }
1738
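/* Illustrative only: how the reset/get pair above is meant to be used.
 * The wrapper function is hypothetical; traffic between the two calls
 * stands in for normal operation.
 */
#if 0   /* example sketch, not built */
static void example_stats_cycle(struct qed_dev *cdev)
{
        struct qed_eth_stats stats;

        /* Zero the V-PORT stats and snapshot the port-stats baseline */
        qed_reset_vport_stats(cdev);

        /* ... traffic runs ... */

        /* Reports only what was counted since the reset */
        qed_get_vport_stats(cdev, &stats);
}
#endif
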
1739 int qed_fw_l2_queue(struct qed_hwfn *p_hwfn,
1740                     u16 src_id, u16 *dst_id)
1741 {
1742         if (src_id >= RESC_NUM(p_hwfn, QED_L2_QUEUE)) {
1743                 u16 min, max;
1744
1745                 min = (u16)RESC_START(p_hwfn, QED_L2_QUEUE);
1746                 max = min + RESC_NUM(p_hwfn, QED_L2_QUEUE) - 1;
1747                 DP_NOTICE(p_hwfn,
1748                           "l2_queue id [%d] is not valid, available indices [%d - %d]\n",
1749                           src_id, min, max);
1750
1751                 return -EINVAL;
1752         }
1753
1754         *dst_id = RESC_START(p_hwfn, QED_L2_QUEUE) + src_id;
1755
1756         return 0;
1757 }
1758
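/* Illustrative only: mapping a function-relative l2-queue id to its
 * absolute index. The wrapper function and the relative id 0 are
 * hypothetical.
 */
#if 0   /* example sketch, not built */
static int example_abs_l2_queue(struct qed_hwfn *p_hwfn)
{
        u16 abs_id;
        int rc;

        /* On success, abs_id == RESC_START(p_hwfn, QED_L2_QUEUE) + 0 */
        rc = qed_fw_l2_queue(p_hwfn, 0, &abs_id);
        return rc;
}
#endif
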
1759 int qed_fw_vport(struct qed_hwfn *p_hwfn,
1760                  u8 src_id, u8 *dst_id)
1761 {
1762         if (src_id >= RESC_NUM(p_hwfn, QED_VPORT)) {
1763                 u8 min, max;
1764
1765                 min = (u8)RESC_START(p_hwfn, QED_VPORT);
1766                 max = min + RESC_NUM(p_hwfn, QED_VPORT) - 1;
1767                 DP_NOTICE(p_hwfn,
1768                           "vport id [%d] is not valid, available indices [%d - %d]\n",
1769                           src_id, min, max);
1770
1771                 return -EINVAL;
1772         }
1773
1774         *dst_id = RESC_START(p_hwfn, QED_VPORT) + src_id;
1775
1776         return 0;
1777 }
1778
1779 int qed_fw_rss_eng(struct qed_hwfn *p_hwfn,
1780                    u8 src_id, u8 *dst_id)
1781 {
1782         if (src_id >= RESC_NUM(p_hwfn, QED_RSS_ENG)) {
1783                 u8 min, max;
1784
1785                 min = (u8)RESC_START(p_hwfn, QED_RSS_ENG);
1786                 max = min + RESC_NUM(p_hwfn, QED_RSS_ENG) - 1;
1787                 DP_NOTICE(p_hwfn,
1788                           "rss_eng id [%d] is not valid, available indices [%d - %d]\n",
1789                           src_id, min, max);
1790
1791                 return -EINVAL;
1792         }
1793
1794         *dst_id = RESC_START(p_hwfn, QED_RSS_ENG) + src_id;
1795
1796         return 0;
1797 }