]> git.kernelconcepts.de Git - karo-tx-linux.git/blob - drivers/net/ethernet/sfc/ef10.c
sfc: set interrupt moderation via MCDI
[karo-tx-linux.git] / drivers / net / ethernet / sfc / ef10.c
1 /****************************************************************************
2  * Driver for Solarflare network controllers and boards
3  * Copyright 2012-2013 Solarflare Communications Inc.
4  *
5  * This program is free software; you can redistribute it and/or modify it
6  * under the terms of the GNU General Public License version 2 as published
7  * by the Free Software Foundation, incorporated herein by reference.
8  */
9
10 #include "net_driver.h"
11 #include "ef10_regs.h"
12 #include "io.h"
13 #include "mcdi.h"
14 #include "mcdi_pcol.h"
15 #include "nic.h"
16 #include "workarounds.h"
17 #include "selftest.h"
18 #include "ef10_sriov.h"
19 #include <linux/in.h>
20 #include <linux/jhash.h>
21 #include <linux/wait.h>
22 #include <linux/workqueue.h>
23
24 /* Hardware control for EF10 architecture including 'Huntington'. */
25
26 #define EFX_EF10_DRVGEN_EV              7
27 enum {
28         EFX_EF10_TEST = 1,
29         EFX_EF10_REFILL,
30 };
31
32 /* The reserved RSS context value */
33 #define EFX_EF10_RSS_CONTEXT_INVALID    0xffffffff
34 /* The maximum size of a shared RSS context */
35 /* TODO: this should really be from the mcdi protocol export */
36 #define EFX_EF10_MAX_SHARED_RSS_CONTEXT_SIZE 64UL
37
38 /* The filter table(s) are managed by firmware and we have write-only
39  * access.  When removing filters we must identify them to the
40  * firmware by a 64-bit handle, but this is too wide for Linux kernel
41  * interfaces (32-bit for RX NFC, 16-bit for RFS).  Also, we need to
42  * be able to tell in advance whether a requested insertion will
43  * replace an existing filter.  Therefore we maintain a software hash
44  * table, which should be at least as large as the hardware hash
45  * table.
46  *
47  * Huntington has a single 8K filter table shared between all filter
48  * types and both ports.
49  */
50 #define HUNT_FILTER_TBL_ROWS 8192
51
52 #define EFX_EF10_FILTER_ID_INVALID 0xffff
53
54 #define EFX_EF10_FILTER_DEV_UC_MAX      32
55 #define EFX_EF10_FILTER_DEV_MC_MAX      256
56
57 /* VLAN list entry */
58 struct efx_ef10_vlan {
59         struct list_head list;
60         u16 vid;
61 };
62
63 /* Per-VLAN filters information */
64 struct efx_ef10_filter_vlan {
65         struct list_head list;
66         u16 vid;
67         u16 uc[EFX_EF10_FILTER_DEV_UC_MAX];
68         u16 mc[EFX_EF10_FILTER_DEV_MC_MAX];
69         u16 ucdef;
70         u16 bcast;
71         u16 mcdef;
72 };
73
74 struct efx_ef10_dev_addr {
75         u8 addr[ETH_ALEN];
76 };
77
78 struct efx_ef10_filter_table {
79 /* The MCDI match masks supported by this fw & hw, in order of priority */
80         u32 rx_match_mcdi_flags[
81                 MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MAXNUM];
82         unsigned int rx_match_count;
83
84         struct {
85                 unsigned long spec;     /* pointer to spec plus flag bits */
86 /* BUSY flag indicates that an update is in progress.  AUTO_OLD is
87  * used to mark and sweep MAC filters for the device address lists.
88  */
89 #define EFX_EF10_FILTER_FLAG_BUSY       1UL
90 #define EFX_EF10_FILTER_FLAG_AUTO_OLD   2UL
91 #define EFX_EF10_FILTER_FLAGS           3UL
92                 u64 handle;             /* firmware handle */
93         } *entry;
94         wait_queue_head_t waitq;
95 /* Shadow of net_device address lists, guarded by mac_lock */
96         struct efx_ef10_dev_addr dev_uc_list[EFX_EF10_FILTER_DEV_UC_MAX];
97         struct efx_ef10_dev_addr dev_mc_list[EFX_EF10_FILTER_DEV_MC_MAX];
98         int dev_uc_count;
99         int dev_mc_count;
100         bool uc_promisc;
101         bool mc_promisc;
102 /* Whether in multicast promiscuous mode when last changed */
103         bool mc_promisc_last;
104         bool vlan_filter;
105         struct list_head vlan_list;
106 };
107
108 /* An arbitrary search limit for the software hash table */
109 #define EFX_EF10_FILTER_SEARCH_LIMIT 200
110
111 static void efx_ef10_rx_free_indir_table(struct efx_nic *efx);
112 static void efx_ef10_filter_table_remove(struct efx_nic *efx);
113 static int efx_ef10_filter_add_vlan(struct efx_nic *efx, u16 vid);
114 static void efx_ef10_filter_del_vlan_internal(struct efx_nic *efx,
115                                               struct efx_ef10_filter_vlan *vlan);
116 static void efx_ef10_filter_del_vlan(struct efx_nic *efx, u16 vid);
117
118 static int efx_ef10_get_warm_boot_count(struct efx_nic *efx)
119 {
120         efx_dword_t reg;
121
122         efx_readd(efx, &reg, ER_DZ_BIU_MC_SFT_STATUS);
123         return EFX_DWORD_FIELD(reg, EFX_WORD_1) == 0xb007 ?
124                 EFX_DWORD_FIELD(reg, EFX_WORD_0) : -EIO;
125 }
126
127 static unsigned int efx_ef10_mem_map_size(struct efx_nic *efx)
128 {
129         int bar;
130
131         bar = efx->type->mem_bar;
132         return resource_size(&efx->pci_dev->resource[bar]);
133 }
134
135 static bool efx_ef10_is_vf(struct efx_nic *efx)
136 {
137         return efx->type->is_vf;
138 }
139
140 static int efx_ef10_get_pf_index(struct efx_nic *efx)
141 {
142         MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_FUNCTION_INFO_OUT_LEN);
143         struct efx_ef10_nic_data *nic_data = efx->nic_data;
144         size_t outlen;
145         int rc;
146
147         rc = efx_mcdi_rpc(efx, MC_CMD_GET_FUNCTION_INFO, NULL, 0, outbuf,
148                           sizeof(outbuf), &outlen);
149         if (rc)
150                 return rc;
151         if (outlen < sizeof(outbuf))
152                 return -EIO;
153
154         nic_data->pf_index = MCDI_DWORD(outbuf, GET_FUNCTION_INFO_OUT_PF);
155         return 0;
156 }
157
158 #ifdef CONFIG_SFC_SRIOV
159 static int efx_ef10_get_vf_index(struct efx_nic *efx)
160 {
161         MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_FUNCTION_INFO_OUT_LEN);
162         struct efx_ef10_nic_data *nic_data = efx->nic_data;
163         size_t outlen;
164         int rc;
165
166         rc = efx_mcdi_rpc(efx, MC_CMD_GET_FUNCTION_INFO, NULL, 0, outbuf,
167                           sizeof(outbuf), &outlen);
168         if (rc)
169                 return rc;
170         if (outlen < sizeof(outbuf))
171                 return -EIO;
172
173         nic_data->vf_index = MCDI_DWORD(outbuf, GET_FUNCTION_INFO_OUT_VF);
174         return 0;
175 }
176 #endif
177
178 static int efx_ef10_init_datapath_caps(struct efx_nic *efx)
179 {
180         MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CAPABILITIES_V2_OUT_LEN);
181         struct efx_ef10_nic_data *nic_data = efx->nic_data;
182         size_t outlen;
183         int rc;
184
185         BUILD_BUG_ON(MC_CMD_GET_CAPABILITIES_IN_LEN != 0);
186
187         rc = efx_mcdi_rpc(efx, MC_CMD_GET_CAPABILITIES, NULL, 0,
188                           outbuf, sizeof(outbuf), &outlen);
189         if (rc)
190                 return rc;
191         if (outlen < MC_CMD_GET_CAPABILITIES_OUT_LEN) {
192                 netif_err(efx, drv, efx->net_dev,
193                           "unable to read datapath firmware capabilities\n");
194                 return -EIO;
195         }
196
197         nic_data->datapath_caps =
198                 MCDI_DWORD(outbuf, GET_CAPABILITIES_OUT_FLAGS1);
199
200         if (outlen >= MC_CMD_GET_CAPABILITIES_V2_OUT_LEN)
201                 nic_data->datapath_caps2 = MCDI_DWORD(outbuf,
202                                 GET_CAPABILITIES_V2_OUT_FLAGS2);
203         else
204                 nic_data->datapath_caps2 = 0;
205
206         /* record the DPCPU firmware IDs to determine VEB vswitching support.
207          */
208         nic_data->rx_dpcpu_fw_id =
209                 MCDI_WORD(outbuf, GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID);
210         nic_data->tx_dpcpu_fw_id =
211                 MCDI_WORD(outbuf, GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID);
212
213         if (!(nic_data->datapath_caps &
214               (1 << MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_14_LBN))) {
215                 netif_err(efx, probe, efx->net_dev,
216                           "current firmware does not support an RX prefix\n");
217                 return -ENODEV;
218         }
219
220         return 0;
221 }
222
223 static int efx_ef10_get_sysclk_freq(struct efx_nic *efx)
224 {
225         MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CLOCK_OUT_LEN);
226         int rc;
227
228         rc = efx_mcdi_rpc(efx, MC_CMD_GET_CLOCK, NULL, 0,
229                           outbuf, sizeof(outbuf), NULL);
230         if (rc)
231                 return rc;
232         rc = MCDI_DWORD(outbuf, GET_CLOCK_OUT_SYS_FREQ);
233         return rc > 0 ? rc : -ERANGE;
234 }
235
236 static int efx_ef10_get_mac_address_pf(struct efx_nic *efx, u8 *mac_address)
237 {
238         MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_MAC_ADDRESSES_OUT_LEN);
239         size_t outlen;
240         int rc;
241
242         BUILD_BUG_ON(MC_CMD_GET_MAC_ADDRESSES_IN_LEN != 0);
243
244         rc = efx_mcdi_rpc(efx, MC_CMD_GET_MAC_ADDRESSES, NULL, 0,
245                           outbuf, sizeof(outbuf), &outlen);
246         if (rc)
247                 return rc;
248         if (outlen < MC_CMD_GET_MAC_ADDRESSES_OUT_LEN)
249                 return -EIO;
250
251         ether_addr_copy(mac_address,
252                         MCDI_PTR(outbuf, GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE));
253         return 0;
254 }
255
256 static int efx_ef10_get_mac_address_vf(struct efx_nic *efx, u8 *mac_address)
257 {
258         MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN);
259         MCDI_DECLARE_BUF(outbuf, MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX);
260         size_t outlen;
261         int num_addrs, rc;
262
263         MCDI_SET_DWORD(inbuf, VPORT_GET_MAC_ADDRESSES_IN_VPORT_ID,
264                        EVB_PORT_ID_ASSIGNED);
265         rc = efx_mcdi_rpc(efx, MC_CMD_VPORT_GET_MAC_ADDRESSES, inbuf,
266                           sizeof(inbuf), outbuf, sizeof(outbuf), &outlen);
267
268         if (rc)
269                 return rc;
270         if (outlen < MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMIN)
271                 return -EIO;
272
273         num_addrs = MCDI_DWORD(outbuf,
274                                VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT);
275
276         WARN_ON(num_addrs != 1);
277
278         ether_addr_copy(mac_address,
279                         MCDI_PTR(outbuf, VPORT_GET_MAC_ADDRESSES_OUT_MACADDR));
280
281         return 0;
282 }
283
284 static ssize_t efx_ef10_show_link_control_flag(struct device *dev,
285                                                struct device_attribute *attr,
286                                                char *buf)
287 {
288         struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
289
290         return sprintf(buf, "%d\n",
291                        ((efx->mcdi->fn_flags) &
292                         (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL))
293                        ? 1 : 0);
294 }
295
296 static ssize_t efx_ef10_show_primary_flag(struct device *dev,
297                                           struct device_attribute *attr,
298                                           char *buf)
299 {
300         struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
301
302         return sprintf(buf, "%d\n",
303                        ((efx->mcdi->fn_flags) &
304                         (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY))
305                        ? 1 : 0);
306 }
307
308 static struct efx_ef10_vlan *efx_ef10_find_vlan(struct efx_nic *efx, u16 vid)
309 {
310         struct efx_ef10_nic_data *nic_data = efx->nic_data;
311         struct efx_ef10_vlan *vlan;
312
313         WARN_ON(!mutex_is_locked(&nic_data->vlan_lock));
314
315         list_for_each_entry(vlan, &nic_data->vlan_list, list) {
316                 if (vlan->vid == vid)
317                         return vlan;
318         }
319
320         return NULL;
321 }
322
323 static int efx_ef10_add_vlan(struct efx_nic *efx, u16 vid)
324 {
325         struct efx_ef10_nic_data *nic_data = efx->nic_data;
326         struct efx_ef10_vlan *vlan;
327         int rc;
328
329         mutex_lock(&nic_data->vlan_lock);
330
331         vlan = efx_ef10_find_vlan(efx, vid);
332         if (vlan) {
333                 /* We add VID 0 on init. 8021q adds it on module init
334                  * for all interfaces with VLAN filtring feature.
335                  */
336                 if (vid == 0)
337                         goto done_unlock;
338                 netif_warn(efx, drv, efx->net_dev,
339                            "VLAN %u already added\n", vid);
340                 rc = -EALREADY;
341                 goto fail_exist;
342         }
343
344         rc = -ENOMEM;
345         vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
346         if (!vlan)
347                 goto fail_alloc;
348
349         vlan->vid = vid;
350
351         list_add_tail(&vlan->list, &nic_data->vlan_list);
352
353         if (efx->filter_state) {
354                 mutex_lock(&efx->mac_lock);
355                 down_write(&efx->filter_sem);
356                 rc = efx_ef10_filter_add_vlan(efx, vlan->vid);
357                 up_write(&efx->filter_sem);
358                 mutex_unlock(&efx->mac_lock);
359                 if (rc)
360                         goto fail_filter_add_vlan;
361         }
362
363 done_unlock:
364         mutex_unlock(&nic_data->vlan_lock);
365         return 0;
366
367 fail_filter_add_vlan:
368         list_del(&vlan->list);
369         kfree(vlan);
370 fail_alloc:
371 fail_exist:
372         mutex_unlock(&nic_data->vlan_lock);
373         return rc;
374 }
375
376 static void efx_ef10_del_vlan_internal(struct efx_nic *efx,
377                                        struct efx_ef10_vlan *vlan)
378 {
379         struct efx_ef10_nic_data *nic_data = efx->nic_data;
380
381         WARN_ON(!mutex_is_locked(&nic_data->vlan_lock));
382
383         if (efx->filter_state) {
384                 down_write(&efx->filter_sem);
385                 efx_ef10_filter_del_vlan(efx, vlan->vid);
386                 up_write(&efx->filter_sem);
387         }
388
389         list_del(&vlan->list);
390         kfree(vlan);
391 }
392
393 static int efx_ef10_del_vlan(struct efx_nic *efx, u16 vid)
394 {
395         struct efx_ef10_nic_data *nic_data = efx->nic_data;
396         struct efx_ef10_vlan *vlan;
397         int rc = 0;
398
399         /* 8021q removes VID 0 on module unload for all interfaces
400          * with VLAN filtering feature. We need to keep it to receive
401          * untagged traffic.
402          */
403         if (vid == 0)
404                 return 0;
405
406         mutex_lock(&nic_data->vlan_lock);
407
408         vlan = efx_ef10_find_vlan(efx, vid);
409         if (!vlan) {
410                 netif_err(efx, drv, efx->net_dev,
411                           "VLAN %u to be deleted not found\n", vid);
412                 rc = -ENOENT;
413         } else {
414                 efx_ef10_del_vlan_internal(efx, vlan);
415         }
416
417         mutex_unlock(&nic_data->vlan_lock);
418
419         return rc;
420 }
421
422 static void efx_ef10_cleanup_vlans(struct efx_nic *efx)
423 {
424         struct efx_ef10_nic_data *nic_data = efx->nic_data;
425         struct efx_ef10_vlan *vlan, *next_vlan;
426
427         mutex_lock(&nic_data->vlan_lock);
428         list_for_each_entry_safe(vlan, next_vlan, &nic_data->vlan_list, list)
429                 efx_ef10_del_vlan_internal(efx, vlan);
430         mutex_unlock(&nic_data->vlan_lock);
431 }
432
433 static DEVICE_ATTR(link_control_flag, 0444, efx_ef10_show_link_control_flag,
434                    NULL);
435 static DEVICE_ATTR(primary_flag, 0444, efx_ef10_show_primary_flag, NULL);
436
437 static int efx_ef10_probe(struct efx_nic *efx)
438 {
439         struct efx_ef10_nic_data *nic_data;
440         struct net_device *net_dev = efx->net_dev;
441         int i, rc;
442
443         /* We can have one VI for each 8K region.  However, until we
444          * use TX option descriptors we need two TX queues per channel.
445          */
446         efx->max_channels = min_t(unsigned int,
447                                   EFX_MAX_CHANNELS,
448                                   efx_ef10_mem_map_size(efx) /
449                                   (EFX_VI_PAGE_SIZE * EFX_TXQ_TYPES));
450         efx->max_tx_channels = efx->max_channels;
451         if (WARN_ON(efx->max_channels == 0))
452                 return -EIO;
453
454         nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL);
455         if (!nic_data)
456                 return -ENOMEM;
457         efx->nic_data = nic_data;
458
459         /* we assume later that we can copy from this buffer in dwords */
460         BUILD_BUG_ON(MCDI_CTL_SDU_LEN_MAX_V2 % 4);
461
462         rc = efx_nic_alloc_buffer(efx, &nic_data->mcdi_buf,
463                                   8 + MCDI_CTL_SDU_LEN_MAX_V2, GFP_KERNEL);
464         if (rc)
465                 goto fail1;
466
467         /* Get the MC's warm boot count.  In case it's rebooting right
468          * now, be prepared to retry.
469          */
470         i = 0;
471         for (;;) {
472                 rc = efx_ef10_get_warm_boot_count(efx);
473                 if (rc >= 0)
474                         break;
475                 if (++i == 5)
476                         goto fail2;
477                 ssleep(1);
478         }
479         nic_data->warm_boot_count = rc;
480
481         nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;
482
483         nic_data->vport_id = EVB_PORT_ID_ASSIGNED;
484
485         /* In case we're recovering from a crash (kexec), we want to
486          * cancel any outstanding request by the previous user of this
487          * function.  We send a special message using the least
488          * significant bits of the 'high' (doorbell) register.
489          */
490         _efx_writed(efx, cpu_to_le32(1), ER_DZ_MC_DB_HWRD);
491
492         rc = efx_mcdi_init(efx);
493         if (rc)
494                 goto fail2;
495
496         /* Reset (most) configuration for this function */
497         rc = efx_mcdi_reset(efx, RESET_TYPE_ALL);
498         if (rc)
499                 goto fail3;
500
501         /* Enable event logging */
502         rc = efx_mcdi_log_ctrl(efx, true, false, 0);
503         if (rc)
504                 goto fail3;
505
506         rc = device_create_file(&efx->pci_dev->dev,
507                                 &dev_attr_link_control_flag);
508         if (rc)
509                 goto fail3;
510
511         rc = device_create_file(&efx->pci_dev->dev, &dev_attr_primary_flag);
512         if (rc)
513                 goto fail4;
514
515         rc = efx_ef10_get_pf_index(efx);
516         if (rc)
517                 goto fail5;
518
519         rc = efx_ef10_init_datapath_caps(efx);
520         if (rc < 0)
521                 goto fail5;
522
523         efx->rx_packet_len_offset =
524                 ES_DZ_RX_PREFIX_PKTLEN_OFST - ES_DZ_RX_PREFIX_SIZE;
525
526         rc = efx_mcdi_port_get_number(efx);
527         if (rc < 0)
528                 goto fail5;
529         efx->port_num = rc;
530         net_dev->dev_port = rc;
531
532         rc = efx->type->get_mac_address(efx, efx->net_dev->perm_addr);
533         if (rc)
534                 goto fail5;
535
536         rc = efx_ef10_get_sysclk_freq(efx);
537         if (rc < 0)
538                 goto fail5;
539         efx->timer_quantum_ns = 1536000 / rc; /* 1536 cycles */
540
541         /* Check whether firmware supports bug 35388 workaround.
542          * First try to enable it, then if we get EPERM, just
543          * ask if it's already enabled
544          */
545         rc = efx_mcdi_set_workaround(efx, MC_CMD_WORKAROUND_BUG35388, true, NULL);
546         if (rc == 0) {
547                 nic_data->workaround_35388 = true;
548         } else if (rc == -EPERM) {
549                 unsigned int enabled;
550
551                 rc = efx_mcdi_get_workarounds(efx, NULL, &enabled);
552                 if (rc)
553                         goto fail3;
554                 nic_data->workaround_35388 = enabled &
555                         MC_CMD_GET_WORKAROUNDS_OUT_BUG35388;
556         } else if (rc != -ENOSYS && rc != -ENOENT) {
557                 goto fail5;
558         }
559         netif_dbg(efx, probe, efx->net_dev,
560                   "workaround for bug 35388 is %sabled\n",
561                   nic_data->workaround_35388 ? "en" : "dis");
562
563         rc = efx_mcdi_mon_probe(efx);
564         if (rc && rc != -EPERM)
565                 goto fail5;
566
567         efx_ptp_probe(efx, NULL);
568
569 #ifdef CONFIG_SFC_SRIOV
570         if ((efx->pci_dev->physfn) && (!efx->pci_dev->is_physfn)) {
571                 struct pci_dev *pci_dev_pf = efx->pci_dev->physfn;
572                 struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf);
573
574                 efx_pf->type->get_mac_address(efx_pf, nic_data->port_id);
575         } else
576 #endif
577                 ether_addr_copy(nic_data->port_id, efx->net_dev->perm_addr);
578
579         INIT_LIST_HEAD(&nic_data->vlan_list);
580         mutex_init(&nic_data->vlan_lock);
581
582         /* Add unspecified VID to support VLAN filtering being disabled */
583         rc = efx_ef10_add_vlan(efx, EFX_FILTER_VID_UNSPEC);
584         if (rc)
585                 goto fail_add_vid_unspec;
586
587         /* If VLAN filtering is enabled, we need VID 0 to get untagged
588          * traffic.  It is added automatically if 8021q module is loaded,
589          * but we can't rely on it since module may be not loaded.
590          */
591         rc = efx_ef10_add_vlan(efx, 0);
592         if (rc)
593                 goto fail_add_vid_0;
594
595         return 0;
596
597 fail_add_vid_0:
598         efx_ef10_cleanup_vlans(efx);
599 fail_add_vid_unspec:
600         mutex_destroy(&nic_data->vlan_lock);
601         efx_ptp_remove(efx);
602         efx_mcdi_mon_remove(efx);
603 fail5:
604         device_remove_file(&efx->pci_dev->dev, &dev_attr_primary_flag);
605 fail4:
606         device_remove_file(&efx->pci_dev->dev, &dev_attr_link_control_flag);
607 fail3:
608         efx_mcdi_fini(efx);
609 fail2:
610         efx_nic_free_buffer(efx, &nic_data->mcdi_buf);
611 fail1:
612         kfree(nic_data);
613         efx->nic_data = NULL;
614         return rc;
615 }
616
617 static int efx_ef10_free_vis(struct efx_nic *efx)
618 {
619         MCDI_DECLARE_BUF_ERR(outbuf);
620         size_t outlen;
621         int rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FREE_VIS, NULL, 0,
622                                     outbuf, sizeof(outbuf), &outlen);
623
624         /* -EALREADY means nothing to free, so ignore */
625         if (rc == -EALREADY)
626                 rc = 0;
627         if (rc)
628                 efx_mcdi_display_error(efx, MC_CMD_FREE_VIS, 0, outbuf, outlen,
629                                        rc);
630         return rc;
631 }
632
633 #ifdef EFX_USE_PIO
634
635 static void efx_ef10_free_piobufs(struct efx_nic *efx)
636 {
637         struct efx_ef10_nic_data *nic_data = efx->nic_data;
638         MCDI_DECLARE_BUF(inbuf, MC_CMD_FREE_PIOBUF_IN_LEN);
639         unsigned int i;
640         int rc;
641
642         BUILD_BUG_ON(MC_CMD_FREE_PIOBUF_OUT_LEN != 0);
643
644         for (i = 0; i < nic_data->n_piobufs; i++) {
645                 MCDI_SET_DWORD(inbuf, FREE_PIOBUF_IN_PIOBUF_HANDLE,
646                                nic_data->piobuf_handle[i]);
647                 rc = efx_mcdi_rpc(efx, MC_CMD_FREE_PIOBUF, inbuf, sizeof(inbuf),
648                                   NULL, 0, NULL);
649                 WARN_ON(rc);
650         }
651
652         nic_data->n_piobufs = 0;
653 }
654
655 static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n)
656 {
657         struct efx_ef10_nic_data *nic_data = efx->nic_data;
658         MCDI_DECLARE_BUF(outbuf, MC_CMD_ALLOC_PIOBUF_OUT_LEN);
659         unsigned int i;
660         size_t outlen;
661         int rc = 0;
662
663         BUILD_BUG_ON(MC_CMD_ALLOC_PIOBUF_IN_LEN != 0);
664
665         for (i = 0; i < n; i++) {
666                 rc = efx_mcdi_rpc_quiet(efx, MC_CMD_ALLOC_PIOBUF, NULL, 0,
667                                         outbuf, sizeof(outbuf), &outlen);
668                 if (rc) {
669                         /* Don't display the MC error if we didn't have space
670                          * for a VF.
671                          */
672                         if (!(efx_ef10_is_vf(efx) && rc == -ENOSPC))
673                                 efx_mcdi_display_error(efx, MC_CMD_ALLOC_PIOBUF,
674                                                        0, outbuf, outlen, rc);
675                         break;
676                 }
677                 if (outlen < MC_CMD_ALLOC_PIOBUF_OUT_LEN) {
678                         rc = -EIO;
679                         break;
680                 }
681                 nic_data->piobuf_handle[i] =
682                         MCDI_DWORD(outbuf, ALLOC_PIOBUF_OUT_PIOBUF_HANDLE);
683                 netif_dbg(efx, probe, efx->net_dev,
684                           "allocated PIO buffer %u handle %x\n", i,
685                           nic_data->piobuf_handle[i]);
686         }
687
688         nic_data->n_piobufs = i;
689         if (rc)
690                 efx_ef10_free_piobufs(efx);
691         return rc;
692 }
693
694 static int efx_ef10_link_piobufs(struct efx_nic *efx)
695 {
696         struct efx_ef10_nic_data *nic_data = efx->nic_data;
697         _MCDI_DECLARE_BUF(inbuf,
698                           max(MC_CMD_LINK_PIOBUF_IN_LEN,
699                               MC_CMD_UNLINK_PIOBUF_IN_LEN));
700         struct efx_channel *channel;
701         struct efx_tx_queue *tx_queue;
702         unsigned int offset, index;
703         int rc;
704
705         BUILD_BUG_ON(MC_CMD_LINK_PIOBUF_OUT_LEN != 0);
706         BUILD_BUG_ON(MC_CMD_UNLINK_PIOBUF_OUT_LEN != 0);
707
708         memset(inbuf, 0, sizeof(inbuf));
709
710         /* Link a buffer to each VI in the write-combining mapping */
711         for (index = 0; index < nic_data->n_piobufs; ++index) {
712                 MCDI_SET_DWORD(inbuf, LINK_PIOBUF_IN_PIOBUF_HANDLE,
713                                nic_data->piobuf_handle[index]);
714                 MCDI_SET_DWORD(inbuf, LINK_PIOBUF_IN_TXQ_INSTANCE,
715                                nic_data->pio_write_vi_base + index);
716                 rc = efx_mcdi_rpc(efx, MC_CMD_LINK_PIOBUF,
717                                   inbuf, MC_CMD_LINK_PIOBUF_IN_LEN,
718                                   NULL, 0, NULL);
719                 if (rc) {
720                         netif_err(efx, drv, efx->net_dev,
721                                   "failed to link VI %u to PIO buffer %u (%d)\n",
722                                   nic_data->pio_write_vi_base + index, index,
723                                   rc);
724                         goto fail;
725                 }
726                 netif_dbg(efx, probe, efx->net_dev,
727                           "linked VI %u to PIO buffer %u\n",
728                           nic_data->pio_write_vi_base + index, index);
729         }
730
731         /* Link a buffer to each TX queue */
732         efx_for_each_channel(channel, efx) {
733                 efx_for_each_channel_tx_queue(tx_queue, channel) {
734                         /* We assign the PIO buffers to queues in
735                          * reverse order to allow for the following
736                          * special case.
737                          */
738                         offset = ((efx->tx_channel_offset + efx->n_tx_channels -
739                                    tx_queue->channel->channel - 1) *
740                                   efx_piobuf_size);
741                         index = offset / ER_DZ_TX_PIOBUF_SIZE;
742                         offset = offset % ER_DZ_TX_PIOBUF_SIZE;
743
744                         /* When the host page size is 4K, the first
745                          * host page in the WC mapping may be within
746                          * the same VI page as the last TX queue.  We
747                          * can only link one buffer to each VI.
748                          */
749                         if (tx_queue->queue == nic_data->pio_write_vi_base) {
750                                 BUG_ON(index != 0);
751                                 rc = 0;
752                         } else {
753                                 MCDI_SET_DWORD(inbuf,
754                                                LINK_PIOBUF_IN_PIOBUF_HANDLE,
755                                                nic_data->piobuf_handle[index]);
756                                 MCDI_SET_DWORD(inbuf,
757                                                LINK_PIOBUF_IN_TXQ_INSTANCE,
758                                                tx_queue->queue);
759                                 rc = efx_mcdi_rpc(efx, MC_CMD_LINK_PIOBUF,
760                                                   inbuf, MC_CMD_LINK_PIOBUF_IN_LEN,
761                                                   NULL, 0, NULL);
762                         }
763
764                         if (rc) {
765                                 /* This is non-fatal; the TX path just
766                                  * won't use PIO for this queue
767                                  */
768                                 netif_err(efx, drv, efx->net_dev,
769                                           "failed to link VI %u to PIO buffer %u (%d)\n",
770                                           tx_queue->queue, index, rc);
771                                 tx_queue->piobuf = NULL;
772                         } else {
773                                 tx_queue->piobuf =
774                                         nic_data->pio_write_base +
775                                         index * EFX_VI_PAGE_SIZE + offset;
776                                 tx_queue->piobuf_offset = offset;
777                                 netif_dbg(efx, probe, efx->net_dev,
778                                           "linked VI %u to PIO buffer %u offset %x addr %p\n",
779                                           tx_queue->queue, index,
780                                           tx_queue->piobuf_offset,
781                                           tx_queue->piobuf);
782                         }
783                 }
784         }
785
786         return 0;
787
788 fail:
789         while (index--) {
790                 MCDI_SET_DWORD(inbuf, UNLINK_PIOBUF_IN_TXQ_INSTANCE,
791                                nic_data->pio_write_vi_base + index);
792                 efx_mcdi_rpc(efx, MC_CMD_UNLINK_PIOBUF,
793                              inbuf, MC_CMD_UNLINK_PIOBUF_IN_LEN,
794                              NULL, 0, NULL);
795         }
796         return rc;
797 }
798
799 static void efx_ef10_forget_old_piobufs(struct efx_nic *efx)
800 {
801         struct efx_channel *channel;
802         struct efx_tx_queue *tx_queue;
803
804         /* All our existing PIO buffers went away */
805         efx_for_each_channel(channel, efx)
806                 efx_for_each_channel_tx_queue(tx_queue, channel)
807                         tx_queue->piobuf = NULL;
808 }
809
810 #else /* !EFX_USE_PIO */
811
812 static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n)
813 {
814         return n == 0 ? 0 : -ENOBUFS;
815 }
816
817 static int efx_ef10_link_piobufs(struct efx_nic *efx)
818 {
819         return 0;
820 }
821
822 static void efx_ef10_free_piobufs(struct efx_nic *efx)
823 {
824 }
825
826 static void efx_ef10_forget_old_piobufs(struct efx_nic *efx)
827 {
828 }
829
830 #endif /* EFX_USE_PIO */
831
832 static void efx_ef10_remove(struct efx_nic *efx)
833 {
834         struct efx_ef10_nic_data *nic_data = efx->nic_data;
835         int rc;
836
837 #ifdef CONFIG_SFC_SRIOV
838         struct efx_ef10_nic_data *nic_data_pf;
839         struct pci_dev *pci_dev_pf;
840         struct efx_nic *efx_pf;
841         struct ef10_vf *vf;
842
843         if (efx->pci_dev->is_virtfn) {
844                 pci_dev_pf = efx->pci_dev->physfn;
845                 if (pci_dev_pf) {
846                         efx_pf = pci_get_drvdata(pci_dev_pf);
847                         nic_data_pf = efx_pf->nic_data;
848                         vf = nic_data_pf->vf + nic_data->vf_index;
849                         vf->efx = NULL;
850                 } else
851                         netif_info(efx, drv, efx->net_dev,
852                                    "Could not get the PF id from VF\n");
853         }
854 #endif
855
856         efx_ef10_cleanup_vlans(efx);
857         mutex_destroy(&nic_data->vlan_lock);
858
859         efx_ptp_remove(efx);
860
861         efx_mcdi_mon_remove(efx);
862
863         efx_ef10_rx_free_indir_table(efx);
864
865         if (nic_data->wc_membase)
866                 iounmap(nic_data->wc_membase);
867
868         rc = efx_ef10_free_vis(efx);
869         WARN_ON(rc != 0);
870
871         if (!nic_data->must_restore_piobufs)
872                 efx_ef10_free_piobufs(efx);
873
874         device_remove_file(&efx->pci_dev->dev, &dev_attr_primary_flag);
875         device_remove_file(&efx->pci_dev->dev, &dev_attr_link_control_flag);
876
877         efx_mcdi_fini(efx);
878         efx_nic_free_buffer(efx, &nic_data->mcdi_buf);
879         kfree(nic_data);
880 }
881
882 static int efx_ef10_probe_pf(struct efx_nic *efx)
883 {
884         return efx_ef10_probe(efx);
885 }
886
887 int efx_ef10_vadaptor_query(struct efx_nic *efx, unsigned int port_id,
888                             u32 *port_flags, u32 *vadaptor_flags,
889                             unsigned int *vlan_tags)
890 {
891         struct efx_ef10_nic_data *nic_data = efx->nic_data;
892         MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_QUERY_IN_LEN);
893         MCDI_DECLARE_BUF(outbuf, MC_CMD_VADAPTOR_QUERY_OUT_LEN);
894         size_t outlen;
895         int rc;
896
897         if (nic_data->datapath_caps &
898             (1 << MC_CMD_GET_CAPABILITIES_OUT_VADAPTOR_QUERY_LBN)) {
899                 MCDI_SET_DWORD(inbuf, VADAPTOR_QUERY_IN_UPSTREAM_PORT_ID,
900                                port_id);
901
902                 rc = efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_QUERY, inbuf, sizeof(inbuf),
903                                   outbuf, sizeof(outbuf), &outlen);
904                 if (rc)
905                         return rc;
906
907                 if (outlen < sizeof(outbuf)) {
908                         rc = -EIO;
909                         return rc;
910                 }
911         }
912
913         if (port_flags)
914                 *port_flags = MCDI_DWORD(outbuf, VADAPTOR_QUERY_OUT_PORT_FLAGS);
915         if (vadaptor_flags)
916                 *vadaptor_flags =
917                         MCDI_DWORD(outbuf, VADAPTOR_QUERY_OUT_VADAPTOR_FLAGS);
918         if (vlan_tags)
919                 *vlan_tags =
920                         MCDI_DWORD(outbuf,
921                                    VADAPTOR_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS);
922
923         return 0;
924 }
925
926 int efx_ef10_vadaptor_alloc(struct efx_nic *efx, unsigned int port_id)
927 {
928         MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_ALLOC_IN_LEN);
929
930         MCDI_SET_DWORD(inbuf, VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID, port_id);
931         return efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_ALLOC, inbuf, sizeof(inbuf),
932                             NULL, 0, NULL);
933 }
934
935 int efx_ef10_vadaptor_free(struct efx_nic *efx, unsigned int port_id)
936 {
937         MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_FREE_IN_LEN);
938
939         MCDI_SET_DWORD(inbuf, VADAPTOR_FREE_IN_UPSTREAM_PORT_ID, port_id);
940         return efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_FREE, inbuf, sizeof(inbuf),
941                             NULL, 0, NULL);
942 }
943
944 int efx_ef10_vport_add_mac(struct efx_nic *efx,
945                            unsigned int port_id, u8 *mac)
946 {
947         MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_LEN);
948
949         MCDI_SET_DWORD(inbuf, VPORT_ADD_MAC_ADDRESS_IN_VPORT_ID, port_id);
950         ether_addr_copy(MCDI_PTR(inbuf, VPORT_ADD_MAC_ADDRESS_IN_MACADDR), mac);
951
952         return efx_mcdi_rpc(efx, MC_CMD_VPORT_ADD_MAC_ADDRESS, inbuf,
953                             sizeof(inbuf), NULL, 0, NULL);
954 }
955
956 int efx_ef10_vport_del_mac(struct efx_nic *efx,
957                            unsigned int port_id, u8 *mac)
958 {
959         MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_LEN);
960
961         MCDI_SET_DWORD(inbuf, VPORT_DEL_MAC_ADDRESS_IN_VPORT_ID, port_id);
962         ether_addr_copy(MCDI_PTR(inbuf, VPORT_DEL_MAC_ADDRESS_IN_MACADDR), mac);
963
964         return efx_mcdi_rpc(efx, MC_CMD_VPORT_DEL_MAC_ADDRESS, inbuf,
965                             sizeof(inbuf), NULL, 0, NULL);
966 }
967
968 #ifdef CONFIG_SFC_SRIOV
969 static int efx_ef10_probe_vf(struct efx_nic *efx)
970 {
971         int rc;
972         struct pci_dev *pci_dev_pf;
973
974         /* If the parent PF has no VF data structure, it doesn't know about this
975          * VF so fail probe.  The VF needs to be re-created.  This can happen
976          * if the PF driver is unloaded while the VF is assigned to a guest.
977          */
978         pci_dev_pf = efx->pci_dev->physfn;
979         if (pci_dev_pf) {
980                 struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf);
981                 struct efx_ef10_nic_data *nic_data_pf = efx_pf->nic_data;
982
983                 if (!nic_data_pf->vf) {
984                         netif_info(efx, drv, efx->net_dev,
985                                    "The VF cannot link to its parent PF; "
986                                    "please destroy and re-create the VF\n");
987                         return -EBUSY;
988                 }
989         }
990
991         rc = efx_ef10_probe(efx);
992         if (rc)
993                 return rc;
994
995         rc = efx_ef10_get_vf_index(efx);
996         if (rc)
997                 goto fail;
998
999         if (efx->pci_dev->is_virtfn) {
1000                 if (efx->pci_dev->physfn) {
1001                         struct efx_nic *efx_pf =
1002                                 pci_get_drvdata(efx->pci_dev->physfn);
1003                         struct efx_ef10_nic_data *nic_data_p = efx_pf->nic_data;
1004                         struct efx_ef10_nic_data *nic_data = efx->nic_data;
1005
1006                         nic_data_p->vf[nic_data->vf_index].efx = efx;
1007                         nic_data_p->vf[nic_data->vf_index].pci_dev =
1008                                 efx->pci_dev;
1009                 } else
1010                         netif_info(efx, drv, efx->net_dev,
1011                                    "Could not get the PF id from VF\n");
1012         }
1013
1014         return 0;
1015
1016 fail:
1017         efx_ef10_remove(efx);
1018         return rc;
1019 }
1020 #else
1021 static int efx_ef10_probe_vf(struct efx_nic *efx __attribute__ ((unused)))
1022 {
1023         return 0;
1024 }
1025 #endif
1026
1027 static int efx_ef10_alloc_vis(struct efx_nic *efx,
1028                               unsigned int min_vis, unsigned int max_vis)
1029 {
1030         MCDI_DECLARE_BUF(inbuf, MC_CMD_ALLOC_VIS_IN_LEN);
1031         MCDI_DECLARE_BUF(outbuf, MC_CMD_ALLOC_VIS_OUT_LEN);
1032         struct efx_ef10_nic_data *nic_data = efx->nic_data;
1033         size_t outlen;
1034         int rc;
1035
1036         MCDI_SET_DWORD(inbuf, ALLOC_VIS_IN_MIN_VI_COUNT, min_vis);
1037         MCDI_SET_DWORD(inbuf, ALLOC_VIS_IN_MAX_VI_COUNT, max_vis);
1038         rc = efx_mcdi_rpc(efx, MC_CMD_ALLOC_VIS, inbuf, sizeof(inbuf),
1039                           outbuf, sizeof(outbuf), &outlen);
1040         if (rc != 0)
1041                 return rc;
1042
1043         if (outlen < MC_CMD_ALLOC_VIS_OUT_LEN)
1044                 return -EIO;
1045
1046         netif_dbg(efx, drv, efx->net_dev, "base VI is A0x%03x\n",
1047                   MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_BASE));
1048
1049         nic_data->vi_base = MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_BASE);
1050         nic_data->n_allocated_vis = MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_COUNT);
1051         return 0;
1052 }
1053
1054 /* Note that the failure path of this function does not free
1055  * resources, as this will be done by efx_ef10_remove().
1056  */
1057 static int efx_ef10_dimension_resources(struct efx_nic *efx)
1058 {
1059         struct efx_ef10_nic_data *nic_data = efx->nic_data;
1060         unsigned int uc_mem_map_size, wc_mem_map_size;
1061         unsigned int min_vis = max(EFX_TXQ_TYPES,
1062                                    efx_separate_tx_channels ? 2 : 1);
1063         unsigned int channel_vis, pio_write_vi_base, max_vis;
1064         void __iomem *membase;
1065         int rc;
1066
1067         channel_vis = max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES);
1068
1069 #ifdef EFX_USE_PIO
1070         /* Try to allocate PIO buffers if wanted and if the full
1071          * number of PIO buffers would be sufficient to allocate one
1072          * copy-buffer per TX channel.  Failure is non-fatal, as there
1073          * are only a small number of PIO buffers shared between all
1074          * functions of the controller.
1075          */
1076         if (efx_piobuf_size != 0 &&
1077             ER_DZ_TX_PIOBUF_SIZE / efx_piobuf_size * EF10_TX_PIOBUF_COUNT >=
1078             efx->n_tx_channels) {
1079                 unsigned int n_piobufs =
1080                         DIV_ROUND_UP(efx->n_tx_channels,
1081                                      ER_DZ_TX_PIOBUF_SIZE / efx_piobuf_size);
1082
1083                 rc = efx_ef10_alloc_piobufs(efx, n_piobufs);
1084                 if (rc)
1085                         netif_err(efx, probe, efx->net_dev,
1086                                   "failed to allocate PIO buffers (%d)\n", rc);
1087                 else
1088                         netif_dbg(efx, probe, efx->net_dev,
1089                                   "allocated %u PIO buffers\n", n_piobufs);
1090         }
1091 #else
1092         nic_data->n_piobufs = 0;
1093 #endif
1094
1095         /* PIO buffers should be mapped with write-combining enabled,
1096          * and we want to make single UC and WC mappings rather than
1097          * several of each (in fact that's the only option if host
1098          * page size is >4K).  So we may allocate some extra VIs just
1099          * for writing PIO buffers through.
1100          *
1101          * The UC mapping contains (channel_vis - 1) complete VIs and the
1102          * first half of the next VI.  Then the WC mapping begins with
1103          * the second half of this last VI.
1104          */
1105         uc_mem_map_size = PAGE_ALIGN((channel_vis - 1) * EFX_VI_PAGE_SIZE +
1106                                      ER_DZ_TX_PIOBUF);
1107         if (nic_data->n_piobufs) {
1108                 /* pio_write_vi_base rounds down to give the number of complete
1109                  * VIs inside the UC mapping.
1110                  */
1111                 pio_write_vi_base = uc_mem_map_size / EFX_VI_PAGE_SIZE;
1112                 wc_mem_map_size = (PAGE_ALIGN((pio_write_vi_base +
1113                                                nic_data->n_piobufs) *
1114                                               EFX_VI_PAGE_SIZE) -
1115                                    uc_mem_map_size);
1116                 max_vis = pio_write_vi_base + nic_data->n_piobufs;
1117         } else {
1118                 pio_write_vi_base = 0;
1119                 wc_mem_map_size = 0;
1120                 max_vis = channel_vis;
1121         }
1122
1123         /* In case the last attached driver failed to free VIs, do it now */
1124         rc = efx_ef10_free_vis(efx);
1125         if (rc != 0)
1126                 return rc;
1127
1128         rc = efx_ef10_alloc_vis(efx, min_vis, max_vis);
1129         if (rc != 0)
1130                 return rc;
1131
1132         if (nic_data->n_allocated_vis < channel_vis) {
1133                 netif_info(efx, drv, efx->net_dev,
1134                            "Could not allocate enough VIs to satisfy RSS"
1135                            " requirements. Performance may not be optimal.\n");
1136                 /* We didn't get the VIs to populate our channels.
1137                  * We could keep what we got but then we'd have more
1138                  * interrupts than we need.
1139                  * Instead calculate new max_channels and restart
1140                  */
1141                 efx->max_channels = nic_data->n_allocated_vis;
1142                 efx->max_tx_channels =
1143                         nic_data->n_allocated_vis / EFX_TXQ_TYPES;
1144
1145                 efx_ef10_free_vis(efx);
1146                 return -EAGAIN;
1147         }
1148
1149         /* If we didn't get enough VIs to map all the PIO buffers, free the
1150          * PIO buffers
1151          */
1152         if (nic_data->n_piobufs &&
1153             nic_data->n_allocated_vis <
1154             pio_write_vi_base + nic_data->n_piobufs) {
1155                 netif_dbg(efx, probe, efx->net_dev,
1156                           "%u VIs are not sufficient to map %u PIO buffers\n",
1157                           nic_data->n_allocated_vis, nic_data->n_piobufs);
1158                 efx_ef10_free_piobufs(efx);
1159         }
1160
1161         /* Shrink the original UC mapping of the memory BAR */
1162         membase = ioremap_nocache(efx->membase_phys, uc_mem_map_size);
1163         if (!membase) {
1164                 netif_err(efx, probe, efx->net_dev,
1165                           "could not shrink memory BAR to %x\n",
1166                           uc_mem_map_size);
1167                 return -ENOMEM;
1168         }
1169         iounmap(efx->membase);
1170         efx->membase = membase;
1171
1172         /* Set up the WC mapping if needed */
1173         if (wc_mem_map_size) {
1174                 nic_data->wc_membase = ioremap_wc(efx->membase_phys +
1175                                                   uc_mem_map_size,
1176                                                   wc_mem_map_size);
1177                 if (!nic_data->wc_membase) {
1178                         netif_err(efx, probe, efx->net_dev,
1179                                   "could not allocate WC mapping of size %x\n",
1180                                   wc_mem_map_size);
1181                         return -ENOMEM;
1182                 }
1183                 nic_data->pio_write_vi_base = pio_write_vi_base;
1184                 nic_data->pio_write_base =
1185                         nic_data->wc_membase +
1186                         (pio_write_vi_base * EFX_VI_PAGE_SIZE + ER_DZ_TX_PIOBUF -
1187                          uc_mem_map_size);
1188
1189                 rc = efx_ef10_link_piobufs(efx);
1190                 if (rc)
1191                         efx_ef10_free_piobufs(efx);
1192         }
1193
1194         netif_dbg(efx, probe, efx->net_dev,
1195                   "memory BAR at %pa (virtual %p+%x UC, %p+%x WC)\n",
1196                   &efx->membase_phys, efx->membase, uc_mem_map_size,
1197                   nic_data->wc_membase, wc_mem_map_size);
1198
1199         return 0;
1200 }
1201
1202 static int efx_ef10_init_nic(struct efx_nic *efx)
1203 {
1204         struct efx_ef10_nic_data *nic_data = efx->nic_data;
1205         int rc;
1206
1207         if (nic_data->must_check_datapath_caps) {
1208                 rc = efx_ef10_init_datapath_caps(efx);
1209                 if (rc)
1210                         return rc;
1211                 nic_data->must_check_datapath_caps = false;
1212         }
1213
1214         if (nic_data->must_realloc_vis) {
1215                 /* We cannot let the number of VIs change now */
1216                 rc = efx_ef10_alloc_vis(efx, nic_data->n_allocated_vis,
1217                                         nic_data->n_allocated_vis);
1218                 if (rc)
1219                         return rc;
1220                 nic_data->must_realloc_vis = false;
1221         }
1222
1223         if (nic_data->must_restore_piobufs && nic_data->n_piobufs) {
1224                 rc = efx_ef10_alloc_piobufs(efx, nic_data->n_piobufs);
1225                 if (rc == 0) {
1226                         rc = efx_ef10_link_piobufs(efx);
1227                         if (rc)
1228                                 efx_ef10_free_piobufs(efx);
1229                 }
1230
1231                 /* Log an error on failure, but this is non-fatal */
1232                 if (rc)
1233                         netif_err(efx, drv, efx->net_dev,
1234                                   "failed to restore PIO buffers (%d)\n", rc);
1235                 nic_data->must_restore_piobufs = false;
1236         }
1237
1238         /* don't fail init if RSS setup doesn't work */
1239         efx->type->rx_push_rss_config(efx, false, efx->rx_indir_table);
1240
1241         return 0;
1242 }
1243
1244 static void efx_ef10_reset_mc_allocations(struct efx_nic *efx)
1245 {
1246         struct efx_ef10_nic_data *nic_data = efx->nic_data;
1247 #ifdef CONFIG_SFC_SRIOV
1248         unsigned int i;
1249 #endif
1250
1251         /* All our allocations have been reset */
1252         nic_data->must_realloc_vis = true;
1253         nic_data->must_restore_filters = true;
1254         nic_data->must_restore_piobufs = true;
1255         efx_ef10_forget_old_piobufs(efx);
1256         nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;
1257
1258         /* Driver-created vswitches and vports must be re-created */
1259         nic_data->must_probe_vswitching = true;
1260         nic_data->vport_id = EVB_PORT_ID_ASSIGNED;
1261 #ifdef CONFIG_SFC_SRIOV
1262         if (nic_data->vf)
1263                 for (i = 0; i < efx->vf_count; i++)
1264                         nic_data->vf[i].vport_id = 0;
1265 #endif
1266 }
1267
1268 static enum reset_type efx_ef10_map_reset_reason(enum reset_type reason)
1269 {
1270         if (reason == RESET_TYPE_MC_FAILURE)
1271                 return RESET_TYPE_DATAPATH;
1272
1273         return efx_mcdi_map_reset_reason(reason);
1274 }
1275
1276 static int efx_ef10_map_reset_flags(u32 *flags)
1277 {
1278         enum {
1279                 EF10_RESET_PORT = ((ETH_RESET_MAC | ETH_RESET_PHY) <<
1280                                    ETH_RESET_SHARED_SHIFT),
1281                 EF10_RESET_MC = ((ETH_RESET_DMA | ETH_RESET_FILTER |
1282                                   ETH_RESET_OFFLOAD | ETH_RESET_MAC |
1283                                   ETH_RESET_PHY | ETH_RESET_MGMT) <<
1284                                  ETH_RESET_SHARED_SHIFT)
1285         };
1286
1287         /* We assume for now that our PCI function is permitted to
1288          * reset everything.
1289          */
1290
1291         if ((*flags & EF10_RESET_MC) == EF10_RESET_MC) {
1292                 *flags &= ~EF10_RESET_MC;
1293                 return RESET_TYPE_WORLD;
1294         }
1295
1296         if ((*flags & EF10_RESET_PORT) == EF10_RESET_PORT) {
1297                 *flags &= ~EF10_RESET_PORT;
1298                 return RESET_TYPE_ALL;
1299         }
1300
1301         /* no invisible reset implemented */
1302
1303         return -EINVAL;
1304 }
1305
1306 static int efx_ef10_reset(struct efx_nic *efx, enum reset_type reset_type)
1307 {
1308         int rc = efx_mcdi_reset(efx, reset_type);
1309
1310         /* Unprivileged functions return -EPERM, but need to return success
1311          * here so that the datapath is brought back up.
1312          */
1313         if (reset_type == RESET_TYPE_WORLD && rc == -EPERM)
1314                 rc = 0;
1315
1316         /* If it was a port reset, trigger reallocation of MC resources.
1317          * Note that on an MC reset nothing needs to be done now because we'll
1318          * detect the MC reset later and handle it then.
1319          * For an FLR, we never get an MC reset event, but the MC has reset all
1320          * resources assigned to us, so we have to trigger reallocation now.
1321          */
1322         if ((reset_type == RESET_TYPE_ALL ||
1323              reset_type == RESET_TYPE_MCDI_TIMEOUT) && !rc)
1324                 efx_ef10_reset_mc_allocations(efx);
1325         return rc;
1326 }
1327
1328 #define EF10_DMA_STAT(ext_name, mcdi_name)                      \
1329         [EF10_STAT_ ## ext_name] =                              \
1330         { #ext_name, 64, 8 * MC_CMD_MAC_ ## mcdi_name }
1331 #define EF10_DMA_INVIS_STAT(int_name, mcdi_name)                \
1332         [EF10_STAT_ ## int_name] =                              \
1333         { NULL, 64, 8 * MC_CMD_MAC_ ## mcdi_name }
1334 #define EF10_OTHER_STAT(ext_name)                               \
1335         [EF10_STAT_ ## ext_name] = { #ext_name, 0, 0 }
1336 #define GENERIC_SW_STAT(ext_name)                               \
1337         [GENERIC_STAT_ ## ext_name] = { #ext_name, 0, 0 }
1338
1339 static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = {
1340         EF10_DMA_STAT(port_tx_bytes, TX_BYTES),
1341         EF10_DMA_STAT(port_tx_packets, TX_PKTS),
1342         EF10_DMA_STAT(port_tx_pause, TX_PAUSE_PKTS),
1343         EF10_DMA_STAT(port_tx_control, TX_CONTROL_PKTS),
1344         EF10_DMA_STAT(port_tx_unicast, TX_UNICAST_PKTS),
1345         EF10_DMA_STAT(port_tx_multicast, TX_MULTICAST_PKTS),
1346         EF10_DMA_STAT(port_tx_broadcast, TX_BROADCAST_PKTS),
1347         EF10_DMA_STAT(port_tx_lt64, TX_LT64_PKTS),
1348         EF10_DMA_STAT(port_tx_64, TX_64_PKTS),
1349         EF10_DMA_STAT(port_tx_65_to_127, TX_65_TO_127_PKTS),
1350         EF10_DMA_STAT(port_tx_128_to_255, TX_128_TO_255_PKTS),
1351         EF10_DMA_STAT(port_tx_256_to_511, TX_256_TO_511_PKTS),
1352         EF10_DMA_STAT(port_tx_512_to_1023, TX_512_TO_1023_PKTS),
1353         EF10_DMA_STAT(port_tx_1024_to_15xx, TX_1024_TO_15XX_PKTS),
1354         EF10_DMA_STAT(port_tx_15xx_to_jumbo, TX_15XX_TO_JUMBO_PKTS),
1355         EF10_DMA_STAT(port_rx_bytes, RX_BYTES),
1356         EF10_DMA_INVIS_STAT(port_rx_bytes_minus_good_bytes, RX_BAD_BYTES),
1357         EF10_OTHER_STAT(port_rx_good_bytes),
1358         EF10_OTHER_STAT(port_rx_bad_bytes),
1359         EF10_DMA_STAT(port_rx_packets, RX_PKTS),
1360         EF10_DMA_STAT(port_rx_good, RX_GOOD_PKTS),
1361         EF10_DMA_STAT(port_rx_bad, RX_BAD_FCS_PKTS),
1362         EF10_DMA_STAT(port_rx_pause, RX_PAUSE_PKTS),
1363         EF10_DMA_STAT(port_rx_control, RX_CONTROL_PKTS),
1364         EF10_DMA_STAT(port_rx_unicast, RX_UNICAST_PKTS),
1365         EF10_DMA_STAT(port_rx_multicast, RX_MULTICAST_PKTS),
1366         EF10_DMA_STAT(port_rx_broadcast, RX_BROADCAST_PKTS),
1367         EF10_DMA_STAT(port_rx_lt64, RX_UNDERSIZE_PKTS),
1368         EF10_DMA_STAT(port_rx_64, RX_64_PKTS),
1369         EF10_DMA_STAT(port_rx_65_to_127, RX_65_TO_127_PKTS),
1370         EF10_DMA_STAT(port_rx_128_to_255, RX_128_TO_255_PKTS),
1371         EF10_DMA_STAT(port_rx_256_to_511, RX_256_TO_511_PKTS),
1372         EF10_DMA_STAT(port_rx_512_to_1023, RX_512_TO_1023_PKTS),
1373         EF10_DMA_STAT(port_rx_1024_to_15xx, RX_1024_TO_15XX_PKTS),
1374         EF10_DMA_STAT(port_rx_15xx_to_jumbo, RX_15XX_TO_JUMBO_PKTS),
1375         EF10_DMA_STAT(port_rx_gtjumbo, RX_GTJUMBO_PKTS),
1376         EF10_DMA_STAT(port_rx_bad_gtjumbo, RX_JABBER_PKTS),
1377         EF10_DMA_STAT(port_rx_overflow, RX_OVERFLOW_PKTS),
1378         EF10_DMA_STAT(port_rx_align_error, RX_ALIGN_ERROR_PKTS),
1379         EF10_DMA_STAT(port_rx_length_error, RX_LENGTH_ERROR_PKTS),
1380         EF10_DMA_STAT(port_rx_nodesc_drops, RX_NODESC_DROPS),
1381         GENERIC_SW_STAT(rx_nodesc_trunc),
1382         GENERIC_SW_STAT(rx_noskb_drops),
1383         EF10_DMA_STAT(port_rx_pm_trunc_bb_overflow, PM_TRUNC_BB_OVERFLOW),
1384         EF10_DMA_STAT(port_rx_pm_discard_bb_overflow, PM_DISCARD_BB_OVERFLOW),
1385         EF10_DMA_STAT(port_rx_pm_trunc_vfifo_full, PM_TRUNC_VFIFO_FULL),
1386         EF10_DMA_STAT(port_rx_pm_discard_vfifo_full, PM_DISCARD_VFIFO_FULL),
1387         EF10_DMA_STAT(port_rx_pm_trunc_qbb, PM_TRUNC_QBB),
1388         EF10_DMA_STAT(port_rx_pm_discard_qbb, PM_DISCARD_QBB),
1389         EF10_DMA_STAT(port_rx_pm_discard_mapping, PM_DISCARD_MAPPING),
1390         EF10_DMA_STAT(port_rx_dp_q_disabled_packets, RXDP_Q_DISABLED_PKTS),
1391         EF10_DMA_STAT(port_rx_dp_di_dropped_packets, RXDP_DI_DROPPED_PKTS),
1392         EF10_DMA_STAT(port_rx_dp_streaming_packets, RXDP_STREAMING_PKTS),
1393         EF10_DMA_STAT(port_rx_dp_hlb_fetch, RXDP_HLB_FETCH_CONDITIONS),
1394         EF10_DMA_STAT(port_rx_dp_hlb_wait, RXDP_HLB_WAIT_CONDITIONS),
1395         EF10_DMA_STAT(rx_unicast, VADAPTER_RX_UNICAST_PACKETS),
1396         EF10_DMA_STAT(rx_unicast_bytes, VADAPTER_RX_UNICAST_BYTES),
1397         EF10_DMA_STAT(rx_multicast, VADAPTER_RX_MULTICAST_PACKETS),
1398         EF10_DMA_STAT(rx_multicast_bytes, VADAPTER_RX_MULTICAST_BYTES),
1399         EF10_DMA_STAT(rx_broadcast, VADAPTER_RX_BROADCAST_PACKETS),
1400         EF10_DMA_STAT(rx_broadcast_bytes, VADAPTER_RX_BROADCAST_BYTES),
1401         EF10_DMA_STAT(rx_bad, VADAPTER_RX_BAD_PACKETS),
1402         EF10_DMA_STAT(rx_bad_bytes, VADAPTER_RX_BAD_BYTES),
1403         EF10_DMA_STAT(rx_overflow, VADAPTER_RX_OVERFLOW),
1404         EF10_DMA_STAT(tx_unicast, VADAPTER_TX_UNICAST_PACKETS),
1405         EF10_DMA_STAT(tx_unicast_bytes, VADAPTER_TX_UNICAST_BYTES),
1406         EF10_DMA_STAT(tx_multicast, VADAPTER_TX_MULTICAST_PACKETS),
1407         EF10_DMA_STAT(tx_multicast_bytes, VADAPTER_TX_MULTICAST_BYTES),
1408         EF10_DMA_STAT(tx_broadcast, VADAPTER_TX_BROADCAST_PACKETS),
1409         EF10_DMA_STAT(tx_broadcast_bytes, VADAPTER_TX_BROADCAST_BYTES),
1410         EF10_DMA_STAT(tx_bad, VADAPTER_TX_BAD_PACKETS),
1411         EF10_DMA_STAT(tx_bad_bytes, VADAPTER_TX_BAD_BYTES),
1412         EF10_DMA_STAT(tx_overflow, VADAPTER_TX_OVERFLOW),
1413 };
1414
1415 #define HUNT_COMMON_STAT_MASK ((1ULL << EF10_STAT_port_tx_bytes) |      \
1416                                (1ULL << EF10_STAT_port_tx_packets) |    \
1417                                (1ULL << EF10_STAT_port_tx_pause) |      \
1418                                (1ULL << EF10_STAT_port_tx_unicast) |    \
1419                                (1ULL << EF10_STAT_port_tx_multicast) |  \
1420                                (1ULL << EF10_STAT_port_tx_broadcast) |  \
1421                                (1ULL << EF10_STAT_port_rx_bytes) |      \
1422                                (1ULL <<                                 \
1423                                 EF10_STAT_port_rx_bytes_minus_good_bytes) | \
1424                                (1ULL << EF10_STAT_port_rx_good_bytes) | \
1425                                (1ULL << EF10_STAT_port_rx_bad_bytes) |  \
1426                                (1ULL << EF10_STAT_port_rx_packets) |    \
1427                                (1ULL << EF10_STAT_port_rx_good) |       \
1428                                (1ULL << EF10_STAT_port_rx_bad) |        \
1429                                (1ULL << EF10_STAT_port_rx_pause) |      \
1430                                (1ULL << EF10_STAT_port_rx_control) |    \
1431                                (1ULL << EF10_STAT_port_rx_unicast) |    \
1432                                (1ULL << EF10_STAT_port_rx_multicast) |  \
1433                                (1ULL << EF10_STAT_port_rx_broadcast) |  \
1434                                (1ULL << EF10_STAT_port_rx_lt64) |       \
1435                                (1ULL << EF10_STAT_port_rx_64) |         \
1436                                (1ULL << EF10_STAT_port_rx_65_to_127) |  \
1437                                (1ULL << EF10_STAT_port_rx_128_to_255) | \
1438                                (1ULL << EF10_STAT_port_rx_256_to_511) | \
1439                                (1ULL << EF10_STAT_port_rx_512_to_1023) |\
1440                                (1ULL << EF10_STAT_port_rx_1024_to_15xx) |\
1441                                (1ULL << EF10_STAT_port_rx_15xx_to_jumbo) |\
1442                                (1ULL << EF10_STAT_port_rx_gtjumbo) |    \
1443                                (1ULL << EF10_STAT_port_rx_bad_gtjumbo) |\
1444                                (1ULL << EF10_STAT_port_rx_overflow) |   \
1445                                (1ULL << EF10_STAT_port_rx_nodesc_drops) |\
1446                                (1ULL << GENERIC_STAT_rx_nodesc_trunc) | \
1447                                (1ULL << GENERIC_STAT_rx_noskb_drops))
1448
1449 /* These statistics are only provided by the 10G MAC.  For a 10G/40G
1450  * switchable port we do not expose these because they might not
1451  * include all the packets they should.
1452  */
1453 #define HUNT_10G_ONLY_STAT_MASK ((1ULL << EF10_STAT_port_tx_control) |  \
1454                                  (1ULL << EF10_STAT_port_tx_lt64) |     \
1455                                  (1ULL << EF10_STAT_port_tx_64) |       \
1456                                  (1ULL << EF10_STAT_port_tx_65_to_127) |\
1457                                  (1ULL << EF10_STAT_port_tx_128_to_255) |\
1458                                  (1ULL << EF10_STAT_port_tx_256_to_511) |\
1459                                  (1ULL << EF10_STAT_port_tx_512_to_1023) |\
1460                                  (1ULL << EF10_STAT_port_tx_1024_to_15xx) |\
1461                                  (1ULL << EF10_STAT_port_tx_15xx_to_jumbo))
1462
1463 /* These statistics are only provided by the 40G MAC.  For a 10G/40G
1464  * switchable port we do expose these because the errors will otherwise
1465  * be silent.
1466  */
1467 #define HUNT_40G_EXTRA_STAT_MASK ((1ULL << EF10_STAT_port_rx_align_error) |\
1468                                   (1ULL << EF10_STAT_port_rx_length_error))
1469
1470 /* These statistics are only provided if the firmware supports the
1471  * capability PM_AND_RXDP_COUNTERS.
1472  */
1473 #define HUNT_PM_AND_RXDP_STAT_MASK (                                    \
1474         (1ULL << EF10_STAT_port_rx_pm_trunc_bb_overflow) |              \
1475         (1ULL << EF10_STAT_port_rx_pm_discard_bb_overflow) |            \
1476         (1ULL << EF10_STAT_port_rx_pm_trunc_vfifo_full) |               \
1477         (1ULL << EF10_STAT_port_rx_pm_discard_vfifo_full) |             \
1478         (1ULL << EF10_STAT_port_rx_pm_trunc_qbb) |                      \
1479         (1ULL << EF10_STAT_port_rx_pm_discard_qbb) |                    \
1480         (1ULL << EF10_STAT_port_rx_pm_discard_mapping) |                \
1481         (1ULL << EF10_STAT_port_rx_dp_q_disabled_packets) |             \
1482         (1ULL << EF10_STAT_port_rx_dp_di_dropped_packets) |             \
1483         (1ULL << EF10_STAT_port_rx_dp_streaming_packets) |              \
1484         (1ULL << EF10_STAT_port_rx_dp_hlb_fetch) |                      \
1485         (1ULL << EF10_STAT_port_rx_dp_hlb_wait))
1486
1487 static u64 efx_ef10_raw_stat_mask(struct efx_nic *efx)
1488 {
1489         u64 raw_mask = HUNT_COMMON_STAT_MASK;
1490         u32 port_caps = efx_mcdi_phy_get_caps(efx);
1491         struct efx_ef10_nic_data *nic_data = efx->nic_data;
1492
1493         if (!(efx->mcdi->fn_flags &
1494               1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL))
1495                 return 0;
1496
1497         if (port_caps & (1 << MC_CMD_PHY_CAP_40000FDX_LBN))
1498                 raw_mask |= HUNT_40G_EXTRA_STAT_MASK;
1499         else
1500                 raw_mask |= HUNT_10G_ONLY_STAT_MASK;
1501
1502         if (nic_data->datapath_caps &
1503             (1 << MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_LBN))
1504                 raw_mask |= HUNT_PM_AND_RXDP_STAT_MASK;
1505
1506         return raw_mask;
1507 }
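     /* Example (assuming a 40G-capable port whose firmware reports
      * PM_AND_RXDP_COUNTERS): efx_ef10_raw_stat_mask() returns
      *
      *      HUNT_COMMON_STAT_MASK | HUNT_40G_EXTRA_STAT_MASK |
      *      HUNT_PM_AND_RXDP_STAT_MASK
      *
      * while a function attached without the LINKCTRL flag gets 0 and
      * so exposes no port statistics at all.
      */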
1508
1509 static void efx_ef10_get_stat_mask(struct efx_nic *efx, unsigned long *mask)
1510 {
1511         struct efx_ef10_nic_data *nic_data = efx->nic_data;
1512         u64 raw_mask[2];
1513
1514         raw_mask[0] = efx_ef10_raw_stat_mask(efx);
1515
1516         /* Only show vadaptor stats when EVB capability is present */
1517         if (nic_data->datapath_caps &
1518             (1 << MC_CMD_GET_CAPABILITIES_OUT_EVB_LBN)) {
1519                 raw_mask[0] |= ~((1ULL << EF10_STAT_rx_unicast) - 1);
1520                 raw_mask[1] = (1ULL << (EF10_STAT_COUNT - 64)) - 1;
1521         } else {
1522                 raw_mask[1] = 0;
1523         }
1524
1525 #if BITS_PER_LONG == 64
1526         mask[0] = raw_mask[0];
1527         mask[1] = raw_mask[1];
1528 #else
1529         mask[0] = raw_mask[0] & 0xffffffff;
1530         mask[1] = raw_mask[0] >> 32;
1531         mask[2] = raw_mask[1] & 0xffffffff;
1532         mask[3] = raw_mask[1] >> 32;
1533 #endif
1534 }
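     /* Sketch of the 32-bit split above: on a 32-bit build each u64
      * word of the raw mask is spread across two unsigned longs, low
      * dword first.  E.g. with raw_mask[0] == 0x0000000300000001ULL:
      *
      *      mask[0] = 0x00000001;   (low dword of word 0)
      *      mask[1] = 0x00000003;   (high dword of word 0)
      *
      * so for_each_set_bit() sees the same bit positions on either
      * word size.
      */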
1535
1536 static size_t efx_ef10_describe_stats(struct efx_nic *efx, u8 *names)
1537 {
1538         DECLARE_BITMAP(mask, EF10_STAT_COUNT);
1539
1540         efx_ef10_get_stat_mask(efx, mask);
1541         return efx_nic_describe_stats(efx_ef10_stat_desc, EF10_STAT_COUNT,
1542                                       mask, names);
1543 }
1544
1545 static size_t efx_ef10_update_stats_common(struct efx_nic *efx, u64 *full_stats,
1546                                            struct rtnl_link_stats64 *core_stats)
1547 {
1548         DECLARE_BITMAP(mask, EF10_STAT_COUNT);
1549         struct efx_ef10_nic_data *nic_data = efx->nic_data;
1550         u64 *stats = nic_data->stats;
1551         size_t stats_count = 0, index;
1552
1553         efx_ef10_get_stat_mask(efx, mask);
1554
1555         if (full_stats) {
1556                 for_each_set_bit(index, mask, EF10_STAT_COUNT) {
1557                         if (efx_ef10_stat_desc[index].name) {
1558                                 *full_stats++ = stats[index];
1559                                 ++stats_count;
1560                         }
1561                 }
1562         }
1563
1564         if (!core_stats)
1565                 return stats_count;
1566
1567         if (nic_data->datapath_caps &
1568                         1 << MC_CMD_GET_CAPABILITIES_OUT_EVB_LBN) {
1569                 /* Use vadaptor stats. */
1570                 core_stats->rx_packets = stats[EF10_STAT_rx_unicast] +
1571                                          stats[EF10_STAT_rx_multicast] +
1572                                          stats[EF10_STAT_rx_broadcast];
1573                 core_stats->tx_packets = stats[EF10_STAT_tx_unicast] +
1574                                          stats[EF10_STAT_tx_multicast] +
1575                                          stats[EF10_STAT_tx_broadcast];
1576                 core_stats->rx_bytes = stats[EF10_STAT_rx_unicast_bytes] +
1577                                        stats[EF10_STAT_rx_multicast_bytes] +
1578                                        stats[EF10_STAT_rx_broadcast_bytes];
1579                 core_stats->tx_bytes = stats[EF10_STAT_tx_unicast_bytes] +
1580                                        stats[EF10_STAT_tx_multicast_bytes] +
1581                                        stats[EF10_STAT_tx_broadcast_bytes];
1582                 core_stats->rx_dropped = stats[GENERIC_STAT_rx_nodesc_trunc] +
1583                                          stats[GENERIC_STAT_rx_noskb_drops];
1584                 core_stats->multicast = stats[EF10_STAT_rx_multicast];
1585                 core_stats->rx_crc_errors = stats[EF10_STAT_rx_bad];
1586                 core_stats->rx_fifo_errors = stats[EF10_STAT_rx_overflow];
1587                 core_stats->rx_errors = core_stats->rx_crc_errors;
1588                 core_stats->tx_errors = stats[EF10_STAT_tx_bad];
1589         } else {
1590                 /* Use port stats. */
1591                 core_stats->rx_packets = stats[EF10_STAT_port_rx_packets];
1592                 core_stats->tx_packets = stats[EF10_STAT_port_tx_packets];
1593                 core_stats->rx_bytes = stats[EF10_STAT_port_rx_bytes];
1594                 core_stats->tx_bytes = stats[EF10_STAT_port_tx_bytes];
1595                 core_stats->rx_dropped = stats[EF10_STAT_port_rx_nodesc_drops] +
1596                                          stats[GENERIC_STAT_rx_nodesc_trunc] +
1597                                          stats[GENERIC_STAT_rx_noskb_drops];
1598                 core_stats->multicast = stats[EF10_STAT_port_rx_multicast];
1599                 core_stats->rx_length_errors =
1600                                 stats[EF10_STAT_port_rx_gtjumbo] +
1601                                 stats[EF10_STAT_port_rx_length_error];
1602                 core_stats->rx_crc_errors = stats[EF10_STAT_port_rx_bad];
1603                 core_stats->rx_frame_errors =
1604                                 stats[EF10_STAT_port_rx_align_error];
1605                 core_stats->rx_fifo_errors = stats[EF10_STAT_port_rx_overflow];
1606                 core_stats->rx_errors = (core_stats->rx_length_errors +
1607                                          core_stats->rx_crc_errors +
1608                                          core_stats->rx_frame_errors);
1609         }
1610
1611         return stats_count;
1612 }
1613
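     /* The MC DMAs MAC statistics into efx->stats_buffer (arranged
      * elsewhere, presumably via MC_CMD_MAC_STATS much as the VF
      * variant below does with a one-shot buffer) and brackets each
      * update with a generation count.  The reader below is
      * seqlock-like: sample GENERATION_END, copy the statistics, then
      * sample GENERATION_START, and retry with -EAGAIN if the two
      * differ, since that means a DMA update raced with the copy.
      * The rmb() calls order the generation reads against the
      * statistics reads.
      */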
1614 static int efx_ef10_try_update_nic_stats_pf(struct efx_nic *efx)
1615 {
1616         struct efx_ef10_nic_data *nic_data = efx->nic_data;
1617         DECLARE_BITMAP(mask, EF10_STAT_COUNT);
1618         __le64 generation_start, generation_end;
1619         u64 *stats = nic_data->stats;
1620         __le64 *dma_stats;
1621
1622         efx_ef10_get_stat_mask(efx, mask);
1623
1624         dma_stats = efx->stats_buffer.addr;
1626
1627         generation_end = dma_stats[MC_CMD_MAC_GENERATION_END];
1628         if (generation_end == EFX_MC_STATS_GENERATION_INVALID)
1629                 return 0;
1630         rmb();
1631         efx_nic_update_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, mask,
1632                              stats, efx->stats_buffer.addr, false);
1633         rmb();
1634         generation_start = dma_stats[MC_CMD_MAC_GENERATION_START];
1635         if (generation_end != generation_start)
1636                 return -EAGAIN;
1637
1638         /* Update derived statistics */
1639         efx_nic_fix_nodesc_drop_stat(efx,
1640                                      &stats[EF10_STAT_port_rx_nodesc_drops]);
1641         stats[EF10_STAT_port_rx_good_bytes] =
1642                 stats[EF10_STAT_port_rx_bytes] -
1643                 stats[EF10_STAT_port_rx_bytes_minus_good_bytes];
1644         efx_update_diff_stat(&stats[EF10_STAT_port_rx_bad_bytes],
1645                              stats[EF10_STAT_port_rx_bytes_minus_good_bytes]);
1646         efx_update_sw_stats(efx, stats);
1647         return 0;
1648 }
1649
1651 static size_t efx_ef10_update_stats_pf(struct efx_nic *efx, u64 *full_stats,
1652                                        struct rtnl_link_stats64 *core_stats)
1653 {
1654         int retry;
1655
1656         /* If we're unlucky enough to read statistics during the DMA, wait
1657          * up to 10ms for it to finish (typically takes <500us)
1658          */
1659         for (retry = 0; retry < 100; ++retry) {
1660                 if (efx_ef10_try_update_nic_stats_pf(efx) == 0)
1661                         break;
1662                 udelay(100);
1663         }
1664
1665         return efx_ef10_update_stats_common(efx, full_stats, core_stats);
1666 }
1667
1668 static int efx_ef10_try_update_nic_stats_vf(struct efx_nic *efx)
1669 {
1670         MCDI_DECLARE_BUF(inbuf, MC_CMD_MAC_STATS_IN_LEN);
1671         struct efx_ef10_nic_data *nic_data = efx->nic_data;
1672         DECLARE_BITMAP(mask, EF10_STAT_COUNT);
1673         __le64 generation_start, generation_end;
1674         u64 *stats = nic_data->stats;
1675         u32 dma_len = MC_CMD_MAC_NSTATS * sizeof(u64);
1676         struct efx_buffer stats_buf;
1677         __le64 *dma_stats;
1678         int rc;
1679
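             /* This function is entered with efx->stats_lock held; drop
              * it here because the MCDI request below can sleep, and
              * re-acquire it on every return path so the caller's
              * locking state is preserved.
              */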
1680         spin_unlock_bh(&efx->stats_lock);
1681
1682         if (in_interrupt()) {
1683                 /* If in atomic context, cannot update stats.  Just update the
1684                  * software stats and return so the caller can continue.
1685                  */
1686                 spin_lock_bh(&efx->stats_lock);
1687                 efx_update_sw_stats(efx, stats);
1688                 return 0;
1689         }
1690
1691         efx_ef10_get_stat_mask(efx, mask);
1692
1693         rc = efx_nic_alloc_buffer(efx, &stats_buf, dma_len, GFP_ATOMIC);
1694         if (rc) {
1695                 spin_lock_bh(&efx->stats_lock);
1696                 return rc;
1697         }
1698
1699         dma_stats = stats_buf.addr;
1700         dma_stats[MC_CMD_MAC_GENERATION_END] = EFX_MC_STATS_GENERATION_INVALID;
1701
1702         MCDI_SET_QWORD(inbuf, MAC_STATS_IN_DMA_ADDR, stats_buf.dma_addr);
1703         MCDI_POPULATE_DWORD_1(inbuf, MAC_STATS_IN_CMD,
1704                               MAC_STATS_IN_DMA, 1);
1705         MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, dma_len);
1706         MCDI_SET_DWORD(inbuf, MAC_STATS_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);
1707
1708         rc = efx_mcdi_rpc_quiet(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf),
1709                                 NULL, 0, NULL);
1710         spin_lock_bh(&efx->stats_lock);
1711         if (rc) {
1712                 /* Expect ENOENT if DMA queues have not been set up */
1713                 if (rc != -ENOENT || atomic_read(&efx->active_queues))
1714                         efx_mcdi_display_error(efx, MC_CMD_MAC_STATS,
1715                                                sizeof(inbuf), NULL, 0, rc);
1716                 goto out;
1717         }
1718
1719         generation_end = dma_stats[MC_CMD_MAC_GENERATION_END];
1720         if (generation_end == EFX_MC_STATS_GENERATION_INVALID) {
1721                 WARN_ON_ONCE(1);
1722                 goto out;
1723         }
1724         rmb();
1725         efx_nic_update_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, mask,
1726                              stats, stats_buf.addr, false);
1727         rmb();
1728         generation_start = dma_stats[MC_CMD_MAC_GENERATION_START];
1729         if (generation_end != generation_start) {
1730                 rc = -EAGAIN;
1731                 goto out;
1732         }
1733
1734         efx_update_sw_stats(efx, stats);
1735 out:
1736         efx_nic_free_buffer(efx, &stats_buf);
1737         return rc;
1738 }
1739
1740 static size_t efx_ef10_update_stats_vf(struct efx_nic *efx, u64 *full_stats,
1741                                        struct rtnl_link_stats64 *core_stats)
1742 {
1743         if (efx_ef10_try_update_nic_stats_vf(efx))
1744                 return 0;
1745
1746         return efx_ef10_update_stats_common(efx, full_stats, core_stats);
1747 }
1748
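     /* Interrupt moderation is pushed one of three ways, chosen by the
      * firmware workarounds detected at EVQ init: the MC_CMD_SET_EVQ_TMR
      * MCDI call (workaround 61265), which takes nanoseconds; the
      * indirect ER_DD_EVQ_INDIRECT register (workaround 35388); or the
      * ER_DZ_EVQ_TMR register directly.  For example, 30 us of
      * moderation becomes TMR_LOAD/RELOAD_REQ_NS = 30000 on the MCDI
      * path, and efx_usecs_to_ticks(efx, 30) timer ticks on the
      * register paths.
      */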
1749 static void efx_ef10_push_irq_moderation(struct efx_channel *channel)
1750 {
1751         struct efx_nic *efx = channel->efx;
1752         unsigned int mode, usecs;
1753         efx_dword_t timer_cmd;
1754
1755         if (channel->irq_moderation_us) {
1756                 mode = 3;
1757                 usecs = channel->irq_moderation_us;
1758         } else {
1759                 mode = 0;
1760                 usecs = 0;
1761         }
1762
1763         if (EFX_EF10_WORKAROUND_61265(efx)) {
1764                 MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_EVQ_TMR_IN_LEN);
1765                 unsigned int ns = usecs * 1000;
1766
1767                 MCDI_SET_DWORD(inbuf, SET_EVQ_TMR_IN_INSTANCE,
1768                                channel->channel);
1769                 MCDI_SET_DWORD(inbuf, SET_EVQ_TMR_IN_TMR_LOAD_REQ_NS, ns);
1770                 MCDI_SET_DWORD(inbuf, SET_EVQ_TMR_IN_TMR_RELOAD_REQ_NS, ns);
1771                 MCDI_SET_DWORD(inbuf, SET_EVQ_TMR_IN_TMR_MODE, mode);
1772
1773                 efx_mcdi_rpc_async(efx, MC_CMD_SET_EVQ_TMR,
1774                                    inbuf, sizeof(inbuf), 0, NULL, 0);
1775         } else if (EFX_EF10_WORKAROUND_35388(efx)) {
1776                 unsigned int ticks = efx_usecs_to_ticks(efx, usecs);
1777
1778                 EFX_POPULATE_DWORD_3(timer_cmd, ERF_DD_EVQ_IND_TIMER_FLAGS,
1779                                      EFE_DD_EVQ_IND_TIMER_FLAGS,
1780                                      ERF_DD_EVQ_IND_TIMER_MODE, mode,
1781                                      ERF_DD_EVQ_IND_TIMER_VAL, ticks);
1782                 efx_writed_page(efx, &timer_cmd, ER_DD_EVQ_INDIRECT,
1783                                 channel->channel);
1784         } else {
1785                 unsigned int ticks = efx_usecs_to_ticks(efx, usecs);
1786
1787                 EFX_POPULATE_DWORD_2(timer_cmd, ERF_DZ_TC_TIMER_MODE, mode,
1788                                      ERF_DZ_TC_TIMER_VAL, ticks);
1789                 efx_writed_page(efx, &timer_cmd, ER_DZ_EVQ_TMR,
1790                                 channel->channel);
1791         }
1792 }
1793
1794 static void efx_ef10_get_wol_vf(struct efx_nic *efx,
1795                                 struct ethtool_wolinfo *wol) {}
1796
1797 static int efx_ef10_set_wol_vf(struct efx_nic *efx, u32 type)
1798 {
1799         return -EOPNOTSUPP;
1800 }
1801
1802 static void efx_ef10_get_wol(struct efx_nic *efx, struct ethtool_wolinfo *wol)
1803 {
1804         wol->supported = 0;
1805         wol->wolopts = 0;
1806         memset(&wol->sopass, 0, sizeof(wol->sopass));
1807 }
1808
1809 static int efx_ef10_set_wol(struct efx_nic *efx, u32 type)
1810 {
1811         if (type != 0)
1812                 return -EINVAL;
1813         return 0;
1814 }
1815
1816 static void efx_ef10_mcdi_request(struct efx_nic *efx,
1817                                   const efx_dword_t *hdr, size_t hdr_len,
1818                                   const efx_dword_t *sdu, size_t sdu_len)
1819 {
1820         struct efx_ef10_nic_data *nic_data = efx->nic_data;
1821         u8 *pdu = nic_data->mcdi_buf.addr;
1822
1823         memcpy(pdu, hdr, hdr_len);
1824         memcpy(pdu + hdr_len, sdu, sdu_len);
1825         wmb();
1826
1827         /* The hardware provides 'low' and 'high' (doorbell) registers
1828          * for passing the 64-bit address of an MCDI request to
1829          * firmware.  However the dwords are swapped by firmware.  The
1830          * least significant bits of the doorbell are then 0 for all
1831          * MCDI requests due to alignment.
1832          */
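             /* Example: for mcdi_buf.dma_addr == 0x0000001234567000,
              * the writes below put 0x00000012 (the high dword) in
              * ER_DZ_MC_DB_LWRD and 0x34567000 (the low dword) in
              * ER_DZ_MC_DB_HWRD, matching the swap described above.
              */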
1833         _efx_writed(efx, cpu_to_le32((u64)nic_data->mcdi_buf.dma_addr >> 32),
1834                     ER_DZ_MC_DB_LWRD);
1835         _efx_writed(efx, cpu_to_le32((u32)nic_data->mcdi_buf.dma_addr),
1836                     ER_DZ_MC_DB_HWRD);
1837 }
1838
1839 static bool efx_ef10_mcdi_poll_response(struct efx_nic *efx)
1840 {
1841         struct efx_ef10_nic_data *nic_data = efx->nic_data;
1842         const efx_dword_t hdr = *(const efx_dword_t *)nic_data->mcdi_buf.addr;
1843
1844         rmb();
1845         return EFX_DWORD_FIELD(hdr, MCDI_HEADER_RESPONSE);
1846 }
1847
1848 static void
1849 efx_ef10_mcdi_read_response(struct efx_nic *efx, efx_dword_t *outbuf,
1850                             size_t offset, size_t outlen)
1851 {
1852         struct efx_ef10_nic_data *nic_data = efx->nic_data;
1853         const u8 *pdu = nic_data->mcdi_buf.addr;
1854
1855         memcpy(outbuf, pdu + offset, outlen);
1856 }
1857
1858 static void efx_ef10_mcdi_reboot_detected(struct efx_nic *efx)
1859 {
1860         struct efx_ef10_nic_data *nic_data = efx->nic_data;
1861
1862         /* All our allocations have been reset */
1863         efx_ef10_reset_mc_allocations(efx);
1864
1865         /* The datapath firmware might have been changed */
1866         nic_data->must_check_datapath_caps = true;
1867
1868         /* MAC statistics have been cleared on the NIC; clear the local
1869          * statistic that we update with efx_update_diff_stat().
1870          */
1871         nic_data->stats[EF10_STAT_port_rx_bad_bytes] = 0;
1872 }
1873
1874 static int efx_ef10_mcdi_poll_reboot(struct efx_nic *efx)
1875 {
1876         struct efx_ef10_nic_data *nic_data = efx->nic_data;
1877         int rc;
1878
1879         rc = efx_ef10_get_warm_boot_count(efx);
1880         if (rc < 0) {
1881                 /* The firmware is presumably in the process of
1882                  * rebooting.  However, we are supposed to report each
1883                  * reboot just once, so we must only do that once we
1884                  * can read and store the updated warm boot count.
1885                  */
1886                 return 0;
1887         }
1888
1889         if (rc == nic_data->warm_boot_count)
1890                 return 0;
1891
1892         nic_data->warm_boot_count = rc;
1893         efx_ef10_mcdi_reboot_detected(efx);
1894
1895         return -EIO;
1896 }
1897
1898 /* Handle an MSI interrupt
1899  *
1900  * Handle an MSI hardware interrupt.  This routine schedules event
1901  * queue processing.  No interrupt acknowledgement cycle is necessary.
1902  * Also, we never need to check that the interrupt is for us, since
1903  * MSI interrupts cannot be shared.
1904  */
1905 static irqreturn_t efx_ef10_msi_interrupt(int irq, void *dev_id)
1906 {
1907         struct efx_msi_context *context = dev_id;
1908         struct efx_nic *efx = context->efx;
1909
1910         netif_vdbg(efx, intr, efx->net_dev,
1911                    "IRQ %d on CPU %d\n", irq, raw_smp_processor_id());
1912
1913         if (likely(ACCESS_ONCE(efx->irq_soft_enabled))) {
1914                 /* Note test interrupts */
1915                 if (context->index == efx->irq_level)
1916                         efx->last_irq_cpu = raw_smp_processor_id();
1917
1918                 /* Schedule processing of the channel */
1919                 efx_schedule_channel_irq(efx->channel[context->index]);
1920         }
1921
1922         return IRQ_HANDLED;
1923 }
1924
1925 static irqreturn_t efx_ef10_legacy_interrupt(int irq, void *dev_id)
1926 {
1927         struct efx_nic *efx = dev_id;
1928         bool soft_enabled = ACCESS_ONCE(efx->irq_soft_enabled);
1929         struct efx_channel *channel;
1930         efx_dword_t reg;
1931         u32 queues;
1932
1933         /* Read the ISR which also ACKs the interrupts */
1934         efx_readd(efx, &reg, ER_DZ_BIU_INT_ISR);
1935         queues = EFX_DWORD_FIELD(reg, ERF_DZ_ISR_REG);
1936
1937         if (queues == 0)
1938                 return IRQ_NONE;
1939
1940         if (likely(soft_enabled)) {
1941                 /* Note test interrupts */
1942                 if (queues & (1U << efx->irq_level))
1943                         efx->last_irq_cpu = raw_smp_processor_id();
1944
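                     /* Each ISR bit corresponds to one event queue,
                      * lowest bit first; e.g. queues == 0x5 schedules
                      * channels 0 and 2 as the mask is shifted right
                      * below.
                      */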
1945                 efx_for_each_channel(channel, efx) {
1946                         if (queues & 1)
1947                                 efx_schedule_channel_irq(channel);
1948                         queues >>= 1;
1949                 }
1950         }
1951
1952         netif_vdbg(efx, intr, efx->net_dev,
1953                    "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
1954                    irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));
1955
1956         return IRQ_HANDLED;
1957 }
1958
1959 static void efx_ef10_irq_test_generate(struct efx_nic *efx)
1960 {
1961         MCDI_DECLARE_BUF(inbuf, MC_CMD_TRIGGER_INTERRUPT_IN_LEN);
1962
1963         BUILD_BUG_ON(MC_CMD_TRIGGER_INTERRUPT_OUT_LEN != 0);
1964
1965         MCDI_SET_DWORD(inbuf, TRIGGER_INTERRUPT_IN_INTR_LEVEL, efx->irq_level);
1966         (void) efx_mcdi_rpc(efx, MC_CMD_TRIGGER_INTERRUPT,
1967                             inbuf, sizeof(inbuf), NULL, 0, NULL);
1968 }
1969
1970 static int efx_ef10_tx_probe(struct efx_tx_queue *tx_queue)
1971 {
1972         return efx_nic_alloc_buffer(tx_queue->efx, &tx_queue->txd.buf,
1973                                     (tx_queue->ptr_mask + 1) *
1974                                     sizeof(efx_qword_t),
1975                                     GFP_KERNEL);
1976 }
1977
1978 /* This writes to the TX_DESC_WPTR and also pushes data */
1979 static inline void efx_ef10_push_tx_desc(struct efx_tx_queue *tx_queue,
1980                                          const efx_qword_t *txd)
1981 {
1982         unsigned int write_ptr;
1983         efx_oword_t reg;
1984
1985         write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
1986         EFX_POPULATE_OWORD_1(reg, ERF_DZ_TX_DESC_WPTR, write_ptr);
1987         reg.qword[0] = *txd;
1988         efx_writeo_page(tx_queue->efx, &reg,
1989                         ER_DZ_TX_DESC_UPD, tx_queue->queue);
1990 }
1991
1992 static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue)
1993 {
1994         MCDI_DECLARE_BUF(inbuf, MC_CMD_INIT_TXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 /
1995                                                        EFX_BUF_SIZE));
1996         bool csum_offload = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
1997         size_t entries = tx_queue->txd.buf.len / EFX_BUF_SIZE;
1998         struct efx_channel *channel = tx_queue->channel;
1999         struct efx_nic *efx = tx_queue->efx;
2000         struct efx_ef10_nic_data *nic_data = efx->nic_data;
2001         size_t inlen;
2002         dma_addr_t dma_addr;
2003         efx_qword_t *txd;
2004         int rc;
2005         int i;
2006         BUILD_BUG_ON(MC_CMD_INIT_TXQ_OUT_LEN != 0);
2007
2008         MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_SIZE, tx_queue->ptr_mask + 1);
2009         MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_TARGET_EVQ, channel->channel);
2010         MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_LABEL, tx_queue->queue);
2011         MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_INSTANCE, tx_queue->queue);
2012         MCDI_POPULATE_DWORD_2(inbuf, INIT_TXQ_IN_FLAGS,
2013                               INIT_TXQ_IN_FLAG_IP_CSUM_DIS, !csum_offload,
2014                               INIT_TXQ_IN_FLAG_TCP_CSUM_DIS, !csum_offload);
2015         MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_OWNER_ID, 0);
2016         MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_PORT_ID, nic_data->vport_id);
2017
2018         dma_addr = tx_queue->txd.buf.dma_addr;
2019
2020         netif_dbg(efx, hw, efx->net_dev, "pushing TXQ %d. %zu entries (%llx)\n",
2021                   tx_queue->queue, entries, (u64)dma_addr);
2022
2023         for (i = 0; i < entries; ++i) {
2024                 MCDI_SET_ARRAY_QWORD(inbuf, INIT_TXQ_IN_DMA_ADDR, i, dma_addr);
2025                 dma_addr += EFX_BUF_SIZE;
2026         }
2027
2028         inlen = MC_CMD_INIT_TXQ_IN_LEN(entries);
2029
2030         rc = efx_mcdi_rpc(efx, MC_CMD_INIT_TXQ, inbuf, inlen,
2031                           NULL, 0, NULL);
2032         if (rc)
2033                 goto fail;
2034
2035         /* A previous user of this TX queue might have set us up the
2036          * bomb by writing a descriptor to the TX push collector but
2037          * not the doorbell.  (Each collector belongs to a port, not a
2038          * queue or function, so cannot easily be reset.)  We must
2039          * attempt to push a no-op descriptor in its place.
2040          */
2041         tx_queue->buffer[0].flags = EFX_TX_BUF_OPTION;
2042         tx_queue->insert_count = 1;
2043         txd = efx_tx_desc(tx_queue, 0);
2044         EFX_POPULATE_QWORD_4(*txd,
2045                              ESF_DZ_TX_DESC_IS_OPT, true,
2046                              ESF_DZ_TX_OPTION_TYPE,
2047                              ESE_DZ_TX_OPTION_DESC_CRC_CSUM,
2048                              ESF_DZ_TX_OPTION_UDP_TCP_CSUM, csum_offload,
2049                              ESF_DZ_TX_OPTION_IP_CSUM, csum_offload);
2050         tx_queue->write_count = 1;
2051
2052         if (nic_data->datapath_caps &
2053             (1 << MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN)) {
2054                 tx_queue->tso_version = 1;
2055         }
2056
2057         wmb();
2058         efx_ef10_push_tx_desc(tx_queue, txd);
2059
2060         return;
2061
2062 fail:
2063         netdev_WARN(efx->net_dev, "failed to initialise TXQ %d\n",
2064                     tx_queue->queue);
2065 }
2066
2067 static void efx_ef10_tx_fini(struct efx_tx_queue *tx_queue)
2068 {
2069         MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_TXQ_IN_LEN);
2070         MCDI_DECLARE_BUF_ERR(outbuf);
2071         struct efx_nic *efx = tx_queue->efx;
2072         size_t outlen;
2073         int rc;
2074
2075         MCDI_SET_DWORD(inbuf, FINI_TXQ_IN_INSTANCE,
2076                        tx_queue->queue);
2077
2078         rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_TXQ, inbuf, sizeof(inbuf),
2079                           outbuf, sizeof(outbuf), &outlen);
2080
2081         if (rc && rc != -EALREADY)
2082                 goto fail;
2083
2084         return;
2085
2086 fail:
2087         efx_mcdi_display_error(efx, MC_CMD_FINI_TXQ, MC_CMD_FINI_TXQ_IN_LEN,
2088                                outbuf, outlen, rc);
2089 }
2090
2091 static void efx_ef10_tx_remove(struct efx_tx_queue *tx_queue)
2092 {
2093         efx_nic_free_buffer(tx_queue->efx, &tx_queue->txd.buf);
2094 }
2095
2096 /* This writes only the write pointer (TX_DESC_WPTR) for the TX descriptor ring */
2097 static inline void efx_ef10_notify_tx_desc(struct efx_tx_queue *tx_queue)
2098 {
2099         unsigned int write_ptr;
2100         efx_dword_t reg;
2101
2102         write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
2103         EFX_POPULATE_DWORD_1(reg, ERF_DZ_TX_DESC_WPTR_DWORD, write_ptr);
2104         efx_writed_page(tx_queue->efx, &reg,
2105                         ER_DZ_TX_DESC_UPD_DWORD, tx_queue->queue);
2106 }
2107
2108 static void efx_ef10_tx_write(struct efx_tx_queue *tx_queue)
2109 {
2110         unsigned int old_write_count = tx_queue->write_count;
2111         struct efx_tx_buffer *buffer;
2112         unsigned int write_ptr;
2113         efx_qword_t *txd;
2114
2115         tx_queue->xmit_more_available = false;
2116         if (unlikely(tx_queue->write_count == tx_queue->insert_count))
2117                 return;
2118
2119         do {
2120                 write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
2121                 buffer = &tx_queue->buffer[write_ptr];
2122                 txd = efx_tx_desc(tx_queue, write_ptr);
2123                 ++tx_queue->write_count;
2124
2125                 /* Create TX descriptor ring entry */
2126                 if (buffer->flags & EFX_TX_BUF_OPTION) {
2127                         *txd = buffer->option;
2128                 } else {
2129                         BUILD_BUG_ON(EFX_TX_BUF_CONT != 1);
2130                         EFX_POPULATE_QWORD_3(
2131                                 *txd,
2132                                 ESF_DZ_TX_KER_CONT,
2133                                 buffer->flags & EFX_TX_BUF_CONT,
2134                                 ESF_DZ_TX_KER_BYTE_CNT, buffer->len,
2135                                 ESF_DZ_TX_KER_BUF_ADDR, buffer->dma_addr);
2136                 }
2137         } while (tx_queue->write_count != tx_queue->insert_count);
2138
2139         wmb(); /* Ensure descriptors are written before they are fetched */
2140
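             /* Pushing writes the first new descriptor together with
              * the write pointer in a single PIO write, saving the
              * hardware a descriptor fetch; efx_nic_may_push_tx_desc()
              * presumably limits this to queues that were empty.
              * Otherwise only the write pointer is advanced and the
              * descriptors are fetched by DMA.
              */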
2141         if (efx_nic_may_push_tx_desc(tx_queue, old_write_count)) {
2142                 txd = efx_tx_desc(tx_queue,
2143                                   old_write_count & tx_queue->ptr_mask);
2144                 efx_ef10_push_tx_desc(tx_queue, txd);
2145                 ++tx_queue->pushes;
2146         } else {
2147                 efx_ef10_notify_tx_desc(tx_queue);
2148         }
2149 }
2150
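     /* Example of the spread calculation below: with
      * efx->rss_spread == 6, an exclusive context covers all six
      * queues, while a shared context is rounded down to
      * min(rounddown_pow_of_two(6), 64) == 4 queues, i.e. shared
      * contexts are sized as a power of two capped at
      * EFX_EF10_MAX_SHARED_RSS_CONTEXT_SIZE.
      */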
2151 static int efx_ef10_alloc_rss_context(struct efx_nic *efx, u32 *context,
2152                                       bool exclusive, unsigned *context_size)
2153 {
2154         MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN);
2155         MCDI_DECLARE_BUF(outbuf, MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN);
2156         struct efx_ef10_nic_data *nic_data = efx->nic_data;
2157         size_t outlen;
2158         int rc;
2159         u32 alloc_type = exclusive ?
2160                                 MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EXCLUSIVE :
2161                                 MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_SHARED;
2162         unsigned rss_spread = exclusive ?
2163                                 efx->rss_spread :
2164                                 min(rounddown_pow_of_two(efx->rss_spread),
2165                                     EFX_EF10_MAX_SHARED_RSS_CONTEXT_SIZE);
2166
2167         if (!exclusive && rss_spread == 1) {
2168                 *context = EFX_EF10_RSS_CONTEXT_INVALID;
2169                 if (context_size)
2170                         *context_size = 1;
2171                 return 0;
2172         }
2173
2174         if (nic_data->datapath_caps &
2175             1 << MC_CMD_GET_CAPABILITIES_OUT_RX_RSS_LIMITED_LBN)
2176                 return -EOPNOTSUPP;
2177
2178         MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID,
2179                        nic_data->vport_id);
2180         MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_TYPE, alloc_type);
2181         MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_NUM_QUEUES, rss_spread);
2182
2183         rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_ALLOC, inbuf, sizeof(inbuf),
2184                 outbuf, sizeof(outbuf), &outlen);
2185         if (rc != 0)
2186                 return rc;
2187
2188         if (outlen < MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN)
2189                 return -EIO;
2190
2191         *context = MCDI_DWORD(outbuf, RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID);
2192
2193         if (context_size)
2194                 *context_size = rss_spread;
2195
2196         return 0;
2197 }
2198
2199 static void efx_ef10_free_rss_context(struct efx_nic *efx, u32 context)
2200 {
2201         MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_FREE_IN_LEN);
2202         int rc;
2203
2204         MCDI_SET_DWORD(inbuf, RSS_CONTEXT_FREE_IN_RSS_CONTEXT_ID,
2205                        context);
2206
2207         rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_FREE, inbuf, sizeof(inbuf),
2208                             NULL, 0, NULL);
2209         WARN_ON(rc != 0);
2210 }
2211
2212 static int efx_ef10_populate_rss_table(struct efx_nic *efx, u32 context,
2213                                        const u32 *rx_indir_table)
2214 {
2215         MCDI_DECLARE_BUF(tablebuf, MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN);
2216         MCDI_DECLARE_BUF(keybuf, MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN);
2217         int i, rc;
2218
2219         MCDI_SET_DWORD(tablebuf, RSS_CONTEXT_SET_TABLE_IN_RSS_CONTEXT_ID,
2220                        context);
2221         BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) !=
2222                      MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_LEN);
2223
2224         for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); ++i)
2225                 MCDI_PTR(tablebuf,
2226                          RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE)[i] =
2227                                 (u8) rx_indir_table[i];
2228
2229         rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_TABLE, tablebuf,
2230                           sizeof(tablebuf), NULL, 0, NULL);
2231         if (rc != 0)
2232                 return rc;
2233
2234         MCDI_SET_DWORD(keybuf, RSS_CONTEXT_SET_KEY_IN_RSS_CONTEXT_ID,
2235                        context);
2236         BUILD_BUG_ON(ARRAY_SIZE(efx->rx_hash_key) !=
2237                      MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN);
2238         for (i = 0; i < ARRAY_SIZE(efx->rx_hash_key); ++i)
2239                 MCDI_PTR(keybuf, RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY)[i] =
2240                         efx->rx_hash_key[i];
2241
2242         return efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_KEY, keybuf,
2243                             sizeof(keybuf), NULL, 0, NULL);
2244 }
2245
2246 static void efx_ef10_rx_free_indir_table(struct efx_nic *efx)
2247 {
2248         struct efx_ef10_nic_data *nic_data = efx->nic_data;
2249
2250         if (nic_data->rx_rss_context != EFX_EF10_RSS_CONTEXT_INVALID)
2251                 efx_ef10_free_rss_context(efx, nic_data->rx_rss_context);
2252         nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;
2253 }
2254
2255 static int efx_ef10_rx_push_shared_rss_config(struct efx_nic *efx,
2256                                               unsigned *context_size)
2257 {
2258         u32 new_rx_rss_context;
2259         struct efx_ef10_nic_data *nic_data = efx->nic_data;
2260         int rc = efx_ef10_alloc_rss_context(efx, &new_rx_rss_context,
2261                                             false, context_size);
2262
2263         if (rc != 0)
2264                 return rc;
2265
2266         nic_data->rx_rss_context = new_rx_rss_context;
2267         nic_data->rx_rss_context_exclusive = false;
2268         efx_set_default_rx_indir_table(efx);
2269         return 0;
2270 }
2271
2272 static int efx_ef10_rx_push_exclusive_rss_config(struct efx_nic *efx,
2273                                                  const u32 *rx_indir_table)
2274 {
2275         struct efx_ef10_nic_data *nic_data = efx->nic_data;
2276         int rc;
2277         u32 new_rx_rss_context;
2278
2279         if (nic_data->rx_rss_context == EFX_EF10_RSS_CONTEXT_INVALID ||
2280             !nic_data->rx_rss_context_exclusive) {
2281                 rc = efx_ef10_alloc_rss_context(efx, &new_rx_rss_context,
2282                                                 true, NULL);
2283                 if (rc == -EOPNOTSUPP)
2284                         return rc;
2285                 else if (rc != 0)
2286                         goto fail1;
2287         } else {
2288                 new_rx_rss_context = nic_data->rx_rss_context;
2289         }
2290
2291         rc = efx_ef10_populate_rss_table(efx, new_rx_rss_context,
2292                                          rx_indir_table);
2293         if (rc != 0)
2294                 goto fail2;
2295
2296         if (nic_data->rx_rss_context != new_rx_rss_context)
2297                 efx_ef10_rx_free_indir_table(efx);
2298         nic_data->rx_rss_context = new_rx_rss_context;
2299         nic_data->rx_rss_context_exclusive = true;
2300         if (rx_indir_table != efx->rx_indir_table)
2301                 memcpy(efx->rx_indir_table, rx_indir_table,
2302                        sizeof(efx->rx_indir_table));
2303         return 0;
2304
2305 fail2:
2306         if (new_rx_rss_context != nic_data->rx_rss_context)
2307                 efx_ef10_free_rss_context(efx, new_rx_rss_context);
2308 fail1:
2309         netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
2310         return rc;
2311 }
2312
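     /* The PF first tries an exclusive RSS context; if the firmware
      * has run out of them (-ENOBUFS) and the request did not come
      * from the user, it falls back to a shared context, warning only
      * when the shared context's size or default indirection differs
      * from what was wanted.
      */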
2313 static int efx_ef10_pf_rx_push_rss_config(struct efx_nic *efx, bool user,
2314                                           const u32 *rx_indir_table)
2315 {
2316         int rc;
2317
2318         if (efx->rss_spread == 1)
2319                 return 0;
2320
2321         rc = efx_ef10_rx_push_exclusive_rss_config(efx, rx_indir_table);
2322
2323         if (rc == -ENOBUFS && !user) {
2324                 unsigned context_size;
2325                 bool mismatch = false;
2326                 size_t i;
2327
2328                 for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table) && !mismatch;
2329                      i++)
2330                         mismatch = rx_indir_table[i] !=
2331                                 ethtool_rxfh_indir_default(i, efx->rss_spread);
2332
2333                 rc = efx_ef10_rx_push_shared_rss_config(efx, &context_size);
2334                 if (rc == 0) {
2335                         if (context_size != efx->rss_spread)
2336                                 netif_warn(efx, probe, efx->net_dev,
2337                                            "Could not allocate an exclusive RSS"
2338                                            " context; allocated a shared one of"
2339                                            " different size."
2340                                            " Wanted %u, got %u.\n",
2341                                            efx->rss_spread, context_size);
2342                         else if (mismatch)
2343                                 netif_warn(efx, probe, efx->net_dev,
2344                                            "Could not allocate an exclusive RSS"
2345                                            " context; allocated a shared one but"
2346                                            " could not apply custom"
2347                                            " indirection.\n");
2348                         else
2349                                 netif_info(efx, probe, efx->net_dev,
2350                                            "Could not allocate an exclusive RSS"
2351                                            " context; allocated a shared one.\n");
2352                 }
2353         }
2354         return rc;
2355 }
2356
2357 static int efx_ef10_vf_rx_push_rss_config(struct efx_nic *efx, bool user,
2358                                           const u32 *rx_indir_table
2359                                           __attribute__ ((unused)))
2360 {
2361         struct efx_ef10_nic_data *nic_data = efx->nic_data;
2362
2363         if (user)
2364                 return -EOPNOTSUPP;
2365         if (nic_data->rx_rss_context != EFX_EF10_RSS_CONTEXT_INVALID)
2366                 return 0;
2367         return efx_ef10_rx_push_shared_rss_config(efx, NULL);
2368 }
2369
2370 static int efx_ef10_rx_probe(struct efx_rx_queue *rx_queue)
2371 {
2372         return efx_nic_alloc_buffer(rx_queue->efx, &rx_queue->rxd.buf,
2373                                     (rx_queue->ptr_mask + 1) *
2374                                     sizeof(efx_qword_t),
2375                                     GFP_KERNEL);
2376 }
2377
2378 static void efx_ef10_rx_init(struct efx_rx_queue *rx_queue)
2379 {
2380         MCDI_DECLARE_BUF(inbuf,
2381                          MC_CMD_INIT_RXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 /
2382                                                 EFX_BUF_SIZE));
2383         struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
2384         size_t entries = rx_queue->rxd.buf.len / EFX_BUF_SIZE;
2385         struct efx_nic *efx = rx_queue->efx;
2386         struct efx_ef10_nic_data *nic_data = efx->nic_data;
2387         size_t inlen;
2388         dma_addr_t dma_addr;
2389         int rc;
2390         int i;
2391         BUILD_BUG_ON(MC_CMD_INIT_RXQ_OUT_LEN != 0);
2392
2393         rx_queue->scatter_n = 0;
2394         rx_queue->scatter_len = 0;
2395
2396         MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_SIZE, rx_queue->ptr_mask + 1);
2397         MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_TARGET_EVQ, channel->channel);
2398         MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_LABEL, efx_rx_queue_index(rx_queue));
2399         MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_INSTANCE,
2400                        efx_rx_queue_index(rx_queue));
2401         MCDI_POPULATE_DWORD_2(inbuf, INIT_RXQ_IN_FLAGS,
2402                               INIT_RXQ_IN_FLAG_PREFIX, 1,
2403                               INIT_RXQ_IN_FLAG_TIMESTAMP, 1);
2404         MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_OWNER_ID, 0);
2405         MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_PORT_ID, nic_data->vport_id);
2406
2407         dma_addr = rx_queue->rxd.buf.dma_addr;
2408
2409         netif_dbg(efx, hw, efx->net_dev, "pushing RXQ %d. %zu entries (%llx)\n",
2410                   efx_rx_queue_index(rx_queue), entries, (u64)dma_addr);
2411
2412         for (i = 0; i < entries; ++i) {
2413                 MCDI_SET_ARRAY_QWORD(inbuf, INIT_RXQ_IN_DMA_ADDR, i, dma_addr);
2414                 dma_addr += EFX_BUF_SIZE;
2415         }
2416
2417         inlen = MC_CMD_INIT_RXQ_IN_LEN(entries);
2418
2419         rc = efx_mcdi_rpc(efx, MC_CMD_INIT_RXQ, inbuf, inlen,
2420                           NULL, 0, NULL);
2421         if (rc)
2422                 netdev_WARN(efx->net_dev, "failed to initialise RXQ %d\n",
2423                             efx_rx_queue_index(rx_queue));
2424 }
2425
2426 static void efx_ef10_rx_fini(struct efx_rx_queue *rx_queue)
2427 {
2428         MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_RXQ_IN_LEN);
2429         MCDI_DECLARE_BUF_ERR(outbuf);
2430         struct efx_nic *efx = rx_queue->efx;
2431         size_t outlen;
2432         int rc;
2433
2434         MCDI_SET_DWORD(inbuf, FINI_RXQ_IN_INSTANCE,
2435                        efx_rx_queue_index(rx_queue));
2436
2437         rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_RXQ, inbuf, sizeof(inbuf),
2438                           outbuf, sizeof(outbuf), &outlen);
2439
2440         if (rc && rc != -EALREADY)
2441                 goto fail;
2442
2443         return;
2444
2445 fail:
2446         efx_mcdi_display_error(efx, MC_CMD_FINI_RXQ, MC_CMD_FINI_RXQ_IN_LEN,
2447                                outbuf, outlen, rc);
2448 }
2449
2450 static void efx_ef10_rx_remove(struct efx_rx_queue *rx_queue)
2451 {
2452         efx_nic_free_buffer(rx_queue->efx, &rx_queue->rxd.buf);
2453 }
2454
2455 /* This creates an entry in the RX descriptor queue */
2456 static inline void
2457 efx_ef10_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index)
2458 {
2459         struct efx_rx_buffer *rx_buf;
2460         efx_qword_t *rxd;
2461
2462         rxd = efx_rx_desc(rx_queue, index);
2463         rx_buf = efx_rx_buffer(rx_queue, index);
2464         EFX_POPULATE_QWORD_2(*rxd,
2465                              ESF_DZ_RX_KER_BYTE_CNT, rx_buf->len,
2466                              ESF_DZ_RX_KER_BUF_ADDR, rx_buf->dma_addr);
2467 }
2468
2469 static void efx_ef10_rx_write(struct efx_rx_queue *rx_queue)
2470 {
2471         struct efx_nic *efx = rx_queue->efx;
2472         unsigned int write_count;
2473         efx_dword_t reg;
2474
2475         /* Firmware requires that RX_DESC_WPTR be a multiple of 8 */
2476         write_count = rx_queue->added_count & ~7;
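             /* Example: with added_count == 21 and
              * notified_count == 16, write_count becomes 16 and we
              * return without building anything; buffers 16..20 are
              * notified only once added_count reaches 24.
              */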
2477         if (rx_queue->notified_count == write_count)
2478                 return;
2479
2480         do
2481                 efx_ef10_build_rx_desc(
2482                         rx_queue,
2483                         rx_queue->notified_count & rx_queue->ptr_mask);
2484         while (++rx_queue->notified_count != write_count);
2485
2486         wmb();
2487         EFX_POPULATE_DWORD_1(reg, ERF_DZ_RX_DESC_WPTR,
2488                              write_count & rx_queue->ptr_mask);
2489         efx_writed_page(efx, &reg, ER_DZ_RX_DESC_UPD,
2490                         efx_rx_queue_index(rx_queue));
2491 }
2492
2493 static efx_mcdi_async_completer efx_ef10_rx_defer_refill_complete;
2494
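     /* Deferring an RX refill works by asking the firmware
      * (MC_CMD_DRIVER_EVENT) to inject a driver-generated event
      * (EFX_EF10_DRVGEN_EV carrying EFX_EF10_REFILL) into the
      * channel's own event queue, so the refill runs later in normal
      * event-processing context instead of here.
      */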
2495 static void efx_ef10_rx_defer_refill(struct efx_rx_queue *rx_queue)
2496 {
2497         struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
2498         MCDI_DECLARE_BUF(inbuf, MC_CMD_DRIVER_EVENT_IN_LEN);
2499         efx_qword_t event;
2500
2501         EFX_POPULATE_QWORD_2(event,
2502                              ESF_DZ_EV_CODE, EFX_EF10_DRVGEN_EV,
2503                              ESF_DZ_EV_DATA, EFX_EF10_REFILL);
2504
2505         MCDI_SET_DWORD(inbuf, DRIVER_EVENT_IN_EVQ, channel->channel);
2506
2507         /* MCDI_SET_QWORD is not appropriate here since EFX_POPULATE_* has
2508          * already swapped the data to little-endian order.
2509          */
2510         memcpy(MCDI_PTR(inbuf, DRIVER_EVENT_IN_DATA), &event.u64[0],
2511                sizeof(efx_qword_t));
2512
2513         efx_mcdi_rpc_async(channel->efx, MC_CMD_DRIVER_EVENT,
2514                            inbuf, sizeof(inbuf), 0,
2515                            efx_ef10_rx_defer_refill_complete, 0);
2516 }
2517
2518 static void
2519 efx_ef10_rx_defer_refill_complete(struct efx_nic *efx, unsigned long cookie,
2520                                   int rc, efx_dword_t *outbuf,
2521                                   size_t outlen_actual)
2522 {
2523         /* nothing to do */
2524 }
2525
2526 static int efx_ef10_ev_probe(struct efx_channel *channel)
2527 {
2528         return efx_nic_alloc_buffer(channel->efx, &channel->eventq.buf,
2529                                     (channel->eventq_mask + 1) *
2530                                     sizeof(efx_qword_t),
2531                                     GFP_KERNEL);
2532 }
2533
2534 static void efx_ef10_ev_fini(struct efx_channel *channel)
2535 {
2536         MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_EVQ_IN_LEN);
2537         MCDI_DECLARE_BUF_ERR(outbuf);
2538         struct efx_nic *efx = channel->efx;
2539         size_t outlen;
2540         int rc;
2541
2542         MCDI_SET_DWORD(inbuf, FINI_EVQ_IN_INSTANCE, channel->channel);
2543
2544         rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_EVQ, inbuf, sizeof(inbuf),
2545                           outbuf, sizeof(outbuf), &outlen);
2546
2547         if (rc && rc != -EALREADY)
2548                 goto fail;
2549
2550         return;
2551
2552 fail:
2553         efx_mcdi_display_error(efx, MC_CMD_FINI_EVQ, MC_CMD_FINI_EVQ_IN_LEN,
2554                                outbuf, outlen, rc);
2555 }
2556
2557 static int efx_ef10_ev_init(struct efx_channel *channel)
2558 {
2559         MCDI_DECLARE_BUF(inbuf,
2560                          MC_CMD_INIT_EVQ_V2_IN_LEN(EFX_MAX_EVQ_SIZE * 8 /
2561                                                    EFX_BUF_SIZE));
2562         MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_EVQ_V2_OUT_LEN);
2563         size_t entries = channel->eventq.buf.len / EFX_BUF_SIZE;
2564         struct efx_nic *efx = channel->efx;
2565         struct efx_ef10_nic_data *nic_data;
2566         size_t inlen, outlen;
2567         unsigned int enabled, implemented;
2568         dma_addr_t dma_addr;
2569         int rc;
2570         int i;
2571
2572         nic_data = efx->nic_data;
2573
2574         /* Fill event queue with all ones (i.e. empty events) */
2575         memset(channel->eventq.buf.addr, 0xff, channel->eventq.buf.len);
2576
2577         MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_SIZE, channel->eventq_mask + 1);
2578         MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_INSTANCE, channel->channel);
2579         /* INIT_EVQ expects index in vector table, not absolute */
2580         MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_IRQ_NUM, channel->channel);
2581         MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_MODE,
2582                        MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS);
2583         MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_LOAD, 0);
2584         MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_RELOAD, 0);
2585         MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_COUNT_MODE,
2586                        MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS);
2587         MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_COUNT_THRSHLD, 0);
2588
2589         if (nic_data->datapath_caps2 &
2590             1 << MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_EVQ_V2_LBN) {
2591                 /* Use the new generic approach to specifying event queue
2592                  * configuration, requesting lower latency or higher throughput.
2593                  * The options that actually get used appear in the output.
2594                  */
2595                 MCDI_POPULATE_DWORD_2(inbuf, INIT_EVQ_V2_IN_FLAGS,
2596                                       INIT_EVQ_V2_IN_FLAG_INTERRUPTING, 1,
2597                                       INIT_EVQ_V2_IN_FLAG_TYPE,
2598                                       MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_AUTO);
2599         } else {
2600                 bool cut_thru = !(nic_data->datapath_caps &
2601                         1 << MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN);
2602
2603                 MCDI_POPULATE_DWORD_4(inbuf, INIT_EVQ_IN_FLAGS,
2604                                       INIT_EVQ_IN_FLAG_INTERRUPTING, 1,
2605                                       INIT_EVQ_IN_FLAG_RX_MERGE, 1,
2606                                       INIT_EVQ_IN_FLAG_TX_MERGE, 1,
2607                                       INIT_EVQ_IN_FLAG_CUT_THRU, cut_thru);
2608         }
2609
2610         dma_addr = channel->eventq.buf.dma_addr;
2611         for (i = 0; i < entries; ++i) {
2612                 MCDI_SET_ARRAY_QWORD(inbuf, INIT_EVQ_IN_DMA_ADDR, i, dma_addr);
2613                 dma_addr += EFX_BUF_SIZE;
2614         }
2615
2616         inlen = MC_CMD_INIT_EVQ_IN_LEN(entries);
2617
2618         rc = efx_mcdi_rpc(efx, MC_CMD_INIT_EVQ, inbuf, inlen,
2619                           outbuf, sizeof(outbuf), &outlen);
2620
2621         if (outlen >= MC_CMD_INIT_EVQ_V2_OUT_LEN)
2622                 netif_dbg(efx, drv, efx->net_dev,
2623                           "Channel %d using event queue flags %08x\n",
2624                           channel->channel,
2625                           MCDI_DWORD(outbuf, INIT_EVQ_V2_OUT_FLAGS));
2626
2627         /* The IRQ number in the INIT_EVQ response is ignored */
2628         if (channel->channel || rc)
2629                 return rc;
2630
2631         /* Successfully created event queue on channel 0 */
2632         rc = efx_mcdi_get_workarounds(efx, &implemented, &enabled);
2633         if (rc == -ENOSYS) {
2634                 /* GET_WORKAROUNDS itself predates these workarounds, so
2635                  * firmware too old to have the command cannot have them.
2636                  */
2637                 nic_data->workaround_26807 = false;
2638                 nic_data->workaround_61265 = false;
2639                 rc = 0;
2640         } else if (rc) {
2641                 goto fail;
2642         } else {
2643                 nic_data->workaround_26807 =
2644                         !!(enabled & MC_CMD_GET_WORKAROUNDS_OUT_BUG26807);
2645
2646                 if (implemented & MC_CMD_GET_WORKAROUNDS_OUT_BUG26807 &&
2647                     !nic_data->workaround_26807) {
2648                         unsigned int flags;
2649
2650                         rc = efx_mcdi_set_workaround(efx,
2651                                                      MC_CMD_WORKAROUND_BUG26807,
2652                                                      true, &flags);
2653
2654                         if (!rc) {
2655                                 if (flags &
2656                                     1 << MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_LBN) {
2657                                         netif_info(efx, drv, efx->net_dev,
2658                                                    "other functions on NIC have been reset\n");
2659
2660                                         /* With MCFW v4.6.x and earlier, the
2661                                          * boot count will have incremented,
2662                                          * so re-read the warm_boot_count
2663                                          * value now to ensure this function
2664                                          * doesn't think it has changed next
2665                                          * time it checks.
2666                                          */
2667                                         rc = efx_ef10_get_warm_boot_count(efx);
2668                                         if (rc >= 0) {
2669                                                 nic_data->warm_boot_count = rc;
2670                                                 rc = 0;
2671                                         }
2672                                 }
2673                                 nic_data->workaround_26807 = true;
2674                         } else if (rc == -EPERM) {
2675                                 rc = 0;
2676                         }
2677                 }
2678
2679                 nic_data->workaround_61265 =
2680                         !!(implemented & MC_CMD_GET_WORKAROUNDS_OUT_BUG61265);
2681         }
2682
2683         if (!rc)
2684                 return 0;
2685
2686 fail:
2687         efx_ef10_ev_fini(channel);
2688         return rc;
2689 }
2690
2691 static void efx_ef10_ev_remove(struct efx_channel *channel)
2692 {
2693         efx_nic_free_buffer(channel->efx, &channel->eventq.buf);
2694 }
2695
2696 static void efx_ef10_handle_rx_wrong_queue(struct efx_rx_queue *rx_queue,
2697                                            unsigned int rx_queue_label)
2698 {
2699         struct efx_nic *efx = rx_queue->efx;
2700
2701         netif_info(efx, hw, efx->net_dev,
2702                    "rx event arrived on queue %d labeled as queue %u\n",
2703                    efx_rx_queue_index(rx_queue), rx_queue_label);
2704
2705         efx_schedule_reset(efx, RESET_TYPE_DISABLE);
2706 }
2707
2708 static void
2709 efx_ef10_handle_rx_bad_lbits(struct efx_rx_queue *rx_queue,
2710                              unsigned int actual, unsigned int expected)
2711 {
2712         unsigned int dropped = (actual - expected) & rx_queue->ptr_mask;
2713         struct efx_nic *efx = rx_queue->efx;
2714
2715         netif_info(efx, hw, efx->net_dev,
2716                    "dropped %d events (index=%d expected=%d)\n",
2717                    dropped, actual, expected);
2718
2719         efx_schedule_reset(efx, RESET_TYPE_DISABLE);
2720 }
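/* A worked example of the arithmetic above, using hypothetical values:
 * on a 4096-entry ring (ptr_mask = 0xfff), actual = 0x005 and
 * expected = 0xffe give (0x005 - 0xffe) & 0xfff = 0x007, i.e. seven
 * events were lost even though the pointer wrapped through zero.
 */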
2721
2722 /* A partially received scattered RX packet was aborted; clean up. */
2723 static void efx_ef10_handle_rx_abort(struct efx_rx_queue *rx_queue)
2724 {
2725         unsigned int rx_desc_ptr;
2726
2727         netif_dbg(rx_queue->efx, hw, rx_queue->efx->net_dev,
2728                   "scattered RX aborted (dropping %u buffers)\n",
2729                   rx_queue->scatter_n);
2730
2731         rx_desc_ptr = rx_queue->removed_count & rx_queue->ptr_mask;
2732
2733         efx_rx_packet(rx_queue, rx_desc_ptr, rx_queue->scatter_n,
2734                       0, EFX_RX_PKT_DISCARD);
2735
2736         rx_queue->removed_count += rx_queue->scatter_n;
2737         rx_queue->scatter_n = 0;
2738         rx_queue->scatter_len = 0;
2739         ++efx_rx_queue_channel(rx_queue)->n_rx_nodesc_trunc;
2740 }
2741
2742 static int efx_ef10_handle_rx_event(struct efx_channel *channel,
2743                                     const efx_qword_t *event)
2744 {
2745         unsigned int rx_bytes, next_ptr_lbits, rx_queue_label, rx_l4_class;
2746         unsigned int n_descs, n_packets, i;
2747         struct efx_nic *efx = channel->efx;
2748         struct efx_rx_queue *rx_queue;
2749         bool rx_cont;
2750         u16 flags = 0;
2751
2752         if (unlikely(ACCESS_ONCE(efx->reset_pending)))
2753                 return 0;
2754
2755         /* Basic packet information */
2756         rx_bytes = EFX_QWORD_FIELD(*event, ESF_DZ_RX_BYTES);
2757         next_ptr_lbits = EFX_QWORD_FIELD(*event, ESF_DZ_RX_DSC_PTR_LBITS);
2758         rx_queue_label = EFX_QWORD_FIELD(*event, ESF_DZ_RX_QLABEL);
2759         rx_l4_class = EFX_QWORD_FIELD(*event, ESF_DZ_RX_L4_CLASS);
2760         rx_cont = EFX_QWORD_FIELD(*event, ESF_DZ_RX_CONT);
2761
2762         if (EFX_QWORD_FIELD(*event, ESF_DZ_RX_DROP_EVENT))
2763                 netdev_WARN(efx->net_dev, "saw RX_DROP_EVENT: event="
2764                             EFX_QWORD_FMT "\n",
2765                             EFX_QWORD_VAL(*event));
2766
2767         rx_queue = efx_channel_get_rx_queue(channel);
2768
2769         if (unlikely(rx_queue_label != efx_rx_queue_index(rx_queue)))
2770                 efx_ef10_handle_rx_wrong_queue(rx_queue, rx_queue_label);
2771
2772         n_descs = ((next_ptr_lbits - rx_queue->removed_count) &
2773                    ((1 << ESF_DZ_RX_DSC_PTR_LBITS_WIDTH) - 1));
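        /* The RX event carries only the low ESF_DZ_RX_DSC_PTR_LBITS_WIDTH
         * bits of the descriptor pointer, so the distance from
         * removed_count must be taken modulo 2^WIDTH rather than modulo
         * the full ring size.
         */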
2774
2775         if (n_descs != rx_queue->scatter_n + 1) {
2776                 struct efx_ef10_nic_data *nic_data = efx->nic_data;
2777
2778                 /* detect rx abort */
2779                 if (unlikely(n_descs == rx_queue->scatter_n)) {
2780                         if (rx_queue->scatter_n == 0 || rx_bytes != 0)
2781                                 netdev_WARN(efx->net_dev,
2782                                             "invalid RX abort: scatter_n=%u event="
2783                                             EFX_QWORD_FMT "\n",
2784                                             rx_queue->scatter_n,
2785                                             EFX_QWORD_VAL(*event));
2786                         efx_ef10_handle_rx_abort(rx_queue);
2787                         return 0;
2788                 }
2789
2790                 /* Check that RX completion merging is valid, i.e.
2791                  * the current firmware supports it and this is a
2792                  * non-scattered packet.
2793                  */
2794                 if (!(nic_data->datapath_caps &
2795                       (1 << MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN)) ||
2796                     rx_queue->scatter_n != 0 || rx_cont) {
2797                         efx_ef10_handle_rx_bad_lbits(
2798                                 rx_queue, next_ptr_lbits,
2799                                 (rx_queue->removed_count +
2800                                  rx_queue->scatter_n + 1) &
2801                                 ((1 << ESF_DZ_RX_DSC_PTR_LBITS_WIDTH) - 1));
2802                         return 0;
2803                 }
2804
2805                 /* Merged completion for multiple non-scattered packets */
2806                 rx_queue->scatter_n = 1;
2807                 rx_queue->scatter_len = 0;
2808                 n_packets = n_descs;
2809                 ++channel->n_rx_merge_events;
2810                 channel->n_rx_merge_packets += n_packets;
2811                 flags |= EFX_RX_PKT_PREFIX_LEN;
2812         } else {
2813                 ++rx_queue->scatter_n;
2814                 rx_queue->scatter_len += rx_bytes;
2815                 if (rx_cont)
2816                         return 0;
2817                 n_packets = 1;
2818         }
2819
2820         if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_RX_ECRC_ERR)))
2821                 flags |= EFX_RX_PKT_DISCARD;
2822
2823         if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_RX_IPCKSUM_ERR))) {
2824                 channel->n_rx_ip_hdr_chksum_err += n_packets;
2825         } else if (unlikely(EFX_QWORD_FIELD(*event,
2826                                             ESF_DZ_RX_TCPUDP_CKSUM_ERR))) {
2827                 channel->n_rx_tcp_udp_chksum_err += n_packets;
2828         } else if (rx_l4_class == ESE_DZ_L4_CLASS_TCP ||
2829                    rx_l4_class == ESE_DZ_L4_CLASS_UDP) {
2830                 flags |= EFX_RX_PKT_CSUMMED;
2831         }
2832
2833         if (rx_l4_class == ESE_DZ_L4_CLASS_TCP)
2834                 flags |= EFX_RX_PKT_TCP;
2835
2836         channel->irq_mod_score += 2 * n_packets;
2837
2838         /* Handle received packet(s) */
2839         for (i = 0; i < n_packets; i++) {
2840                 efx_rx_packet(rx_queue,
2841                               rx_queue->removed_count & rx_queue->ptr_mask,
2842                               rx_queue->scatter_n, rx_queue->scatter_len,
2843                               flags);
2844                 rx_queue->removed_count += rx_queue->scatter_n;
2845         }
2846
2847         rx_queue->scatter_n = 0;
2848         rx_queue->scatter_len = 0;
2849
2850         return n_packets;
2851 }
2852
2853 static int
2854 efx_ef10_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
2855 {
2856         struct efx_nic *efx = channel->efx;
2857         struct efx_tx_queue *tx_queue;
2858         unsigned int tx_ev_desc_ptr;
2859         unsigned int tx_ev_q_label;
2860         int tx_descs = 0;
2861
2862         if (unlikely(ACCESS_ONCE(efx->reset_pending)))
2863                 return 0;
2864
2865         if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_TX_DROP_EVENT)))
2866                 return 0;
2867
2868         /* Transmit completion */
2869         tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, ESF_DZ_TX_DESCR_INDX);
2870         tx_ev_q_label = EFX_QWORD_FIELD(*event, ESF_DZ_TX_QLABEL);
2871         tx_queue = efx_channel_get_tx_queue(channel,
2872                                             tx_ev_q_label % EFX_TXQ_TYPES);
2873         tx_descs = ((tx_ev_desc_ptr + 1 - tx_queue->read_count) &
2874                     tx_queue->ptr_mask);
2875         efx_xmit_done(tx_queue, tx_ev_desc_ptr & tx_queue->ptr_mask);
2876
2877         return tx_descs;
2878 }
2879
2880 static void
2881 efx_ef10_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
2882 {
2883         struct efx_nic *efx = channel->efx;
2884         int subcode;
2885
2886         subcode = EFX_QWORD_FIELD(*event, ESF_DZ_DRV_SUB_CODE);
2887
2888         switch (subcode) {
2889         case ESE_DZ_DRV_TIMER_EV:
2890         case ESE_DZ_DRV_WAKE_UP_EV:
2891                 break;
2892         case ESE_DZ_DRV_START_UP_EV:
2893                 /* Event queue initialisation is complete; nothing to do. */
2894                 break;
2895         default:
2896                 netif_err(efx, hw, efx->net_dev,
2897                           "channel %d unknown driver event type %d"
2898                           " (data " EFX_QWORD_FMT ")\n",
2899                           channel->channel, subcode,
2900                           EFX_QWORD_VAL(*event));
2901
2902         }
2903 }
2904
2905 static void efx_ef10_handle_driver_generated_event(struct efx_channel *channel,
2906                                                    efx_qword_t *event)
2907 {
2908         struct efx_nic *efx = channel->efx;
2909         u32 subcode;
2910
2911         subcode = EFX_QWORD_FIELD(*event, EFX_DWORD_0);
2912
2913         switch (subcode) {
2914         case EFX_EF10_TEST:
2915                 channel->event_test_cpu = raw_smp_processor_id();
2916                 break;
2917         case EFX_EF10_REFILL:
2918                 /* The queue must be empty, so we won't receive any RX
2919                  * events and efx_process_channel() won't refill the
2920                  * queue.  Refill it here.
2921                  */
2922                 efx_fast_push_rx_descriptors(&channel->rx_queue, true);
2923                 break;
2924         default:
2925                 netif_err(efx, hw, efx->net_dev,
2926                           "channel %d unknown driver event type %u"
2927                           " (data " EFX_QWORD_FMT ")\n",
2928                           channel->channel, (unsigned) subcode,
2929                           EFX_QWORD_VAL(*event));
2930         }
2931 }
2932
2933 static int efx_ef10_ev_process(struct efx_channel *channel, int quota)
2934 {
2935         struct efx_nic *efx = channel->efx;
2936         efx_qword_t event, *p_event;
2937         unsigned int read_ptr;
2938         int ev_code;
2939         int tx_descs = 0;
2940         int spent = 0;
2941
2942         if (quota <= 0)
2943                 return spent;
2944
2945         read_ptr = channel->eventq_read_ptr;
2946
2947         for (;;) {
2948                 p_event = efx_event(channel, read_ptr);
2949                 event = *p_event;
2950
2951                 if (!efx_event_present(&event))
2952                         break;
2953
2954                 EFX_SET_QWORD(*p_event);
2955
2956                 ++read_ptr;
2957
2958                 ev_code = EFX_QWORD_FIELD(event, ESF_DZ_EV_CODE);
2959
2960                 netif_vdbg(efx, drv, efx->net_dev,
2961                            "processing event on %d " EFX_QWORD_FMT "\n",
2962                            channel->channel, EFX_QWORD_VAL(event));
2963
2964                 switch (ev_code) {
2965                 case ESE_DZ_EV_CODE_MCDI_EV:
2966                         efx_mcdi_process_event(channel, &event);
2967                         break;
2968                 case ESE_DZ_EV_CODE_RX_EV:
2969                         spent += efx_ef10_handle_rx_event(channel, &event);
2970                         if (spent >= quota) {
2971                                 /* XXX can we split a merged event to
2972                                  * avoid going over-quota?
2973                                  */
2974                                 spent = quota;
2975                                 goto out;
2976                         }
2977                         break;
2978                 case ESE_DZ_EV_CODE_TX_EV:
2979                         tx_descs += efx_ef10_handle_tx_event(channel, &event);
2980                         if (tx_descs > efx->txq_entries) {
2981                                 spent = quota;
2982                                 goto out;
2983                         } else if (++spent == quota) {
2984                                 goto out;
2985                         }
2986                         break;
2987                 case ESE_DZ_EV_CODE_DRIVER_EV:
2988                         efx_ef10_handle_driver_event(channel, &event);
2989                         if (++spent == quota)
2990                                 goto out;
2991                         break;
2992                 case EFX_EF10_DRVGEN_EV:
2993                         efx_ef10_handle_driver_generated_event(channel, &event);
2994                         break;
2995                 default:
2996                         netif_err(efx, hw, efx->net_dev,
2997                                   "channel %d unknown event type %d"
2998                                   " (data " EFX_QWORD_FMT ")\n",
2999                                   channel->channel, ev_code,
3000                                   EFX_QWORD_VAL(event));
3001                 }
3002         }
3003
3004 out:
3005         channel->eventq_read_ptr = read_ptr;
3006         return spent;
3007 }
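/* Note that the quota accounting above is deliberately uneven: one RX
 * event may describe several merged packets and so consume several
 * units of quota, while a TX event counts as a single unit unless its
 * accumulated descriptor total overflows txq_entries.
 */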
3008
3009 static void efx_ef10_ev_read_ack(struct efx_channel *channel)
3010 {
3011         struct efx_nic *efx = channel->efx;
3012         efx_dword_t rptr;
3013
3014         if (EFX_EF10_WORKAROUND_35388(efx)) {
3015                 BUILD_BUG_ON(EFX_MIN_EVQ_SIZE <
3016                              (1 << ERF_DD_EVQ_IND_RPTR_WIDTH));
3017                 BUILD_BUG_ON(EFX_MAX_EVQ_SIZE >
3018                              (1 << 2 * ERF_DD_EVQ_IND_RPTR_WIDTH));
3019
3020                 EFX_POPULATE_DWORD_2(rptr, ERF_DD_EVQ_IND_RPTR_FLAGS,
3021                                      EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH,
3022                                      ERF_DD_EVQ_IND_RPTR,
3023                                      (channel->eventq_read_ptr &
3024                                       channel->eventq_mask) >>
3025                                      ERF_DD_EVQ_IND_RPTR_WIDTH);
3026                 efx_writed_page(efx, &rptr, ER_DD_EVQ_INDIRECT,
3027                                 channel->channel);
3028                 EFX_POPULATE_DWORD_2(rptr, ERF_DD_EVQ_IND_RPTR_FLAGS,
3029                                      EFE_DD_EVQ_IND_RPTR_FLAGS_LOW,
3030                                      ERF_DD_EVQ_IND_RPTR,
3031                                      channel->eventq_read_ptr &
3032                                      ((1 << ERF_DD_EVQ_IND_RPTR_WIDTH) - 1));
3033                 efx_writed_page(efx, &rptr, ER_DD_EVQ_INDIRECT,
3034                                 channel->channel);
3035         } else {
3036                 EFX_POPULATE_DWORD_1(rptr, ERF_DZ_EVQ_RPTR,
3037                                      channel->eventq_read_ptr &
3038                                      channel->eventq_mask);
3039                 efx_writed_page(efx, &rptr, ER_DZ_EVQ_RPTR, channel->channel);
3040         }
3041 }
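/* A minimal sketch of the split write performed under workaround 35388,
 * with write_indirect() standing in for the EFX_POPULATE_DWORD_2() +
 * efx_writed_page() pairs above, and assuming a hypothetical 6-bit
 * field width (the real width is ERF_DD_EVQ_IND_RPTR_WIDTH):
 *
 *     rptr = channel->eventq_read_ptr & channel->eventq_mask;
 *     write_indirect(HIGH, rptr >> 6);
 *     write_indirect(LOW,  rptr & ((1 << 6) - 1));
 *
 * The BUILD_BUG_ON()s guarantee that any legal event queue size fits
 * within the two halves.
 */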
3042
3043 static void efx_ef10_ev_test_generate(struct efx_channel *channel)
3044 {
3045         MCDI_DECLARE_BUF(inbuf, MC_CMD_DRIVER_EVENT_IN_LEN);
3046         struct efx_nic *efx = channel->efx;
3047         efx_qword_t event;
3048         int rc;
3049
3050         EFX_POPULATE_QWORD_2(event,
3051                              ESF_DZ_EV_CODE, EFX_EF10_DRVGEN_EV,
3052                              ESF_DZ_EV_DATA, EFX_EF10_TEST);
3053
3054         MCDI_SET_DWORD(inbuf, DRIVER_EVENT_IN_EVQ, channel->channel);
3055
3056         /* MCDI_SET_QWORD is not appropriate here since EFX_POPULATE_* has
3057          * already swapped the data to little-endian order.
3058          */
3059         memcpy(MCDI_PTR(inbuf, DRIVER_EVENT_IN_DATA), &event.u64[0],
3060                sizeof(efx_qword_t));
3061
3062         rc = efx_mcdi_rpc(efx, MC_CMD_DRIVER_EVENT, inbuf, sizeof(inbuf),
3063                           NULL, 0, NULL);
3064         if (rc != 0)
3065                 goto fail;
3066
3067         return;
3068
3069 fail:
3070         WARN_ON(true);
3071         netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
3072 }
3073
3074 void efx_ef10_handle_drain_event(struct efx_nic *efx)
3075 {
3076         if (atomic_dec_and_test(&efx->active_queues))
3077                 wake_up(&efx->flush_wq);
3078
3079         WARN_ON(atomic_read(&efx->active_queues) < 0);
3080 }
3081
3082 static int efx_ef10_fini_dmaq(struct efx_nic *efx)
3083 {
3084         struct efx_ef10_nic_data *nic_data = efx->nic_data;
3085         struct efx_channel *channel;
3086         struct efx_tx_queue *tx_queue;
3087         struct efx_rx_queue *rx_queue;
3088         int pending;
3089
3090         /* If the MC has just rebooted, the TX/RX queues will have already been
3091          * torn down, but efx->active_queues needs to be set to zero.
3092          */
3093         if (nic_data->must_realloc_vis) {
3094                 atomic_set(&efx->active_queues, 0);
3095                 return 0;
3096         }
3097
3098         /* Do not attempt to write to the NIC during EEH recovery */
3099         if (efx->state != STATE_RECOVERY) {
3100                 efx_for_each_channel(channel, efx) {
3101                         efx_for_each_channel_rx_queue(rx_queue, channel)
3102                                 efx_ef10_rx_fini(rx_queue);
3103                         efx_for_each_channel_tx_queue(tx_queue, channel)
3104                                 efx_ef10_tx_fini(tx_queue);
3105                 }
3106
3107                 wait_event_timeout(efx->flush_wq,
3108                                    atomic_read(&efx->active_queues) == 0,
3109                                    msecs_to_jiffies(EFX_MAX_FLUSH_TIME));
3110                 pending = atomic_read(&efx->active_queues);
3111                 if (pending) {
3112                         netif_err(efx, hw, efx->net_dev, "failed to flush %d queues\n",
3113                                   pending);
3114                         return -ETIMEDOUT;
3115                 }
3116         }
3117
3118         return 0;
3119 }
3120
3121 static void efx_ef10_prepare_flr(struct efx_nic *efx)
3122 {
3123         atomic_set(&efx->active_queues, 0);
3124 }
3125
3126 static bool efx_ef10_filter_equal(const struct efx_filter_spec *left,
3127                                   const struct efx_filter_spec *right)
3128 {
3129         if ((left->match_flags ^ right->match_flags) |
3130             ((left->flags ^ right->flags) &
3131              (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)))
3132                 return false;
3133
3134         return memcmp(&left->outer_vid, &right->outer_vid,
3135                       sizeof(struct efx_filter_spec) -
3136                       offsetof(struct efx_filter_spec, outer_vid)) == 0;
3137 }
3138
3139 static unsigned int efx_ef10_filter_hash(const struct efx_filter_spec *spec)
3140 {
3141         BUILD_BUG_ON(offsetof(struct efx_filter_spec, outer_vid) & 3);
3142         return jhash2((const u32 *)&spec->outer_vid,
3143                       (sizeof(struct efx_filter_spec) -
3144                        offsetof(struct efx_filter_spec, outer_vid)) / 4,
3145                       0);
3146         /* XXX should we randomise the initval? */
3147 }
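/* Both efx_ef10_filter_equal() and efx_ef10_filter_hash() rely on the
 * layout of struct efx_filter_spec: every match field is assumed to sit
 * at or after outer_vid, so one contiguous span covers the whole match
 * tuple.  The BUILD_BUG_ON() above enforces the 4-byte alignment that
 * jhash2() requires.
 */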
3148
3149 /* Decide whether a filter should be exclusive or else should allow
3150  * delivery to additional recipients.  Currently we decide that
3151  * filters for specific local unicast MAC and IP addresses are
3152  * exclusive.
3153  */
3154 static bool efx_ef10_filter_is_exclusive(const struct efx_filter_spec *spec)
3155 {
3156         if (spec->match_flags & EFX_FILTER_MATCH_LOC_MAC &&
3157             !is_multicast_ether_addr(spec->loc_mac))
3158                 return true;
3159
3160         if ((spec->match_flags &
3161              (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) ==
3162             (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) {
3163                 if (spec->ether_type == htons(ETH_P_IP) &&
3164                     !ipv4_is_multicast(spec->loc_host[0]))
3165                         return true;
3166                 if (spec->ether_type == htons(ETH_P_IPV6) &&
3167                     ((const u8 *)spec->loc_host)[0] != 0xff)
3168                         return true;
3169         }
3170
3171         return false;
3172 }
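/* For example (hypothetical addresses): a filter matching the local
 * unicast MAC 00:0f:53:00:00:01 is exclusive, whereas filters matching
 * the IPv4 multicast address 224.0.0.1 or the IPv6 all-nodes address
 * ff02::1 are not and may deliver to additional recipients.
 */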
3173
3174 static struct efx_filter_spec *
3175 efx_ef10_filter_entry_spec(const struct efx_ef10_filter_table *table,
3176                            unsigned int filter_idx)
3177 {
3178         return (struct efx_filter_spec *)(table->entry[filter_idx].spec &
3179                                           ~EFX_EF10_FILTER_FLAGS);
3180 }
3181
3182 static unsigned int
3183 efx_ef10_filter_entry_flags(const struct efx_ef10_filter_table *table,
3184                             unsigned int filter_idx)
3185 {
3186         return table->entry[filter_idx].spec & EFX_EF10_FILTER_FLAGS;
3187 }
3188
3189 static void
3190 efx_ef10_filter_set_entry(struct efx_ef10_filter_table *table,
3191                           unsigned int filter_idx,
3192                           const struct efx_filter_spec *spec,
3193                           unsigned int flags)
3194 {
3195         table->entry[filter_idx].spec = (unsigned long)spec | flags;
3196 }
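/* The three helpers above implement simple pointer tagging: spec
 * pointers come from kmalloc() and are therefore at least 4-byte
 * aligned, leaving the bottom two bits of the pointer free to hold
 * EFX_EF10_FILTER_FLAG_BUSY and EFX_EF10_FILTER_FLAG_AUTO_OLD.
 */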
3197
3198 static void efx_ef10_filter_push_prep(struct efx_nic *efx,
3199                                       const struct efx_filter_spec *spec,
3200                                       efx_dword_t *inbuf, u64 handle,
3201                                       bool replacing)
3202 {
3203         struct efx_ef10_nic_data *nic_data = efx->nic_data;
3204         u32 flags = spec->flags;
3205
3206         memset(inbuf, 0, MC_CMD_FILTER_OP_IN_LEN);
3207
3208         /* Remove RSS flag if we don't have an RSS context. */
3209         if (flags & EFX_FILTER_FLAG_RX_RSS &&
3210             spec->rss_context == EFX_FILTER_RSS_CONTEXT_DEFAULT &&
3211             nic_data->rx_rss_context == EFX_EF10_RSS_CONTEXT_INVALID)
3212                 flags &= ~EFX_FILTER_FLAG_RX_RSS;
3213
3214         if (replacing) {
3215                 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
3216                                MC_CMD_FILTER_OP_IN_OP_REPLACE);
3217                 MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE, handle);
3218         } else {
3219                 u32 match_fields = 0;
3220
3221                 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
3222                                efx_ef10_filter_is_exclusive(spec) ?
3223                                MC_CMD_FILTER_OP_IN_OP_INSERT :
3224                                MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE);
3225
3226                 /* Convert match flags and values.  Unlike almost
3227                  * everything else in MCDI, these fields are in
3228                  * network byte order.
3229                  */
3230                 if (spec->match_flags & EFX_FILTER_MATCH_LOC_MAC_IG)
3231                         match_fields |=
3232                                 is_multicast_ether_addr(spec->loc_mac) ?
3233                                 1 << MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_LBN :
3234                                 1 << MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_LBN;
3235 #define COPY_FIELD(gen_flag, gen_field, mcdi_field)                          \
3236                 if (spec->match_flags & EFX_FILTER_MATCH_ ## gen_flag) {     \
3237                         match_fields |=                                      \
3238                                 1 << MC_CMD_FILTER_OP_IN_MATCH_ ##           \
3239                                 mcdi_field ## _LBN;                          \
3240                         BUILD_BUG_ON(                                        \
3241                                 MC_CMD_FILTER_OP_IN_ ## mcdi_field ## _LEN < \
3242                                 sizeof(spec->gen_field));                    \
3243                         memcpy(MCDI_PTR(inbuf, FILTER_OP_IN_ ## mcdi_field), \
3244                                &spec->gen_field, sizeof(spec->gen_field));   \
3245                 }
3246                 COPY_FIELD(REM_HOST, rem_host, SRC_IP);
3247                 COPY_FIELD(LOC_HOST, loc_host, DST_IP);
3248                 COPY_FIELD(REM_MAC, rem_mac, SRC_MAC);
3249                 COPY_FIELD(REM_PORT, rem_port, SRC_PORT);
3250                 COPY_FIELD(LOC_MAC, loc_mac, DST_MAC);
3251                 COPY_FIELD(LOC_PORT, loc_port, DST_PORT);
3252                 COPY_FIELD(ETHER_TYPE, ether_type, ETHER_TYPE);
3253                 COPY_FIELD(INNER_VID, inner_vid, INNER_VLAN);
3254                 COPY_FIELD(OUTER_VID, outer_vid, OUTER_VLAN);
3255                 COPY_FIELD(IP_PROTO, ip_proto, IP_PROTO);
3256 #undef COPY_FIELD
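                /* For reference, COPY_FIELD(LOC_PORT, loc_port, DST_PORT)
                 * above expands to roughly:
                 *
                 *   if (spec->match_flags & EFX_FILTER_MATCH_LOC_PORT) {
                 *       match_fields |=
                 *           1 << MC_CMD_FILTER_OP_IN_MATCH_DST_PORT_LBN;
                 *       memcpy(MCDI_PTR(inbuf, FILTER_OP_IN_DST_PORT),
                 *              &spec->loc_port, sizeof(spec->loc_port));
                 *   }
                 *
                 * (omitting the BUILD_BUG_ON() size check).
                 */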
3257                 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_MATCH_FIELDS,
3258                                match_fields);
3259         }
3260
3261         MCDI_SET_DWORD(inbuf, FILTER_OP_IN_PORT_ID, nic_data->vport_id);
3262         MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_DEST,
3263                        spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP ?
3264                        MC_CMD_FILTER_OP_IN_RX_DEST_DROP :
3265                        MC_CMD_FILTER_OP_IN_RX_DEST_HOST);
3266         MCDI_SET_DWORD(inbuf, FILTER_OP_IN_TX_DOMAIN, 0);
3267         MCDI_SET_DWORD(inbuf, FILTER_OP_IN_TX_DEST,
3268                        MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT);
3269         MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_QUEUE,
3270                        spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP ?
3271                        0 : spec->dmaq_id);
3272         MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_MODE,
3273                        (flags & EFX_FILTER_FLAG_RX_RSS) ?
3274                        MC_CMD_FILTER_OP_IN_RX_MODE_RSS :
3275                        MC_CMD_FILTER_OP_IN_RX_MODE_SIMPLE);
3276         if (flags & EFX_FILTER_FLAG_RX_RSS)
3277                 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_CONTEXT,
3278                                spec->rss_context !=
3279                                EFX_FILTER_RSS_CONTEXT_DEFAULT ?
3280                                spec->rss_context : nic_data->rx_rss_context);
3281 }
3282
3283 static int efx_ef10_filter_push(struct efx_nic *efx,
3284                                 const struct efx_filter_spec *spec,
3285                                 u64 *handle, bool replacing)
3286 {
3287         MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN);
3288         MCDI_DECLARE_BUF(outbuf, MC_CMD_FILTER_OP_OUT_LEN);
3289         int rc;
3290
3291         efx_ef10_filter_push_prep(efx, spec, inbuf, *handle, replacing);
3292         rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf),
3293                           outbuf, sizeof(outbuf), NULL);
3294         if (rc == 0)
3295                 *handle = MCDI_QWORD(outbuf, FILTER_OP_OUT_HANDLE);
3296         if (rc == -ENOSPC)
3297                 rc = -EBUSY; /* to match efx_farch_filter_insert() */
3298         return rc;
3299 }
3300
3301 static u32 efx_ef10_filter_mcdi_flags_from_spec(const struct efx_filter_spec *spec)
3302 {
3303         unsigned int match_flags = spec->match_flags;
3304         u32 mcdi_flags = 0;
3305
3306         if (match_flags & EFX_FILTER_MATCH_LOC_MAC_IG) {
3307                 match_flags &= ~EFX_FILTER_MATCH_LOC_MAC_IG;
3308                 mcdi_flags |=
3309                         is_multicast_ether_addr(spec->loc_mac) ?
3310                         (1 << MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_LBN) :
3311                         (1 << MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_LBN);
3312         }
3313
3314 #define MAP_FILTER_TO_MCDI_FLAG(gen_flag, mcdi_field) {                 \
3315                 unsigned int old_match_flags = match_flags;             \
3316                 match_flags &= ~EFX_FILTER_MATCH_ ## gen_flag;          \
3317                 if (match_flags != old_match_flags)                     \
3318                         mcdi_flags |=                                   \
3319                                 (1 << MC_CMD_FILTER_OP_IN_MATCH_ ##     \
3320                                  mcdi_field ## _LBN);                   \
3321         }
3322         MAP_FILTER_TO_MCDI_FLAG(REM_HOST, SRC_IP);
3323         MAP_FILTER_TO_MCDI_FLAG(LOC_HOST, DST_IP);
3324         MAP_FILTER_TO_MCDI_FLAG(REM_MAC, SRC_MAC);
3325         MAP_FILTER_TO_MCDI_FLAG(REM_PORT, SRC_PORT);
3326         MAP_FILTER_TO_MCDI_FLAG(LOC_MAC, DST_MAC);
3327         MAP_FILTER_TO_MCDI_FLAG(LOC_PORT, DST_PORT);
3328         MAP_FILTER_TO_MCDI_FLAG(ETHER_TYPE, ETHER_TYPE);
3329         MAP_FILTER_TO_MCDI_FLAG(INNER_VID, INNER_VLAN);
3330         MAP_FILTER_TO_MCDI_FLAG(OUTER_VID, OUTER_VLAN);
3331         MAP_FILTER_TO_MCDI_FLAG(IP_PROTO, IP_PROTO);
3332 #undef MAP_FILTER_TO_MCDI_FLAG
3333
3334         /* Did we map them all? */
3335         WARN_ON_ONCE(match_flags);
3336
3337         return mcdi_flags;
3338 }
3339
3340 static int efx_ef10_filter_pri(struct efx_ef10_filter_table *table,
3341                                const struct efx_filter_spec *spec)
3342 {
3343         u32 mcdi_flags = efx_ef10_filter_mcdi_flags_from_spec(spec);
3344         unsigned int match_pri;
3345
3346         for (match_pri = 0;
3347              match_pri < table->rx_match_count;
3348              match_pri++)
3349                 if (table->rx_match_mcdi_flags[match_pri] == mcdi_flags)
3350                         return match_pri;
3351
3352         return -EPROTONOSUPPORT;
3353 }
3354
3355 static s32 efx_ef10_filter_insert(struct efx_nic *efx,
3356                                   struct efx_filter_spec *spec,
3357                                   bool replace_equal)
3358 {
3359         struct efx_ef10_filter_table *table = efx->filter_state;
3360         DECLARE_BITMAP(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT);
3361         struct efx_filter_spec *saved_spec;
3362         unsigned int match_pri, hash;
3363         unsigned int priv_flags;
3364         bool replacing = false;
3365         int ins_index = -1;
3366         DEFINE_WAIT(wait);
3367         bool is_mc_recip;
3368         s32 rc;
3369
3370         /* For now, only support RX filters */
3371         if ((spec->flags & (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)) !=
3372             EFX_FILTER_FLAG_RX)
3373                 return -EINVAL;
3374
3375         rc = efx_ef10_filter_pri(table, spec);
3376         if (rc < 0)
3377                 return rc;
3378         match_pri = rc;
3379
3380         hash = efx_ef10_filter_hash(spec);
3381         is_mc_recip = efx_filter_is_mc_recipient(spec);
3382         if (is_mc_recip)
3383                 bitmap_zero(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT);
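        /* For multicast recipients, mc_rem_map records (by probe depth)
         * which existing same-tuple filters of lower, or with
         * replace_equal equal, priority should be unsubscribed from the
         * firmware once the new filter has been pushed successfully.
         */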
3384
3385         /* Find any existing filters with the same match tuple or
3386          * else a free slot to insert at.  If any of them are busy,
3387          * we have to wait and retry.
3388          */
3389         for (;;) {
3390                 unsigned int depth = 1;
3391                 unsigned int i;
3392
3393                 spin_lock_bh(&efx->filter_lock);
3394
3395                 for (;;) {
3396                         i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1);
3397                         saved_spec = efx_ef10_filter_entry_spec(table, i);
3398
3399                         if (!saved_spec) {
3400                                 if (ins_index < 0)
3401                                         ins_index = i;
3402                         } else if (efx_ef10_filter_equal(spec, saved_spec)) {
3403                                 if (table->entry[i].spec &
3404                                     EFX_EF10_FILTER_FLAG_BUSY)
3405                                         break;
3406                                 if (spec->priority < saved_spec->priority &&
3407                                     spec->priority != EFX_FILTER_PRI_AUTO) {
3408                                         rc = -EPERM;
3409                                         goto out_unlock;
3410                                 }
3411                                 if (!is_mc_recip) {
3412                                         /* This is the only one */
3413                                         if (spec->priority ==
3414                                             saved_spec->priority &&
3415                                             !replace_equal) {
3416                                                 rc = -EEXIST;
3417                                                 goto out_unlock;
3418                                         }
3419                                         ins_index = i;
3420                                         goto found;
3421                                 } else if (spec->priority >
3422                                            saved_spec->priority ||
3423                                            (spec->priority ==
3424                                             saved_spec->priority &&
3425                                             replace_equal)) {
3426                                         if (ins_index < 0)
3427                                                 ins_index = i;
3428                                         else
3429                                                 __set_bit(depth, mc_rem_map);
3430                                 }
3431                         }
3432
3433                         /* Once we reach the maximum search depth, use
3434                          * the first suitable slot or return -EBUSY if
3435                          * there was none
3436                          */
3437                         if (depth == EFX_EF10_FILTER_SEARCH_LIMIT) {
3438                                 if (ins_index < 0) {
3439                                         rc = -EBUSY;
3440                                         goto out_unlock;
3441                                 }
3442                                 goto found;
3443                         }
3444
3445                         ++depth;
3446                 }
3447
3448                 prepare_to_wait(&table->waitq, &wait, TASK_UNINTERRUPTIBLE);
3449                 spin_unlock_bh(&efx->filter_lock);
3450                 schedule();
3451         }
3452
3453 found:
3454         /* Create a software table entry if necessary, and mark it
3455          * busy.  We might yet fail to insert, but any attempt to
3456          * insert a conflicting filter while we're waiting for the
3457          * firmware must find the busy entry.
3458          */
3459         saved_spec = efx_ef10_filter_entry_spec(table, ins_index);
3460         if (saved_spec) {
3461                 if (spec->priority == EFX_FILTER_PRI_AUTO &&
3462                     saved_spec->priority >= EFX_FILTER_PRI_AUTO) {
3463                         /* Just make sure it won't be removed */
3464                         if (saved_spec->priority > EFX_FILTER_PRI_AUTO)
3465                                 saved_spec->flags |= EFX_FILTER_FLAG_RX_OVER_AUTO;
3466                         table->entry[ins_index].spec &=
3467                                 ~EFX_EF10_FILTER_FLAG_AUTO_OLD;
3468                         rc = ins_index;
3469                         goto out_unlock;
3470                 }
3471                 replacing = true;
3472                 priv_flags = efx_ef10_filter_entry_flags(table, ins_index);
3473         } else {
3474                 saved_spec = kmalloc(sizeof(*spec), GFP_ATOMIC);
3475                 if (!saved_spec) {
3476                         rc = -ENOMEM;
3477                         goto out_unlock;
3478                 }
3479                 *saved_spec = *spec;
3480                 priv_flags = 0;
3481         }
3482         efx_ef10_filter_set_entry(table, ins_index, saved_spec,
3483                                   priv_flags | EFX_EF10_FILTER_FLAG_BUSY);
3484
3485         /* Mark lower-priority multicast recipients busy prior to removal */
3486         if (is_mc_recip) {
3487                 unsigned int depth, i;
3488
3489                 for (depth = 0; depth < EFX_EF10_FILTER_SEARCH_LIMIT; depth++) {
3490                         i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1);
3491                         if (test_bit(depth, mc_rem_map))
3492                                 table->entry[i].spec |=
3493                                         EFX_EF10_FILTER_FLAG_BUSY;
3494                 }
3495         }
3496
3497         spin_unlock_bh(&efx->filter_lock);
3498
3499         rc = efx_ef10_filter_push(efx, spec, &table->entry[ins_index].handle,
3500                                   replacing);
3501
3502         /* Finalise the software table entry */
3503         spin_lock_bh(&efx->filter_lock);
3504         if (rc == 0) {
3505                 if (replacing) {
3506                         /* Update the fields that may differ */
3507                         if (saved_spec->priority == EFX_FILTER_PRI_AUTO)
3508                                 saved_spec->flags |=
3509                                         EFX_FILTER_FLAG_RX_OVER_AUTO;
3510                         saved_spec->priority = spec->priority;
3511                         saved_spec->flags &= EFX_FILTER_FLAG_RX_OVER_AUTO;
3512                         saved_spec->flags |= spec->flags;
3513                         saved_spec->rss_context = spec->rss_context;
3514                         saved_spec->dmaq_id = spec->dmaq_id;
3515                 }
3516         } else if (!replacing) {
3517                 kfree(saved_spec);
3518                 saved_spec = NULL;
3519         }
3520         efx_ef10_filter_set_entry(table, ins_index, saved_spec, priv_flags);
3521
3522         /* Remove and finalise entries for lower-priority multicast
3523          * recipients
3524          */
3525         if (is_mc_recip) {
3526                 MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN);
3527                 unsigned int depth, i;
3528
3529                 memset(inbuf, 0, sizeof(inbuf));
3530
3531                 for (depth = 0; depth < EFX_EF10_FILTER_SEARCH_LIMIT; depth++) {
3532                         if (!test_bit(depth, mc_rem_map))
3533                                 continue;
3534
3535                         i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1);
3536                         saved_spec = efx_ef10_filter_entry_spec(table, i);
3537                         priv_flags = efx_ef10_filter_entry_flags(table, i);
3538
3539                         if (rc == 0) {
3540                                 spin_unlock_bh(&efx->filter_lock);
3541                                 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
3542                                                MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE);
3543                                 MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
3544                                                table->entry[i].handle);
3545                                 rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP,
3546                                                   inbuf, sizeof(inbuf),
3547                                                   NULL, 0, NULL);
3548                                 spin_lock_bh(&efx->filter_lock);
3549                         }
3550
3551                         if (rc == 0) {
3552                                 kfree(saved_spec);
3553                                 saved_spec = NULL;
3554                                 priv_flags = 0;
3555                         } else {
3556                                 priv_flags &= ~EFX_EF10_FILTER_FLAG_BUSY;
3557                         }
3558                         efx_ef10_filter_set_entry(table, i, saved_spec,
3559                                                   priv_flags);
3560                 }
3561         }
3562
3563         /* If successful, return the inserted filter ID */
3564         if (rc == 0)
3565                 rc = match_pri * HUNT_FILTER_TBL_ROWS + ins_index;
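        /* The ID returned to callers encodes both coordinates of the
         * filter: the table row in the low bits plus the match priority
         * scaled by HUNT_FILTER_TBL_ROWS.  The remove and get paths
         * reverse this with % and / respectively.
         */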
3566
3567         wake_up_all(&table->waitq);
3568 out_unlock:
3569         spin_unlock_bh(&efx->filter_lock);
3570         finish_wait(&table->waitq, &wait);
3571         return rc;
3572 }
3573
3574 static void efx_ef10_filter_update_rx_scatter(struct efx_nic *efx)
3575 {
3576         /* no need to do anything here on EF10 */
3577 }
3578
3579 /* Remove a filter.
3580  * If !by_index, remove by ID
3581  * If by_index, remove by index
3582  * Filter ID may come from userland and must be range-checked.
3583  */
3584 static int efx_ef10_filter_remove_internal(struct efx_nic *efx,
3585                                            unsigned int priority_mask,
3586                                            u32 filter_id, bool by_index)
3587 {
3588         unsigned int filter_idx = filter_id % HUNT_FILTER_TBL_ROWS;
3589         struct efx_ef10_filter_table *table = efx->filter_state;
3590         MCDI_DECLARE_BUF(inbuf,
3591                          MC_CMD_FILTER_OP_IN_HANDLE_OFST +
3592                          MC_CMD_FILTER_OP_IN_HANDLE_LEN);
3593         struct efx_filter_spec *spec;
3594         DEFINE_WAIT(wait);
3595         int rc;
3596
3597         /* Find the software table entry and mark it busy.  Don't
3598          * remove it yet; any attempt to update while we're waiting
3599          * for the firmware must find the busy entry.
3600          */
3601         for (;;) {
3602                 spin_lock_bh(&efx->filter_lock);
3603                 if (!(table->entry[filter_idx].spec &
3604                       EFX_EF10_FILTER_FLAG_BUSY))
3605                         break;
3606                 prepare_to_wait(&table->waitq, &wait, TASK_UNINTERRUPTIBLE);
3607                 spin_unlock_bh(&efx->filter_lock);
3608                 schedule();
3609         }
3610
3611         spec = efx_ef10_filter_entry_spec(table, filter_idx);
3612         if (!spec ||
3613             (!by_index &&
3614              efx_ef10_filter_pri(table, spec) !=
3615              filter_id / HUNT_FILTER_TBL_ROWS)) {
3616                 rc = -ENOENT;
3617                 goto out_unlock;
3618         }
3619
3620         if (spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO &&
3621             priority_mask == (1U << EFX_FILTER_PRI_AUTO)) {
3622                 /* Just remove flags */
3623                 spec->flags &= ~EFX_FILTER_FLAG_RX_OVER_AUTO;
3624                 table->entry[filter_idx].spec &= ~EFX_EF10_FILTER_FLAG_AUTO_OLD;
3625                 rc = 0;
3626                 goto out_unlock;
3627         }
3628
3629         if (!(priority_mask & (1U << spec->priority))) {
3630                 rc = -ENOENT;
3631                 goto out_unlock;
3632         }
3633
3634         table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_BUSY;
3635         spin_unlock_bh(&efx->filter_lock);
3636
3637         if (spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO) {
3638                 /* Reset to an automatic filter */
3639
3640                 struct efx_filter_spec new_spec = *spec;
3641
3642                 new_spec.priority = EFX_FILTER_PRI_AUTO;
3643                 new_spec.flags = (EFX_FILTER_FLAG_RX |
3644                                   (efx_rss_enabled(efx) ?
3645                                    EFX_FILTER_FLAG_RX_RSS : 0));
3646                 new_spec.dmaq_id = 0;
3647                 new_spec.rss_context = EFX_FILTER_RSS_CONTEXT_DEFAULT;
3648                 rc = efx_ef10_filter_push(efx, &new_spec,
3649                                           &table->entry[filter_idx].handle,
3650                                           true);
3651
3652                 spin_lock_bh(&efx->filter_lock);
3653                 if (rc == 0)
3654                         *spec = new_spec;
3655         } else {
3656                 /* Really remove the filter */
3657
3658                 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
3659                                efx_ef10_filter_is_exclusive(spec) ?
3660                                MC_CMD_FILTER_OP_IN_OP_REMOVE :
3661                                MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE);
3662                 MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
3663                                table->entry[filter_idx].handle);
3664                 rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP,
3665                                   inbuf, sizeof(inbuf), NULL, 0, NULL);
3666
3667                 spin_lock_bh(&efx->filter_lock);
3668                 if (rc == 0) {
3669                         kfree(spec);
3670                         efx_ef10_filter_set_entry(table, filter_idx, NULL, 0);
3671                 }
3672         }
3673
3674         table->entry[filter_idx].spec &= ~EFX_EF10_FILTER_FLAG_BUSY;
3675         wake_up_all(&table->waitq);
3676 out_unlock:
3677         spin_unlock_bh(&efx->filter_lock);
3678         finish_wait(&table->waitq, &wait);
3679         return rc;
3680 }
3681
3682 static int efx_ef10_filter_remove_safe(struct efx_nic *efx,
3683                                        enum efx_filter_priority priority,
3684                                        u32 filter_id)
3685 {
3686         return efx_ef10_filter_remove_internal(efx, 1U << priority,
3687                                                filter_id, false);
3688 }
3689
3690 static u32 efx_ef10_filter_get_unsafe_id(struct efx_nic *efx, u32 filter_id)
3691 {
3692         return filter_id % HUNT_FILTER_TBL_ROWS;
3693 }
3694
3695 static void efx_ef10_filter_remove_unsafe(struct efx_nic *efx,
3696                                           enum efx_filter_priority priority,
3697                                           u32 filter_id)
3698 {
3699         if (filter_id == EFX_EF10_FILTER_ID_INVALID)
3700                 return;
3701         efx_ef10_filter_remove_internal(efx, 1U << priority, filter_id, true);
3702 }
3703
3704 static int efx_ef10_filter_get_safe(struct efx_nic *efx,
3705                                     enum efx_filter_priority priority,
3706                                     u32 filter_id, struct efx_filter_spec *spec)
3707 {
3708         unsigned int filter_idx = filter_id % HUNT_FILTER_TBL_ROWS;
3709         struct efx_ef10_filter_table *table = efx->filter_state;
3710         const struct efx_filter_spec *saved_spec;
3711         int rc;
3712
3713         spin_lock_bh(&efx->filter_lock);
3714         saved_spec = efx_ef10_filter_entry_spec(table, filter_idx);
3715         if (saved_spec && saved_spec->priority == priority &&
3716             efx_ef10_filter_pri(table, saved_spec) ==
3717             filter_id / HUNT_FILTER_TBL_ROWS) {
3718                 *spec = *saved_spec;
3719                 rc = 0;
3720         } else {
3721                 rc = -ENOENT;
3722         }
3723         spin_unlock_bh(&efx->filter_lock);
3724         return rc;
3725 }
3726
3727 static int efx_ef10_filter_clear_rx(struct efx_nic *efx,
3728                                     enum efx_filter_priority priority)
3729 {
3730         unsigned int priority_mask;
3731         unsigned int i;
3732         int rc;
3733
3734         priority_mask = (((1U << (priority + 1)) - 1) &
3735                          ~(1U << EFX_FILTER_PRI_AUTO));
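        /* The mask selects every priority up to and including the
         * requested one, minus EFX_FILTER_PRI_AUTO, so the AUTO filters
         * backing the device address lists are never cleared from here.
         */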
3736
3737         for (i = 0; i < HUNT_FILTER_TBL_ROWS; i++) {
3738                 rc = efx_ef10_filter_remove_internal(efx, priority_mask,
3739                                                      i, true);
3740                 if (rc && rc != -ENOENT)
3741                         return rc;
3742         }
3743
3744         return 0;
3745 }
3746
3747 static u32 efx_ef10_filter_count_rx_used(struct efx_nic *efx,
3748                                          enum efx_filter_priority priority)
3749 {
3750         struct efx_ef10_filter_table *table = efx->filter_state;
3751         unsigned int filter_idx;
3752         s32 count = 0;
3753
3754         spin_lock_bh(&efx->filter_lock);
3755         for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
3756                 if (table->entry[filter_idx].spec &&
3757                     efx_ef10_filter_entry_spec(table, filter_idx)->priority ==
3758                     priority)
3759                         ++count;
3760         }
3761         spin_unlock_bh(&efx->filter_lock);
3762         return count;
3763 }
3764
3765 static u32 efx_ef10_filter_get_rx_id_limit(struct efx_nic *efx)
3766 {
3767         struct efx_ef10_filter_table *table = efx->filter_state;
3768
3769         return table->rx_match_count * HUNT_FILTER_TBL_ROWS;
3770 }
3771
3772 static s32 efx_ef10_filter_get_rx_ids(struct efx_nic *efx,
3773                                       enum efx_filter_priority priority,
3774                                       u32 *buf, u32 size)
3775 {
3776         struct efx_ef10_filter_table *table = efx->filter_state;
3777         struct efx_filter_spec *spec;
3778         unsigned int filter_idx;
3779         s32 count = 0;
3780
3781         spin_lock_bh(&efx->filter_lock);
3782         for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
3783                 spec = efx_ef10_filter_entry_spec(table, filter_idx);
3784                 if (spec && spec->priority == priority) {
3785                         if (count == size) {
3786                                 count = -EMSGSIZE;
3787                                 break;
3788                         }
3789                         buf[count++] = (efx_ef10_filter_pri(table, spec) *
3790                                         HUNT_FILTER_TBL_ROWS +
3791                                         filter_idx);
3792                 }
3793         }
3794         spin_unlock_bh(&efx->filter_lock);
3795         return count;
3796 }
3797
3798 #ifdef CONFIG_RFS_ACCEL
3799
3800 static efx_mcdi_async_completer efx_ef10_filter_rfs_insert_complete;
3801
3802 static s32 efx_ef10_filter_rfs_insert(struct efx_nic *efx,
3803                                       struct efx_filter_spec *spec)
3804 {
3805         struct efx_ef10_filter_table *table = efx->filter_state;
3806         MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN);
3807         struct efx_filter_spec *saved_spec;
3808         unsigned int hash, i, depth = 1;
3809         bool replacing = false;
3810         int ins_index = -1;
3811         u64 cookie;
3812         s32 rc;
3813
3814         /* Must be an RX filter without RSS and not for a multicast
3815          * destination address (RFS only works for connected sockets).
3816          * These restrictions allow us to pass only a tiny amount of
3817          * data through to the completion function.
3818          */
3819         EFX_WARN_ON_PARANOID(spec->flags !=
3820                              (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_RX_SCATTER));
3821         EFX_WARN_ON_PARANOID(spec->priority != EFX_FILTER_PRI_HINT);
3822         EFX_WARN_ON_PARANOID(efx_filter_is_mc_recipient(spec));
3823
3824         hash = efx_ef10_filter_hash(spec);
3825
3826         spin_lock_bh(&efx->filter_lock);
3827
3828         /* Find any existing filter with the same match tuple or else
3829          * a free slot to insert at.  If an existing filter is busy,
3830          * we have to give up.
3831          */
3832         for (;;) {
3833                 i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1);
3834                 saved_spec = efx_ef10_filter_entry_spec(table, i);
3835
3836                 if (!saved_spec) {
3837                         if (ins_index < 0)
3838                                 ins_index = i;
3839                 } else if (efx_ef10_filter_equal(spec, saved_spec)) {
3840                         if (table->entry[i].spec & EFX_EF10_FILTER_FLAG_BUSY) {
3841                                 rc = -EBUSY;
3842                                 goto fail_unlock;
3843                         }
3844                         if (spec->priority < saved_spec->priority) {
3845                                 rc = -EPERM;
3846                                 goto fail_unlock;
3847                         }
3848                         ins_index = i;
3849                         break;
3850                 }
3851
3852                 /* Once we reach the maximum search depth, use the
3853                  * first suitable slot or return -EBUSY if there was
3854                  * none
3855                  */
3856                 if (depth == EFX_EF10_FILTER_SEARCH_LIMIT) {
3857                         if (ins_index < 0) {
3858                                 rc = -EBUSY;
3859                                 goto fail_unlock;
3860                         }
3861                         break;
3862                 }
3863
3864                 ++depth;
3865         }
3866
3867         /* Create a software table entry if necessary, and mark it
3868          * busy.  We might yet fail to insert, but any attempt to
3869          * insert a conflicting filter while we're waiting for the
3870          * firmware must find the busy entry.
3871          */
3872         saved_spec = efx_ef10_filter_entry_spec(table, ins_index);
3873         if (saved_spec) {
3874                 replacing = true;
3875         } else {
3876                 saved_spec = kmalloc(sizeof(*spec), GFP_ATOMIC);
3877                 if (!saved_spec) {
3878                         rc = -ENOMEM;
3879                         goto fail_unlock;
3880                 }
3881                 *saved_spec = *spec;
3882         }
3883         efx_ef10_filter_set_entry(table, ins_index, saved_spec,
3884                                   EFX_EF10_FILTER_FLAG_BUSY);
3885
3886         spin_unlock_bh(&efx->filter_lock);
3887
3888         /* Pack up the variables needed on completion */
3889         cookie = replacing << 31 | ins_index << 16 | spec->dmaq_id;
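        /* Cookie layout: bit 31 = replacing flag, bits 30:16 = software
         * table index (13 bits used), bits 15:0 = RX queue (dmaq_id).
         * The completion handler below unpacks the same fields.
         */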
3890
3891         efx_ef10_filter_push_prep(efx, spec, inbuf,
3892                                   table->entry[ins_index].handle, replacing);
3893         efx_mcdi_rpc_async(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf),
3894                            MC_CMD_FILTER_OP_OUT_LEN,
3895                            efx_ef10_filter_rfs_insert_complete, cookie);
3896
3897         return ins_index;
3898
3899 fail_unlock:
3900         spin_unlock_bh(&efx->filter_lock);
3901         return rc;
3902 }
3903
3904 static void
3905 efx_ef10_filter_rfs_insert_complete(struct efx_nic *efx, unsigned long cookie,
3906                                     int rc, efx_dword_t *outbuf,
3907                                     size_t outlen_actual)
3908 {
3909         struct efx_ef10_filter_table *table = efx->filter_state;
3910         unsigned int ins_index, dmaq_id;
3911         struct efx_filter_spec *spec;
3912         bool replacing;
3913
3914         /* Unpack the cookie */
3915         replacing = cookie >> 31;
3916         ins_index = (cookie >> 16) & (HUNT_FILTER_TBL_ROWS - 1);
3917         dmaq_id = cookie & 0xffff;
3918
3919         spin_lock_bh(&efx->filter_lock);
3920         spec = efx_ef10_filter_entry_spec(table, ins_index);
3921         if (rc == 0) {
3922                 table->entry[ins_index].handle =
3923                         MCDI_QWORD(outbuf, FILTER_OP_OUT_HANDLE);
3924                 if (replacing)
3925                         spec->dmaq_id = dmaq_id;
3926         } else if (!replacing) {
3927                 kfree(spec);
3928                 spec = NULL;
3929         }
3930         efx_ef10_filter_set_entry(table, ins_index, spec, 0);
3931         spin_unlock_bh(&efx->filter_lock);
3932
3933         wake_up_all(&table->waitq);
3934 }
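
/* Illustrative sketch, added commentary: the ARFS completion cookie
 * used above is a plain 32-bit bit-field, and the pack in
 * efx_ef10_filter_rfs_insert() and the unpack here are mirror images:
 *
 *	cookie = replacing << 31 | ins_index << 16 | dmaq_id;
 *
 *	replacing = cookie >> 31;                          (bit 31)
 *	ins_index = (cookie >> 16) &
 *		    (HUNT_FILTER_TBL_ROWS - 1);            (bits 28..16)
 *	dmaq_id   = cookie & 0xffff;                       (bits 15..0)
 *
 * This fits because ins_index < HUNT_FILTER_TBL_ROWS (8192, i.e. 13
 * bits) and the queue ID fits in the low 16 bits, so the three fields
 * never overlap.
 */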
3935
3936 static void
3937 efx_ef10_filter_rfs_expire_complete(struct efx_nic *efx,
3938                                     unsigned long filter_idx,
3939                                     int rc, efx_dword_t *outbuf,
3940                                     size_t outlen_actual);
3941
3942 static bool efx_ef10_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
3943                                            unsigned int filter_idx)
3944 {
3945         struct efx_ef10_filter_table *table = efx->filter_state;
3946         struct efx_filter_spec *spec =
3947                 efx_ef10_filter_entry_spec(table, filter_idx);
3948         MCDI_DECLARE_BUF(inbuf,
3949                          MC_CMD_FILTER_OP_IN_HANDLE_OFST +
3950                          MC_CMD_FILTER_OP_IN_HANDLE_LEN);
3951
3952         if (!spec ||
3953             (table->entry[filter_idx].spec & EFX_EF10_FILTER_FLAG_BUSY) ||
3954             spec->priority != EFX_FILTER_PRI_HINT ||
3955             !rps_may_expire_flow(efx->net_dev, spec->dmaq_id,
3956                                  flow_id, filter_idx))
3957                 return false;
3958
3959         MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
3960                        MC_CMD_FILTER_OP_IN_OP_REMOVE);
3961         MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
3962                        table->entry[filter_idx].handle);
3963         if (efx_mcdi_rpc_async(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf), 0,
3964                                efx_ef10_filter_rfs_expire_complete, filter_idx))
3965                 return false;
3966
3967         table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_BUSY;
3968         return true;
3969 }
3970
3971 static void
3972 efx_ef10_filter_rfs_expire_complete(struct efx_nic *efx,
3973                                     unsigned long filter_idx,
3974                                     int rc, efx_dword_t *outbuf,
3975                                     size_t outlen_actual)
3976 {
3977         struct efx_ef10_filter_table *table = efx->filter_state;
3978         struct efx_filter_spec *spec =
3979                 efx_ef10_filter_entry_spec(table, filter_idx);
3980
3981         spin_lock_bh(&efx->filter_lock);
3982         if (rc == 0) {
3983                 kfree(spec);
3984                 efx_ef10_filter_set_entry(table, filter_idx, NULL, 0);
3985         }
3986         table->entry[filter_idx].spec &= ~EFX_EF10_FILTER_FLAG_BUSY;
3987         wake_up_all(&table->waitq);
3988         spin_unlock_bh(&efx->filter_lock);
3989 }
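
/* Added commentary: the expiry path above uses
 * EFX_EF10_FILTER_FLAG_BUSY as a small asynchronous handshake around
 * the MCDI removal:
 *
 *	expire_one:       set BUSY, issue MC_CMD_FILTER_OP(REMOVE)
 *	expire_complete:  on success free the spec and clear the entry;
 *	                  in all cases clear BUSY and wake table->waitq
 *
 * While BUSY is set, other paths (including expire_one itself, see the
 * check at its top) leave the entry alone.
 */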
3990
3991 #endif /* CONFIG_RFS_ACCEL */
3992
3993 static int efx_ef10_filter_match_flags_from_mcdi(u32 mcdi_flags)
3994 {
3995         int match_flags = 0;
3996
3997 #define MAP_FLAG(gen_flag, mcdi_field) {                                \
3998                 u32 old_mcdi_flags = mcdi_flags;                        \
3999                 mcdi_flags &= ~(1 << MC_CMD_FILTER_OP_IN_MATCH_ ##      \
4000                                 mcdi_field ## _LBN);                    \
4001                 if (mcdi_flags != old_mcdi_flags)                       \
4002                         match_flags |= EFX_FILTER_MATCH_ ## gen_flag;   \
4003         }
4004         MAP_FLAG(LOC_MAC_IG, UNKNOWN_UCAST_DST);
4005         MAP_FLAG(LOC_MAC_IG, UNKNOWN_MCAST_DST);
4006         MAP_FLAG(REM_HOST, SRC_IP);
4007         MAP_FLAG(LOC_HOST, DST_IP);
4008         MAP_FLAG(REM_MAC, SRC_MAC);
4009         MAP_FLAG(REM_PORT, SRC_PORT);
4010         MAP_FLAG(LOC_MAC, DST_MAC);
4011         MAP_FLAG(LOC_PORT, DST_PORT);
4012         MAP_FLAG(ETHER_TYPE, ETHER_TYPE);
4013         MAP_FLAG(INNER_VID, INNER_VLAN);
4014         MAP_FLAG(OUTER_VID, OUTER_VLAN);
4015         MAP_FLAG(IP_PROTO, IP_PROTO);
4016 #undef MAP_FLAG
4017
4018         /* Did we map them all? */
4019         if (mcdi_flags)
4020                 return -EINVAL;
4021
4022         return match_flags;
4023 }
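
/* Worked example, added commentary: suppose the firmware reports a
 * match combination of SRC_IP, DST_IP and IP_PROTO.  Each MAP_FLAG
 * invocation clears one MCDI bit and, if the value changed, sets the
 * corresponding generic flag:
 *
 *	MAP_FLAG(REM_HOST, SRC_IP)   -> match_flags |= ..._REM_HOST
 *	MAP_FLAG(LOC_HOST, DST_IP)   -> match_flags |= ..._LOC_HOST
 *	MAP_FLAG(IP_PROTO, IP_PROTO) -> match_flags |= ..._IP_PROTO
 *
 * mcdi_flags ends up 0, so the combination is fully understood; any
 * leftover bit makes the function return -EINVAL instead.
 */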
4024
4025 static void efx_ef10_filter_cleanup_vlans(struct efx_nic *efx)
4026 {
4027         struct efx_ef10_filter_table *table = efx->filter_state;
4028         struct efx_ef10_filter_vlan *vlan, *next_vlan;
4029
4030         /* See comment in efx_ef10_filter_table_remove() */
4031         if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
4032                 return;
4033
4034         if (!table)
4035                 return;
4036
4037         list_for_each_entry_safe(vlan, next_vlan, &table->vlan_list, list)
4038                 efx_ef10_filter_del_vlan_internal(efx, vlan);
4039 }
4040
4041 static bool efx_ef10_filter_match_supported(struct efx_ef10_filter_table *table,
4042                                             enum efx_filter_match_flags match_flags)
4043 {
4044         unsigned int match_pri;
4045         int mf;
4046
4047         for (match_pri = 0;
4048              match_pri < table->rx_match_count;
4049              match_pri++) {
4050                 mf = efx_ef10_filter_match_flags_from_mcdi(
4051                                 table->rx_match_mcdi_flags[match_pri]);
4052                 if (mf == match_flags)
4053                         return true;
4054         }
4055
4056         return false;
4057 }
4058
4059 static int efx_ef10_filter_table_probe(struct efx_nic *efx)
4060 {
4061         MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_PARSER_DISP_INFO_IN_LEN);
4062         MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX);
4063         struct efx_ef10_nic_data *nic_data = efx->nic_data;
4064         struct net_device *net_dev = efx->net_dev;
4065         unsigned int pd_match_pri, pd_match_count;
4066         struct efx_ef10_filter_table *table;
4067         struct efx_ef10_vlan *vlan;
4068         size_t outlen;
4069         int rc;
4070
4071         if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
4072                 return -EINVAL;
4073
4074         if (efx->filter_state) /* already probed */
4075                 return 0;
4076
4077         table = kzalloc(sizeof(*table), GFP_KERNEL);
4078         if (!table)
4079                 return -ENOMEM;
4080
4081         /* Find out which RX filter types are supported, and their priorities */
4082         MCDI_SET_DWORD(inbuf, GET_PARSER_DISP_INFO_IN_OP,
4083                        MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES);
4084         rc = efx_mcdi_rpc(efx, MC_CMD_GET_PARSER_DISP_INFO,
4085                           inbuf, sizeof(inbuf), outbuf, sizeof(outbuf),
4086                           &outlen);
4087         if (rc)
4088                 goto fail;
4089         pd_match_count = MCDI_VAR_ARRAY_LEN(
4090                 outlen, GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES);
4091         table->rx_match_count = 0;
4092
4093         for (pd_match_pri = 0; pd_match_pri < pd_match_count; pd_match_pri++) {
4094                 u32 mcdi_flags =
4095                         MCDI_ARRAY_DWORD(
4096                                 outbuf,
4097                                 GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES,
4098                                 pd_match_pri);
4099                 rc = efx_ef10_filter_match_flags_from_mcdi(mcdi_flags);
4100                 if (rc < 0) {
4101                         netif_dbg(efx, probe, efx->net_dev,
4102                                   "%s: fw flags %#x pri %u not supported in driver\n",
4103                                   __func__, mcdi_flags, pd_match_pri);
4104                 } else {
4105                         netif_dbg(efx, probe, efx->net_dev,
4106                                   "%s: fw flags %#x pri %u supported as driver flags %#x pri %u\n",
4107                                   __func__, mcdi_flags, pd_match_pri,
4108                                   rc, table->rx_match_count);
4109                         table->rx_match_mcdi_flags[table->rx_match_count] = mcdi_flags;
4110                         table->rx_match_count++;
4111                 }
4112         }
4113
4114         if ((efx_supported_features(efx) & NETIF_F_HW_VLAN_CTAG_FILTER) &&
4115             !(efx_ef10_filter_match_supported(table,
4116                 (EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_LOC_MAC)) &&
4117               efx_ef10_filter_match_supported(table,
4118                 (EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_LOC_MAC_IG)))) {
4119                 netif_info(efx, probe, net_dev,
4120                            "VLAN filters are not supported in this firmware variant\n");
4121                 net_dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
4122                 efx->fixed_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
4123                 net_dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
4124         }
4125
4126         table->entry = vzalloc(HUNT_FILTER_TBL_ROWS * sizeof(*table->entry));
4127         if (!table->entry) {
4128                 rc = -ENOMEM;
4129                 goto fail;
4130         }
4131
4132         table->mc_promisc_last = false;
4133         table->vlan_filter =
4134                 !!(efx->net_dev->features & NETIF_F_HW_VLAN_CTAG_FILTER);
4135         INIT_LIST_HEAD(&table->vlan_list);
4136
4137         efx->filter_state = table;
4138         init_waitqueue_head(&table->waitq);
4139
4140         list_for_each_entry(vlan, &nic_data->vlan_list, list) {
4141                 rc = efx_ef10_filter_add_vlan(efx, vlan->vid);
4142                 if (rc)
4143                         goto fail_add_vlan;
4144         }
4145
4146         return 0;
4147
4148 fail_add_vlan:
4149         efx_ef10_filter_cleanup_vlans(efx);
4150         efx->filter_state = NULL;
4151 fail:
4152         kfree(table);
4153         return rc;
4154 }
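
/* Added commentary: the probe loop above preserves the firmware's
 * ordering, so rx_match_mcdi_flags[] is implicitly sorted by priority
 * and the array index doubles as the driver-side match priority.
 * efx_ef10_filter_match_supported() then reduces "is this match
 * combination usable?" to a linear scan, as in the VLAN check above:
 *
 *	if (!efx_ef10_filter_match_supported(table,
 *			EFX_FILTER_MATCH_OUTER_VID |
 *			EFX_FILTER_MATCH_LOC_MAC))
 *		... this firmware variant cannot filter on VLAN+MAC ...
 */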
4155
4156 /* Caller must hold efx->filter_sem for read if race against
4157  * efx_ef10_filter_table_remove() is possible
4158  */
4159 static void efx_ef10_filter_table_restore(struct efx_nic *efx)
4160 {
4161         struct efx_ef10_filter_table *table = efx->filter_state;
4162         struct efx_ef10_nic_data *nic_data = efx->nic_data;
4163         struct efx_filter_spec *spec;
4164         unsigned int filter_idx;
4165         bool failed = false;
4166         int rc;
4167
4168         WARN_ON(!rwsem_is_locked(&efx->filter_sem));
4169
4170         if (!nic_data->must_restore_filters)
4171                 return;
4172
4173         if (!table)
4174                 return;
4175
4176         spin_lock_bh(&efx->filter_lock);
4177
4178         for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
4179                 spec = efx_ef10_filter_entry_spec(table, filter_idx);
4180                 if (!spec)
4181                         continue;
4182
4183                 table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_BUSY;
4184                 spin_unlock_bh(&efx->filter_lock);
4185
4186                 rc = efx_ef10_filter_push(efx, spec,
4187                                           &table->entry[filter_idx].handle,
4188                                           false);
4189                 if (rc)
4190                         failed = true;
4191
4192                 spin_lock_bh(&efx->filter_lock);
4193                 if (rc) {
4194                         kfree(spec);
4195                         efx_ef10_filter_set_entry(table, filter_idx, NULL, 0);
4196                 } else {
4197                         table->entry[filter_idx].spec &=
4198                                 ~EFX_EF10_FILTER_FLAG_BUSY;
4199                 }
4200         }
4201
4202         spin_unlock_bh(&efx->filter_lock);
4203
4204         if (failed)
4205                 netif_err(efx, hw, efx->net_dev,
4206                           "unable to restore all filters\n");
4207         else
4208                 nic_data->must_restore_filters = false;
4209 }
4210
4211 static void efx_ef10_filter_table_remove(struct efx_nic *efx)
4212 {
4213         struct efx_ef10_filter_table *table = efx->filter_state;
4214         MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN);
4215         struct efx_filter_spec *spec;
4216         unsigned int filter_idx;
4217         int rc;
4218
4219         efx_ef10_filter_cleanup_vlans(efx);
4220         efx->filter_state = NULL;
4221         /* If we were called without locking, then it's not safe to free
4222          * the table as others might be using it.  So we just WARN, leak
4223          * the memory, and potentially get an inconsistent filter table
4224          * state.
4225          * This should never actually happen.
4226          */
4227         if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
4228                 return;
4229
4230         if (!table)
4231                 return;
4232
4233         for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
4234                 spec = efx_ef10_filter_entry_spec(table, filter_idx);
4235                 if (!spec)
4236                         continue;
4237
4238                 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
4239                                efx_ef10_filter_is_exclusive(spec) ?
4240                                MC_CMD_FILTER_OP_IN_OP_REMOVE :
4241                                MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE);
4242                 MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
4243                                table->entry[filter_idx].handle);
4244                 rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FILTER_OP, inbuf,
4245                                         sizeof(inbuf), NULL, 0, NULL);
4246                 if (rc)
4247                         netif_info(efx, drv, efx->net_dev,
4248                                    "%s: filter %04x remove failed\n",
4249                                    __func__, filter_idx);
4250                 kfree(spec);
4251         }
4252
4253         vfree(table->entry);
4254         kfree(table);
4255 }
4256
4257 static void efx_ef10_filter_mark_one_old(struct efx_nic *efx, uint16_t *id)
4258 {
4259         struct efx_ef10_filter_table *table = efx->filter_state;
4260         unsigned int filter_idx;
4261
4262         if (*id != EFX_EF10_FILTER_ID_INVALID) {
4263                 filter_idx = efx_ef10_filter_get_unsafe_id(efx, *id);
4264                 if (!table->entry[filter_idx].spec)
4265                         netif_dbg(efx, drv, efx->net_dev,
4266                                   "marked null spec old %04x:%04x\n", *id,
4267                                   filter_idx);
4268                 table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_AUTO_OLD;
4269                 *id = EFX_EF10_FILTER_ID_INVALID;
4270         }
4271 }
4272
4273 /* Mark old per-VLAN filters that may need to be removed */
4274 static void _efx_ef10_filter_vlan_mark_old(struct efx_nic *efx,
4275                                            struct efx_ef10_filter_vlan *vlan)
4276 {
4277         struct efx_ef10_filter_table *table = efx->filter_state;
4278         unsigned int i;
4279
4280         for (i = 0; i < table->dev_uc_count; i++)
4281                 efx_ef10_filter_mark_one_old(efx, &vlan->uc[i]);
4282         for (i = 0; i < table->dev_mc_count; i++)
4283                 efx_ef10_filter_mark_one_old(efx, &vlan->mc[i]);
4284         efx_ef10_filter_mark_one_old(efx, &vlan->ucdef);
4285         efx_ef10_filter_mark_one_old(efx, &vlan->bcast);
4286         efx_ef10_filter_mark_one_old(efx, &vlan->mcdef);
4287 }
4288
4289 /* Mark old filters that may need to be removed.
4290  * Caller must hold efx->filter_sem for read if race against
4291  * efx_ef10_filter_table_remove() is possible
4292  */
4293 static void efx_ef10_filter_mark_old(struct efx_nic *efx)
4294 {
4295         struct efx_ef10_filter_table *table = efx->filter_state;
4296         struct efx_ef10_filter_vlan *vlan;
4297
4298         spin_lock_bh(&efx->filter_lock);
4299         list_for_each_entry(vlan, &table->vlan_list, list)
4300                 _efx_ef10_filter_vlan_mark_old(efx, vlan);
4301         spin_unlock_bh(&efx->filter_lock);
4302 }
4303
4304 static void efx_ef10_filter_uc_addr_list(struct efx_nic *efx)
4305 {
4306         struct efx_ef10_filter_table *table = efx->filter_state;
4307         struct net_device *net_dev = efx->net_dev;
4308         struct netdev_hw_addr *uc;
4309         int addr_count;
4310         unsigned int i;
4311
4312         addr_count = netdev_uc_count(net_dev);
4313         table->uc_promisc = !!(net_dev->flags & IFF_PROMISC);
4314         table->dev_uc_count = 1 + addr_count;
4315         ether_addr_copy(table->dev_uc_list[0].addr, net_dev->dev_addr);
4316         i = 1;
4317         netdev_for_each_uc_addr(uc, net_dev) {
4318                 if (i >= EFX_EF10_FILTER_DEV_UC_MAX) {
4319                         table->uc_promisc = true;
4320                         break;
4321                 }
4322                 ether_addr_copy(table->dev_uc_list[i].addr, uc->addr);
4323                 i++;
4324         }
4325 }
4326
4327 static void efx_ef10_filter_mc_addr_list(struct efx_nic *efx)
4328 {
4329         struct efx_ef10_filter_table *table = efx->filter_state;
4330         struct net_device *net_dev = efx->net_dev;
4331         struct netdev_hw_addr *mc;
4332         unsigned int i, addr_count;
4333
4334         table->mc_promisc = !!(net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI));
4335
4336         addr_count = netdev_mc_count(net_dev);
4337         i = 0;
4338         netdev_for_each_mc_addr(mc, net_dev) {
4339                 if (i >= EFX_EF10_FILTER_DEV_MC_MAX) {
4340                         table->mc_promisc = true;
4341                         break;
4342                 }
4343                 ether_addr_copy(table->dev_mc_list[i].addr, mc->addr);
4344                 i++;
4345         }
4346
4347         table->dev_mc_count = i;
4348 }
4349
4350 static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx,
4351                                             struct efx_ef10_filter_vlan *vlan,
4352                                             bool multicast, bool rollback)
4353 {
4354         struct efx_ef10_filter_table *table = efx->filter_state;
4355         struct efx_ef10_dev_addr *addr_list;
4356         enum efx_filter_flags filter_flags;
4357         struct efx_filter_spec spec;
4358         u8 baddr[ETH_ALEN];
4359         unsigned int i, j;
4360         int addr_count;
4361         u16 *ids;
4362         int rc;
4363
4364         if (multicast) {
4365                 addr_list = table->dev_mc_list;
4366                 addr_count = table->dev_mc_count;
4367                 ids = vlan->mc;
4368         } else {
4369                 addr_list = table->dev_uc_list;
4370                 addr_count = table->dev_uc_count;
4371                 ids = vlan->uc;
4372         }
4373
4374         filter_flags = efx_rss_enabled(efx) ? EFX_FILTER_FLAG_RX_RSS : 0;
4375
4376         /* Insert/renew filters */
4377         for (i = 0; i < addr_count; i++) {
4378                 efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0);
4379                 efx_filter_set_eth_local(&spec, vlan->vid, addr_list[i].addr);
4380                 rc = efx_ef10_filter_insert(efx, &spec, true);
4381                 if (rc < 0) {
4382                         if (rollback) {
4383                                 netif_info(efx, drv, efx->net_dev,
4384                                            "efx_ef10_filter_insert failed rc=%d\n",
4385                                            rc);
4386                                 /* Fall back to promiscuous */
4387                                 for (j = 0; j < i; j++) {
4388                                         efx_ef10_filter_remove_unsafe(
4389                                                 efx, EFX_FILTER_PRI_AUTO,
4390                                                 ids[j]);
4391                                         ids[j] = EFX_EF10_FILTER_ID_INVALID;
4392                                 }
4393                                 return rc;
4394                         } else {
4395                                 /* mark as not inserted, and carry on */
4396                                 rc = EFX_EF10_FILTER_ID_INVALID;
4397                         }
4398                 }
4399                 ids[i] = efx_ef10_filter_get_unsafe_id(efx, rc);
4400         }
4401
4402         if (multicast && rollback) {
4403                 /* Also need an Ethernet broadcast filter */
4404                 efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0);
4405                 eth_broadcast_addr(baddr);
4406                 efx_filter_set_eth_local(&spec, vlan->vid, baddr);
4407                 rc = efx_ef10_filter_insert(efx, &spec, true);
4408                 if (rc < 0) {
4409                         netif_warn(efx, drv, efx->net_dev,
4410                                    "Broadcast filter insert failed rc=%d\n", rc);
4411                         /* Fall back to promiscuous */
4412                         for (j = 0; j < i; j++) {
4413                                 efx_ef10_filter_remove_unsafe(
4414                                         efx, EFX_FILTER_PRI_AUTO,
4415                                         ids[j]);
4416                                 ids[j] = EFX_EF10_FILTER_ID_INVALID;
4417                         }
4418                         return rc;
4419                 } else {
4420                         EFX_WARN_ON_PARANOID(vlan->bcast !=
4421                                              EFX_EF10_FILTER_ID_INVALID);
4422                         vlan->bcast = efx_ef10_filter_get_unsafe_id(efx, rc);
4423                 }
4424         }
4425
4426         return 0;
4427 }
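
/* Behaviour summary, added commentary, for the helper above:
 *
 *	rollback == true:  any insertion failure removes the filters
 *	                   inserted so far, resets their IDs to
 *	                   EFX_EF10_FILTER_ID_INVALID and returns the
 *	                   error to the caller;
 *	rollback == false: a failed address is simply recorded as
 *	                   EFX_EF10_FILTER_ID_INVALID and insertion
 *	                   carries on with the remaining addresses.
 *
 * In the multicast && rollback case an Ethernet broadcast filter is
 * inserted as well, under the same all-or-nothing rule.
 */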
4428
4429 static int efx_ef10_filter_insert_def(struct efx_nic *efx,
4430                                       struct efx_ef10_filter_vlan *vlan,
4431                                       bool multicast, bool rollback)
4432 {
4433         struct efx_ef10_nic_data *nic_data = efx->nic_data;
4434         enum efx_filter_flags filter_flags;
4435         struct efx_filter_spec spec;
4436         u8 baddr[ETH_ALEN];
4437         int rc;
4438
4439         filter_flags = efx_rss_enabled(efx) ? EFX_FILTER_FLAG_RX_RSS : 0;
4440
4441         efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0);
4442
4443         if (multicast)
4444                 efx_filter_set_mc_def(&spec);
4445         else
4446                 efx_filter_set_uc_def(&spec);
4447
4448         if (vlan->vid != EFX_FILTER_VID_UNSPEC)
4449                 efx_filter_set_eth_local(&spec, vlan->vid, NULL);
4450
4451         rc = efx_ef10_filter_insert(efx, &spec, true);
4452         if (rc < 0) {
4453                 netif_printk(efx, drv, rc == -EPERM ? KERN_DEBUG : KERN_WARNING,
4454                              efx->net_dev,
4455                              "%scast mismatch filter insert failed rc=%d\n",
4456                              multicast ? "Multi" : "Uni", rc);
4457         } else if (multicast) {
4458                 EFX_WARN_ON_PARANOID(vlan->mcdef != EFX_EF10_FILTER_ID_INVALID);
4459                 vlan->mcdef = efx_ef10_filter_get_unsafe_id(efx, rc);
4460                 if (!nic_data->workaround_26807) {
4461                         /* Also need an Ethernet broadcast filter */
4462                         efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
4463                                            filter_flags, 0);
4464                         eth_broadcast_addr(baddr);
4465                         efx_filter_set_eth_local(&spec, vlan->vid, baddr);
4466                         rc = efx_ef10_filter_insert(efx, &spec, true);
4467                         if (rc < 0) {
4468                                 netif_warn(efx, drv, efx->net_dev,
4469                                            "Broadcast filter insert failed rc=%d\n",
4470                                            rc);
4471                                 if (rollback) {
4472                                         /* Roll back the mc_def filter */
4473                                         efx_ef10_filter_remove_unsafe(
4474                                                         efx, EFX_FILTER_PRI_AUTO,
4475                                                         vlan->mcdef);
4476                                         vlan->mcdef = EFX_EF10_FILTER_ID_INVALID;
4477                                         return rc;
4478                                 }
4479                         } else {
4480                                 EFX_WARN_ON_PARANOID(vlan->bcast !=
4481                                                      EFX_EF10_FILTER_ID_INVALID);
4482                                 vlan->bcast = efx_ef10_filter_get_unsafe_id(efx, rc);
4483                         }
4484                 }
4485                 rc = 0;
4486         } else {
4487                 EFX_WARN_ON_PARANOID(vlan->ucdef != EFX_EF10_FILTER_ID_INVALID);
4488                 vlan->ucdef = rc;
4489                 rc = 0;
4490         }
4491         return rc;
4492 }
4493
4494 /* Remove filters that weren't renewed.  Since nothing else changes the AUTO_OLD
4495  * flag or removes these filters, we don't need to hold the filter_lock while
4496  * scanning for these filters.
4497  */
4498 static void efx_ef10_filter_remove_old(struct efx_nic *efx)
4499 {
4500         struct efx_ef10_filter_table *table = efx->filter_state;
4501         int remove_failed = 0;
4502         int remove_noent = 0;
4503         int rc;
4504         int i;
4505
4506         for (i = 0; i < HUNT_FILTER_TBL_ROWS; i++) {
4507                 if (ACCESS_ONCE(table->entry[i].spec) &
4508                     EFX_EF10_FILTER_FLAG_AUTO_OLD) {
4509                         rc = efx_ef10_filter_remove_internal(efx,
4510                                         1U << EFX_FILTER_PRI_AUTO, i, true);
4511                         if (rc == -ENOENT)
4512                                 remove_noent++;
4513                         else if (rc)
4514                                 remove_failed++;
4515                 }
4516         }
4517
4518         if (remove_failed)
4519                 netif_info(efx, drv, efx->net_dev,
4520                            "%s: failed to remove %d filters\n",
4521                            __func__, remove_failed);
4522         if (remove_noent)
4523                 netif_info(efx, drv, efx->net_dev,
4524                            "%s: failed to remove %d non-existent filters\n",
4525                            __func__, remove_noent);
4526 }
4527
4528 static int efx_ef10_vport_set_mac_address(struct efx_nic *efx)
4529 {
4530         struct efx_ef10_nic_data *nic_data = efx->nic_data;
4531         u8 mac_old[ETH_ALEN];
4532         int rc, rc2;
4533
4534         /* Only reconfigure a PF-created vport */
4535         if (is_zero_ether_addr(nic_data->vport_mac))
4536                 return 0;
4537
4538         efx_device_detach_sync(efx);
4539         efx_net_stop(efx->net_dev);
4540         down_write(&efx->filter_sem);
4541         efx_ef10_filter_table_remove(efx);
4542         up_write(&efx->filter_sem);
4543
4544         rc = efx_ef10_vadaptor_free(efx, nic_data->vport_id);
4545         if (rc)
4546                 goto restore_filters;
4547
4548         ether_addr_copy(mac_old, nic_data->vport_mac);
4549         rc = efx_ef10_vport_del_mac(efx, nic_data->vport_id,
4550                                     nic_data->vport_mac);
4551         if (rc)
4552                 goto restore_vadaptor;
4553
4554         rc = efx_ef10_vport_add_mac(efx, nic_data->vport_id,
4555                                     efx->net_dev->dev_addr);
4556         if (!rc) {
4557                 ether_addr_copy(nic_data->vport_mac, efx->net_dev->dev_addr);
4558         } else {
4559                 rc2 = efx_ef10_vport_add_mac(efx, nic_data->vport_id, mac_old);
4560                 if (rc2) {
4561                         /* Failed to add original MAC, so clear vport_mac */
4562                         eth_zero_addr(nic_data->vport_mac);
4563                         goto reset_nic;
4564                 }
4565         }
4566
4567 restore_vadaptor:
4568         rc2 = efx_ef10_vadaptor_alloc(efx, nic_data->vport_id);
4569         if (rc2)
4570                 goto reset_nic;
4571 restore_filters:
4572         down_write(&efx->filter_sem);
4573         rc2 = efx_ef10_filter_table_probe(efx);
4574         up_write(&efx->filter_sem);
4575         if (rc2)
4576                 goto reset_nic;
4577
4578         rc2 = efx_net_open(efx->net_dev);
4579         if (rc2)
4580                 goto reset_nic;
4581
4582         netif_device_attach(efx->net_dev);
4583
4584         return rc;
4585
4586 reset_nic:
4587         netif_err(efx, drv, efx->net_dev,
4588                   "Failed to restore when changing MAC address - scheduling reset\n");
4589         efx_schedule_reset(efx, RESET_TYPE_DATAPATH);
4590
4591         return rc ? rc : rc2;
4592 }
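
/* Recovery ladder, added commentary: each failing step above jumps to
 * the label that undoes the steps already taken, so the device is left
 * usable whenever possible:
 *
 *	vadaptor_free fails    -> restore_filters
 *	vport_del_mac fails    -> restore_vadaptor (then restore_filters)
 *	vport_add_mac fails    -> re-add the old MAC; if even that fails,
 *	                          reset_nic
 *
 * Only when restoration itself fails is a RESET_TYPE_DATAPATH reset
 * scheduled as a last resort.
 */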
4593
4594 /* Caller must hold efx->filter_sem for read if race against
4595  * efx_ef10_filter_table_remove() is possible
4596  */
4597 static void efx_ef10_filter_vlan_sync_rx_mode(struct efx_nic *efx,
4598                                               struct efx_ef10_filter_vlan *vlan)
4599 {
4600         struct efx_ef10_filter_table *table = efx->filter_state;
4601         struct efx_ef10_nic_data *nic_data = efx->nic_data;
4602
4603         /* Do not install unspecified-VID filters if VLAN filtering is
4604          * enabled; do not install specified-VID filters if it is disabled.
4605          */
4606         if ((vlan->vid == EFX_FILTER_VID_UNSPEC) == table->vlan_filter)
4607                 return;
4608
4609         /* Insert/renew unicast filters */
4610         if (table->uc_promisc) {
4611                 efx_ef10_filter_insert_def(efx, vlan, false, false);
4612                 efx_ef10_filter_insert_addr_list(efx, vlan, false, false);
4613         } else {
4614                 /* If any of the filters failed to insert, fall back to
4615                  * promiscuous mode - add in the uc_def filter.  But keep
4616                  * our individual unicast filters.
4617                  */
4618                 if (efx_ef10_filter_insert_addr_list(efx, vlan, false, false))
4619                         efx_ef10_filter_insert_def(efx, vlan, false, false);
4620         }
4621
4622         /* Insert/renew multicast filters */
4623         /* If changing promiscuous state with cascaded multicast filters, remove
4624          * old filters first, so that packets are dropped rather than duplicated
4625          */
4626         if (nic_data->workaround_26807 &&
4627             table->mc_promisc_last != table->mc_promisc)
4628                 efx_ef10_filter_remove_old(efx);
4629         if (table->mc_promisc) {
4630                 if (nic_data->workaround_26807) {
4631                         /* If we failed to insert promiscuous filters, roll
4632                          * back and fall back to individual multicast filters
4633                          */
4634                         if (efx_ef10_filter_insert_def(efx, vlan, true, true)) {
4635                                 /* Changing promisc state, so remove old filters */
4636                                 efx_ef10_filter_remove_old(efx);
4637                                 efx_ef10_filter_insert_addr_list(efx, vlan,
4638                                                                  true, false);
4639                         }
4640                 } else {
4641                         /* If we failed to insert promiscuous filters, don't
4642                          * roll back.  Regardless, also insert the mc_list
4643                          */
4644                         efx_ef10_filter_insert_def(efx, vlan, true, false);
4645                         efx_ef10_filter_insert_addr_list(efx, vlan, true, false);
4646                 }
4647         } else {
4648                 /* If any filters failed to insert, roll back and fall back to
4649                  * promiscuous mode - mc_def filter and maybe broadcast.  If
4650                  * that fails, roll back again and insert as many of our
4651                  * individual multicast filters as we can.
4652                  */
4653                 if (efx_ef10_filter_insert_addr_list(efx, vlan, true, true)) {
4654                         /* Changing promisc state, so remove old filters */
4655                         if (nic_data->workaround_26807)
4656                                 efx_ef10_filter_remove_old(efx);
4657                         if (efx_ef10_filter_insert_def(efx, vlan, true, true))
4658                                 efx_ef10_filter_insert_addr_list(efx, vlan,
4659                                                                  true, false);
4660                 }
4661         }
4662 }
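
/* Decision summary, added commentary, for the multicast half above:
 *
 *	mc_promisc && workaround_26807:  try mc_def with rollback; on
 *	    failure remove the old filters and fall back to the mc_list.
 *	mc_promisc, no workaround:       insert mc_def *and* the mc_list,
 *	    ignoring individual failures.
 *	!mc_promisc:                     try the mc_list with rollback; on
 *	    failure fall back to mc_def, then to the mc_list without
 *	    rollback as a last resort.
 */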
4663
4664 /* Caller must hold efx->filter_sem for read if race against
4665  * efx_ef10_filter_table_remove() is possible
4666  */
4667 static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx)
4668 {
4669         struct efx_ef10_filter_table *table = efx->filter_state;
4670         struct net_device *net_dev = efx->net_dev;
4671         struct efx_ef10_filter_vlan *vlan;
4672         bool vlan_filter;
4673
4674         if (!efx_dev_registered(efx))
4675                 return;
4676
4677         if (!table)
4678                 return;
4679
4680         efx_ef10_filter_mark_old(efx);
4681
4682         /* Copy/convert the address lists; add the primary station
4683          * address and broadcast address
4684          */
4685         netif_addr_lock_bh(net_dev);
4686         efx_ef10_filter_uc_addr_list(efx);
4687         efx_ef10_filter_mc_addr_list(efx);
4688         netif_addr_unlock_bh(net_dev);
4689
4690         /* If VLAN filtering changes, all old filters are finally removed.
4691          * Do it in advance to avoid conflicts for unicast untagged and
4692          * VLAN 0 tagged filters.
4693          */
4694         vlan_filter = !!(net_dev->features & NETIF_F_HW_VLAN_CTAG_FILTER);
4695         if (table->vlan_filter != vlan_filter) {
4696                 table->vlan_filter = vlan_filter;
4697                 efx_ef10_filter_remove_old(efx);
4698         }
4699
4700         list_for_each_entry(vlan, &table->vlan_list, list)
4701                 efx_ef10_filter_vlan_sync_rx_mode(efx, vlan);
4702
4703         efx_ef10_filter_remove_old(efx);
4704         table->mc_promisc_last = table->mc_promisc;
4705 }
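
/* Lifecycle sketch, added commentary: the sync above is a classic
 * mark-and-sweep, performed in this order:
 *
 *	efx_ef10_filter_mark_old()          mark every auto filter
 *	                                    AUTO_OLD
 *	efx_ef10_filter_uc/mc_addr_list()   snapshot the device lists
 *	efx_ef10_filter_vlan_sync_rx_mode() renew: filters re-inserted
 *	                                    here get AUTO_OLD cleared by
 *	                                    the insert path
 *	efx_ef10_filter_remove_old()        sweep: whatever is still
 *	                                    AUTO_OLD was not renewed and
 *	                                    is removed
 */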
4706
4707 static struct efx_ef10_filter_vlan *efx_ef10_filter_find_vlan(struct efx_nic *efx, u16 vid)
4708 {
4709         struct efx_ef10_filter_table *table = efx->filter_state;
4710         struct efx_ef10_filter_vlan *vlan;
4711
4712         WARN_ON(!rwsem_is_locked(&efx->filter_sem));
4713
4714         list_for_each_entry(vlan, &table->vlan_list, list) {
4715                 if (vlan->vid == vid)
4716                         return vlan;
4717         }
4718
4719         return NULL;
4720 }
4721
4722 static int efx_ef10_filter_add_vlan(struct efx_nic *efx, u16 vid)
4723 {
4724         struct efx_ef10_filter_table *table = efx->filter_state;
4725         struct efx_ef10_filter_vlan *vlan;
4726         unsigned int i;
4727
4728         if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
4729                 return -EINVAL;
4730
4731         vlan = efx_ef10_filter_find_vlan(efx, vid);
4732         if (WARN_ON(vlan)) {
4733                 netif_err(efx, drv, efx->net_dev,
4734                           "VLAN %u already added\n", vid);
4735                 return -EALREADY;
4736         }
4737
4738         vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
4739         if (!vlan)
4740                 return -ENOMEM;
4741
4742         vlan->vid = vid;
4743
4744         for (i = 0; i < ARRAY_SIZE(vlan->uc); i++)
4745                 vlan->uc[i] = EFX_EF10_FILTER_ID_INVALID;
4746         for (i = 0; i < ARRAY_SIZE(vlan->mc); i++)
4747                 vlan->mc[i] = EFX_EF10_FILTER_ID_INVALID;
4748         vlan->ucdef = EFX_EF10_FILTER_ID_INVALID;
4749         vlan->bcast = EFX_EF10_FILTER_ID_INVALID;
4750         vlan->mcdef = EFX_EF10_FILTER_ID_INVALID;
4751
4752         list_add_tail(&vlan->list, &table->vlan_list);
4753
4754         if (efx_dev_registered(efx))
4755                 efx_ef10_filter_vlan_sync_rx_mode(efx, vlan);
4756
4757         return 0;
4758 }
4759
4760 static void efx_ef10_filter_del_vlan_internal(struct efx_nic *efx,
4761                                               struct efx_ef10_filter_vlan *vlan)
4762 {
4763         unsigned int i;
4764
4765         /* See comment in efx_ef10_filter_table_remove() */
4766         if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
4767                 return;
4768
4769         list_del(&vlan->list);
4770
4771         for (i = 0; i < ARRAY_SIZE(vlan->uc); i++)
4772                 efx_ef10_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO,
4773                                               vlan->uc[i]);
4774         for (i = 0; i < ARRAY_SIZE(vlan->mc); i++)
4775                 efx_ef10_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO,
4776                                               vlan->mc[i]);
4777         efx_ef10_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO, vlan->ucdef);
4778         efx_ef10_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO, vlan->bcast);
4779         efx_ef10_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO, vlan->mcdef);
4780
4781         kfree(vlan);
4782 }
4783
4784 static void efx_ef10_filter_del_vlan(struct efx_nic *efx, u16 vid)
4785 {
4786         struct efx_ef10_filter_vlan *vlan;
4787
4788         /* See comment in efx_ef10_filter_table_remove() */
4789         if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
4790                 return;
4791
4792         vlan = efx_ef10_filter_find_vlan(efx, vid);
4793         if (!vlan) {
4794                 netif_err(efx, drv, efx->net_dev,
4795                           "VLAN %u not found in filter state\n", vid);
4796                 return;
4797         }
4798
4799         efx_ef10_filter_del_vlan_internal(efx, vlan);
4800 }
4801
4802 static int efx_ef10_set_mac_address(struct efx_nic *efx)
4803 {
4804         MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_SET_MAC_IN_LEN);
4805         struct efx_ef10_nic_data *nic_data = efx->nic_data;
4806         bool was_enabled = efx->port_enabled;
4807         int rc;
4808
4809         efx_device_detach_sync(efx);
4810         efx_net_stop(efx->net_dev);
4811
4812         mutex_lock(&efx->mac_lock);
4813         down_write(&efx->filter_sem);
4814         efx_ef10_filter_table_remove(efx);
4815
4816         ether_addr_copy(MCDI_PTR(inbuf, VADAPTOR_SET_MAC_IN_MACADDR),
4817                         efx->net_dev->dev_addr);
4818         MCDI_SET_DWORD(inbuf, VADAPTOR_SET_MAC_IN_UPSTREAM_PORT_ID,
4819                        nic_data->vport_id);
4820         rc = efx_mcdi_rpc_quiet(efx, MC_CMD_VADAPTOR_SET_MAC, inbuf,
4821                                 sizeof(inbuf), NULL, 0, NULL);
4822
4823         efx_ef10_filter_table_probe(efx);
4824         up_write(&efx->filter_sem);
4825         mutex_unlock(&efx->mac_lock);
4826
4827         if (was_enabled)
4828                 efx_net_open(efx->net_dev);
4829         netif_device_attach(efx->net_dev);
4830
4831 #ifdef CONFIG_SFC_SRIOV
4832         if (efx->pci_dev->is_virtfn && efx->pci_dev->physfn) {
4833                 struct pci_dev *pci_dev_pf = efx->pci_dev->physfn;
4834
4835                 if (rc == -EPERM) {
4836                         struct efx_nic *efx_pf;
4837
4838                         /* Switch to PF and change MAC address on vport */
4839                         efx_pf = pci_get_drvdata(pci_dev_pf);
4840
4841                         rc = efx_ef10_sriov_set_vf_mac(efx_pf,
4842                                                        nic_data->vf_index,
4843                                                        efx->net_dev->dev_addr);
4844                 } else if (!rc) {
4845                         struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf);
4846                         struct efx_ef10_nic_data *nic_data = efx_pf->nic_data;
4847                         unsigned int i;
4848
4849                         /* MAC address successfully changed by VF (with MAC
4850                          * spoofing) so update the parent PF if possible.
4851                          */
4852                         for (i = 0; i < efx_pf->vf_count; ++i) {
4853                                 struct ef10_vf *vf = nic_data->vf + i;
4854
4855                                 if (vf->efx == efx) {
4856                                         ether_addr_copy(vf->mac,
4857                                                         efx->net_dev->dev_addr);
4858                                         return 0;
4859                                 }
4860                         }
4861                 }
4862         } else
4863 #endif
4864         if (rc == -EPERM) {
4865                 netif_err(efx, drv, efx->net_dev,
4866                           "Cannot change MAC address; use sfboot to enable"
4867                           " mac-spoofing on this interface\n");
4868         } else if (rc == -ENOSYS && !efx_ef10_is_vf(efx)) {
4869                 /* If the active MCFW does not support MC_CMD_VADAPTOR_SET_MAC,
4870                  * fall back to the method of changing the MAC address on the
4871                  * vport.  This only applies to PFs because such versions of
4872                  * MCFW do not support VFs.
4873                  */
4874                 rc = efx_ef10_vport_set_mac_address(efx);
4875         } else {
4876                 efx_mcdi_display_error(efx, MC_CMD_VADAPTOR_SET_MAC,
4877                                        sizeof(inbuf), NULL, 0, rc);
4878         }
4879
4880         return rc;
4881 }
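
/* Fallback ladder, added commentary, for MC_CMD_VADAPTOR_SET_MAC above:
 *
 *	rc == 0       on a VF, propagate the new MAC to the parent PF's
 *	              record of this VF where possible;
 *	rc == -EPERM  on a VF, ask the PF to change the MAC on the vport
 *	              via efx_ef10_sriov_set_vf_mac(); on a PF, tell the
 *	              user to enable mac-spoofing with sfboot;
 *	rc == -ENOSYS on a PF with old MCFW, fall back to
 *	              efx_ef10_vport_set_mac_address().
 */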
4882
4883 static int efx_ef10_mac_reconfigure(struct efx_nic *efx)
4884 {
4885         efx_ef10_filter_sync_rx_mode(efx);
4886
4887         return efx_mcdi_set_mac(efx);
4888 }
4889
4890 static int efx_ef10_mac_reconfigure_vf(struct efx_nic *efx)
4891 {
4892         efx_ef10_filter_sync_rx_mode(efx);
4893
4894         return 0;
4895 }
4896
4897 static int efx_ef10_start_bist(struct efx_nic *efx, u32 bist_type)
4898 {
4899         MCDI_DECLARE_BUF(inbuf, MC_CMD_START_BIST_IN_LEN);
4900
4901         MCDI_SET_DWORD(inbuf, START_BIST_IN_TYPE, bist_type);
4902         return efx_mcdi_rpc(efx, MC_CMD_START_BIST, inbuf, sizeof(inbuf),
4903                             NULL, 0, NULL);
4904 }
4905
4906 /* MC BISTs follow a different poll mechanism to phy BISTs.
4907  * The BIST is done in the poll handler on the MC, and the MCDI command
4908  * will block until the BIST is done.
4909  */
4910 static int efx_ef10_poll_bist(struct efx_nic *efx)
4911 {
4912         int rc;
4913         MCDI_DECLARE_BUF(outbuf, MC_CMD_POLL_BIST_OUT_LEN);
4914         size_t outlen;
4915         u32 result;
4916
4917         rc = efx_mcdi_rpc(efx, MC_CMD_POLL_BIST, NULL, 0,
4918                            outbuf, sizeof(outbuf), &outlen);
4919         if (rc != 0)
4920                 return rc;
4921
4922         if (outlen < MC_CMD_POLL_BIST_OUT_LEN)
4923                 return -EIO;
4924
4925         result = MCDI_DWORD(outbuf, POLL_BIST_OUT_RESULT);
4926         switch (result) {
4927         case MC_CMD_POLL_BIST_PASSED:
4928                 netif_dbg(efx, hw, efx->net_dev, "BIST passed.\n");
4929                 return 0;
4930         case MC_CMD_POLL_BIST_TIMEOUT:
4931                 netif_err(efx, hw, efx->net_dev, "BIST timed out\n");
4932                 return -EIO;
4933         case MC_CMD_POLL_BIST_FAILED:
4934                 netif_err(efx, hw, efx->net_dev, "BIST failed.\n");
4935                 return -EIO;
4936         default:
4937                 netif_err(efx, hw, efx->net_dev,
4938                           "BIST returned unknown result %u\n", result);
4939                 return -EIO;
4940         }
4941 }
4942
4943 static int efx_ef10_run_bist(struct efx_nic *efx, u32 bist_type)
4944 {
4945         int rc;
4946
4947         netif_dbg(efx, drv, efx->net_dev, "starting BIST type %u\n", bist_type);
4948
4949         rc = efx_ef10_start_bist(efx, bist_type);
4950         if (rc != 0)
4951                 return rc;
4952
4953         return efx_ef10_poll_bist(efx);
4954 }
4955
4956 static int
4957 efx_ef10_test_chip(struct efx_nic *efx, struct efx_self_tests *tests)
4958 {
4959         int rc, rc2;
4960
4961         efx_reset_down(efx, RESET_TYPE_WORLD);
4962
4963         rc = efx_mcdi_rpc(efx, MC_CMD_ENABLE_OFFLINE_BIST,
4964                           NULL, 0, NULL, 0, NULL);
4965         if (rc != 0)
4966                 goto out;
4967
4968         tests->memory = efx_ef10_run_bist(efx, MC_CMD_MC_MEM_BIST) ? -1 : 1;
4969         tests->registers = efx_ef10_run_bist(efx, MC_CMD_REG_BIST) ? -1 : 1;
4970
4971         rc = efx_mcdi_reset(efx, RESET_TYPE_WORLD);
4972
4973 out:
4974         if (rc == -EPERM)
4975                 rc = 0;
4976         rc2 = efx_reset_up(efx, RESET_TYPE_WORLD, rc == 0);
4977         return rc ? rc : rc2;
4978 }
4979
4980 #ifdef CONFIG_SFC_MTD
4981
4982 struct efx_ef10_nvram_type_info {
4983         u16 type, type_mask;
4984         u8 port;
4985         const char *name;
4986 };
4987
4988 static const struct efx_ef10_nvram_type_info efx_ef10_nvram_types[] = {
4989         { NVRAM_PARTITION_TYPE_MC_FIRMWARE,        0,    0, "sfc_mcfw" },
4990         { NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP, 0,    0, "sfc_mcfw_backup" },
4991         { NVRAM_PARTITION_TYPE_EXPANSION_ROM,      0,    0, "sfc_exp_rom" },
4992         { NVRAM_PARTITION_TYPE_STATIC_CONFIG,      0,    0, "sfc_static_cfg" },
4993         { NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG,     0,    0, "sfc_dynamic_cfg" },
4994         { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0, 0,   0, "sfc_exp_rom_cfg" },
4995         { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT1, 0,   1, "sfc_exp_rom_cfg" },
4996         { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT2, 0,   2, "sfc_exp_rom_cfg" },
4997         { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT3, 0,   3, "sfc_exp_rom_cfg" },
4998         { NVRAM_PARTITION_TYPE_LICENSE,            0,    0, "sfc_license" },
4999         { NVRAM_PARTITION_TYPE_PHY_MIN,            0xff, 0, "sfc_phy_fw" },
5000 };
5001
5002 static int efx_ef10_mtd_probe_partition(struct efx_nic *efx,
5003                                         struct efx_mcdi_mtd_partition *part,
5004                                         unsigned int type)
5005 {
5006         MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_METADATA_IN_LEN);
5007         MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_METADATA_OUT_LENMAX);
5008         const struct efx_ef10_nvram_type_info *info;
5009         size_t size, erase_size, outlen;
5010         bool protected;
5011         int rc;
5012
5013         for (info = efx_ef10_nvram_types; ; info++) {
5014                 if (info ==
5015                     efx_ef10_nvram_types + ARRAY_SIZE(efx_ef10_nvram_types))
5016                         return -ENODEV;
5017                 if ((type & ~info->type_mask) == info->type)
5018                         break;
5019         }
5020         if (info->port != efx_port_num(efx))
5021                 return -ENODEV;
5022
5023         rc = efx_mcdi_nvram_info(efx, type, &size, &erase_size, &protected);
5024         if (rc)
5025                 return rc;
5026         if (protected)
5027                 return -ENODEV; /* hide it */
5028
5029         part->nvram_type = type;
5030
5031         MCDI_SET_DWORD(inbuf, NVRAM_METADATA_IN_TYPE, type);
5032         rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_METADATA, inbuf, sizeof(inbuf),
5033                           outbuf, sizeof(outbuf), &outlen);
5034         if (rc)
5035                 return rc;
5036         if (outlen < MC_CMD_NVRAM_METADATA_OUT_LENMIN)
5037                 return -EIO;
5038         if (MCDI_DWORD(outbuf, NVRAM_METADATA_OUT_FLAGS) &
5039             (1 << MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_LBN))
5040                 part->fw_subtype = MCDI_DWORD(outbuf,
5041                                               NVRAM_METADATA_OUT_SUBTYPE);
5042
5043         part->common.dev_type_name = "EF10 NVRAM manager";
5044         part->common.type_name = info->name;
5045
5046         part->common.mtd.type = MTD_NORFLASH;
5047         part->common.mtd.flags = MTD_CAP_NORFLASH;
5048         part->common.mtd.size = size;
5049         part->common.mtd.erasesize = erase_size;
5050
5051         return 0;
5052 }
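
/* Worked example, added commentary (the concrete value is
 * hypothetical): matching uses (type & ~info->type_mask) == info->type,
 * so a mask of 0 demands an exact match while a non-zero mask accepts
 * a range, e.g. for the sfc_phy_fw entry with type_mask 0xff:
 *
 *	type = NVRAM_PARTITION_TYPE_PHY_MIN + 5
 *	(type & ~0xff) == NVRAM_PARTITION_TYPE_PHY_MIN   -> matches,
 *	    assuming PHY_MIN has its low byte clear
 *
 * so every per-PHY partition in that 0xff window maps to the single
 * "sfc_phy_fw" MTD name.
 */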
5053
5054 static int efx_ef10_mtd_probe(struct efx_nic *efx)
5055 {
5056         MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX);
5057         struct efx_mcdi_mtd_partition *parts;
5058         size_t outlen, n_parts_total, i, n_parts;
5059         unsigned int type;
5060         int rc;
5061
5062         ASSERT_RTNL();
5063
5064         BUILD_BUG_ON(MC_CMD_NVRAM_PARTITIONS_IN_LEN != 0);
5065         rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_PARTITIONS, NULL, 0,
5066                           outbuf, sizeof(outbuf), &outlen);
5067         if (rc)
5068                 return rc;
5069         if (outlen < MC_CMD_NVRAM_PARTITIONS_OUT_LENMIN)
5070                 return -EIO;
5071
5072         n_parts_total = MCDI_DWORD(outbuf, NVRAM_PARTITIONS_OUT_NUM_PARTITIONS);
5073         if (n_parts_total >
5074             MCDI_VAR_ARRAY_LEN(outlen, NVRAM_PARTITIONS_OUT_TYPE_ID))
5075                 return -EIO;
5076
5077         parts = kcalloc(n_parts_total, sizeof(*parts), GFP_KERNEL);
5078         if (!parts)
5079                 return -ENOMEM;
5080
5081         n_parts = 0;
5082         for (i = 0; i < n_parts_total; i++) {
5083                 type = MCDI_ARRAY_DWORD(outbuf, NVRAM_PARTITIONS_OUT_TYPE_ID,
5084                                         i);
5085                 rc = efx_ef10_mtd_probe_partition(efx, &parts[n_parts], type);
5086                 if (rc == 0)
5087                         n_parts++;
5088                 else if (rc != -ENODEV)
5089                         goto fail;
5090         }
5091
5092         rc = efx_mtd_add(efx, &parts[0].common, n_parts, sizeof(*parts));
5093 fail:
5094         if (rc)
5095                 kfree(parts);
5096         return rc;
5097 }
5098
5099 #endif /* CONFIG_SFC_MTD */
5100
5101 static void efx_ef10_ptp_write_host_time(struct efx_nic *efx, u32 host_time)
5102 {
5103         _efx_writed(efx, cpu_to_le32(host_time), ER_DZ_MC_DB_LWRD);
5104 }
5105
5106 static void efx_ef10_ptp_write_host_time_vf(struct efx_nic *efx,
5107                                             u32 host_time) {}
5108
5109 static int efx_ef10_rx_enable_timestamping(struct efx_channel *channel,
5110                                            bool temp)
5111 {
5112         MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_LEN);
5113         int rc;
5114
5115         if (channel->sync_events_state == SYNC_EVENTS_REQUESTED ||
5116             channel->sync_events_state == SYNC_EVENTS_VALID ||
5117             (temp && channel->sync_events_state == SYNC_EVENTS_DISABLED))
5118                 return 0;
5119         channel->sync_events_state = SYNC_EVENTS_REQUESTED;
5120
5121         MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_TIME_EVENT_SUBSCRIBE);
5122         MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
5123         MCDI_SET_DWORD(inbuf, PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE,
5124                        channel->channel);
5125
5126         rc = efx_mcdi_rpc(channel->efx, MC_CMD_PTP,
5127                           inbuf, sizeof(inbuf), NULL, 0, NULL);
5128
5129         if (rc != 0)
5130                 channel->sync_events_state = temp ? SYNC_EVENTS_QUIESCENT :
5131                                                     SYNC_EVENTS_DISABLED;
5132
5133         return rc;
5134 }
5135
5136 static int efx_ef10_rx_disable_timestamping(struct efx_channel *channel,
5137                                             bool temp)
5138 {
5139         MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_LEN);
5140         int rc;
5141
5142         if (channel->sync_events_state == SYNC_EVENTS_DISABLED ||
5143             (temp && channel->sync_events_state == SYNC_EVENTS_QUIESCENT))
5144                 return 0;
5145         if (channel->sync_events_state == SYNC_EVENTS_QUIESCENT) {
5146                 channel->sync_events_state = SYNC_EVENTS_DISABLED;
5147                 return 0;
5148         }
5149         channel->sync_events_state = temp ? SYNC_EVENTS_QUIESCENT :
5150                                             SYNC_EVENTS_DISABLED;
5151
5152         MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_TIME_EVENT_UNSUBSCRIBE);
5153         MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
5154         MCDI_SET_DWORD(inbuf, PTP_IN_TIME_EVENT_UNSUBSCRIBE_CONTROL,
5155                        MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_SINGLE);
5156         MCDI_SET_DWORD(inbuf, PTP_IN_TIME_EVENT_UNSUBSCRIBE_QUEUE,
5157                        channel->channel);
5158
5159         rc = efx_mcdi_rpc(channel->efx, MC_CMD_PTP,
5160                           inbuf, sizeof(inbuf), NULL, 0, NULL);
5161
5162         return rc;
5163 }
5164
5165 static int efx_ef10_ptp_set_ts_sync_events(struct efx_nic *efx, bool en,
5166                                            bool temp)
5167 {
5168         int (*set)(struct efx_channel *channel, bool temp);
5169         struct efx_channel *channel;
5170
5171         set = en ?
5172               efx_ef10_rx_enable_timestamping :
5173               efx_ef10_rx_disable_timestamping;
5174
5175         efx_for_each_channel(channel, efx) {
5176                 int rc = set(channel, temp);
5177                 if (en && rc != 0) {
5178                         efx_ef10_ptp_set_ts_sync_events(efx, false, temp);
5179                         return rc;
5180                 }
5181         }
5182
5183         return 0;
5184 }
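
/* State sketch, added commentary, for sync_events_state as driven by
 * the helpers above:
 *
 *	DISABLED/QUIESCENT --enable--> REQUESTED (--> VALID once time
 *	                               sync events are flowing)
 *	any state ---------disable--> DISABLED, or QUIESCENT if temp
 *
 * Note the rollback in efx_ef10_ptp_set_ts_sync_events(): if enabling
 * fails on any channel it re-invokes itself with en=false, so no
 * channel is left half-subscribed.
 */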
5185
5186 static int efx_ef10_ptp_set_ts_config_vf(struct efx_nic *efx,
5187                                          struct hwtstamp_config *init)
5188 {
5189         return -EOPNOTSUPP;
5190 }
5191
5192 static int efx_ef10_ptp_set_ts_config(struct efx_nic *efx,
5193                                       struct hwtstamp_config *init)
5194 {
5195         int rc;
5196
5197         switch (init->rx_filter) {
5198         case HWTSTAMP_FILTER_NONE:
5199                 efx_ef10_ptp_set_ts_sync_events(efx, false, false);
5200                 /* if TX timestamping is still requested then leave PTP on */
5201                 return efx_ptp_change_mode(efx,
5202                                            init->tx_type != HWTSTAMP_TX_OFF, 0);
5203         case HWTSTAMP_FILTER_ALL:
5204         case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
5205         case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
5206         case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
5207         case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
5208         case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
5209         case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
5210         case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
5211         case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
5212         case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
5213         case HWTSTAMP_FILTER_PTP_V2_EVENT:
5214         case HWTSTAMP_FILTER_PTP_V2_SYNC:
5215         case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
5216                 init->rx_filter = HWTSTAMP_FILTER_ALL;
5217                 rc = efx_ptp_change_mode(efx, true, 0);
5218                 if (!rc)
5219                         rc = efx_ef10_ptp_set_ts_sync_events(efx, true, false);
5220                 if (rc)
5221                         efx_ptp_change_mode(efx, false, 0);
5222                 return rc;
5223         default:
5224                 return -ERANGE;
5225         }
5226 }
5227
5228 static int efx_ef10_vlan_rx_add_vid(struct efx_nic *efx, __be16 proto, u16 vid)
5229 {
5230         if (proto != htons(ETH_P_8021Q))
5231                 return -EINVAL;
5232
5233         return efx_ef10_add_vlan(efx, vid);
5234 }
5235
5236 static int efx_ef10_vlan_rx_kill_vid(struct efx_nic *efx, __be16 proto, u16 vid)
5237 {
5238         if (proto != htons(ETH_P_8021Q))
5239                 return -EINVAL;
5240
5241         return efx_ef10_del_vlan(efx, vid);
5242 }
5243
5244 #define EF10_OFFLOAD_FEATURES           \
5245         (NETIF_F_IP_CSUM |              \
5246          NETIF_F_HW_VLAN_CTAG_FILTER |  \
5247          NETIF_F_IPV6_CSUM |            \
5248          NETIF_F_RXHASH |               \
5249          NETIF_F_NTUPLE)

const struct efx_nic_type efx_hunt_a0_vf_nic_type = {
        .is_vf = true,
        .mem_bar = EFX_MEM_VF_BAR,
        .mem_map_size = efx_ef10_mem_map_size,
        .probe = efx_ef10_probe_vf,
        .remove = efx_ef10_remove,
        .dimension_resources = efx_ef10_dimension_resources,
        .init = efx_ef10_init_nic,
        .fini = efx_port_dummy_op_void,
        .map_reset_reason = efx_ef10_map_reset_reason,
        .map_reset_flags = efx_ef10_map_reset_flags,
        .reset = efx_ef10_reset,
        .probe_port = efx_mcdi_port_probe,
        .remove_port = efx_mcdi_port_remove,
        .fini_dmaq = efx_ef10_fini_dmaq,
        .prepare_flr = efx_ef10_prepare_flr,
        .finish_flr = efx_port_dummy_op_void,
        .describe_stats = efx_ef10_describe_stats,
        .update_stats = efx_ef10_update_stats_vf,
        .start_stats = efx_port_dummy_op_void,
        .pull_stats = efx_port_dummy_op_void,
        .stop_stats = efx_port_dummy_op_void,
        .set_id_led = efx_mcdi_set_id_led,
        .push_irq_moderation = efx_ef10_push_irq_moderation,
        .reconfigure_mac = efx_ef10_mac_reconfigure_vf,
        .check_mac_fault = efx_mcdi_mac_check_fault,
        .reconfigure_port = efx_mcdi_port_reconfigure,
        .get_wol = efx_ef10_get_wol_vf,
        .set_wol = efx_ef10_set_wol_vf,
        .resume_wol = efx_port_dummy_op_void,
        .mcdi_request = efx_ef10_mcdi_request,
        .mcdi_poll_response = efx_ef10_mcdi_poll_response,
        .mcdi_read_response = efx_ef10_mcdi_read_response,
        .mcdi_poll_reboot = efx_ef10_mcdi_poll_reboot,
        .mcdi_reboot_detected = efx_ef10_mcdi_reboot_detected,
        .irq_enable_master = efx_port_dummy_op_void,
        .irq_test_generate = efx_ef10_irq_test_generate,
        .irq_disable_non_ev = efx_port_dummy_op_void,
        .irq_handle_msi = efx_ef10_msi_interrupt,
        .irq_handle_legacy = efx_ef10_legacy_interrupt,
        .tx_probe = efx_ef10_tx_probe,
        .tx_init = efx_ef10_tx_init,
        .tx_remove = efx_ef10_tx_remove,
        .tx_write = efx_ef10_tx_write,
        .rx_push_rss_config = efx_ef10_vf_rx_push_rss_config,
        .rx_probe = efx_ef10_rx_probe,
        .rx_init = efx_ef10_rx_init,
        .rx_remove = efx_ef10_rx_remove,
        .rx_write = efx_ef10_rx_write,
        .rx_defer_refill = efx_ef10_rx_defer_refill,
        .ev_probe = efx_ef10_ev_probe,
        .ev_init = efx_ef10_ev_init,
        .ev_fini = efx_ef10_ev_fini,
        .ev_remove = efx_ef10_ev_remove,
        .ev_process = efx_ef10_ev_process,
        .ev_read_ack = efx_ef10_ev_read_ack,
        .ev_test_generate = efx_ef10_ev_test_generate,
        .filter_table_probe = efx_ef10_filter_table_probe,
        .filter_table_restore = efx_ef10_filter_table_restore,
        .filter_table_remove = efx_ef10_filter_table_remove,
        .filter_update_rx_scatter = efx_ef10_filter_update_rx_scatter,
        .filter_insert = efx_ef10_filter_insert,
        .filter_remove_safe = efx_ef10_filter_remove_safe,
        .filter_get_safe = efx_ef10_filter_get_safe,
        .filter_clear_rx = efx_ef10_filter_clear_rx,
        .filter_count_rx_used = efx_ef10_filter_count_rx_used,
        .filter_get_rx_id_limit = efx_ef10_filter_get_rx_id_limit,
        .filter_get_rx_ids = efx_ef10_filter_get_rx_ids,
#ifdef CONFIG_RFS_ACCEL
        .filter_rfs_insert = efx_ef10_filter_rfs_insert,
        .filter_rfs_expire_one = efx_ef10_filter_rfs_expire_one,
#endif
#ifdef CONFIG_SFC_MTD
        .mtd_probe = efx_port_dummy_op_int,
#endif
        .ptp_write_host_time = efx_ef10_ptp_write_host_time_vf,
        .ptp_set_ts_config = efx_ef10_ptp_set_ts_config_vf,
        .vlan_rx_add_vid = efx_ef10_vlan_rx_add_vid,
        .vlan_rx_kill_vid = efx_ef10_vlan_rx_kill_vid,
#ifdef CONFIG_SFC_SRIOV
        .vswitching_probe = efx_ef10_vswitching_probe_vf,
        .vswitching_restore = efx_ef10_vswitching_restore_vf,
        .vswitching_remove = efx_ef10_vswitching_remove_vf,
        .sriov_get_phys_port_id = efx_ef10_sriov_get_phys_port_id,
#endif
        .get_mac_address = efx_ef10_get_mac_address_vf,
        .set_mac_address = efx_ef10_set_mac_address,

        .revision = EFX_REV_HUNT_A0,
        .max_dma_mask = DMA_BIT_MASK(ESF_DZ_TX_KER_BUF_ADDR_WIDTH),
        .rx_prefix_size = ES_DZ_RX_PREFIX_SIZE,
        .rx_hash_offset = ES_DZ_RX_PREFIX_HASH_OFST,
        .rx_ts_offset = ES_DZ_RX_PREFIX_TSTAMP_OFST,
        .can_rx_scatter = true,
        .always_rx_scatter = true,
        .max_interrupt_mode = EFX_INT_MODE_MSIX,
        .timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH,
        .offload_features = EF10_OFFLOAD_FEATURES,
        .mcdi_max_ver = 2,
        .max_rx_ip_filters = HUNT_FILTER_TBL_ROWS,
        .hwtstamp_filters = 1 << HWTSTAMP_FILTER_NONE |
                            1 << HWTSTAMP_FILTER_ALL,
};
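/* The VF type above differs from the PF type below mainly in what a VF may
 * not touch directly: MAC stats start/pull/stop are dummy ops (the PF owns
 * the MAC), there is no chip or NVRAM self-test, MTD access is stubbed out,
 * timestamp configuration is refused, and the SR-IOV hooks are the VF-side
 * vswitching variants.
 */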

const struct efx_nic_type efx_hunt_a0_nic_type = {
        .is_vf = false,
        .mem_bar = EFX_MEM_BAR,
        .mem_map_size = efx_ef10_mem_map_size,
        .probe = efx_ef10_probe_pf,
        .remove = efx_ef10_remove,
        .dimension_resources = efx_ef10_dimension_resources,
        .init = efx_ef10_init_nic,
        .fini = efx_port_dummy_op_void,
        .map_reset_reason = efx_ef10_map_reset_reason,
        .map_reset_flags = efx_ef10_map_reset_flags,
        .reset = efx_ef10_reset,
        .probe_port = efx_mcdi_port_probe,
        .remove_port = efx_mcdi_port_remove,
        .fini_dmaq = efx_ef10_fini_dmaq,
        .prepare_flr = efx_ef10_prepare_flr,
        .finish_flr = efx_port_dummy_op_void,
        .describe_stats = efx_ef10_describe_stats,
        .update_stats = efx_ef10_update_stats_pf,
        .start_stats = efx_mcdi_mac_start_stats,
        .pull_stats = efx_mcdi_mac_pull_stats,
        .stop_stats = efx_mcdi_mac_stop_stats,
        .set_id_led = efx_mcdi_set_id_led,
        .push_irq_moderation = efx_ef10_push_irq_moderation,
        .reconfigure_mac = efx_ef10_mac_reconfigure,
        .check_mac_fault = efx_mcdi_mac_check_fault,
        .reconfigure_port = efx_mcdi_port_reconfigure,
        .get_wol = efx_ef10_get_wol,
        .set_wol = efx_ef10_set_wol,
        .resume_wol = efx_port_dummy_op_void,
        .test_chip = efx_ef10_test_chip,
        .test_nvram = efx_mcdi_nvram_test_all,
        .mcdi_request = efx_ef10_mcdi_request,
        .mcdi_poll_response = efx_ef10_mcdi_poll_response,
        .mcdi_read_response = efx_ef10_mcdi_read_response,
        .mcdi_poll_reboot = efx_ef10_mcdi_poll_reboot,
        .mcdi_reboot_detected = efx_ef10_mcdi_reboot_detected,
        .irq_enable_master = efx_port_dummy_op_void,
        .irq_test_generate = efx_ef10_irq_test_generate,
        .irq_disable_non_ev = efx_port_dummy_op_void,
        .irq_handle_msi = efx_ef10_msi_interrupt,
        .irq_handle_legacy = efx_ef10_legacy_interrupt,
        .tx_probe = efx_ef10_tx_probe,
        .tx_init = efx_ef10_tx_init,
        .tx_remove = efx_ef10_tx_remove,
        .tx_write = efx_ef10_tx_write,
        .rx_push_rss_config = efx_ef10_pf_rx_push_rss_config,
        .rx_probe = efx_ef10_rx_probe,
        .rx_init = efx_ef10_rx_init,
        .rx_remove = efx_ef10_rx_remove,
        .rx_write = efx_ef10_rx_write,
        .rx_defer_refill = efx_ef10_rx_defer_refill,
        .ev_probe = efx_ef10_ev_probe,
        .ev_init = efx_ef10_ev_init,
        .ev_fini = efx_ef10_ev_fini,
        .ev_remove = efx_ef10_ev_remove,
        .ev_process = efx_ef10_ev_process,
        .ev_read_ack = efx_ef10_ev_read_ack,
        .ev_test_generate = efx_ef10_ev_test_generate,
        .filter_table_probe = efx_ef10_filter_table_probe,
        .filter_table_restore = efx_ef10_filter_table_restore,
        .filter_table_remove = efx_ef10_filter_table_remove,
        .filter_update_rx_scatter = efx_ef10_filter_update_rx_scatter,
        .filter_insert = efx_ef10_filter_insert,
        .filter_remove_safe = efx_ef10_filter_remove_safe,
        .filter_get_safe = efx_ef10_filter_get_safe,
        .filter_clear_rx = efx_ef10_filter_clear_rx,
        .filter_count_rx_used = efx_ef10_filter_count_rx_used,
        .filter_get_rx_id_limit = efx_ef10_filter_get_rx_id_limit,
        .filter_get_rx_ids = efx_ef10_filter_get_rx_ids,
#ifdef CONFIG_RFS_ACCEL
        .filter_rfs_insert = efx_ef10_filter_rfs_insert,
        .filter_rfs_expire_one = efx_ef10_filter_rfs_expire_one,
#endif
#ifdef CONFIG_SFC_MTD
        .mtd_probe = efx_ef10_mtd_probe,
        .mtd_rename = efx_mcdi_mtd_rename,
        .mtd_read = efx_mcdi_mtd_read,
        .mtd_erase = efx_mcdi_mtd_erase,
        .mtd_write = efx_mcdi_mtd_write,
        .mtd_sync = efx_mcdi_mtd_sync,
#endif
        .ptp_write_host_time = efx_ef10_ptp_write_host_time,
        .ptp_set_ts_sync_events = efx_ef10_ptp_set_ts_sync_events,
        .ptp_set_ts_config = efx_ef10_ptp_set_ts_config,
        .vlan_rx_add_vid = efx_ef10_vlan_rx_add_vid,
        .vlan_rx_kill_vid = efx_ef10_vlan_rx_kill_vid,
#ifdef CONFIG_SFC_SRIOV
        .sriov_configure = efx_ef10_sriov_configure,
        .sriov_init = efx_ef10_sriov_init,
        .sriov_fini = efx_ef10_sriov_fini,
        .sriov_wanted = efx_ef10_sriov_wanted,
        .sriov_reset = efx_ef10_sriov_reset,
        .sriov_flr = efx_ef10_sriov_flr,
        .sriov_set_vf_mac = efx_ef10_sriov_set_vf_mac,
        .sriov_set_vf_vlan = efx_ef10_sriov_set_vf_vlan,
        .sriov_set_vf_spoofchk = efx_ef10_sriov_set_vf_spoofchk,
        .sriov_get_vf_config = efx_ef10_sriov_get_vf_config,
        .sriov_set_vf_link_state = efx_ef10_sriov_set_vf_link_state,
        .vswitching_probe = efx_ef10_vswitching_probe_pf,
        .vswitching_restore = efx_ef10_vswitching_restore_pf,
        .vswitching_remove = efx_ef10_vswitching_remove_pf,
#endif
        .get_mac_address = efx_ef10_get_mac_address_pf,
        .set_mac_address = efx_ef10_set_mac_address,

        .revision = EFX_REV_HUNT_A0,
        .max_dma_mask = DMA_BIT_MASK(ESF_DZ_TX_KER_BUF_ADDR_WIDTH),
        .rx_prefix_size = ES_DZ_RX_PREFIX_SIZE,
        .rx_hash_offset = ES_DZ_RX_PREFIX_HASH_OFST,
        .rx_ts_offset = ES_DZ_RX_PREFIX_TSTAMP_OFST,
        .can_rx_scatter = true,
        .always_rx_scatter = true,
        .max_interrupt_mode = EFX_INT_MODE_MSIX,
        .timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH,
        .offload_features = EF10_OFFLOAD_FEATURES,
        .mcdi_max_ver = 2,
        .max_rx_ip_filters = HUNT_FILTER_TBL_ROWS,
        .hwtstamp_filters = 1 << HWTSTAMP_FILTER_NONE |
                            1 << HWTSTAMP_FILTER_ALL,
};
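/* Both NIC types advertise only HWTSTAMP_FILTER_NONE and HWTSTAMP_FILTER_ALL
 * in hwtstamp_filters, matching efx_ef10_ptp_set_ts_config() upgrading every
 * narrower PTP filter to ALL.  No parentheses are needed in the initialiser:
 * << binds more tightly than |.
 */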