/* QLogic qede NIC Driver
 * Copyright (c) 2015-2017  QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <net/udp_tunnel.h>
#include <linux/bitops.h>
#include <linux/vmalloc.h>

#include <linux/qed/qed_if.h>
#include "qede.h"

#ifdef CONFIG_RFS_ACCEL
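/* Accelerated RFS (aRFS) support.
 *
 * The core calls .ndo_rx_flow_steer (qede_rx_flow_steer() below) for each
 * flow it wants steered to a given RX queue. The driver builds an n-tuple
 * filter from the packet headers, DMA-maps a template of the packet and
 * hands it to the qed layer; completion is reported asynchronously through
 * qede_arfs_filter_op(). Stale filters are aged out from the sp_task via
 * qede_process_arfs_filters() and rps_may_expire_flow().
 *
 * For reference, aRFS is typically exercised from userspace roughly as
 * follows (interface name and table sizes are illustrative only):
 *
 *   ethtool -K eth0 ntuple on
 *   echo 32768 > /proc/sys/net/core/rps_sock_flow_entries
 *   echo 2048 > /sys/class/net/eth0/queues/rx-0/rps_flow_cnt
 */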
struct qede_arfs_tuple {
        union {
                __be32 src_ipv4;
                struct in6_addr src_ipv6;
        };
        union {
                __be32 dst_ipv4;
                struct in6_addr dst_ipv6;
        };
        __be16  src_port;
        __be16  dst_port;
        __be16  eth_proto;
        u8      ip_proto;
};

struct qede_arfs_fltr_node {
#define QEDE_FLTR_VALID  0
        unsigned long state;

        /* pointer to aRFS packet buffer */
        void *data;

        /* DMA mapping of the aRFS packet buffer */
        dma_addr_t mapping;

        /* length of aRFS packet buffer */
        int buf_len;

        /* tuple parsed out of the aRFS packet buffer */
        struct qede_arfs_tuple tuple;

        u32 flow_id;
        u16 sw_id;
        u16 rxq_id;
        u16 next_rxq_id;
        bool filter_op;
        bool used;
        struct hlist_node node;
};

struct qede_arfs {
#define QEDE_ARFS_POLL_COUNT    100
#define QEDE_RFS_FLW_BITSHIFT   (4)
#define QEDE_RFS_FLW_MASK       ((1 << QEDE_RFS_FLW_BITSHIFT) - 1)
        struct hlist_head       arfs_hl_head[1 << QEDE_RFS_FLW_BITSHIFT];

        /* lock for filter list access */
        spinlock_t              arfs_list_lock;
        unsigned long           *arfs_fltr_bmap;
        int                     filter_count;
        bool                    enable;
};

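/* Issue an add/delete request for @n to the qed layer on queue @rxq_id.
 * The request completes asynchronously in qede_arfs_filter_op(); ->used
 * marks the filter as having an operation in flight, so a busy filter is
 * never re-submitted.
 */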
static void qede_configure_arfs_fltr(struct qede_dev *edev,
                                     struct qede_arfs_fltr_node *n,
                                     u16 rxq_id, bool add_fltr)
{
        const struct qed_eth_ops *op = edev->ops;

        if (n->used)
                return;

        DP_VERBOSE(edev, NETIF_MSG_RX_STATUS,
                   "%s arfs filter flow_id=%d, sw_id=%d, src_port=%d, dst_port=%d, rxq=%d\n",
                   add_fltr ? "Adding" : "Deleting",
                   n->flow_id, n->sw_id, ntohs(n->tuple.src_port),
                   ntohs(n->tuple.dst_port), rxq_id);

        n->used = true;
        n->filter_op = add_fltr;
        op->ntuple_filter_config(edev->cdev, n, n->mapping, n->buf_len, 0,
                                 rxq_id, add_fltr);
}

static void
qede_free_arfs_filter(struct qede_dev *edev, struct qede_arfs_fltr_node *fltr)
{
        kfree(fltr->data);
        clear_bit(fltr->sw_id, edev->arfs->arfs_fltr_bmap);
        kfree(fltr);
}

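/* Completion callback invoked by the qed layer once a filter add/delete
 * request has been processed by the FW. On failure the filter is simply
 * marked invalid and left for the aging logic to reap. A queue change
 * requested while an operation was in flight is applied on completion:
 * an add completion triggers a delete from the old queue, and a delete
 * completion re-adds the filter on next_rxq_id.
 */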
void qede_arfs_filter_op(void *dev, void *filter, u8 fw_rc)
{
        struct qede_arfs_fltr_node *fltr = filter;
        struct qede_dev *edev = dev;

        if (fw_rc) {
                DP_NOTICE(edev,
                          "Failed arfs filter configuration fw_rc=%d, flow_id=%d, sw_id=%d, src_port=%d, dst_port=%d, rxq=%d\n",
                          fw_rc, fltr->flow_id, fltr->sw_id,
                          ntohs(fltr->tuple.src_port),
                          ntohs(fltr->tuple.dst_port), fltr->rxq_id);

                spin_lock_bh(&edev->arfs->arfs_list_lock);

                fltr->used = false;
                clear_bit(QEDE_FLTR_VALID, &fltr->state);

                spin_unlock_bh(&edev->arfs->arfs_list_lock);
                return;
        }

        spin_lock_bh(&edev->arfs->arfs_list_lock);

        fltr->used = false;

        if (fltr->filter_op) {
                set_bit(QEDE_FLTR_VALID, &fltr->state);
                if (fltr->rxq_id != fltr->next_rxq_id)
                        qede_configure_arfs_fltr(edev, fltr, fltr->rxq_id,
                                                 false);
        } else {
                clear_bit(QEDE_FLTR_VALID, &fltr->state);
                if (fltr->rxq_id != fltr->next_rxq_id) {
                        fltr->rxq_id = fltr->next_rxq_id;
                        qede_configure_arfs_fltr(edev, fltr,
                                                 fltr->rxq_id, true);
                }
        }

        spin_unlock_bh(&edev->arfs->arfs_list_lock);
}

/* Should be called while qede_lock is held */
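/* Walks every hash bucket: frees filters that are invalid and have no
 * operation in flight (or frees everything when @free_fltr is set), and
 * requests FW deletion of filters whose flows rps_may_expire_flow() says
 * have expired. Once the last filter is gone the HW aRFS searcher is
 * disabled; otherwise the sp_task is re-armed to poll again.
 */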
void qede_process_arfs_filters(struct qede_dev *edev, bool free_fltr)
{
        int i;

        for (i = 0; i <= QEDE_RFS_FLW_MASK; i++) {
                struct hlist_node *temp;
                struct hlist_head *head;
                struct qede_arfs_fltr_node *fltr;

                head = &edev->arfs->arfs_hl_head[i];

                hlist_for_each_entry_safe(fltr, temp, head, node) {
                        bool del = false;

                        if (edev->state != QEDE_STATE_OPEN)
                                del = true;

                        spin_lock_bh(&edev->arfs->arfs_list_lock);

                        if ((!test_bit(QEDE_FLTR_VALID, &fltr->state) &&
                             !fltr->used) || free_fltr) {
                                hlist_del(&fltr->node);
                                dma_unmap_single(&edev->pdev->dev,
                                                 fltr->mapping,
                                                 fltr->buf_len, DMA_TO_DEVICE);
                                qede_free_arfs_filter(edev, fltr);
                                edev->arfs->filter_count--;
                        } else {
                                if ((rps_may_expire_flow(edev->ndev,
                                                         fltr->rxq_id,
                                                         fltr->flow_id,
                                                         fltr->sw_id) || del) &&
                                    !free_fltr)
                                        qede_configure_arfs_fltr(edev, fltr,
                                                                 fltr->rxq_id,
                                                                 false);
                        }

                        spin_unlock_bh(&edev->arfs->arfs_list_lock);
                }
        }

        spin_lock_bh(&edev->arfs->arfs_list_lock);

        if (!edev->arfs->filter_count) {
                if (edev->arfs->enable) {
                        edev->arfs->enable = false;
                        edev->ops->configure_arfs_searcher(edev->cdev, false);
                }
        } else {
                set_bit(QEDE_SP_ARFS_CONFIG, &edev->sp_flags);
                schedule_delayed_work(&edev->sp_task,
                                      QEDE_SP_TASK_POLL_DELAY);
        }

        spin_unlock_bh(&edev->arfs->arfs_list_lock);
}

/* This function waits until all aRFS filters get deleted and freed.
 * On timeout it frees all filters forcefully.
 */
void qede_poll_for_freeing_arfs_filters(struct qede_dev *edev)
{
        int count = QEDE_ARFS_POLL_COUNT;

        while (count) {
                qede_process_arfs_filters(edev, false);

                if (!edev->arfs->filter_count)
                        break;

                msleep(100);
                count--;
        }

        if (!count) {
                DP_NOTICE(edev, "Timeout in polling for arfs filter free\n");

                /* Something is terribly wrong, free forcefully */
                qede_process_arfs_filters(edev, true);
        }
}

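/* The IRQ CPU reverse-map lets the RFS core translate the CPU a flow's
 * consumer runs on into the RX queue whose interrupt is affined closest
 * to it; qede publishes one entry per RSS queue.
 */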
int qede_alloc_arfs(struct qede_dev *edev)
{
        int i;

        edev->arfs = vzalloc(sizeof(*edev->arfs));
        if (!edev->arfs)
                return -ENOMEM;

        spin_lock_init(&edev->arfs->arfs_list_lock);

        for (i = 0; i <= QEDE_RFS_FLW_MASK; i++)
                INIT_HLIST_HEAD(&edev->arfs->arfs_hl_head[i]);

        edev->ndev->rx_cpu_rmap = alloc_irq_cpu_rmap(QEDE_RSS_COUNT(edev));
        if (!edev->ndev->rx_cpu_rmap) {
                vfree(edev->arfs);
                edev->arfs = NULL;
                return -ENOMEM;
        }

        /* BITS_TO_LONGS() counts longs, not bytes - size the bitmap in bytes */
        edev->arfs->arfs_fltr_bmap = vzalloc(BITS_TO_LONGS(QEDE_RFS_MAX_FLTR) *
                                             sizeof(long));
        if (!edev->arfs->arfs_fltr_bmap) {
                free_irq_cpu_rmap(edev->ndev->rx_cpu_rmap);
                edev->ndev->rx_cpu_rmap = NULL;
                vfree(edev->arfs);
                edev->arfs = NULL;
                return -ENOMEM;
        }

        return 0;
}

void qede_free_arfs(struct qede_dev *edev)
{
        if (!edev->arfs)
                return;

        if (edev->ndev->rx_cpu_rmap)
                free_irq_cpu_rmap(edev->ndev->rx_cpu_rmap);

        edev->ndev->rx_cpu_rmap = NULL;
        vfree(edev->arfs->arfs_fltr_bmap);
        edev->arfs->arfs_fltr_bmap = NULL;
        vfree(edev->arfs);
        edev->arfs = NULL;
}

static bool qede_compare_ip_addr(struct qede_arfs_fltr_node *tpos,
                                 const struct sk_buff *skb)
{
        if (skb->protocol == htons(ETH_P_IP))
                return tpos->tuple.src_ipv4 == ip_hdr(skb)->saddr &&
                       tpos->tuple.dst_ipv4 == ip_hdr(skb)->daddr;

        return !memcmp(&tpos->tuple.src_ipv6, &ipv6_hdr(skb)->saddr,
                       sizeof(struct in6_addr)) &&
               !memcmp(&tpos->tuple.dst_ipv6, &ipv6_hdr(skb)->daddr,
                       sizeof(struct in6_addr));
}

static struct qede_arfs_fltr_node *
qede_arfs_htbl_key_search(struct hlist_head *h, const struct sk_buff *skb,
                          __be16 src_port, __be16 dst_port, u8 ip_proto)
{
        struct qede_arfs_fltr_node *tpos;

        hlist_for_each_entry(tpos, h, node)
                if (tpos->tuple.ip_proto == ip_proto &&
                    tpos->tuple.eth_proto == skb->protocol &&
                    qede_compare_ip_addr(tpos, skb) &&
                    tpos->tuple.src_port == src_port &&
                    tpos->tuple.dst_port == dst_port)
                        return tpos;

        return NULL;
}

static struct qede_arfs_fltr_node *
qede_alloc_filter(struct qede_dev *edev, int min_hlen)
{
        struct qede_arfs_fltr_node *n;
        int bit_id;

        bit_id = find_first_zero_bit(edev->arfs->arfs_fltr_bmap,
                                     QEDE_RFS_MAX_FLTR);

        if (bit_id >= QEDE_RFS_MAX_FLTR)
                return NULL;

        n = kzalloc(sizeof(*n), GFP_ATOMIC);
        if (!n)
                return NULL;

        n->data = kzalloc(min_hlen, GFP_ATOMIC);
        if (!n->data) {
                kfree(n);
                return NULL;
        }

        n->sw_id = (u16)bit_id;
        set_bit(bit_id, edev->arfs->arfs_fltr_bmap);
        return n;
}

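/* .ndo_rx_flow_steer callback. Called from the RX softirq path when the
 * RFS core wants the flow carried by @skb steered to @rxq_index. Returns
 * the filter's sw_id on success (the core later passes it back to
 * rps_may_expire_flow()) or a negative errno.
 */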
int qede_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
                       u16 rxq_index, u32 flow_id)
{
        struct qede_dev *edev = netdev_priv(dev);
        struct qede_arfs_fltr_node *n;
        int min_hlen, rc, tp_offset;
        struct ethhdr *eth;
        __be16 *ports;
        u16 tbl_idx;
        u8 ip_proto;

        /* Only non-encapsulated TCP/UDP over IPv4/IPv6 can be steered */
        if (skb->encapsulation)
                return -EPROTONOSUPPORT;

        if (skb->protocol != htons(ETH_P_IP) &&
            skb->protocol != htons(ETH_P_IPV6))
                return -EPROTONOSUPPORT;

        if (skb->protocol == htons(ETH_P_IP)) {
                ip_proto = ip_hdr(skb)->protocol;
                tp_offset = sizeof(struct iphdr);
        } else {
                ip_proto = ipv6_hdr(skb)->nexthdr;
                tp_offset = sizeof(struct ipv6hdr);
        }

        if (ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP)
                return -EPROTONOSUPPORT;

        ports = (__be16 *)(skb->data + tp_offset);
        tbl_idx = skb_get_hash_raw(skb) & QEDE_RFS_FLW_MASK;

        spin_lock_bh(&edev->arfs->arfs_list_lock);

        n = qede_arfs_htbl_key_search(&edev->arfs->arfs_hl_head[tbl_idx],
                                      skb, ports[0], ports[1], ip_proto);

        if (n) {
                /* Filter match */
                n->next_rxq_id = rxq_index;

                if (test_bit(QEDE_FLTR_VALID, &n->state)) {
                        /* Queue change - delete from the old queue first;
                         * qede_arfs_filter_op() re-adds on completion.
                         */
                        if (n->rxq_id != rxq_index)
                                qede_configure_arfs_fltr(edev, n, n->rxq_id,
                                                         false);
                } else {
                        if (!n->used) {
                                n->rxq_id = rxq_index;
                                qede_configure_arfs_fltr(edev, n, n->rxq_id,
                                                         true);
                        }
                }

                rc = n->sw_id;
                goto ret_unlock;
        }

        min_hlen = ETH_HLEN + skb_headlen(skb);

        n = qede_alloc_filter(edev, min_hlen);
        if (!n) {
                rc = -ENOMEM;
                goto ret_unlock;
        }

        n->buf_len = min_hlen;
        n->rxq_id = rxq_index;
        n->next_rxq_id = rxq_index;
        n->tuple.src_port = ports[0];
        n->tuple.dst_port = ports[1];
        n->flow_id = flow_id;

        if (skb->protocol == htons(ETH_P_IP)) {
                n->tuple.src_ipv4 = ip_hdr(skb)->saddr;
                n->tuple.dst_ipv4 = ip_hdr(skb)->daddr;
        } else {
                memcpy(&n->tuple.src_ipv6, &ipv6_hdr(skb)->saddr,
                       sizeof(struct in6_addr));
                memcpy(&n->tuple.dst_ipv6, &ipv6_hdr(skb)->daddr,
                       sizeof(struct in6_addr));
        }

        /* Build the packet template the FW matches against: a minimal
         * Ethernet header followed by the received L3/L4 headers.
         */
        eth = (struct ethhdr *)n->data;
        eth->h_proto = skb->protocol;
        n->tuple.eth_proto = skb->protocol;
        n->tuple.ip_proto = ip_proto;
        memcpy(n->data + ETH_HLEN, skb->data, skb_headlen(skb));

        n->mapping = dma_map_single(&edev->pdev->dev, n->data,
                                    n->buf_len, DMA_TO_DEVICE);
        if (dma_mapping_error(&edev->pdev->dev, n->mapping)) {
                DP_NOTICE(edev, "Failed to map DMA memory for arfs\n");
                qede_free_arfs_filter(edev, n);
                rc = -ENOMEM;
                goto ret_unlock;
        }

        INIT_HLIST_NODE(&n->node);
        hlist_add_head(&n->node, &edev->arfs->arfs_hl_head[tbl_idx]);
        edev->arfs->filter_count++;

        /* First filter - turn on the HW aRFS searcher */
        if (edev->arfs->filter_count == 1 && !edev->arfs->enable) {
                edev->ops->configure_arfs_searcher(edev->cdev, true);
                edev->arfs->enable = true;
        }

        qede_configure_arfs_fltr(edev, n, n->rxq_id, true);

        spin_unlock_bh(&edev->arfs->arfs_list_lock);

        set_bit(QEDE_SP_ARFS_CONFIG, &edev->sp_flags);
        schedule_delayed_work(&edev->sp_task, 0);
        return n->sw_id;

ret_unlock:
        spin_unlock_bh(&edev->arfs->arfs_list_lock);
        return rc;
}
#endif

void qede_force_mac(void *dev, u8 *mac, bool forced)
{
        struct qede_dev *edev = dev;

        /* MAC hints take effect only if we haven't set one already */
        if (is_valid_ether_addr(edev->ndev->dev_addr) && !forced)
                return;

        ether_addr_copy(edev->ndev->dev_addr, mac);
        ether_addr_copy(edev->primary_mac, mac);
}

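/* Fill the RSS parameters for a vport-update ramrod. The indirection
 * table is re-seeded with the ethtool default spread whenever it was
 * never set or references queues that no longer exist; the hash key and
 * capability bits are generated once and then kept across reloads.
 */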
void qede_fill_rss_params(struct qede_dev *edev,
                          struct qed_update_vport_rss_params *rss, u8 *update)
{
        bool need_reset = false;
        int i;

        if (QEDE_RSS_COUNT(edev) <= 1) {
                memset(rss, 0, sizeof(*rss));
                *update = 0;
                return;
        }

        /* Need to validate current RSS config uses valid entries */
        for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
                if (edev->rss_ind_table[i] >= QEDE_RSS_COUNT(edev)) {
                        need_reset = true;
                        break;
                }
        }

        if (!(edev->rss_params_inited & QEDE_RSS_INDIR_INITED) || need_reset) {
                for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
                        u16 indir_val, val;

                        val = QEDE_RSS_COUNT(edev);
                        indir_val = ethtool_rxfh_indir_default(i, val);
                        edev->rss_ind_table[i] = indir_val;
                }
                edev->rss_params_inited |= QEDE_RSS_INDIR_INITED;
        }

        /* Now that we have the queue-indirection, prepare the handles */
        for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
                u16 idx = QEDE_RX_QUEUE_IDX(edev, edev->rss_ind_table[i]);

                rss->rss_ind_table[i] = edev->fp_array[idx].rxq->handle;
        }

        if (!(edev->rss_params_inited & QEDE_RSS_KEY_INITED)) {
                netdev_rss_key_fill(edev->rss_key, sizeof(edev->rss_key));
                edev->rss_params_inited |= QEDE_RSS_KEY_INITED;
        }
        memcpy(rss->rss_key, edev->rss_key, sizeof(rss->rss_key));

        if (!(edev->rss_params_inited & QEDE_RSS_CAPS_INITED)) {
                edev->rss_caps = QED_RSS_IPV4 | QED_RSS_IPV6 |
                    QED_RSS_IPV4_TCP | QED_RSS_IPV6_TCP;
                edev->rss_params_inited |= QEDE_RSS_CAPS_INITED;
        }
        rss->rss_caps = edev->rss_caps;

        *update = 1;
}

static int qede_set_ucast_rx_mac(struct qede_dev *edev,
                                 enum qed_filter_xcast_params_type opcode,
                                 unsigned char mac[ETH_ALEN])
{
        struct qed_filter_params filter_cmd;

        memset(&filter_cmd, 0, sizeof(filter_cmd));
        filter_cmd.type = QED_FILTER_TYPE_UCAST;
        filter_cmd.filter.ucast.type = opcode;
        filter_cmd.filter.ucast.mac_valid = 1;
        ether_addr_copy(filter_cmd.filter.ucast.mac, mac);

        return edev->ops->filter_config(edev->cdev, &filter_cmd);
}

static int qede_set_ucast_rx_vlan(struct qede_dev *edev,
                                  enum qed_filter_xcast_params_type opcode,
                                  u16 vid)
{
        struct qed_filter_params filter_cmd;

        memset(&filter_cmd, 0, sizeof(filter_cmd));
        filter_cmd.type = QED_FILTER_TYPE_UCAST;
        filter_cmd.filter.ucast.type = opcode;
        filter_cmd.filter.ucast.vlan_valid = 1;
        filter_cmd.filter.ucast.vlan = vid;

        return edev->ops->filter_config(edev->cdev, &filter_cmd);
}

static int qede_config_accept_any_vlan(struct qede_dev *edev, bool action)
{
        struct qed_update_vport_params *params;
        int rc;

        /* Proceed only if action actually needs to be performed */
        if (edev->accept_any_vlan == action)
                return 0;

        params = vzalloc(sizeof(*params));
        if (!params)
                return -ENOMEM;

        params->vport_id = 0;
        params->accept_any_vlan = action;
        params->update_accept_any_vlan_flg = 1;

        rc = edev->ops->vport_update(edev->cdev, params);
        if (rc) {
                DP_ERR(edev, "Failed to %s accept-any-vlan\n",
                       action ? "enable" : "disable");
        } else {
                DP_INFO(edev, "%s accept-any-vlan\n",
                        action ? "enabled" : "disabled");
                edev->accept_any_vlan = action;
        }

        vfree(params);
        return 0;
}

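/* VLAN filtering scheme: every VID except 0 consumes one of the
 * num_vlan_filters HW credits (VID 0 has a reserved filter). When the
 * credits are exhausted, the vport is switched to accept-any-VLAN mode
 * until enough filters are released again.
 */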
int qede_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
{
        struct qede_dev *edev = netdev_priv(dev);
        struct qede_vlan *vlan, *tmp;
        int rc = 0;

        DP_VERBOSE(edev, NETIF_MSG_IFUP, "Adding vlan 0x%04x\n", vid);

        vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
        if (!vlan) {
                DP_INFO(edev, "Failed to allocate struct for vlan\n");
                return -ENOMEM;
        }
        INIT_LIST_HEAD(&vlan->list);
        vlan->vid = vid;
        vlan->configured = false;

        /* Verify vlan isn't already configured */
        list_for_each_entry(tmp, &edev->vlan_list, list) {
                if (tmp->vid == vlan->vid) {
                        DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
                                   "vlan already configured\n");
                        kfree(vlan);
                        return -EEXIST;
                }
        }

        /* If interface is down, cache this VLAN ID and return */
        __qede_lock(edev);
        if (edev->state != QEDE_STATE_OPEN) {
                DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
                           "Interface is down, VLAN %d will be configured when interface is up\n",
                           vid);
                if (vid != 0)
                        edev->non_configured_vlans++;
                list_add(&vlan->list, &edev->vlan_list);
                goto out;
        }

        /* Check for the filter limit.
         * Note - vlan0 has a reserved filter and can be added without
         * worrying about quota
         */
        if ((edev->configured_vlans < edev->dev_info.num_vlan_filters) ||
            (vlan->vid == 0)) {
                rc = qede_set_ucast_rx_vlan(edev,
                                            QED_FILTER_XCAST_TYPE_ADD,
                                            vlan->vid);
                if (rc) {
                        DP_ERR(edev, "Failed to configure VLAN %d\n",
                               vlan->vid);
                        kfree(vlan);
                        goto out;
                }
                vlan->configured = true;

                /* The vlan0 filter doesn't consume our quota */
                if (vlan->vid != 0)
                        edev->configured_vlans++;
        } else {
                /* Out of quota; Activate accept-any-VLAN mode */
                if (!edev->non_configured_vlans) {
                        rc = qede_config_accept_any_vlan(edev, true);
                        if (rc) {
                                kfree(vlan);
                                goto out;
                        }
                }

                edev->non_configured_vlans++;
        }

        list_add(&vlan->list, &edev->vlan_list);

out:
        __qede_unlock(edev);
        return rc;
}

static void qede_del_vlan_from_list(struct qede_dev *edev,
                                    struct qede_vlan *vlan)
{
        /* The vlan0 filter doesn't consume our quota */
        if (vlan->vid != 0) {
                if (vlan->configured)
                        edev->configured_vlans--;
                else
                        edev->non_configured_vlans--;
        }

        list_del(&vlan->list);
        kfree(vlan);
}

int qede_configure_vlan_filters(struct qede_dev *edev)
{
        int rc = 0, real_rc = 0, accept_any_vlan = 0;
        struct qed_dev_eth_info *dev_info;
        struct qede_vlan *vlan = NULL;

        if (list_empty(&edev->vlan_list))
                return 0;

        dev_info = &edev->dev_info;

        /* Configure non-configured vlans */
        list_for_each_entry(vlan, &edev->vlan_list, list) {
                if (vlan->configured)
                        continue;

                /* We have used all our credits, now enable accept_any_vlan */
                if ((vlan->vid != 0) &&
                    (edev->configured_vlans == dev_info->num_vlan_filters)) {
                        accept_any_vlan = 1;
                        continue;
                }

                DP_VERBOSE(edev, NETIF_MSG_IFUP, "Adding vlan %d\n", vlan->vid);

                rc = qede_set_ucast_rx_vlan(edev, QED_FILTER_XCAST_TYPE_ADD,
                                            vlan->vid);
                if (rc) {
                        DP_ERR(edev, "Failed to configure VLAN %u\n",
                               vlan->vid);
                        real_rc = rc;
                        continue;
                }

                vlan->configured = true;
                /* The vlan0 filter doesn't consume our VLAN filter quota */
                if (vlan->vid != 0) {
                        edev->non_configured_vlans--;
                        edev->configured_vlans++;
                }
        }

        /* Enable accept_any_vlan mode if we have more VLANs than credits,
         * or disable accept_any_vlan mode if we've actually removed
         * a non-configured vlan, and all remaining vlans are truly configured.
         */
        if (accept_any_vlan)
                rc = qede_config_accept_any_vlan(edev, true);
        else if (!edev->non_configured_vlans)
                rc = qede_config_accept_any_vlan(edev, false);

        if (rc && !real_rc)
                real_rc = rc;

        return real_rc;
}

int qede_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
{
        struct qede_dev *edev = netdev_priv(dev);
        struct qede_vlan *vlan;
        bool found = false;
        int rc = 0;

        DP_VERBOSE(edev, NETIF_MSG_IFDOWN, "Removing vlan 0x%04x\n", vid);

        /* Find whether entry exists; the list cursor is never NULL after
         * list_for_each_entry(), so track a match explicitly.
         */
        __qede_lock(edev);
        list_for_each_entry(vlan, &edev->vlan_list, list) {
                if (vlan->vid == vid) {
                        found = true;
                        break;
                }
        }

        if (!found) {
                DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
                           "Vlan isn't configured\n");
                goto out;
        }

        if (edev->state != QEDE_STATE_OPEN) {
                /* As interface is already down, we don't have a VPORT
                 * instance to remove vlan filter. So just update vlan list
                 */
                DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
                           "Interface is down, removing VLAN from list only\n");
                qede_del_vlan_from_list(edev, vlan);
                goto out;
        }

        /* Remove vlan */
        if (vlan->configured) {
                rc = qede_set_ucast_rx_vlan(edev, QED_FILTER_XCAST_TYPE_DEL,
                                            vid);
                if (rc) {
                        DP_ERR(edev, "Failed to remove VLAN %d\n", vid);
                        goto out;
                }
        }

        qede_del_vlan_from_list(edev, vlan);

        /* We have removed a VLAN - try to see if we can
         * configure non-configured VLAN from the list.
         */
        rc = qede_configure_vlan_filters(edev);

out:
        __qede_unlock(edev);
        return rc;
}

void qede_vlan_mark_nonconfigured(struct qede_dev *edev)
{
        struct qede_vlan *vlan = NULL;

        if (list_empty(&edev->vlan_list))
                return;

        list_for_each_entry(vlan, &edev->vlan_list, list) {
                if (!vlan->configured)
                        continue;

                vlan->configured = false;

                /* The vlan0 filter doesn't consume our quota */
                if (vlan->vid != 0) {
                        edev->non_configured_vlans++;
                        edev->configured_vlans--;
                }

                DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
                           "marked vlan %d as non-configured\n", vlan->vid);
        }

        edev->accept_any_vlan = false;
}

static void qede_set_features_reload(struct qede_dev *edev,
                                     struct qede_reload_args *args)
{
        edev->ndev->features = args->u.features;
}

int qede_set_features(struct net_device *dev, netdev_features_t features)
{
        struct qede_dev *edev = netdev_priv(dev);
        netdev_features_t changes = features ^ dev->features;
        bool need_reload = false;

        /* No action needed if hardware GRO is disabled during driver load */
        if (changes & NETIF_F_GRO) {
                if (dev->features & NETIF_F_GRO)
                        need_reload = !edev->gro_disable;
                else
                        need_reload = edev->gro_disable;
        }

        if (need_reload) {
                struct qede_reload_args args;

                args.u.features = features;
                args.func = &qede_set_features_reload;

                /* Make sure that we definitely need to reload.
                 * In case of an eBPF attached program, there will be no FW
                 * aggregations, so no need to actually reload.
                 */
                __qede_lock(edev);
                if (edev->xdp_prog)
                        args.func(edev, &args);
                else
                        qede_reload(edev, &args, true);
                __qede_unlock(edev);

                return 1;
        }

        return 0;
}

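/* UDP tunnel (VXLAN/GENEVE) port offload. The device tracks a single
 * destination port per tunnel type; the actual HW configuration is
 * deferred to the sp_task.
 */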
void qede_udp_tunnel_add(struct net_device *dev, struct udp_tunnel_info *ti)
{
        struct qede_dev *edev = netdev_priv(dev);
        u16 t_port = ntohs(ti->port);

        switch (ti->type) {
        case UDP_TUNNEL_TYPE_VXLAN:
                if (edev->vxlan_dst_port)
                        return;

                edev->vxlan_dst_port = t_port;

                DP_VERBOSE(edev, QED_MSG_DEBUG, "Added vxlan port=%d\n",
                           t_port);

                set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags);
                break;
        case UDP_TUNNEL_TYPE_GENEVE:
                if (edev->geneve_dst_port)
                        return;

                edev->geneve_dst_port = t_port;

                DP_VERBOSE(edev, QED_MSG_DEBUG, "Added geneve port=%d\n",
                           t_port);
                set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags);
                break;
        default:
                return;
        }

        schedule_delayed_work(&edev->sp_task, 0);
}

void qede_udp_tunnel_del(struct net_device *dev, struct udp_tunnel_info *ti)
{
        struct qede_dev *edev = netdev_priv(dev);
        u16 t_port = ntohs(ti->port);

        switch (ti->type) {
        case UDP_TUNNEL_TYPE_VXLAN:
                if (t_port != edev->vxlan_dst_port)
                        return;

                edev->vxlan_dst_port = 0;

                DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted vxlan port=%d\n",
                           t_port);

                set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags);
                break;
        case UDP_TUNNEL_TYPE_GENEVE:
                if (t_port != edev->geneve_dst_port)
                        return;

                edev->geneve_dst_port = 0;

                DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted geneve port=%d\n",
                           t_port);
                set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags);
                break;
        default:
                return;
        }

        schedule_delayed_work(&edev->sp_task, 0);
}

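/* XDP setup: the reference on the new program is already taken by the
 * core before .ndo_xdp is invoked; qede_reload() tears the datapath down
 * and brings it back up with the program swapped in.
 */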
static void qede_xdp_reload_func(struct qede_dev *edev,
                                 struct qede_reload_args *args)
{
        struct bpf_prog *old;

        old = xchg(&edev->xdp_prog, args->u.new_prog);
        if (old)
                bpf_prog_put(old);
}

static int qede_xdp_set(struct qede_dev *edev, struct bpf_prog *prog)
{
        struct qede_reload_args args;

        /* If we're called, there was already a bpf reference increment */
        args.func = &qede_xdp_reload_func;
        args.u.new_prog = prog;
        qede_reload(edev, &args, false);

        return 0;
}

int qede_xdp(struct net_device *dev, struct netdev_xdp *xdp)
{
        struct qede_dev *edev = netdev_priv(dev);

        if (IS_VF(edev)) {
                DP_NOTICE(edev, "VFs don't support XDP\n");
                return -EOPNOTSUPP;
        }

        switch (xdp->command) {
        case XDP_SETUP_PROG:
                return qede_xdp_set(edev, xdp->prog);
        case XDP_QUERY_PROG:
                xdp->prog_attached = !!edev->xdp_prog;
                return 0;
        default:
                return -EINVAL;
        }
}

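/* The device supports at most 64 multicast filters; past that (or with
 * IFF_ALLMULTI set) the RX mode degrades to multicast-promiscuous.
 */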
static int qede_set_mcast_rx_mac(struct qede_dev *edev,
                                 enum qed_filter_xcast_params_type opcode,
                                 unsigned char *mac, int num_macs)
{
        struct qed_filter_params filter_cmd;
        int i;

        memset(&filter_cmd, 0, sizeof(filter_cmd));
        filter_cmd.type = QED_FILTER_TYPE_MCAST;
        filter_cmd.filter.mcast.type = opcode;
        filter_cmd.filter.mcast.num = num_macs;

        for (i = 0; i < num_macs; i++, mac += ETH_ALEN)
                ether_addr_copy(filter_cmd.filter.mcast.mac[i], mac);

        return edev->ops->filter_config(edev->cdev, &filter_cmd);
}

int qede_set_mac_addr(struct net_device *ndev, void *p)
{
        struct qede_dev *edev = netdev_priv(ndev);
        struct sockaddr *addr = p;
        int rc;

        ASSERT_RTNL(); /* @@@TBD To be removed */

        DP_INFO(edev, "Set_mac_addr called\n");

        if (!is_valid_ether_addr(addr->sa_data)) {
                DP_NOTICE(edev, "The MAC address is not valid\n");
                return -EFAULT;
        }

        if (!edev->ops->check_mac(edev->cdev, addr->sa_data)) {
                DP_NOTICE(edev, "qed prevents setting MAC\n");
                return -EINVAL;
        }

        ether_addr_copy(ndev->dev_addr, addr->sa_data);

        if (!netif_running(ndev)) {
                DP_NOTICE(edev, "The device is currently down\n");
                return 0;
        }

        /* Remove the previous primary mac */
        rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_DEL,
                                   edev->primary_mac);
        if (rc)
                return rc;

        edev->ops->common->update_mac(edev->cdev, addr->sa_data);

        /* Add MAC filter according to the new unicast HW MAC address */
        ether_addr_copy(edev->primary_mac, ndev->dev_addr);
        return qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_ADD,
                                     edev->primary_mac);
}

static int
qede_configure_mcast_filtering(struct net_device *ndev,
                               enum qed_filter_rx_mode_type *accept_flags)
{
        struct qede_dev *edev = netdev_priv(ndev);
        unsigned char *mc_macs, *temp;
        struct netdev_hw_addr *ha;
        int rc = 0, mc_count;
        size_t size;

        size = 64 * ETH_ALEN;

        mc_macs = kzalloc(size, GFP_KERNEL);
        if (!mc_macs) {
                DP_NOTICE(edev,
                          "Failed to allocate memory for multicast MACs\n");
                rc = -ENOMEM;
                goto exit;
        }

        temp = mc_macs;

        /* Remove all previously configured MAC filters */
        rc = qede_set_mcast_rx_mac(edev, QED_FILTER_XCAST_TYPE_DEL,
                                   mc_macs, 1);
        if (rc)
                goto exit;

        netif_addr_lock_bh(ndev);

        /* The buffer holds exactly 64 entries, so a list of exactly 64
         * addresses still fits; only beyond that do we go promiscuous.
         */
        mc_count = netdev_mc_count(ndev);
        if (mc_count <= 64) {
                netdev_for_each_mc_addr(ha, ndev) {
                        ether_addr_copy(temp, ha->addr);
                        temp += ETH_ALEN;
                }
        }

        netif_addr_unlock_bh(ndev);

        /* Check for all multicast @@@TBD resource allocation */
        if ((ndev->flags & IFF_ALLMULTI) || (mc_count > 64)) {
                if (*accept_flags == QED_FILTER_RX_MODE_TYPE_REGULAR)
                        *accept_flags = QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;
        } else {
                /* Add all multicast MAC filters */
                rc = qede_set_mcast_rx_mac(edev, QED_FILTER_XCAST_TYPE_ADD,
                                           mc_macs, mc_count);
        }

exit:
        kfree(mc_macs);
        return rc;
}

void qede_set_rx_mode(struct net_device *ndev)
{
        struct qede_dev *edev = netdev_priv(ndev);

        set_bit(QEDE_SP_RX_MODE, &edev->sp_flags);
        schedule_delayed_work(&edev->sp_task, 0);
}

/* Must be called with qede_lock held */
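/* Re-programs the RX mode: snapshots the unicast list under the addr
 * lock, re-installs the primary MAC, then programs secondary unicast
 * filters (or falls back to promiscuous when they exceed the HW quota),
 * multicast filtering and the accept-any-VLAN state.
 */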
void qede_config_rx_mode(struct net_device *ndev)
{
        enum qed_filter_rx_mode_type accept_flags;
        struct qede_dev *edev = netdev_priv(ndev);
        struct qed_filter_params rx_mode;
        unsigned char *uc_macs, *temp;
        struct netdev_hw_addr *ha;
        int rc, uc_count;
        size_t size;

        netif_addr_lock_bh(ndev);

        uc_count = netdev_uc_count(ndev);
        size = uc_count * ETH_ALEN;

        uc_macs = kzalloc(size, GFP_ATOMIC);
        if (!uc_macs) {
                DP_NOTICE(edev, "Failed to allocate memory for unicast MACs\n");
                netif_addr_unlock_bh(ndev);
                return;
        }

        temp = uc_macs;
        netdev_for_each_uc_addr(ha, ndev) {
                ether_addr_copy(temp, ha->addr);
                temp += ETH_ALEN;
        }

        netif_addr_unlock_bh(ndev);

        /* Configure the struct for the Rx mode */
        memset(&rx_mode, 0, sizeof(struct qed_filter_params));
        rx_mode.type = QED_FILTER_TYPE_RX_MODE;

        /* Remove all previous unicast secondary macs and multicast macs
         * (configure / leave the primary mac)
         */
        rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_REPLACE,
                                   edev->primary_mac);
        if (rc)
                goto out;

        /* Check for promiscuous */
        if (ndev->flags & IFF_PROMISC)
                accept_flags = QED_FILTER_RX_MODE_TYPE_PROMISC;
        else
                accept_flags = QED_FILTER_RX_MODE_TYPE_REGULAR;

        /* Configure all filters regardless, in case promisc is rejected */
        if (uc_count < edev->dev_info.num_mac_filters) {
                int i;

                temp = uc_macs;
                for (i = 0; i < uc_count; i++) {
                        rc = qede_set_ucast_rx_mac(edev,
                                                   QED_FILTER_XCAST_TYPE_ADD,
                                                   temp);
                        if (rc)
                                goto out;

                        temp += ETH_ALEN;
                }
        } else {
                accept_flags = QED_FILTER_RX_MODE_TYPE_PROMISC;
        }

        rc = qede_configure_mcast_filtering(ndev, &accept_flags);
        if (rc)
                goto out;

        /* take care of VLAN mode */
        if (ndev->flags & IFF_PROMISC) {
                qede_config_accept_any_vlan(edev, true);
        } else if (!edev->non_configured_vlans) {
                /* It's possible that accept_any_vlan mode is set due to a
                 * previous setting of IFF_PROMISC. If vlan credits are
                 * sufficient, disable accept_any_vlan.
                 */
                qede_config_accept_any_vlan(edev, false);
        }

        rx_mode.filter.accept_flags = accept_flags;
        edev->ops->filter_config(edev->cdev, &rx_mode);
out:
        kfree(uc_macs);
}