karo-tx-linux.git / drivers/net/ethernet/qlogic/qede/qede_filter.c
(blob at commit "qede: allocate enough data for ->arfs_fltr_bmap")
/* QLogic qede NIC Driver
 * Copyright (c) 2015-2017  QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <net/udp_tunnel.h>
#include <linux/bitops.h>
#include <linux/vmalloc.h>

#include <linux/qed/qed_if.h>
#include "qede.h"

#ifdef CONFIG_RFS_ACCEL
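/* Accelerated RFS (aRFS) support: flows sampled by the stack are turned
 * into hardware n-tuple filters so that subsequent packets of a flow are
 * steered to the RX queue of the CPU that consumes it.
 */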
struct qede_arfs_tuple {
        union {
                __be32 src_ipv4;
                struct in6_addr src_ipv6;
        };
        union {
                __be32 dst_ipv4;
                struct in6_addr dst_ipv6;
        };
        __be16  src_port;
        __be16  dst_port;
        __be16  eth_proto;
        u8      ip_proto;
};

struct qede_arfs_fltr_node {
#define QEDE_FLTR_VALID  0
        unsigned long state;

        /* pointer to aRFS packet buffer */
        void *data;

        /* dma map address of aRFS packet buffer */
        dma_addr_t mapping;

        /* length of aRFS packet buffer */
        int buf_len;

        /* tuples to hold from aRFS packet buffer */
        struct qede_arfs_tuple tuple;

        u32 flow_id;
        u16 sw_id;
        u16 rxq_id;
        u16 next_rxq_id;
        bool filter_op;
        bool used;
        struct hlist_node node;
};

struct qede_arfs {
#define QEDE_ARFS_POLL_COUNT    100
#define QEDE_RFS_FLW_BITSHIFT   (4)
#define QEDE_RFS_FLW_MASK       ((1 << QEDE_RFS_FLW_BITSHIFT) - 1)
        struct hlist_head       arfs_hl_head[1 << QEDE_RFS_FLW_BITSHIFT];

        /* lock for filter list access */
        spinlock_t              arfs_list_lock;
        unsigned long           *arfs_fltr_bmap;
        int                     filter_count;
        bool                    enable;
};

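/* Post an add/delete request for an aRFS filter to the qed layer and mark
 * the node busy until qede_arfs_filter_op() reports completion.
 */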
static void qede_configure_arfs_fltr(struct qede_dev *edev,
                                     struct qede_arfs_fltr_node *n,
                                     u16 rxq_id, bool add_fltr)
{
        const struct qed_eth_ops *op = edev->ops;

        if (n->used)
                return;

        DP_VERBOSE(edev, NETIF_MSG_RX_STATUS,
                   "%s arfs filter flow_id=%d, sw_id=%d, src_port=%d, dst_port=%d, rxq=%d\n",
                   add_fltr ? "Adding" : "Deleting",
                   n->flow_id, n->sw_id, ntohs(n->tuple.src_port),
                   ntohs(n->tuple.dst_port), rxq_id);

        n->used = true;
        n->filter_op = add_fltr;
        op->ntuple_filter_config(edev->cdev, n, n->mapping, n->buf_len, 0,
                                 rxq_id, add_fltr);
}

static void
qede_free_arfs_filter(struct qede_dev *edev, struct qede_arfs_fltr_node *fltr)
{
        kfree(fltr->data);
        clear_bit(fltr->sw_id, edev->arfs->arfs_fltr_bmap);
        kfree(fltr);
}

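/* Completion callback invoked by the qed core once a previously posted
 * ntuple filter request has been processed; fw_rc is the firmware status.
 * On success, a pending RX queue change (next_rxq_id) is carried out by
 * first removing the filter and then re-adding it on the new queue.
 */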
void qede_arfs_filter_op(void *dev, void *filter, u8 fw_rc)
{
        struct qede_arfs_fltr_node *fltr = filter;
        struct qede_dev *edev = dev;

        if (fw_rc) {
                DP_NOTICE(edev,
                          "Failed arfs filter configuration fw_rc=%d, flow_id=%d, sw_id=%d, src_port=%d, dst_port=%d, rxq=%d\n",
                          fw_rc, fltr->flow_id, fltr->sw_id,
                          ntohs(fltr->tuple.src_port),
                          ntohs(fltr->tuple.dst_port), fltr->rxq_id);

                spin_lock_bh(&edev->arfs->arfs_list_lock);

                fltr->used = false;
                clear_bit(QEDE_FLTR_VALID, &fltr->state);

                spin_unlock_bh(&edev->arfs->arfs_list_lock);
                return;
        }

        spin_lock_bh(&edev->arfs->arfs_list_lock);

        fltr->used = false;

        if (fltr->filter_op) {
                set_bit(QEDE_FLTR_VALID, &fltr->state);
                if (fltr->rxq_id != fltr->next_rxq_id)
                        qede_configure_arfs_fltr(edev, fltr, fltr->rxq_id,
                                                 false);
        } else {
                clear_bit(QEDE_FLTR_VALID, &fltr->state);
                if (fltr->rxq_id != fltr->next_rxq_id) {
                        fltr->rxq_id = fltr->next_rxq_id;
                        qede_configure_arfs_fltr(edev, fltr,
                                                 fltr->rxq_id, true);
                }
        }

        spin_unlock_bh(&edev->arfs->arfs_list_lock);
}

/* Should be called while qede_lock is held */
void qede_process_arfs_filters(struct qede_dev *edev, bool free_fltr)
{
        int i;

        for (i = 0; i <= QEDE_RFS_FLW_MASK; i++) {
                struct hlist_node *temp;
                struct hlist_head *head;
                struct qede_arfs_fltr_node *fltr;

                head = &edev->arfs->arfs_hl_head[i];

                hlist_for_each_entry_safe(fltr, temp, head, node) {
                        bool del = false;

                        if (edev->state != QEDE_STATE_OPEN)
                                del = true;

                        spin_lock_bh(&edev->arfs->arfs_list_lock);

                        if ((!test_bit(QEDE_FLTR_VALID, &fltr->state) &&
                             !fltr->used) || free_fltr) {
                                hlist_del(&fltr->node);
                                dma_unmap_single(&edev->pdev->dev,
                                                 fltr->mapping,
                                                 fltr->buf_len, DMA_TO_DEVICE);
                                qede_free_arfs_filter(edev, fltr);
                                edev->arfs->filter_count--;
                        } else {
                                if ((rps_may_expire_flow(edev->ndev,
                                                         fltr->rxq_id,
                                                         fltr->flow_id,
                                                         fltr->sw_id) || del) &&
                                    !free_fltr)
                                        qede_configure_arfs_fltr(edev, fltr,
                                                                 fltr->rxq_id,
                                                                 false);
                        }

                        spin_unlock_bh(&edev->arfs->arfs_list_lock);
                }
        }

        spin_lock_bh(&edev->arfs->arfs_list_lock);

        if (!edev->arfs->filter_count) {
                if (edev->arfs->enable) {
                        edev->arfs->enable = false;
                        edev->ops->configure_arfs_searcher(edev->cdev, false);
                }
        } else {
                set_bit(QEDE_SP_ARFS_CONFIG, &edev->sp_flags);
                schedule_delayed_work(&edev->sp_task,
                                      QEDE_SP_TASK_POLL_DELAY);
        }

        spin_unlock_bh(&edev->arfs->arfs_list_lock);
}

/* This function waits until all aRFS filters get deleted and freed.
 * On timeout it frees all filters forcefully.
 */
void qede_poll_for_freeing_arfs_filters(struct qede_dev *edev)
{
        int count = QEDE_ARFS_POLL_COUNT;

        while (count) {
                qede_process_arfs_filters(edev, false);

                if (!edev->arfs->filter_count)
                        break;

                msleep(100);
                count--;
        }

        if (!count) {
                DP_NOTICE(edev, "Timeout in polling for arfs filter free\n");

                /* Something is terribly wrong, free forcefully */
                qede_process_arfs_filters(edev, true);
        }
}

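/* Allocate the aRFS bookkeeping structures. Note that BITS_TO_LONGS()
 * returns a number of longs, not bytes, so the sw_id bitmap below must be
 * sized as BITS_TO_LONGS(QEDE_RFS_MAX_FLTR) * sizeof(long) bytes.
 */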
int qede_alloc_arfs(struct qede_dev *edev)
{
        int i;

        edev->arfs = vzalloc(sizeof(*edev->arfs));
        if (!edev->arfs)
                return -ENOMEM;

        spin_lock_init(&edev->arfs->arfs_list_lock);

        for (i = 0; i <= QEDE_RFS_FLW_MASK; i++)
                INIT_HLIST_HEAD(&edev->arfs->arfs_hl_head[i]);

        edev->ndev->rx_cpu_rmap = alloc_irq_cpu_rmap(QEDE_RSS_COUNT(edev));
        if (!edev->ndev->rx_cpu_rmap) {
                vfree(edev->arfs);
                edev->arfs = NULL;
                return -ENOMEM;
        }

        edev->arfs->arfs_fltr_bmap = vzalloc(BITS_TO_LONGS(QEDE_RFS_MAX_FLTR) *
                                             sizeof(long));
        if (!edev->arfs->arfs_fltr_bmap) {
                free_irq_cpu_rmap(edev->ndev->rx_cpu_rmap);
                edev->ndev->rx_cpu_rmap = NULL;
                vfree(edev->arfs);
                edev->arfs = NULL;
                return -ENOMEM;
        }

        return 0;
}

void qede_free_arfs(struct qede_dev *edev)
{
        if (!edev->arfs)
                return;

        if (edev->ndev->rx_cpu_rmap)
                free_irq_cpu_rmap(edev->ndev->rx_cpu_rmap);

        edev->ndev->rx_cpu_rmap = NULL;
        vfree(edev->arfs->arfs_fltr_bmap);
        edev->arfs->arfs_fltr_bmap = NULL;
        vfree(edev->arfs);
        edev->arfs = NULL;
}

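/* Compare the cached filter tuple's IP addresses against the given skb,
 * handling both IPv4 and IPv6 headers.
 */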
static bool qede_compare_ip_addr(struct qede_arfs_fltr_node *tpos,
                                 const struct sk_buff *skb)
{
        if (skb->protocol == htons(ETH_P_IP)) {
                if (tpos->tuple.src_ipv4 == ip_hdr(skb)->saddr &&
                    tpos->tuple.dst_ipv4 == ip_hdr(skb)->daddr)
                        return true;
                else
                        return false;
        } else {
                struct in6_addr *src = &tpos->tuple.src_ipv6;
                u8 size = sizeof(struct in6_addr);

                if (!memcmp(src, &ipv6_hdr(skb)->saddr, size) &&
                    !memcmp(&tpos->tuple.dst_ipv6, &ipv6_hdr(skb)->daddr, size))
                        return true;
                else
                        return false;
        }
}

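/* Look up an existing filter node in one hash bucket by full 5-tuple match. */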
static struct qede_arfs_fltr_node *
qede_arfs_htbl_key_search(struct hlist_head *h, const struct sk_buff *skb,
                          __be16 src_port, __be16 dst_port, u8 ip_proto)
{
        struct qede_arfs_fltr_node *tpos;

        hlist_for_each_entry(tpos, h, node)
                if (tpos->tuple.ip_proto == ip_proto &&
                    tpos->tuple.eth_proto == skb->protocol &&
                    qede_compare_ip_addr(tpos, skb) &&
                    tpos->tuple.src_port == src_port &&
                    tpos->tuple.dst_port == dst_port)
                        return tpos;

        return NULL;
}

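/* Reserve a free sw_id from the filter bitmap and allocate a node plus a
 * packet buffer of min_hlen bytes. GFP_ATOMIC is required since this runs
 * under arfs_list_lock from qede_rx_flow_steer().
 */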
static struct qede_arfs_fltr_node *
qede_alloc_filter(struct qede_dev *edev, int min_hlen)
{
        struct qede_arfs_fltr_node *n;
        int bit_id;

        bit_id = find_first_zero_bit(edev->arfs->arfs_fltr_bmap,
                                     QEDE_RFS_MAX_FLTR);

        if (bit_id >= QEDE_RFS_MAX_FLTR)
                return NULL;

        n = kzalloc(sizeof(*n), GFP_ATOMIC);
        if (!n)
                return NULL;

        n->data = kzalloc(min_hlen, GFP_ATOMIC);
        if (!n->data) {
                kfree(n);
                return NULL;
        }

        n->sw_id = (u16)bit_id;
        set_bit(bit_id, edev->arfs->arfs_fltr_bmap);
        return n;
}

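/* .ndo_rx_flow_steer callback: called by the core aRFS code to request that
 * the given flow be steered to rxq_index. Returns the filter's sw_id on
 * success or a negative errno. Only non-encapsulated TCP/UDP over IPv4/IPv6
 * is supported.
 */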
int qede_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
                       u16 rxq_index, u32 flow_id)
{
        struct qede_dev *edev = netdev_priv(dev);
        struct qede_arfs_fltr_node *n;
        int min_hlen, rc, tp_offset;
        struct ethhdr *eth;
        __be16 *ports;
        u16 tbl_idx;
        u8 ip_proto;

        if (skb->encapsulation)
                return -EPROTONOSUPPORT;

        if (skb->protocol != htons(ETH_P_IP) &&
            skb->protocol != htons(ETH_P_IPV6))
                return -EPROTONOSUPPORT;

        if (skb->protocol == htons(ETH_P_IP)) {
                ip_proto = ip_hdr(skb)->protocol;
                tp_offset = sizeof(struct iphdr);
        } else {
                ip_proto = ipv6_hdr(skb)->nexthdr;
                tp_offset = sizeof(struct ipv6hdr);
        }

        if (ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP)
                return -EPROTONOSUPPORT;

        ports = (__be16 *)(skb->data + tp_offset);
        tbl_idx = skb_get_hash_raw(skb) & QEDE_RFS_FLW_MASK;

        spin_lock_bh(&edev->arfs->arfs_list_lock);

        n = qede_arfs_htbl_key_search(&edev->arfs->arfs_hl_head[tbl_idx],
                                      skb, ports[0], ports[1], ip_proto);

        if (n) {
                /* Filter match */
                n->next_rxq_id = rxq_index;

                if (test_bit(QEDE_FLTR_VALID, &n->state)) {
                        if (n->rxq_id != rxq_index)
                                qede_configure_arfs_fltr(edev, n, n->rxq_id,
                                                         false);
                } else {
                        if (!n->used) {
                                n->rxq_id = rxq_index;
                                qede_configure_arfs_fltr(edev, n, n->rxq_id,
                                                         true);
                        }
                }

                rc = n->sw_id;
                goto ret_unlock;
        }

        min_hlen = ETH_HLEN + skb_headlen(skb);

        n = qede_alloc_filter(edev, min_hlen);
        if (!n) {
                rc = -ENOMEM;
                goto ret_unlock;
        }

        n->buf_len = min_hlen;
        n->rxq_id = rxq_index;
        n->next_rxq_id = rxq_index;
        n->tuple.src_port = ports[0];
        n->tuple.dst_port = ports[1];
        n->flow_id = flow_id;

        if (skb->protocol == htons(ETH_P_IP)) {
                n->tuple.src_ipv4 = ip_hdr(skb)->saddr;
                n->tuple.dst_ipv4 = ip_hdr(skb)->daddr;
        } else {
                memcpy(&n->tuple.src_ipv6, &ipv6_hdr(skb)->saddr,
                       sizeof(struct in6_addr));
                memcpy(&n->tuple.dst_ipv6, &ipv6_hdr(skb)->daddr,
                       sizeof(struct in6_addr));
        }

        eth = (struct ethhdr *)n->data;
        eth->h_proto = skb->protocol;
        n->tuple.eth_proto = skb->protocol;
        n->tuple.ip_proto = ip_proto;
        memcpy(n->data + ETH_HLEN, skb->data, skb_headlen(skb));

        n->mapping = dma_map_single(&edev->pdev->dev, n->data,
                                    n->buf_len, DMA_TO_DEVICE);
        if (dma_mapping_error(&edev->pdev->dev, n->mapping)) {
                DP_NOTICE(edev, "Failed to map DMA memory for arfs\n");
                qede_free_arfs_filter(edev, n);
                rc = -ENOMEM;
                goto ret_unlock;
        }

        INIT_HLIST_NODE(&n->node);
        hlist_add_head(&n->node, &edev->arfs->arfs_hl_head[tbl_idx]);
        edev->arfs->filter_count++;

        if (edev->arfs->filter_count == 1 && !edev->arfs->enable) {
                edev->ops->configure_arfs_searcher(edev->cdev, true);
                edev->arfs->enable = true;
        }

        qede_configure_arfs_fltr(edev, n, n->rxq_id, true);

        spin_unlock_bh(&edev->arfs->arfs_list_lock);

        set_bit(QEDE_SP_ARFS_CONFIG, &edev->sp_flags);
        schedule_delayed_work(&edev->sp_task, 0);
        return n->sw_id;

ret_unlock:
        spin_unlock_bh(&edev->arfs->arfs_list_lock);
        return rc;
}
#endif

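/* Callback invoked by the qed core to force, or merely hint, a primary MAC
 * address for this interface.
 */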
void qede_force_mac(void *dev, u8 *mac, bool forced)
{
        struct qede_dev *edev = dev;

        /* MAC hints take effect only if we haven't set one already */
        if (is_valid_ether_addr(edev->ndev->dev_addr) && !forced)
                return;

        ether_addr_copy(edev->ndev->dev_addr, mac);
        ether_addr_copy(edev->primary_mac, mac);
}

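/* Build the RSS section of a vport-update request: validate or regenerate
 * the indirection table, lazily initialize the hash key and capabilities,
 * and translate queue indices into queue handles.
 */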
void qede_fill_rss_params(struct qede_dev *edev,
                          struct qed_update_vport_rss_params *rss, u8 *update)
{
        bool need_reset = false;
        int i;

        if (QEDE_RSS_COUNT(edev) <= 1) {
                memset(rss, 0, sizeof(*rss));
                *update = 0;
                return;
        }

        /* Need to validate current RSS config uses valid entries */
        for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
                if (edev->rss_ind_table[i] >= QEDE_RSS_COUNT(edev)) {
                        need_reset = true;
                        break;
                }
        }

        if (!(edev->rss_params_inited & QEDE_RSS_INDIR_INITED) || need_reset) {
                for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
                        u16 indir_val, val;

                        val = QEDE_RSS_COUNT(edev);
                        indir_val = ethtool_rxfh_indir_default(i, val);
                        edev->rss_ind_table[i] = indir_val;
                }
                edev->rss_params_inited |= QEDE_RSS_INDIR_INITED;
        }

        /* Now that we have the queue-indirection, prepare the handles */
        for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
                u16 idx = QEDE_RX_QUEUE_IDX(edev, edev->rss_ind_table[i]);

                rss->rss_ind_table[i] = edev->fp_array[idx].rxq->handle;
        }

        if (!(edev->rss_params_inited & QEDE_RSS_KEY_INITED)) {
                netdev_rss_key_fill(edev->rss_key, sizeof(edev->rss_key));
                edev->rss_params_inited |= QEDE_RSS_KEY_INITED;
        }
        memcpy(rss->rss_key, edev->rss_key, sizeof(rss->rss_key));

        if (!(edev->rss_params_inited & QEDE_RSS_CAPS_INITED)) {
                edev->rss_caps = QED_RSS_IPV4 | QED_RSS_IPV6 |
                    QED_RSS_IPV4_TCP | QED_RSS_IPV6_TCP;
                edev->rss_params_inited |= QEDE_RSS_CAPS_INITED;
        }
        rss->rss_caps = edev->rss_caps;

        *update = 1;
}

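/* Thin wrappers that translate a single unicast MAC or VLAN filter request
 * into a qed filter_config call.
 */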
static int qede_set_ucast_rx_mac(struct qede_dev *edev,
                                 enum qed_filter_xcast_params_type opcode,
                                 unsigned char mac[ETH_ALEN])
{
        struct qed_filter_params filter_cmd;

        memset(&filter_cmd, 0, sizeof(filter_cmd));
        filter_cmd.type = QED_FILTER_TYPE_UCAST;
        filter_cmd.filter.ucast.type = opcode;
        filter_cmd.filter.ucast.mac_valid = 1;
        ether_addr_copy(filter_cmd.filter.ucast.mac, mac);

        return edev->ops->filter_config(edev->cdev, &filter_cmd);
}

static int qede_set_ucast_rx_vlan(struct qede_dev *edev,
                                  enum qed_filter_xcast_params_type opcode,
                                  u16 vid)
{
        struct qed_filter_params filter_cmd;

        memset(&filter_cmd, 0, sizeof(filter_cmd));
        filter_cmd.type = QED_FILTER_TYPE_UCAST;
        filter_cmd.filter.ucast.type = opcode;
        filter_cmd.filter.ucast.vlan_valid = 1;
        filter_cmd.filter.ucast.vlan = vid;

        return edev->ops->filter_config(edev->cdev, &filter_cmd);
}

static int qede_config_accept_any_vlan(struct qede_dev *edev, bool action)
{
        struct qed_update_vport_params *params;
        int rc;

        /* Proceed only if action actually needs to be performed */
        if (edev->accept_any_vlan == action)
                return 0;

        params = vzalloc(sizeof(*params));
        if (!params)
                return -ENOMEM;

        params->vport_id = 0;
        params->accept_any_vlan = action;
        params->update_accept_any_vlan_flg = 1;

        rc = edev->ops->vport_update(edev->cdev, params);
        if (rc) {
                DP_ERR(edev, "Failed to %s accept-any-vlan\n",
                       action ? "enable" : "disable");
        } else {
                DP_INFO(edev, "%s accept-any-vlan\n",
                        action ? "enabled" : "disabled");
                edev->accept_any_vlan = action;
        }

        vfree(params);
        return rc;
}

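/* .ndo_vlan_rx_add_vid callback. If the interface is down the VLAN is only
 * cached; otherwise it is programmed immediately, falling back to
 * accept-any-VLAN mode once the device's VLAN filter quota is exhausted.
 */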
int qede_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
{
        struct qede_dev *edev = netdev_priv(dev);
        struct qede_vlan *vlan, *tmp;
        int rc = 0;

        DP_VERBOSE(edev, NETIF_MSG_IFUP, "Adding vlan 0x%04x\n", vid);

        vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
        if (!vlan) {
                DP_INFO(edev, "Failed to allocate struct for vlan\n");
                return -ENOMEM;
        }
        INIT_LIST_HEAD(&vlan->list);
        vlan->vid = vid;
        vlan->configured = false;

        /* Verify vlan isn't already configured */
        list_for_each_entry(tmp, &edev->vlan_list, list) {
                if (tmp->vid == vlan->vid) {
                        DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
                                   "vlan already configured\n");
                        kfree(vlan);
                        return -EEXIST;
                }
        }

        /* If interface is down, cache this VLAN ID and return */
        __qede_lock(edev);
        if (edev->state != QEDE_STATE_OPEN) {
                DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
                           "Interface is down, VLAN %d will be configured when interface is up\n",
                           vid);
                if (vid != 0)
                        edev->non_configured_vlans++;
                list_add(&vlan->list, &edev->vlan_list);
                goto out;
        }

        /* Check for the filter limit.
         * Note - vlan0 has a reserved filter and can be added without
         * worrying about quota
         */
        if ((edev->configured_vlans < edev->dev_info.num_vlan_filters) ||
            (vlan->vid == 0)) {
                rc = qede_set_ucast_rx_vlan(edev,
                                            QED_FILTER_XCAST_TYPE_ADD,
                                            vlan->vid);
                if (rc) {
                        DP_ERR(edev, "Failed to configure VLAN %d\n",
                               vlan->vid);
                        kfree(vlan);
                        goto out;
                }
                vlan->configured = true;

                /* vlan0 filter isn't consuming out of our quota */
                if (vlan->vid != 0)
                        edev->configured_vlans++;
        } else {
                /* Out of quota; Activate accept-any-VLAN mode */
                if (!edev->non_configured_vlans) {
                        rc = qede_config_accept_any_vlan(edev, true);
                        if (rc) {
                                kfree(vlan);
                                goto out;
                        }
                }

                edev->non_configured_vlans++;
        }

        list_add(&vlan->list, &edev->vlan_list);

out:
        __qede_unlock(edev);
        return rc;
}

static void qede_del_vlan_from_list(struct qede_dev *edev,
                                    struct qede_vlan *vlan)
{
        /* vlan0 filter isn't consuming out of our quota */
        if (vlan->vid != 0) {
                if (vlan->configured)
                        edev->configured_vlans--;
                else
                        edev->non_configured_vlans--;
        }

        list_del(&vlan->list);
        kfree(vlan);
}

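/* (Re)program all cached VLAN filters, e.g. after the vport is brought up
 * again, and enable or disable accept-any-VLAN mode depending on whether
 * the filter credits cover the configured VLANs.
 */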
int qede_configure_vlan_filters(struct qede_dev *edev)
{
        int rc = 0, real_rc = 0, accept_any_vlan = 0;
        struct qed_dev_eth_info *dev_info;
        struct qede_vlan *vlan = NULL;

        if (list_empty(&edev->vlan_list))
                return 0;

        dev_info = &edev->dev_info;

        /* Configure non-configured vlans */
        list_for_each_entry(vlan, &edev->vlan_list, list) {
                if (vlan->configured)
                        continue;

                /* We have used all our credits, now enable accept_any_vlan */
                if ((vlan->vid != 0) &&
                    (edev->configured_vlans == dev_info->num_vlan_filters)) {
                        accept_any_vlan = 1;
                        continue;
                }

                DP_VERBOSE(edev, NETIF_MSG_IFUP, "Adding vlan %d\n", vlan->vid);

                rc = qede_set_ucast_rx_vlan(edev, QED_FILTER_XCAST_TYPE_ADD,
                                            vlan->vid);
                if (rc) {
                        DP_ERR(edev, "Failed to configure VLAN %u\n",
                               vlan->vid);
                        real_rc = rc;
                        continue;
                }

                vlan->configured = true;
                /* vlan0 filter doesn't consume our VLAN filter's quota */
                if (vlan->vid != 0) {
                        edev->non_configured_vlans--;
                        edev->configured_vlans++;
                }
        }

        /* enable accept_any_vlan mode if we have more VLANs than credits,
         * or remove accept_any_vlan mode if we've actually removed
         * a non-configured vlan, and all remaining vlans are truly configured.
         */

        if (accept_any_vlan)
                rc = qede_config_accept_any_vlan(edev, true);
        else if (!edev->non_configured_vlans)
                rc = qede_config_accept_any_vlan(edev, false);

        if (rc && !real_rc)
                real_rc = rc;

        return real_rc;
}

int qede_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
{
        struct qede_dev *edev = netdev_priv(dev);
        struct qede_vlan *vlan = NULL;
        int rc = 0;

        DP_VERBOSE(edev, NETIF_MSG_IFDOWN, "Removing vlan 0x%04x\n", vid);

        /* Find whether entry exists */
        __qede_lock(edev);
        list_for_each_entry(vlan, &edev->vlan_list, list)
                if (vlan->vid == vid)
                        break;

        if (!vlan || (vlan->vid != vid)) {
                DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
                           "Vlan isn't configured\n");
                goto out;
        }

        if (edev->state != QEDE_STATE_OPEN) {
                /* As interface is already down, we don't have a VPORT
                 * instance to remove vlan filter. So just update vlan list
                 */
                DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
                           "Interface is down, removing VLAN from list only\n");
                qede_del_vlan_from_list(edev, vlan);
                goto out;
        }

        /* Remove vlan */
        if (vlan->configured) {
                rc = qede_set_ucast_rx_vlan(edev, QED_FILTER_XCAST_TYPE_DEL,
                                            vid);
                if (rc) {
                        DP_ERR(edev, "Failed to remove VLAN %d\n", vid);
                        goto out;
                }
        }

        qede_del_vlan_from_list(edev, vlan);

        /* We have removed a VLAN - try to see if we can
         * configure non-configured VLAN from the list.
         */
        rc = qede_configure_vlan_filters(edev);

out:
        __qede_unlock(edev);
        return rc;
}

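/* Mark every VLAN as not currently programmed in HW, typically when the
 * vport goes down, so qede_configure_vlan_filters() can re-add them later.
 */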
void qede_vlan_mark_nonconfigured(struct qede_dev *edev)
{
        struct qede_vlan *vlan = NULL;

        if (list_empty(&edev->vlan_list))
                return;

        list_for_each_entry(vlan, &edev->vlan_list, list) {
                if (!vlan->configured)
                        continue;

                vlan->configured = false;

                /* vlan0 filter isn't consuming out of our quota */
                if (vlan->vid != 0) {
                        edev->non_configured_vlans++;
                        edev->configured_vlans--;
                }

                DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
                           "marked vlan %d as non-configured\n", vlan->vid);
        }

        edev->accept_any_vlan = false;
}

static void qede_set_features_reload(struct qede_dev *edev,
                                     struct qede_reload_args *args)
{
        edev->ndev->features = args->u.features;
}

int qede_set_features(struct net_device *dev, netdev_features_t features)
{
        struct qede_dev *edev = netdev_priv(dev);
        netdev_features_t changes = features ^ dev->features;
        bool need_reload = false;

        /* No action needed if hardware GRO is disabled during driver load */
        if (changes & NETIF_F_GRO) {
                if (dev->features & NETIF_F_GRO)
                        need_reload = !edev->gro_disable;
                else
                        need_reload = edev->gro_disable;
        }

        if (need_reload) {
                struct qede_reload_args args;

                args.u.features = features;
                args.func = &qede_set_features_reload;

                /* Make sure that we definitely need to reload.
                 * In case of an eBPF attached program, there will be no FW
                 * aggregations, so no need to actually reload.
                 */
                __qede_lock(edev);
                if (edev->xdp_prog)
                        args.func(edev, &args);
                else
                        qede_reload(edev, &args, true);
                __qede_unlock(edev);

                return 1;
        }

        return 0;
}

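/* .ndo_udp_tunnel_add callback: record the VXLAN/GENEVE destination UDP
 * port and let the slowpath task program it into the device. The matching
 * qede_udp_tunnel_del() below clears it again.
 */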
void qede_udp_tunnel_add(struct net_device *dev, struct udp_tunnel_info *ti)
{
        struct qede_dev *edev = netdev_priv(dev);
        u16 t_port = ntohs(ti->port);

        switch (ti->type) {
        case UDP_TUNNEL_TYPE_VXLAN:
                if (edev->vxlan_dst_port)
                        return;

                edev->vxlan_dst_port = t_port;

                DP_VERBOSE(edev, QED_MSG_DEBUG, "Added vxlan port=%d\n",
                           t_port);

                set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags);
                break;
        case UDP_TUNNEL_TYPE_GENEVE:
                if (edev->geneve_dst_port)
                        return;

                edev->geneve_dst_port = t_port;

                DP_VERBOSE(edev, QED_MSG_DEBUG, "Added geneve port=%d\n",
                           t_port);
                set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags);
                break;
        default:
                return;
        }

        schedule_delayed_work(&edev->sp_task, 0);
}

void qede_udp_tunnel_del(struct net_device *dev, struct udp_tunnel_info *ti)
{
        struct qede_dev *edev = netdev_priv(dev);
        u16 t_port = ntohs(ti->port);

        switch (ti->type) {
        case UDP_TUNNEL_TYPE_VXLAN:
                if (t_port != edev->vxlan_dst_port)
                        return;

                edev->vxlan_dst_port = 0;

                DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted vxlan port=%d\n",
                           t_port);

                set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags);
                break;
        case UDP_TUNNEL_TYPE_GENEVE:
                if (t_port != edev->geneve_dst_port)
                        return;

                edev->geneve_dst_port = 0;

                DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted geneve port=%d\n",
                           t_port);
                set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags);
                break;
        default:
                return;
        }

        schedule_delayed_work(&edev->sp_task, 0);
}

static void qede_xdp_reload_func(struct qede_dev *edev,
                                 struct qede_reload_args *args)
{
        struct bpf_prog *old;

        old = xchg(&edev->xdp_prog, args->u.new_prog);
        if (old)
                bpf_prog_put(old);
}

static int qede_xdp_set(struct qede_dev *edev, struct bpf_prog *prog)
{
        struct qede_reload_args args;

        /* If we're called, there was already a bpf reference increment */
        args.func = &qede_xdp_reload_func;
        args.u.new_prog = prog;
        qede_reload(edev, &args, false);

        return 0;
}

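/* .ndo_xdp callback: attach/detach an XDP program or report whether one is
 * attached. XDP is not supported for VFs.
 */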
int qede_xdp(struct net_device *dev, struct netdev_xdp *xdp)
{
        struct qede_dev *edev = netdev_priv(dev);

        if (IS_VF(edev)) {
                DP_NOTICE(edev, "VFs don't support XDP\n");
                return -EOPNOTSUPP;
        }

        switch (xdp->command) {
        case XDP_SETUP_PROG:
                return qede_xdp_set(edev, xdp->prog);
        case XDP_QUERY_PROG:
                xdp->prog_attached = !!edev->xdp_prog;
                return 0;
        default:
                return -EINVAL;
        }
}

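/* Program a batch of multicast MAC filters in a single qed filter_config
 * call.
 */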
static int qede_set_mcast_rx_mac(struct qede_dev *edev,
                                 enum qed_filter_xcast_params_type opcode,
                                 unsigned char *mac, int num_macs)
{
        struct qed_filter_params filter_cmd;
        int i;

        memset(&filter_cmd, 0, sizeof(filter_cmd));
        filter_cmd.type = QED_FILTER_TYPE_MCAST;
        filter_cmd.filter.mcast.type = opcode;
        filter_cmd.filter.mcast.num = num_macs;

        for (i = 0; i < num_macs; i++, mac += ETH_ALEN)
                ether_addr_copy(filter_cmd.filter.mcast.mac[i], mac);

        return edev->ops->filter_config(edev->cdev, &filter_cmd);
}

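/* .ndo_set_mac_address callback: validate the new address with qed, and if
 * the device is running swap the primary unicast MAC filter accordingly.
 */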
int qede_set_mac_addr(struct net_device *ndev, void *p)
{
        struct qede_dev *edev = netdev_priv(ndev);
        struct sockaddr *addr = p;
        int rc;

        ASSERT_RTNL(); /* @@@TBD To be removed */

        DP_INFO(edev, "Set_mac_addr called\n");

        if (!is_valid_ether_addr(addr->sa_data)) {
                DP_NOTICE(edev, "The MAC address is not valid\n");
                return -EFAULT;
        }

        if (!edev->ops->check_mac(edev->cdev, addr->sa_data)) {
                DP_NOTICE(edev, "qed prevents setting MAC\n");
                return -EINVAL;
        }

        ether_addr_copy(ndev->dev_addr, addr->sa_data);

        if (!netif_running(ndev)) {
                DP_NOTICE(edev, "The device is currently down\n");
                return 0;
        }

        /* Remove the previous primary mac */
        rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_DEL,
                                   edev->primary_mac);
        if (rc)
                return rc;

        edev->ops->common->update_mac(edev->cdev, addr->sa_data);

        /* Add MAC filter according to the new unicast HW MAC address */
        ether_addr_copy(edev->primary_mac, ndev->dev_addr);
        return qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_ADD,
                                     edev->primary_mac);
}

static int
qede_configure_mcast_filtering(struct net_device *ndev,
                               enum qed_filter_rx_mode_type *accept_flags)
{
        struct qede_dev *edev = netdev_priv(ndev);
        unsigned char *mc_macs, *temp;
        struct netdev_hw_addr *ha;
        int rc = 0, mc_count;
        size_t size;

        size = 64 * ETH_ALEN;

        mc_macs = kzalloc(size, GFP_KERNEL);
        if (!mc_macs) {
                DP_NOTICE(edev,
                          "Failed to allocate memory for multicast MACs\n");
                rc = -ENOMEM;
                goto exit;
        }

        temp = mc_macs;

        /* Remove all previously configured MAC filters */
        rc = qede_set_mcast_rx_mac(edev, QED_FILTER_XCAST_TYPE_DEL,
                                   mc_macs, 1);
        if (rc)
                goto exit;

        netif_addr_lock_bh(ndev);

        mc_count = netdev_mc_count(ndev);
        if (mc_count < 64) {
                netdev_for_each_mc_addr(ha, ndev) {
                        ether_addr_copy(temp, ha->addr);
                        temp += ETH_ALEN;
                }
        }

        netif_addr_unlock_bh(ndev);

        /* Check for all multicast @@@TBD resource allocation */
        if ((ndev->flags & IFF_ALLMULTI) || (mc_count > 64)) {
                if (*accept_flags == QED_FILTER_RX_MODE_TYPE_REGULAR)
                        *accept_flags = QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;
        } else {
                /* Add all multicast MAC filters */
                rc = qede_set_mcast_rx_mac(edev, QED_FILTER_XCAST_TYPE_ADD,
                                           mc_macs, mc_count);
        }

exit:
        kfree(mc_macs);
        return rc;
}

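/* .ndo_set_rx_mode callback: runs in atomic context, so the actual
 * configuration is deferred to the slowpath task, which calls
 * qede_config_rx_mode() under qede_lock.
 */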
void qede_set_rx_mode(struct net_device *ndev)
{
        struct qede_dev *edev = netdev_priv(ndev);

        set_bit(QEDE_SP_RX_MODE, &edev->sp_flags);
        schedule_delayed_work(&edev->sp_task, 0);
}

/* Must be called with qede_lock held */
void qede_config_rx_mode(struct net_device *ndev)
{
        enum qed_filter_rx_mode_type accept_flags;
        struct qede_dev *edev = netdev_priv(ndev);
        struct qed_filter_params rx_mode;
        unsigned char *uc_macs, *temp;
        struct netdev_hw_addr *ha;
        int rc, uc_count;
        size_t size;

        netif_addr_lock_bh(ndev);

        uc_count = netdev_uc_count(ndev);
        size = uc_count * ETH_ALEN;

        uc_macs = kzalloc(size, GFP_ATOMIC);
        if (!uc_macs) {
                DP_NOTICE(edev, "Failed to allocate memory for unicast MACs\n");
                netif_addr_unlock_bh(ndev);
                return;
        }

        temp = uc_macs;
        netdev_for_each_uc_addr(ha, ndev) {
                ether_addr_copy(temp, ha->addr);
                temp += ETH_ALEN;
        }

        netif_addr_unlock_bh(ndev);

        /* Configure the struct for the Rx mode */
        memset(&rx_mode, 0, sizeof(struct qed_filter_params));
        rx_mode.type = QED_FILTER_TYPE_RX_MODE;

        /* Remove all previous unicast secondary macs and multicast macs
         * (configure / leave the primary mac)
         */
        rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_REPLACE,
                                   edev->primary_mac);
        if (rc)
                goto out;

        /* Check for promiscuous */
        if (ndev->flags & IFF_PROMISC)
                accept_flags = QED_FILTER_RX_MODE_TYPE_PROMISC;
        else
                accept_flags = QED_FILTER_RX_MODE_TYPE_REGULAR;

        /* Configure all filters regardless, in case promisc is rejected */
        if (uc_count < edev->dev_info.num_mac_filters) {
                int i;

                temp = uc_macs;
                for (i = 0; i < uc_count; i++) {
                        rc = qede_set_ucast_rx_mac(edev,
                                                   QED_FILTER_XCAST_TYPE_ADD,
                                                   temp);
                        if (rc)
                                goto out;

                        temp += ETH_ALEN;
                }
        } else {
                accept_flags = QED_FILTER_RX_MODE_TYPE_PROMISC;
        }

        rc = qede_configure_mcast_filtering(ndev, &accept_flags);
        if (rc)
                goto out;

        /* take care of VLAN mode */
        if (ndev->flags & IFF_PROMISC) {
                qede_config_accept_any_vlan(edev, true);
        } else if (!edev->non_configured_vlans) {
                /* It's possible that accept_any_vlan mode is set due to a
                 * previous setting of IFF_PROMISC. If vlan credits are
                 * sufficient, disable accept_any_vlan.
                 */
                qede_config_accept_any_vlan(edev, false);
        }

        rx_mode.filter.accept_flags = accept_flags;
        edev->ops->filter_config(edev->cdev, &rx_mode);
out:
        kfree(uc_macs);
}