/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/list.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/mlx5/flow_table.h>
#include "en.h"
/* Pending action recorded on a hashed MAC address entry; consumed by
 * mlx5e_execute_action().
 */
enum {
	MLX5E_ACTION_NONE = 0,
	MLX5E_ACTION_ADD  = 1,
	MLX5E_ACTION_DEL  = 2,
};
59 struct mlx5e_eth_addr_hash_node {
60 struct hlist_node hlist;
62 struct mlx5e_eth_addr_info ai;
65 static inline int mlx5e_hash_eth_addr(u8 *addr)
70 static void mlx5e_add_eth_addr_to_hash(struct hlist_head *hash, u8 *addr)
72 struct mlx5e_eth_addr_hash_node *hn;
73 int ix = mlx5e_hash_eth_addr(addr);
76 hlist_for_each_entry(hn, &hash[ix], hlist)
77 if (ether_addr_equal_64bits(hn->ai.addr, addr)) {
83 hn->action = MLX5E_ACTION_NONE;
87 hn = kzalloc(sizeof(*hn), GFP_ATOMIC);
91 ether_addr_copy(hn->ai.addr, addr);
92 hn->action = MLX5E_ACTION_ADD;
94 hlist_add_head(&hn->hlist, &hash[ix]);
97 static void mlx5e_del_eth_addr_from_hash(struct mlx5e_eth_addr_hash_node *hn)
99 hlist_del(&hn->hlist);
103 static void mlx5e_del_eth_addr_from_flow_table(struct mlx5e_priv *priv,
104 struct mlx5e_eth_addr_info *ai)
106 void *ft = priv->ft.main;
108 if (ai->tt_vec & BIT(MLX5E_TT_IPV6_TCP))
109 mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV6_TCP]);
111 if (ai->tt_vec & BIT(MLX5E_TT_IPV4_TCP))
112 mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV4_TCP]);
114 if (ai->tt_vec & BIT(MLX5E_TT_IPV6_UDP))
115 mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV6_UDP]);
117 if (ai->tt_vec & BIT(MLX5E_TT_IPV4_UDP))
118 mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV4_UDP]);
120 if (ai->tt_vec & BIT(MLX5E_TT_IPV6))
121 mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV6]);
123 if (ai->tt_vec & BIT(MLX5E_TT_IPV4))
124 mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV4]);
126 if (ai->tt_vec & BIT(MLX5E_TT_ANY))
127 mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_ANY]);
130 static int mlx5e_get_eth_addr_type(u8 *addr)
132 if (is_unicast_ether_addr(addr))
135 if ((addr[0] == 0x01) &&
139 return MLX5E_MC_IPV4;
141 if ((addr[0] == 0x33) &&
143 return MLX5E_MC_IPV6;
145 return MLX5E_MC_OTHER;
148 static u32 mlx5e_get_tt_vec(struct mlx5e_eth_addr_info *ai, int type)
154 case MLX5E_FULLMATCH:
155 eth_addr_type = mlx5e_get_eth_addr_type(ai->addr);
156 switch (eth_addr_type) {
159 BIT(MLX5E_TT_IPV4_TCP) |
160 BIT(MLX5E_TT_IPV6_TCP) |
161 BIT(MLX5E_TT_IPV4_UDP) |
162 BIT(MLX5E_TT_IPV6_UDP) |
171 BIT(MLX5E_TT_IPV4_UDP) |
178 BIT(MLX5E_TT_IPV6_UDP) |
194 BIT(MLX5E_TT_IPV4_UDP) |
195 BIT(MLX5E_TT_IPV6_UDP) |
202 default: /* MLX5E_PROMISC */
204 BIT(MLX5E_TT_IPV4_TCP) |
205 BIT(MLX5E_TT_IPV6_TCP) |
206 BIT(MLX5E_TT_IPV4_UDP) |
207 BIT(MLX5E_TT_IPV6_UDP) |
218 static int __mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv,
219 struct mlx5e_eth_addr_info *ai, int type,
220 void *flow_context, void *match_criteria)
222 u8 match_criteria_enable = 0;
226 u8 *match_criteria_dmac;
227 void *ft = priv->ft.main;
228 u32 *tirn = priv->tirn;
233 match_value = MLX5_ADDR_OF(flow_context, flow_context, match_value);
234 dmac = MLX5_ADDR_OF(fte_match_param, match_value,
235 outer_headers.dmac_47_16);
236 match_criteria_dmac = MLX5_ADDR_OF(fte_match_param, match_criteria,
237 outer_headers.dmac_47_16);
238 dest = MLX5_ADDR_OF(flow_context, flow_context, destination);
240 MLX5_SET(flow_context, flow_context, action,
241 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);
242 MLX5_SET(flow_context, flow_context, destination_list_size, 1);
243 MLX5_SET(dest_format_struct, dest, destination_type,
244 MLX5_FLOW_CONTEXT_DEST_TYPE_TIR);
247 case MLX5E_FULLMATCH:
248 match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
249 memset(match_criteria_dmac, 0xff, ETH_ALEN);
250 ether_addr_copy(dmac, ai->addr);
254 match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
255 match_criteria_dmac[0] = 0x01;
263 tt_vec = mlx5e_get_tt_vec(ai, type);
265 ft_ix = &ai->ft_ix[MLX5E_TT_ANY];
266 if (tt_vec & BIT(MLX5E_TT_ANY)) {
267 MLX5_SET(dest_format_struct, dest, destination_id,
269 err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
270 match_criteria, flow_context,
275 ai->tt_vec |= BIT(MLX5E_TT_ANY);
278 match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
279 MLX5_SET_TO_ONES(fte_match_param, match_criteria,
280 outer_headers.ethertype);
282 ft_ix = &ai->ft_ix[MLX5E_TT_IPV4];
283 if (tt_vec & BIT(MLX5E_TT_IPV4)) {
284 MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
286 MLX5_SET(dest_format_struct, dest, destination_id,
287 tirn[MLX5E_TT_IPV4]);
288 err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
289 match_criteria, flow_context,
294 ai->tt_vec |= BIT(MLX5E_TT_IPV4);
297 ft_ix = &ai->ft_ix[MLX5E_TT_IPV6];
298 if (tt_vec & BIT(MLX5E_TT_IPV6)) {
299 MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
301 MLX5_SET(dest_format_struct, dest, destination_id,
302 tirn[MLX5E_TT_IPV6]);
303 err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
304 match_criteria, flow_context,
309 ai->tt_vec |= BIT(MLX5E_TT_IPV6);
312 MLX5_SET_TO_ONES(fte_match_param, match_criteria,
313 outer_headers.ip_protocol);
314 MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol,
317 ft_ix = &ai->ft_ix[MLX5E_TT_IPV4_UDP];
318 if (tt_vec & BIT(MLX5E_TT_IPV4_UDP)) {
319 MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
321 MLX5_SET(dest_format_struct, dest, destination_id,
322 tirn[MLX5E_TT_IPV4_UDP]);
323 err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
324 match_criteria, flow_context,
329 ai->tt_vec |= BIT(MLX5E_TT_IPV4_UDP);
332 ft_ix = &ai->ft_ix[MLX5E_TT_IPV6_UDP];
333 if (tt_vec & BIT(MLX5E_TT_IPV6_UDP)) {
334 MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
336 MLX5_SET(dest_format_struct, dest, destination_id,
337 tirn[MLX5E_TT_IPV6_UDP]);
338 err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
339 match_criteria, flow_context,
344 ai->tt_vec |= BIT(MLX5E_TT_IPV6_UDP);
347 MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol,
350 ft_ix = &ai->ft_ix[MLX5E_TT_IPV4_TCP];
351 if (tt_vec & BIT(MLX5E_TT_IPV4_TCP)) {
352 MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
354 MLX5_SET(dest_format_struct, dest, destination_id,
355 tirn[MLX5E_TT_IPV4_TCP]);
356 err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
357 match_criteria, flow_context,
362 ai->tt_vec |= BIT(MLX5E_TT_IPV4_TCP);
365 ft_ix = &ai->ft_ix[MLX5E_TT_IPV6_TCP];
366 if (tt_vec & BIT(MLX5E_TT_IPV6_TCP)) {
367 MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
369 MLX5_SET(dest_format_struct, dest, destination_id,
370 tirn[MLX5E_TT_IPV6_TCP]);
371 err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
372 match_criteria, flow_context,
377 ai->tt_vec |= BIT(MLX5E_TT_IPV6_TCP);
383 mlx5e_del_eth_addr_from_flow_table(priv, ai);
388 static int mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv,
389 struct mlx5e_eth_addr_info *ai, int type)
395 flow_context = mlx5_vzalloc(MLX5_ST_SZ_BYTES(flow_context) +
396 MLX5_ST_SZ_BYTES(dest_format_struct));
397 match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
398 if (!flow_context || !match_criteria) {
399 netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
401 goto add_eth_addr_rule_out;
404 err = __mlx5e_add_eth_addr_rule(priv, ai, type, flow_context,
407 netdev_err(priv->netdev, "%s: failed\n", __func__);
409 add_eth_addr_rule_out:
410 kvfree(match_criteria);
411 kvfree(flow_context);
/* Kinds of VLAN steering rules kept in the VLAN flow table. */
enum mlx5e_vlan_rule_type {
	MLX5E_VLAN_RULE_TYPE_UNTAGGED,	/* frames with no VLAN tag */
	MLX5E_VLAN_RULE_TYPE_ANY_VID,	/* any tagged frame (filter off) */
	MLX5E_VLAN_RULE_TYPE_MATCH_VID,	/* a specific VLAN id */
};
421 static int mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
422 enum mlx5e_vlan_rule_type rule_type, u16 vid)
424 u8 match_criteria_enable = 0;
432 flow_context = mlx5_vzalloc(MLX5_ST_SZ_BYTES(flow_context) +
433 MLX5_ST_SZ_BYTES(dest_format_struct));
434 match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
435 if (!flow_context || !match_criteria) {
436 netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
438 goto add_vlan_rule_out;
440 match_value = MLX5_ADDR_OF(flow_context, flow_context, match_value);
441 dest = MLX5_ADDR_OF(flow_context, flow_context, destination);
443 MLX5_SET(flow_context, flow_context, action,
444 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);
445 MLX5_SET(flow_context, flow_context, destination_list_size, 1);
446 MLX5_SET(dest_format_struct, dest, destination_type,
447 MLX5_FLOW_CONTEXT_DEST_TYPE_FLOW_TABLE);
448 MLX5_SET(dest_format_struct, dest, destination_id,
449 mlx5_get_flow_table_id(priv->ft.main));
451 match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
452 MLX5_SET_TO_ONES(fte_match_param, match_criteria,
453 outer_headers.vlan_tag);
456 case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
457 ft_ix = &priv->vlan.untagged_rule_ft_ix;
459 case MLX5E_VLAN_RULE_TYPE_ANY_VID:
460 ft_ix = &priv->vlan.any_vlan_rule_ft_ix;
461 MLX5_SET(fte_match_param, match_value, outer_headers.vlan_tag,
464 default: /* MLX5E_VLAN_RULE_TYPE_MATCH_VID */
465 ft_ix = &priv->vlan.active_vlans_ft_ix[vid];
466 MLX5_SET(fte_match_param, match_value, outer_headers.vlan_tag,
468 MLX5_SET_TO_ONES(fte_match_param, match_criteria,
469 outer_headers.first_vid);
470 MLX5_SET(fte_match_param, match_value, outer_headers.first_vid,
475 err = mlx5_add_flow_table_entry(priv->ft.vlan, match_criteria_enable,
476 match_criteria, flow_context, ft_ix);
478 netdev_err(priv->netdev, "%s: failed\n", __func__);
481 kvfree(match_criteria);
482 kvfree(flow_context);
486 static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv,
487 enum mlx5e_vlan_rule_type rule_type, u16 vid)
490 case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
491 mlx5_del_flow_table_entry(priv->ft.vlan,
492 priv->vlan.untagged_rule_ft_ix);
494 case MLX5E_VLAN_RULE_TYPE_ANY_VID:
495 mlx5_del_flow_table_entry(priv->ft.vlan,
496 priv->vlan.any_vlan_rule_ft_ix);
498 case MLX5E_VLAN_RULE_TYPE_MATCH_VID:
499 mlx5_del_flow_table_entry(priv->ft.vlan,
500 priv->vlan.active_vlans_ft_ix[vid]);
505 void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv)
507 WARN_ON(!mutex_is_locked(&priv->state_lock));
509 if (priv->vlan.filter_disabled) {
510 priv->vlan.filter_disabled = false;
511 if (test_bit(MLX5E_STATE_OPENED, &priv->state))
512 mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
517 void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv)
519 WARN_ON(!mutex_is_locked(&priv->state_lock));
521 if (!priv->vlan.filter_disabled) {
522 priv->vlan.filter_disabled = true;
523 if (test_bit(MLX5E_STATE_OPENED, &priv->state))
524 mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
529 int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
532 struct mlx5e_priv *priv = netdev_priv(dev);
535 mutex_lock(&priv->state_lock);
537 set_bit(vid, priv->vlan.active_vlans);
538 if (test_bit(MLX5E_STATE_OPENED, &priv->state))
539 err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID,
542 mutex_unlock(&priv->state_lock);
547 int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
550 struct mlx5e_priv *priv = netdev_priv(dev);
552 mutex_lock(&priv->state_lock);
554 clear_bit(vid, priv->vlan.active_vlans);
555 if (test_bit(MLX5E_STATE_OPENED, &priv->state))
556 mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
558 mutex_unlock(&priv->state_lock);
563 int mlx5e_add_all_vlan_rules(struct mlx5e_priv *priv)
568 for_each_set_bit(vid, priv->vlan.active_vlans, VLAN_N_VID) {
569 err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID,
575 err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
579 if (priv->vlan.filter_disabled) {
580 err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
589 void mlx5e_del_all_vlan_rules(struct mlx5e_priv *priv)
593 if (priv->vlan.filter_disabled)
594 mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
596 mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
598 for_each_set_bit(vid, priv->vlan.active_vlans, VLAN_N_VID)
599 mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
/* Iterate over every node of an MLX5E_ETH_ADDR_HASH_SIZE-bucket hash table.
 * Uses the _safe hlist variant so the loop body may delete the current node.
 */
#define mlx5e_for_each_hash_node(hn, tmp, hash, i) \
	for (i = 0; i < MLX5E_ETH_ADDR_HASH_SIZE; i++) \
		hlist_for_each_entry_safe(hn, tmp, &hash[i], hlist)
606 static void mlx5e_execute_action(struct mlx5e_priv *priv,
607 struct mlx5e_eth_addr_hash_node *hn)
609 switch (hn->action) {
610 case MLX5E_ACTION_ADD:
611 mlx5e_add_eth_addr_rule(priv, &hn->ai, MLX5E_FULLMATCH);
612 hn->action = MLX5E_ACTION_NONE;
615 case MLX5E_ACTION_DEL:
616 mlx5e_del_eth_addr_from_flow_table(priv, &hn->ai);
617 mlx5e_del_eth_addr_from_hash(hn);
622 static void mlx5e_sync_netdev_addr(struct mlx5e_priv *priv)
624 struct net_device *netdev = priv->netdev;
625 struct netdev_hw_addr *ha;
627 netif_addr_lock_bh(netdev);
629 mlx5e_add_eth_addr_to_hash(priv->eth_addr.netdev_uc,
630 priv->netdev->dev_addr);
632 netdev_for_each_uc_addr(ha, netdev)
633 mlx5e_add_eth_addr_to_hash(priv->eth_addr.netdev_uc, ha->addr);
635 netdev_for_each_mc_addr(ha, netdev)
636 mlx5e_add_eth_addr_to_hash(priv->eth_addr.netdev_mc, ha->addr);
638 netif_addr_unlock_bh(netdev);
641 static void mlx5e_apply_netdev_addr(struct mlx5e_priv *priv)
643 struct mlx5e_eth_addr_hash_node *hn;
644 struct hlist_node *tmp;
647 mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_uc, i)
648 mlx5e_execute_action(priv, hn);
650 mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_mc, i)
651 mlx5e_execute_action(priv, hn);
654 static void mlx5e_handle_netdev_addr(struct mlx5e_priv *priv)
656 struct mlx5e_eth_addr_hash_node *hn;
657 struct hlist_node *tmp;
660 mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_uc, i)
661 hn->action = MLX5E_ACTION_DEL;
662 mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_mc, i)
663 hn->action = MLX5E_ACTION_DEL;
665 if (test_bit(MLX5E_STATE_OPENED, &priv->state))
666 mlx5e_sync_netdev_addr(priv);
668 mlx5e_apply_netdev_addr(priv);
671 void mlx5e_set_rx_mode_core(struct mlx5e_priv *priv)
673 struct mlx5e_eth_addr_db *ea = &priv->eth_addr;
674 struct net_device *ndev = priv->netdev;
676 bool rx_mode_enable = test_bit(MLX5E_STATE_OPENED, &priv->state);
677 bool promisc_enabled = rx_mode_enable && (ndev->flags & IFF_PROMISC);
678 bool allmulti_enabled = rx_mode_enable && (ndev->flags & IFF_ALLMULTI);
679 bool broadcast_enabled = rx_mode_enable;
681 bool enable_promisc = !ea->promisc_enabled && promisc_enabled;
682 bool disable_promisc = ea->promisc_enabled && !promisc_enabled;
683 bool enable_allmulti = !ea->allmulti_enabled && allmulti_enabled;
684 bool disable_allmulti = ea->allmulti_enabled && !allmulti_enabled;
685 bool enable_broadcast = !ea->broadcast_enabled && broadcast_enabled;
686 bool disable_broadcast = ea->broadcast_enabled && !broadcast_enabled;
689 mlx5e_add_eth_addr_rule(priv, &ea->promisc, MLX5E_PROMISC);
691 mlx5e_add_eth_addr_rule(priv, &ea->allmulti, MLX5E_ALLMULTI);
692 if (enable_broadcast)
693 mlx5e_add_eth_addr_rule(priv, &ea->broadcast, MLX5E_FULLMATCH);
695 mlx5e_handle_netdev_addr(priv);
697 if (disable_broadcast)
698 mlx5e_del_eth_addr_from_flow_table(priv, &ea->broadcast);
699 if (disable_allmulti)
700 mlx5e_del_eth_addr_from_flow_table(priv, &ea->allmulti);
702 mlx5e_del_eth_addr_from_flow_table(priv, &ea->promisc);
704 ea->promisc_enabled = promisc_enabled;
705 ea->allmulti_enabled = allmulti_enabled;
706 ea->broadcast_enabled = broadcast_enabled;
709 void mlx5e_set_rx_mode_work(struct work_struct *work)
711 struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
714 mutex_lock(&priv->state_lock);
715 if (test_bit(MLX5E_STATE_OPENED, &priv->state))
716 mlx5e_set_rx_mode_core(priv);
717 mutex_unlock(&priv->state_lock);
720 void mlx5e_init_eth_addr(struct mlx5e_priv *priv)
722 ether_addr_copy(priv->eth_addr.broadcast.addr, priv->netdev->broadcast);
725 static int mlx5e_create_main_flow_table(struct mlx5e_priv *priv)
727 struct mlx5_flow_table_group *g;
730 g = kcalloc(9, sizeof(*g), GFP_KERNEL);
735 g[0].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
736 MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria,
737 outer_headers.ethertype);
738 MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria,
739 outer_headers.ip_protocol);
742 g[1].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
743 MLX5_SET_TO_ONES(fte_match_param, g[1].match_criteria,
744 outer_headers.ethertype);
749 g[3].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
750 dmac = MLX5_ADDR_OF(fte_match_param, g[3].match_criteria,
751 outer_headers.dmac_47_16);
752 memset(dmac, 0xff, ETH_ALEN);
753 MLX5_SET_TO_ONES(fte_match_param, g[3].match_criteria,
754 outer_headers.ethertype);
755 MLX5_SET_TO_ONES(fte_match_param, g[3].match_criteria,
756 outer_headers.ip_protocol);
759 g[4].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
760 dmac = MLX5_ADDR_OF(fte_match_param, g[4].match_criteria,
761 outer_headers.dmac_47_16);
762 memset(dmac, 0xff, ETH_ALEN);
763 MLX5_SET_TO_ONES(fte_match_param, g[4].match_criteria,
764 outer_headers.ethertype);
767 g[5].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
768 dmac = MLX5_ADDR_OF(fte_match_param, g[5].match_criteria,
769 outer_headers.dmac_47_16);
770 memset(dmac, 0xff, ETH_ALEN);
773 g[6].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
774 dmac = MLX5_ADDR_OF(fte_match_param, g[6].match_criteria,
775 outer_headers.dmac_47_16);
777 MLX5_SET_TO_ONES(fte_match_param, g[6].match_criteria,
778 outer_headers.ethertype);
779 MLX5_SET_TO_ONES(fte_match_param, g[6].match_criteria,
780 outer_headers.ip_protocol);
783 g[7].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
784 dmac = MLX5_ADDR_OF(fte_match_param, g[7].match_criteria,
785 outer_headers.dmac_47_16);
787 MLX5_SET_TO_ONES(fte_match_param, g[7].match_criteria,
788 outer_headers.ethertype);
791 g[8].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
792 dmac = MLX5_ADDR_OF(fte_match_param, g[8].match_criteria,
793 outer_headers.dmac_47_16);
795 priv->ft.main = mlx5_create_flow_table(priv->mdev, 1,
796 MLX5_FLOW_TABLE_TYPE_NIC_RCV,
800 return priv->ft.main ? 0 : -ENOMEM;
803 static void mlx5e_destroy_main_flow_table(struct mlx5e_priv *priv)
805 mlx5_destroy_flow_table(priv->ft.main);
808 static int mlx5e_create_vlan_flow_table(struct mlx5e_priv *priv)
810 struct mlx5_flow_table_group *g;
812 g = kcalloc(2, sizeof(*g), GFP_KERNEL);
817 g[0].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
818 MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria,
819 outer_headers.vlan_tag);
820 MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria,
821 outer_headers.first_vid);
823 /* untagged + any vlan id */
825 g[1].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
826 MLX5_SET_TO_ONES(fte_match_param, g[1].match_criteria,
827 outer_headers.vlan_tag);
829 priv->ft.vlan = mlx5_create_flow_table(priv->mdev, 0,
830 MLX5_FLOW_TABLE_TYPE_NIC_RCV,
834 return priv->ft.vlan ? 0 : -ENOMEM;
837 static void mlx5e_destroy_vlan_flow_table(struct mlx5e_priv *priv)
839 mlx5_destroy_flow_table(priv->ft.vlan);
/* Create both flow tables (main first, then VLAN which forwards into it).
 * On VLAN table failure the main table is destroyed again -- standard
 * goto-unwind error handling.  Returns 0 or a negative errno.
 */
int mlx5e_open_flow_table(struct mlx5e_priv *priv)
{
	int err;

	err = mlx5e_create_main_flow_table(priv);
	if (err)
		return err;

	err = mlx5e_create_vlan_flow_table(priv);
	if (err)
		goto err_destroy_main_flow_table;

	return 0;

err_destroy_main_flow_table:
	mlx5e_destroy_main_flow_table(priv);

	return err;
}
/* Destroy both flow tables in reverse creation order (VLAN before main,
 * since the VLAN table forwards into the main table).
 */
void mlx5e_close_flow_table(struct mlx5e_priv *priv)
{
	mlx5e_destroy_vlan_flow_table(priv);
	mlx5e_destroy_main_flow_table(priv);
}