/* drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
 *
 * net/mlx5e: Handle matching on vlan priority for offloaded TC rules
 */
/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <net/flow_dissector.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_skbedit.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/device.h>
#include <linux/rhashtable.h>
#include <net/switchdev.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include "en.h"
#include "en_tc.h"
#include "eswitch.h"

/* One offloaded TC flow, keyed in the rhashtable by the TC filter
 * cookie. @attr is used only for e-switch (FDB) flows and stays NULL
 * for NIC flows.
 */
struct mlx5e_tc_flow {
        struct rhash_head       node;
        u64                     cookie;
        struct mlx5_flow_rule   *rule;
        struct mlx5_esw_flow_attr *attr;
};

#define MLX5E_TC_TABLE_NUM_ENTRIES 1024
#define MLX5E_TC_TABLE_NUM_GROUPS 4

/* Install a rule into the NIC RX TC flow table; the table itself is
 * created on first use.
 */
static struct mlx5_flow_rule *mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
                                                    struct mlx5_flow_spec *spec,
                                                    u32 action, u32 flow_tag)
{
        struct mlx5_core_dev *dev = priv->mdev;
        struct mlx5_flow_destination dest = { 0 };
        struct mlx5_fc *counter = NULL;
        struct mlx5_flow_rule *rule;
        bool table_created = false;

        if (action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
                dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
                dest.ft = priv->fs.vlan.ft.t;
        } else if (action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
                counter = mlx5_fc_create(dev, true);
                if (IS_ERR(counter))
                        return ERR_CAST(counter);

                dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
                dest.counter = counter;
        }

        if (IS_ERR_OR_NULL(priv->fs.tc.t)) {
                priv->fs.tc.t =
                        mlx5_create_auto_grouped_flow_table(priv->fs.ns,
                                                            MLX5E_TC_PRIO,
                                                            MLX5E_TC_TABLE_NUM_ENTRIES,
                                                            MLX5E_TC_TABLE_NUM_GROUPS,
                                                            0);
                if (IS_ERR(priv->fs.tc.t)) {
                        netdev_err(priv->netdev,
                                   "Failed to create tc offload table\n");
                        rule = ERR_CAST(priv->fs.tc.t);
                        goto err_create_ft;
                }

                table_created = true;
        }

        spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
        rule = mlx5_add_flow_rule(priv->fs.tc.t, spec,
                                  action, flow_tag,
                                  &dest);

        if (IS_ERR(rule))
                goto err_add_rule;

        return rule;

err_add_rule:
        if (table_created) {
                mlx5_destroy_flow_table(priv->fs.tc.t);
                priv->fs.tc.t = NULL;
        }
err_create_ft:
        mlx5_fc_destroy(dev, counter);

        return rule;
}

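/* Design note: the TC flow table is created lazily on the first
 * offloaded filter and sized by the MLX5E_TC_TABLE_NUM_* knobs above,
 * so priv->fs.tc.t stays NULL (and costs nothing) while TC offload is
 * unused; mlx5e_tc_del_flow() below tears it down again once the last
 * filter is removed.
 */
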
/* Install a rule into the e-switch FDB offloads table, applying any
 * vlan push/pop the flow's attributes ask for.
 */
static struct mlx5_flow_rule *mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
                                                    struct mlx5_flow_spec *spec,
                                                    struct mlx5_esw_flow_attr *attr)
{
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        int err;

        err = mlx5_eswitch_add_vlan_action(esw, attr);
        if (err)
                return ERR_PTR(err);

        return mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
}

/* Delete an offloaded rule and its counter; destroy the NIC TC table
 * once no filters remain.
 */
static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
                              struct mlx5_flow_rule *rule,
                              struct mlx5_esw_flow_attr *attr)
{
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        struct mlx5_fc *counter = NULL;

        counter = mlx5_flow_rule_counter(rule);

        if (esw && esw->mode == SRIOV_OFFLOADS)
                mlx5_eswitch_del_vlan_action(esw, attr);

        mlx5_del_flow_rule(rule);

        mlx5_fc_destroy(priv->mdev, counter);

        if (!mlx5e_tc_num_filters(priv) && (priv->fs.tc.t)) {
                mlx5_destroy_flow_table(priv->fs.tc.t);
                priv->fs.tc.t = NULL;
        }
}

/* Translate the flower match (each key paired with its mask) into the
 * outer-headers criteria/value of an mlx5 flow spec. Dissector keys
 * the hardware path does not handle are rejected with -EOPNOTSUPP.
 */
static int parse_cls_flower(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec,
                            struct tc_cls_flower_offload *f)
{
        void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
                                       outer_headers);
        void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
                                       outer_headers);
        u16 addr_type = 0;
        u8 ip_proto = 0;

        if (f->dissector->used_keys &
            ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
              BIT(FLOW_DISSECTOR_KEY_BASIC) |
              BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
              BIT(FLOW_DISSECTOR_KEY_VLAN) |
              BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
              BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
              BIT(FLOW_DISSECTOR_KEY_PORTS))) {
                netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n",
                            f->dissector->used_keys);
                return -EOPNOTSUPP;
        }

        if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
                struct flow_dissector_key_control *key =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_CONTROL,
                                                  f->key);
                addr_type = key->addr_type;
        }

        if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
                struct flow_dissector_key_basic *key =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_BASIC,
                                                  f->key);
                struct flow_dissector_key_basic *mask =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_BASIC,
                                                  f->mask);
                ip_proto = key->ip_proto;

                MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
                         ntohs(mask->n_proto));
                MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
                         ntohs(key->n_proto));

                MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
                         mask->ip_proto);
                MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
                         key->ip_proto);
        }

        if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
                struct flow_dissector_key_eth_addrs *key =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_ETH_ADDRS,
                                                  f->key);
                struct flow_dissector_key_eth_addrs *mask =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_ETH_ADDRS,
                                                  f->mask);

                ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
                                             dmac_47_16),
                                mask->dst);
                ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
                                             dmac_47_16),
                                key->dst);

                ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
                                             smac_47_16),
                                mask->src);
                ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
                                             smac_47_16),
                                key->src);
        }

        if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
                struct flow_dissector_key_vlan *key =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_VLAN,
                                                  f->key);
                struct flow_dissector_key_vlan *mask =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_VLAN,
                                                  f->mask);
                if (mask->vlan_id || mask->vlan_priority) {
                        MLX5_SET(fte_match_set_lyr_2_4, headers_c, vlan_tag, 1);
                        MLX5_SET(fte_match_set_lyr_2_4, headers_v, vlan_tag, 1);

                        MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid, mask->vlan_id);
                        MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, key->vlan_id);

                        MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_prio, mask->vlan_priority);
                        MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, key->vlan_priority);
                }
        }

        if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
                struct flow_dissector_key_ipv4_addrs *key =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
                                                  f->key);
                struct flow_dissector_key_ipv4_addrs *mask =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
                                                  f->mask);

                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
                                    src_ipv4_src_ipv6.ipv4_layout.ipv4),
                       &mask->src, sizeof(mask->src));
                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
                                    src_ipv4_src_ipv6.ipv4_layout.ipv4),
                       &key->src, sizeof(key->src));
                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
                                    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
                       &mask->dst, sizeof(mask->dst));
                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
                                    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
                       &key->dst, sizeof(key->dst));
        }

        if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
                struct flow_dissector_key_ipv6_addrs *key =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
                                                  f->key);
                struct flow_dissector_key_ipv6_addrs *mask =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
                                                  f->mask);

                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
                                    src_ipv4_src_ipv6.ipv6_layout.ipv6),
                       &mask->src, sizeof(mask->src));
                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
                                    src_ipv4_src_ipv6.ipv6_layout.ipv6),
                       &key->src, sizeof(key->src));

                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
                                    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
                       &mask->dst, sizeof(mask->dst));
                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
                                    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
                       &key->dst, sizeof(key->dst));
        }

        if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
                struct flow_dissector_key_ports *key =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_PORTS,
                                                  f->key);
                struct flow_dissector_key_ports *mask =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_PORTS,
                                                  f->mask);
                switch (ip_proto) {
                case IPPROTO_TCP:
                        MLX5_SET(fte_match_set_lyr_2_4, headers_c,
                                 tcp_sport, ntohs(mask->src));
                        MLX5_SET(fte_match_set_lyr_2_4, headers_v,
                                 tcp_sport, ntohs(key->src));

                        MLX5_SET(fte_match_set_lyr_2_4, headers_c,
                                 tcp_dport, ntohs(mask->dst));
                        MLX5_SET(fte_match_set_lyr_2_4, headers_v,
                                 tcp_dport, ntohs(key->dst));
                        break;

                case IPPROTO_UDP:
                        MLX5_SET(fte_match_set_lyr_2_4, headers_c,
                                 udp_sport, ntohs(mask->src));
                        MLX5_SET(fte_match_set_lyr_2_4, headers_v,
                                 udp_sport, ntohs(key->src));

                        MLX5_SET(fte_match_set_lyr_2_4, headers_c,
                                 udp_dport, ntohs(mask->dst));
                        MLX5_SET(fte_match_set_lyr_2_4, headers_v,
                                 udp_dport, ntohs(key->dst));
                        break;
                default:
                        netdev_err(priv->netdev,
                                   "Only UDP and TCP transport are supported\n");
                        return -EINVAL;
                }
        }

        return 0;
}

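/* Illustrative example (a sketch: the device name "eth0" and exact
 * iproute2 flower flags are assumptions, not taken from this file) of
 * a filter whose match parse_cls_flower() above can translate,
 * including the newly handled vlan priority:
 *
 *   tc filter add dev eth0 protocol 802.1Q parent ffff: \
 *       flower vlan_id 100 vlan_prio 3 vlan_ethtype ip \
 *       ip_proto tcp dst_port 80 \
 *       action drop
 *
 * The ethertype and ip_proto land in the fte_match_set_lyr_2_4 basic
 * fields, vlan_id/vlan_prio in first_vid/first_prio, and the TCP port
 * in tcp_dport, each key with its mask mirrored into the criteria.
 */
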
/* Parse the TC actions of a NIC flow. Exactly one action is accepted:
 * either gact drop (plus a HW counter when the device supports one) or
 * skbedit mark, whose 16-bit mark becomes the flow tag reported on RX.
 */
static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
                                u32 *action, u32 *flow_tag)
{
        const struct tc_action *a;
        LIST_HEAD(actions);

        if (tc_no_actions(exts))
                return -EINVAL;

        *flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
        *action = 0;

        tcf_exts_to_list(exts, &actions);
        list_for_each_entry(a, &actions, list) {
                /* Only support a single action per rule */
                if (*action)
                        return -EINVAL;

                if (is_tcf_gact_shot(a)) {
                        *action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
                        if (MLX5_CAP_FLOWTABLE(priv->mdev,
                                               flow_table_properties_nic_receive.flow_counter))
                                *action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
                        continue;
                }

                if (is_tcf_skbedit_mark(a)) {
                        u32 mark = tcf_skbedit_mark(a);

                        if (mark & ~MLX5E_TC_FLOW_ID_MASK) {
                                netdev_warn(priv->netdev, "Bad flow mark - only 16 bit is supported: 0x%x\n",
                                            mark);
                                return -EINVAL;
                        }

                        *flow_tag = mark;
                        *action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
                        continue;
                }

                return -EINVAL;
        }

        return 0;
}

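/* Illustrative usage (a sketch: "eth0" and the mark value are
 * assumptions) of a NIC-mode filter carrying the skbedit action parsed
 * above; the 16-bit mark becomes the flow tag:
 *
 *   tc filter add dev eth0 protocol ip parent ffff: \
 *       flower ip_proto udp \
 *       action skbedit mark 0x1234
 *
 * A "action drop" filter would instead set
 * MLX5_FLOW_CONTEXT_ACTION_DROP, adding a counter when the device
 * reports flow_counter support.
 */
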
/* Parse the TC actions of an e-switch (FDB) flow: gact drop, mirred
 * redirect to a representor on the same e-switch, and vlan push/pop.
 */
static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
                                struct mlx5_esw_flow_attr *attr)
{
        const struct tc_action *a;
        LIST_HEAD(actions);

        if (tc_no_actions(exts))
                return -EINVAL;

        memset(attr, 0, sizeof(*attr));
        attr->in_rep = priv->ppriv;

        tcf_exts_to_list(exts, &actions);
        list_for_each_entry(a, &actions, list) {
                if (is_tcf_gact_shot(a)) {
                        attr->action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
                                        MLX5_FLOW_CONTEXT_ACTION_COUNT;
                        continue;
                }

                if (is_tcf_mirred_redirect(a)) {
                        int ifindex = tcf_mirred_ifindex(a);
                        struct net_device *out_dev;
                        struct mlx5e_priv *out_priv;

                        out_dev = __dev_get_by_index(dev_net(priv->netdev), ifindex);
                        if (!out_dev) {
                                pr_err("no output device found for ifindex %d, can't offload forwarding\n",
                                       ifindex);
                                return -EINVAL;
                        }

                        if (!switchdev_port_same_parent_id(priv->netdev, out_dev)) {
                                pr_err("devices %s %s not on same switch HW, can't offload forwarding\n",
                                       priv->netdev->name, out_dev->name);
                                return -EINVAL;
                        }

                        attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
                        out_priv = netdev_priv(out_dev);
                        attr->out_rep = out_priv->ppriv;
                        continue;
                }

                if (is_tcf_vlan(a)) {
                        if (tcf_vlan_action(a) == VLAN_F_POP) {
                                attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
                        } else if (tcf_vlan_action(a) == VLAN_F_PUSH) {
                                if (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q))
                                        return -EOPNOTSUPP;

                                attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
                                attr->vlan = tcf_vlan_push_vid(a);
                        }
                        continue;
                }

                return -EINVAL;
        }
        return 0;
}

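/* Illustrative e-switch usage (a sketch: the representor netdev names
 * "vf0_rep"/"vf1_rep" are assumptions) combining the mirred and vlan
 * actions parsed above, forwarding between two VF representors with a
 * vlan pushed on the way out:
 *
 *   tc filter add dev vf0_rep protocol ip parent ffff: \
 *       flower ip_proto tcp \
 *       action vlan push id 100 \
 *       action mirred egress redirect dev vf1_rep
 *
 * Both netdevs must report the same switchdev parent id, i.e. hang
 * off the same e-switch, or the redirect is refused.
 */
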
/* Add or replace an offloaded flower filter. A lookup by cookie lets a
 * replace reuse the existing flow entry; the old HW rule is removed
 * only after the new one has been installed successfully.
 */
int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
                           struct tc_cls_flower_offload *f)
{
        struct mlx5e_tc_table *tc = &priv->fs.tc;
        int err = 0;
        bool fdb_flow = false;
        u32 flow_tag, action;
        struct mlx5e_tc_flow *flow;
        struct mlx5_flow_spec *spec;
        struct mlx5_flow_rule *old = NULL;
        struct mlx5_esw_flow_attr *old_attr = NULL;
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

        if (esw && esw->mode == SRIOV_OFFLOADS)
                fdb_flow = true;

        flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
                                      tc->ht_params);
        if (flow) {
                old = flow->rule;
                old_attr = flow->attr;
        } else {
                if (fdb_flow)
                        flow = kzalloc(sizeof(*flow) + sizeof(struct mlx5_esw_flow_attr),
                                       GFP_KERNEL);
                else
                        flow = kzalloc(sizeof(*flow), GFP_KERNEL);
        }

        spec = mlx5_vzalloc(sizeof(*spec));
        if (!spec || !flow) {
                err = -ENOMEM;
                goto err_free;
        }

        flow->cookie = f->cookie;

        err = parse_cls_flower(priv, spec, f);
        if (err < 0)
                goto err_free;

        if (fdb_flow) {
                flow->attr  = (struct mlx5_esw_flow_attr *)(flow + 1);
                err = parse_tc_fdb_actions(priv, f->exts, flow->attr);
                if (err < 0)
                        goto err_free;
                flow->rule = mlx5e_tc_add_fdb_flow(priv, spec, flow->attr);
        } else {
                err = parse_tc_nic_actions(priv, f->exts, &action, &flow_tag);
                if (err < 0)
                        goto err_free;
                flow->rule = mlx5e_tc_add_nic_flow(priv, spec, action, flow_tag);
        }

        if (IS_ERR(flow->rule)) {
                err = PTR_ERR(flow->rule);
                goto err_free;
        }

        err = rhashtable_insert_fast(&tc->ht, &flow->node,
                                     tc->ht_params);
        if (err)
                goto err_del_rule;

        if (old)
                mlx5e_tc_del_flow(priv, old, old_attr);

        goto out;

err_del_rule:
        mlx5_del_flow_rule(flow->rule);

err_free:
        if (!old)
                kfree(flow);
out:
        kvfree(spec);
        return err;
}

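/* Informational sketch of the offload lifecycle: the cls_flower
 * classifier reaches these entry points through the driver's
 * ndo_setup_tc() handler — mlx5e_configure_flower() for
 * TC_CLSFLOWER_REPLACE, mlx5e_delete_flower() for
 * TC_CLSFLOWER_DESTROY and mlx5e_stats_flower() for
 * TC_CLSFLOWER_STATS.
 */
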
/* Remove an offloaded filter: unhash it, delete the HW rule and free
 * the flow.
 */
int mlx5e_delete_flower(struct mlx5e_priv *priv,
                        struct tc_cls_flower_offload *f)
{
        struct mlx5e_tc_flow *flow;
        struct mlx5e_tc_table *tc = &priv->fs.tc;

        flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
                                      tc->ht_params);
        if (!flow)
                return -EINVAL;

        rhashtable_remove_fast(&tc->ht, &flow->node, tc->ht_params);

        mlx5e_tc_del_flow(priv, flow->rule, flow->attr);

        kfree(flow);

        return 0;
}

/* Report the cached HW counter (bytes, packets, last use) of an
 * offloaded filter back to its TC actions.
 */
int mlx5e_stats_flower(struct mlx5e_priv *priv,
                       struct tc_cls_flower_offload *f)
{
        struct mlx5e_tc_table *tc = &priv->fs.tc;
        struct mlx5e_tc_flow *flow;
        struct tc_action *a;
        struct mlx5_fc *counter;
        LIST_HEAD(actions);
        u64 bytes;
        u64 packets;
        u64 lastuse;

        flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
                                      tc->ht_params);
        if (!flow)
                return -EINVAL;

        counter = mlx5_flow_rule_counter(flow->rule);
        if (!counter)
                return 0;

        mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);

        tcf_exts_to_list(f->exts, &actions);
        list_for_each_entry(a, &actions, list)
                tcf_action_stats_update(a, bytes, packets, lastuse);

        return 0;
}

static const struct rhashtable_params mlx5e_tc_flow_ht_params = {
        .head_offset = offsetof(struct mlx5e_tc_flow, node),
        .key_offset = offsetof(struct mlx5e_tc_flow, cookie),
        .key_len = sizeof(((struct mlx5e_tc_flow *)0)->cookie),
        .automatic_shrinking = true,
};

int mlx5e_tc_init(struct mlx5e_priv *priv)
{
        struct mlx5e_tc_table *tc = &priv->fs.tc;

        tc->ht_params = mlx5e_tc_flow_ht_params;
        return rhashtable_init(&tc->ht, &tc->ht_params);
}

/* rhashtable_free_and_destroy() callback: release the HW rule behind
 * each flow still hashed at cleanup time.
 */
static void _mlx5e_tc_del_flow(void *ptr, void *arg)
{
        struct mlx5e_tc_flow *flow = ptr;
        struct mlx5e_priv *priv = arg;

        mlx5e_tc_del_flow(priv, flow->rule, flow->attr);
        kfree(flow);
}

void mlx5e_tc_cleanup(struct mlx5e_priv *priv)
{
        struct mlx5e_tc_table *tc = &priv->fs.tc;

        rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, priv);

        if (!IS_ERR_OR_NULL(tc->t)) {
                mlx5_destroy_flow_table(tc->t);
                tc->t = NULL;
        }
}