karo-tx-linux.git / drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
net/mlx5e: Add basic TC tunnel set action for SRIOV offloads
/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <net/flow_dissector.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_skbedit.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/device.h>
#include <linux/rhashtable.h>
#include <net/switchdev.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>
#include <net/vxlan.h>
#include "en.h"
#include "en_tc.h"
#include "eswitch.h"
#include "vxlan.h"

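/* Per-filter offload state, keyed in the rhashtable by the TC flower cookie */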
struct mlx5e_tc_flow {
        struct rhash_head       node;
        u64                     cookie;
        struct mlx5_flow_handle *rule;
        struct list_head        encap; /* flows sharing the same encap */
        struct mlx5_esw_flow_attr *attr;
};

enum {
        MLX5_HEADER_TYPE_VXLAN = 0x0,
        MLX5_HEADER_TYPE_NVGRE = 0x1,
};

#define MLX5E_TC_TABLE_NUM_ENTRIES 1024
#define MLX5E_TC_TABLE_NUM_GROUPS 4

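/* Install a rule into the NIC RX TC flow table, creating the table on
 * first use. The rule either forwards to the vlan flow table (tagged
 * with flow_tag) or drops, with a flow counter attached for drops when
 * the device supports one.
 */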
static struct mlx5_flow_handle *
mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
                      struct mlx5_flow_spec *spec,
                      u32 action, u32 flow_tag)
{
        struct mlx5_core_dev *dev = priv->mdev;
        struct mlx5_flow_destination dest = { 0 };
        struct mlx5_flow_act flow_act = {
                .action = action,
                .flow_tag = flow_tag,
                .encap_id = 0,
        };
        struct mlx5_fc *counter = NULL;
        struct mlx5_flow_handle *rule;
        bool table_created = false;

        if (action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
                dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
                dest.ft = priv->fs.vlan.ft.t;
        } else if (action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
                counter = mlx5_fc_create(dev, true);
                if (IS_ERR(counter))
                        return ERR_CAST(counter);

                dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
                dest.counter = counter;
        }

        if (IS_ERR_OR_NULL(priv->fs.tc.t)) {
                priv->fs.tc.t =
                        mlx5_create_auto_grouped_flow_table(priv->fs.ns,
                                                            MLX5E_TC_PRIO,
                                                            MLX5E_TC_TABLE_NUM_ENTRIES,
                                                            MLX5E_TC_TABLE_NUM_GROUPS,
                                                            0, 0);
                if (IS_ERR(priv->fs.tc.t)) {
                        netdev_err(priv->netdev,
                                   "Failed to create tc offload table\n");
                        rule = ERR_CAST(priv->fs.tc.t);
                        goto err_create_ft;
                }

                table_created = true;
        }

        spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
        rule = mlx5_add_flow_rules(priv->fs.tc.t, spec, &flow_act, &dest, 1);

        if (IS_ERR(rule))
                goto err_add_rule;

        return rule;

err_add_rule:
        if (table_created) {
                mlx5_destroy_flow_table(priv->fs.tc.t);
                priv->fs.tc.t = NULL;
        }
err_create_ft:
        mlx5_fc_destroy(dev, counter);

        return rule;
}

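/* Offload the rule into the eswitch FDB tables (SRIOV switchdev mode) */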
static struct mlx5_flow_handle *
mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
                      struct mlx5_flow_spec *spec,
                      struct mlx5_esw_flow_attr *attr)
{
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        int err;

        err = mlx5_eswitch_add_vlan_action(esw, attr);
        if (err)
                return ERR_PTR(err);

        return mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
}

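/* Tear down an offloaded flow: undo the eswitch vlan action if needed,
 * delete the rule, free its counter, and destroy the NIC TC flow table
 * once the last filter is gone.
 */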
static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
                              struct mlx5_flow_handle *rule,
                              struct mlx5_esw_flow_attr *attr)
{
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        struct mlx5_fc *counter = NULL;

        counter = mlx5_flow_rule_counter(rule);

        if (esw && esw->mode == SRIOV_OFFLOADS)
                mlx5_eswitch_del_vlan_action(esw, attr);

        mlx5_del_flow_rules(rule);

        mlx5_fc_destroy(priv->mdev, counter);

        if (!mlx5e_tc_num_filters(priv) && priv->fs.tc.t) {
                mlx5_destroy_flow_table(priv->fs.tc.t);
                priv->fs.tc.t = NULL;
        }
}

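/* Match on the VXLAN tunnel: UDP as the outer IP protocol plus, when
 * given, the VNI from the tunnel key id.
 */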
static void parse_vxlan_attr(struct mlx5_flow_spec *spec,
                             struct tc_cls_flower_offload *f)
{
        void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
                                       outer_headers);
        void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
                                       outer_headers);
        void *misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
                                    misc_parameters);
        void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
                                    misc_parameters);

        MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_protocol);
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);

        if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
                struct flow_dissector_key_keyid *key =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_ENC_KEYID,
                                                  f->key);
                struct flow_dissector_key_keyid *mask =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_ENC_KEYID,
                                                  f->mask);
                MLX5_SET(fte_match_set_misc, misc_c, vxlan_vni,
                         be32_to_cpu(mask->keyid));
                MLX5_SET(fte_match_set_misc, misc_v, vxlan_vni,
                         be32_to_cpu(key->keyid));
        }
}

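/* Build the outer-header match for a tunnel (decap) flow: UDP dst port,
 * optional outer IPv4 addresses, the netdev DMAC, and no IP fragments.
 */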
static int parse_tunnel_attr(struct mlx5e_priv *priv,
                             struct mlx5_flow_spec *spec,
                             struct tc_cls_flower_offload *f)
{
        void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
                                       outer_headers);
        void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
                                       outer_headers);

        if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
                struct flow_dissector_key_ports *key =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_ENC_PORTS,
                                                  f->key);
                struct flow_dissector_key_ports *mask =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_ENC_PORTS,
                                                  f->mask);

                /* Full udp dst port must be given */
                if (memchr_inv(&mask->dst, 0xff, sizeof(mask->dst)))
                        return -EOPNOTSUPP;

                /* udp src port isn't supported */
                if (memchr_inv(&mask->src, 0, sizeof(mask->src)))
                        return -EOPNOTSUPP;

                if (mlx5e_vxlan_lookup_port(priv, be16_to_cpu(key->dst)) &&
                    MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap))
                        parse_vxlan_attr(spec, f);
                else
                        return -EOPNOTSUPP;

                MLX5_SET(fte_match_set_lyr_2_4, headers_c,
                         udp_dport, ntohs(mask->dst));
                MLX5_SET(fte_match_set_lyr_2_4, headers_v,
                         udp_dport, ntohs(key->dst));
        } else { /* udp dst port must be given */
                return -EOPNOTSUPP;
        }

        if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
                struct flow_dissector_key_ipv4_addrs *key =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
                                                  f->key);
                struct flow_dissector_key_ipv4_addrs *mask =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
                                                  f->mask);
                MLX5_SET(fte_match_set_lyr_2_4, headers_c,
                         src_ipv4_src_ipv6.ipv4_layout.ipv4,
                         ntohl(mask->src));
                MLX5_SET(fte_match_set_lyr_2_4, headers_v,
                         src_ipv4_src_ipv6.ipv4_layout.ipv4,
                         ntohl(key->src));

                MLX5_SET(fte_match_set_lyr_2_4, headers_c,
                         dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
                         ntohl(mask->dst));
                MLX5_SET(fte_match_set_lyr_2_4, headers_v,
                         dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
                         ntohl(key->dst));
        }

        MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IP);

        /* Enforce DMAC when offloading incoming tunneled flows.
         * Flow counters require a match on the DMAC.
         */
        MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_47_16);
        MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_15_0);
        ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
                                     dmac_47_16), priv->netdev->dev_addr);

        /* let software handle IP fragments */
        MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 0);

        return 0;
}

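/* Translate the flower match (dissector keys) into an mlx5 flow spec */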
static int parse_cls_flower(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec,
                            struct tc_cls_flower_offload *f)
{
        void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
                                       outer_headers);
        void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
                                       outer_headers);
        u16 addr_type = 0;
        u8 ip_proto = 0;

        if (f->dissector->used_keys &
            ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
              BIT(FLOW_DISSECTOR_KEY_BASIC) |
              BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
              BIT(FLOW_DISSECTOR_KEY_VLAN) |
              BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
              BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
              BIT(FLOW_DISSECTOR_KEY_PORTS) |
              BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
              BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
              BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
              BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) |
              BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL))) {
                netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n",
                            f->dissector->used_keys);
                return -EOPNOTSUPP;
        }

        if ((dissector_uses_key(f->dissector,
                                FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) ||
             dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID) ||
             dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) &&
            dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
                struct flow_dissector_key_control *key =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_ENC_CONTROL,
                                                  f->key);
                switch (key->addr_type) {
                case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
                        if (parse_tunnel_attr(priv, spec, f))
                                return -EOPNOTSUPP;
                        break;
                default:
                        return -EOPNOTSUPP;
                }

                /* In decap flow, header pointers should point to the inner
                 * headers, outer headers were already set by parse_tunnel_attr
                 */
                headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
                                         inner_headers);
                headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
                                         inner_headers);
        }

        if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
                struct flow_dissector_key_control *key =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_CONTROL,
                                                  f->key);
                addr_type = key->addr_type;
        }

        if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
                struct flow_dissector_key_basic *key =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_BASIC,
                                                  f->key);
                struct flow_dissector_key_basic *mask =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_BASIC,
                                                  f->mask);
                ip_proto = key->ip_proto;

                MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
                         ntohs(mask->n_proto));
                MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
                         ntohs(key->n_proto));

                MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
                         mask->ip_proto);
                MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
                         key->ip_proto);
        }

        if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
                struct flow_dissector_key_eth_addrs *key =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_ETH_ADDRS,
                                                  f->key);
                struct flow_dissector_key_eth_addrs *mask =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_ETH_ADDRS,
                                                  f->mask);

                ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
                                             dmac_47_16),
                                mask->dst);
                ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
                                             dmac_47_16),
                                key->dst);

                ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
                                             smac_47_16),
                                mask->src);
                ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
                                             smac_47_16),
                                key->src);
        }

        if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
                struct flow_dissector_key_vlan *key =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_VLAN,
                                                  f->key);
                struct flow_dissector_key_vlan *mask =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_VLAN,
                                                  f->mask);
                if (mask->vlan_id) {
                        MLX5_SET(fte_match_set_lyr_2_4, headers_c, vlan_tag, 1);
                        MLX5_SET(fte_match_set_lyr_2_4, headers_v, vlan_tag, 1);

                        MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid,
                                 mask->vlan_id);
                        MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid,
                                 key->vlan_id);
                }
        }

        if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
                struct flow_dissector_key_ipv4_addrs *key =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
                                                  f->key);
                struct flow_dissector_key_ipv4_addrs *mask =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
                                                  f->mask);

                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
                                    src_ipv4_src_ipv6.ipv4_layout.ipv4),
                       &mask->src, sizeof(mask->src));
                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
                                    src_ipv4_src_ipv6.ipv4_layout.ipv4),
                       &key->src, sizeof(key->src));
                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
                                    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
                       &mask->dst, sizeof(mask->dst));
                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
                                    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
                       &key->dst, sizeof(key->dst));
        }

        if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
                struct flow_dissector_key_ipv6_addrs *key =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
                                                  f->key);
                struct flow_dissector_key_ipv6_addrs *mask =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
                                                  f->mask);

                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
                                    src_ipv4_src_ipv6.ipv6_layout.ipv6),
                       &mask->src, sizeof(mask->src));
                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
                                    src_ipv4_src_ipv6.ipv6_layout.ipv6),
                       &key->src, sizeof(key->src));

                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
                                    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
                       &mask->dst, sizeof(mask->dst));
                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
                                    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
                       &key->dst, sizeof(key->dst));
        }

        if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
                struct flow_dissector_key_ports *key =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_PORTS,
                                                  f->key);
                struct flow_dissector_key_ports *mask =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_PORTS,
                                                  f->mask);
                switch (ip_proto) {
                case IPPROTO_TCP:
                        MLX5_SET(fte_match_set_lyr_2_4, headers_c,
                                 tcp_sport, ntohs(mask->src));
                        MLX5_SET(fte_match_set_lyr_2_4, headers_v,
                                 tcp_sport, ntohs(key->src));

                        MLX5_SET(fte_match_set_lyr_2_4, headers_c,
                                 tcp_dport, ntohs(mask->dst));
                        MLX5_SET(fte_match_set_lyr_2_4, headers_v,
                                 tcp_dport, ntohs(key->dst));
                        break;

                case IPPROTO_UDP:
                        MLX5_SET(fte_match_set_lyr_2_4, headers_c,
                                 udp_sport, ntohs(mask->src));
                        MLX5_SET(fte_match_set_lyr_2_4, headers_v,
                                 udp_sport, ntohs(key->src));

                        MLX5_SET(fte_match_set_lyr_2_4, headers_c,
                                 udp_dport, ntohs(mask->dst));
                        MLX5_SET(fte_match_set_lyr_2_4, headers_v,
                                 udp_dport, ntohs(key->dst));
                        break;
                default:
                        netdev_err(priv->netdev,
                                   "Only UDP and TCP transport are supported\n");
                        return -EINVAL;
                }
        }

        return 0;
}

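/* Parse TC actions for a NIC flow; a single gact drop or skbedit mark
 * action per rule is supported.
 */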
static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
                                u32 *action, u32 *flow_tag)
{
        const struct tc_action *a;
        LIST_HEAD(actions);

        if (tc_no_actions(exts))
                return -EINVAL;

        *flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
        *action = 0;

        tcf_exts_to_list(exts, &actions);
        list_for_each_entry(a, &actions, list) {
                /* Only support a single action per rule */
                if (*action)
                        return -EINVAL;

                if (is_tcf_gact_shot(a)) {
                        *action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
                        if (MLX5_CAP_FLOWTABLE(priv->mdev,
                                               flow_table_properties_nic_receive.flow_counter))
                                *action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
                        continue;
                }

                if (is_tcf_skbedit_mark(a)) {
                        u32 mark = tcf_skbedit_mark(a);

                        if (mark & ~MLX5E_TC_FLOW_ID_MASK) {
                                netdev_warn(priv->netdev, "Bad flow mark - only 16 bit is supported: 0x%x\n",
                                            mark);
                                return -EINVAL;
                        }

                        *flow_tag = mark;
                        *action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
                        continue;
                }

                return -EINVAL;
        }

        return 0;
}

static inline int cmp_encap_info(struct mlx5_encap_info *a,
                                 struct mlx5_encap_info *b)
{
        return memcmp(a, b, sizeof(*a));
}

static inline int hash_encap_info(struct mlx5_encap_info *info)
{
        return jhash(info, sizeof(*info), 0);
}

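/* Resolve the IPv4 route and neighbour towards the tunnel destination;
 * the egress device must share the HW e-switch with our netdev.
 */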
static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
                                   struct net_device *mirred_dev,
                                   struct net_device **out_dev,
                                   struct flowi4 *fl4,
                                   struct neighbour **out_n,
                                   __be32 *saddr,
                                   int *out_ttl)
{
        struct rtable *rt;
        struct neighbour *n = NULL;
        int ttl;

#if IS_ENABLED(CONFIG_INET)
        rt = ip_route_output_key(dev_net(mirred_dev), fl4);
        if (IS_ERR(rt)) {
                pr_warn("%s: no route to %pI4\n", __func__, &fl4->daddr);
                return -EOPNOTSUPP;
        }
#else
        return -EOPNOTSUPP;
#endif

        if (!switchdev_port_same_parent_id(priv->netdev, rt->dst.dev)) {
                pr_warn("%s: Can't offload the flow, netdevices aren't on the same HW e-switch\n",
                        __func__);
                ip_rt_put(rt);
                return -EOPNOTSUPP;
        }

        ttl = ip4_dst_hoplimit(&rt->dst);
        n = dst_neigh_lookup(&rt->dst, &fl4->daddr);
        *out_dev = rt->dst.dev; /* grab before ip_rt_put() drops our route reference */
        ip_rt_put(rt);
        if (!n)
                return -ENOMEM;

        *out_n = n;
        *saddr = fl4->saddr;
        *out_ttl = ttl;

        return 0;
}

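/* Write a static Ethernet/IPv4/UDP/VXLAN encap header into buf and
 * return its size; length and checksum fields are left zero.
 */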
static int gen_vxlan_header_ipv4(struct net_device *out_dev,
                                 char buf[],
                                 unsigned char h_dest[ETH_ALEN],
                                 int ttl,
                                 __be32 daddr,
                                 __be32 saddr,
                                 __be16 udp_dst_port,
                                 __be32 vx_vni)
{
        int encap_size = VXLAN_HLEN + sizeof(struct iphdr) + ETH_HLEN;
        struct ethhdr *eth = (struct ethhdr *)buf;
        struct iphdr  *ip = (struct iphdr *)((char *)eth + sizeof(struct ethhdr));
        struct udphdr *udp = (struct udphdr *)((char *)ip + sizeof(struct iphdr));
        struct vxlanhdr *vxh = (struct vxlanhdr *)((char *)udp + sizeof(struct udphdr));

        memset(buf, 0, encap_size);

        ether_addr_copy(eth->h_dest, h_dest);
        ether_addr_copy(eth->h_source, out_dev->dev_addr);
        eth->h_proto = htons(ETH_P_IP);

        ip->daddr = daddr;
        ip->saddr = saddr;

        ip->ttl = ttl;
        ip->protocol = IPPROTO_UDP;
        ip->version = 0x4;
        ip->ihl = 0x5;

        udp->dest = udp_dst_port;
        vxh->vx_flags = VXLAN_HF_VNI;
        vxh->vx_vni = vxlan_vni_field(vx_vni);

        return encap_size;
}

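/* Resolve the route and neighbour for the encap destination, build the
 * encap header and allocate an encap_id for it in the device.
 */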
static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
                                          struct net_device *mirred_dev,
                                          struct mlx5_encap_entry *e,
                                          struct net_device **out_dev)
{
        int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
        struct flowi4 fl4 = {};
        struct neighbour *n;
        char *encap_header;
        int encap_size;
        __be32 saddr;
        int ttl;
        int err;

        encap_header = kzalloc(max_encap_size, GFP_KERNEL);
        if (!encap_header)
                return -ENOMEM;

        switch (e->tunnel_type) {
        case MLX5_HEADER_TYPE_VXLAN:
                fl4.flowi4_proto = IPPROTO_UDP;
                fl4.fl4_dport = e->tun_info.tp_dst;
                break;
        default:
                err = -EOPNOTSUPP;
                goto out;
        }
        fl4.daddr = e->tun_info.daddr;

        err = mlx5e_route_lookup_ipv4(priv, mirred_dev, out_dev,
                                      &fl4, &n, &saddr, &ttl);
        if (err)
                goto out;

        if (!(n->nud_state & NUD_VALID)) {
                err = -EOPNOTSUPP;
                goto out_release;
        }

        neigh_ha_snapshot(e->h_dest, n, *out_dev);

        switch (e->tunnel_type) {
        case MLX5_HEADER_TYPE_VXLAN:
                encap_size = gen_vxlan_header_ipv4(*out_dev, encap_header,
                                                   e->h_dest, ttl,
                                                   e->tun_info.daddr,
                                                   saddr, e->tun_info.tp_dst,
                                                   e->tun_info.tun_id);
                break;
        default:
                err = -EOPNOTSUPP;
                goto out_release;
        }

        err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
                               encap_size, encap_header, &e->encap_id);
        if (err)
                goto out_release;

        /* the neighbour reference is dropped when the encap is detached */
        e->n = n;
        e->out_dev = *out_dev;
        kfree(encap_header);
        return 0;

out_release:
        neigh_release(n);
out:
        kfree(encap_header);
        return err;
}

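/* Find or create the encap entry for this tunnel destination and attach
 * it to the flow attributes; entries are shared between flows with the
 * same tunnel info.
 */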
static int mlx5e_attach_encap(struct mlx5e_priv *priv,
                              struct ip_tunnel_info *tun_info,
                              struct net_device *mirred_dev,
                              struct mlx5_esw_flow_attr *attr)
{
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        unsigned short family = ip_tunnel_info_af(tun_info);
        struct ip_tunnel_key *key = &tun_info->key;
        struct mlx5_encap_info info;
        struct mlx5_encap_entry *e;
        struct net_device *out_dev;
        uintptr_t hash_key;
        bool found = false;
        int tunnel_type;
        int err;

        /* udp dst port must be given */
        if (!memchr_inv(&key->tp_dst, 0, sizeof(key->tp_dst)))
                return -EOPNOTSUPP;

        if (mlx5e_vxlan_lookup_port(priv, be16_to_cpu(key->tp_dst)) &&
            MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) {
                info.tp_dst = key->tp_dst;
                info.tun_id = tunnel_id_to_key32(key->tun_id);
                tunnel_type = MLX5_HEADER_TYPE_VXLAN;
        } else {
                return -EOPNOTSUPP;
        }

        switch (family) {
        case AF_INET:
                info.daddr = key->u.ipv4.dst;
                break;
        default:
                return -EOPNOTSUPP;
        }

        hash_key = hash_encap_info(&info);

        hash_for_each_possible_rcu(esw->offloads.encap_tbl, e,
                                   encap_hlist, hash_key) {
                if (!cmp_encap_info(&e->tun_info, &info)) {
                        found = true;
                        break;
                }
        }

        if (found) {
                attr->encap = e;
                return 0;
        }

        e = kzalloc(sizeof(*e), GFP_KERNEL);
        if (!e)
                return -ENOMEM;

        e->tun_info = info;
        e->tunnel_type = tunnel_type;
        INIT_LIST_HEAD(&e->flows);

        err = mlx5e_create_encap_header_ipv4(priv, mirred_dev, e, &out_dev);
        if (err)
                goto out_err;

        attr->encap = e;
        hash_add_rcu(esw->offloads.encap_tbl, &e->encap_hlist, hash_key);

        return err;

out_err:
        kfree(e);
        return err;
}

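/* Parse TC actions for an eswitch (FDB) flow: drop, mirred redirect
 * (with or without a preceding tunnel set, i.e. encap), vlan push/pop
 * and tunnel release (decap).
 */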
static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
                                struct mlx5e_tc_flow *flow)
{
        struct mlx5_esw_flow_attr *attr = flow->attr;
        struct ip_tunnel_info *info = NULL;
        const struct tc_action *a;
        LIST_HEAD(actions);
        bool encap = false;
        int err;

        if (tc_no_actions(exts))
                return -EINVAL;

        memset(attr, 0, sizeof(*attr));
        attr->in_rep = priv->ppriv;

        tcf_exts_to_list(exts, &actions);
        list_for_each_entry(a, &actions, list) {
                if (is_tcf_gact_shot(a)) {
                        attr->action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
                                        MLX5_FLOW_CONTEXT_ACTION_COUNT;
                        continue;
                }

                if (is_tcf_mirred_egress_redirect(a)) {
                        int ifindex = tcf_mirred_ifindex(a);
                        struct net_device *out_dev;
                        struct mlx5e_priv *out_priv;

                        out_dev = __dev_get_by_index(dev_net(priv->netdev), ifindex);

                        if (switchdev_port_same_parent_id(priv->netdev,
                                                          out_dev)) {
                                attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
                                        MLX5_FLOW_CONTEXT_ACTION_COUNT;
                                out_priv = netdev_priv(out_dev);
                                attr->out_rep = out_priv->ppriv;
                        } else if (encap) {
                                err = mlx5e_attach_encap(priv, info,
                                                         out_dev, attr);
                                if (err)
                                        return err;
                                list_add(&flow->encap, &attr->encap->flows);
                                attr->action |= MLX5_FLOW_CONTEXT_ACTION_ENCAP |
                                        MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
                                        MLX5_FLOW_CONTEXT_ACTION_COUNT;
                                out_priv = netdev_priv(attr->encap->out_dev);
                                attr->out_rep = out_priv->ppriv;
                        } else {
                                pr_err("devices %s %s not on same switch HW, can't offload forwarding\n",
                                       priv->netdev->name, out_dev->name);
                                return -EINVAL;
                        }
                        continue;
                }

                if (is_tcf_tunnel_set(a)) {
                        info = tcf_tunnel_info(a);
                        if (info)
                                encap = true;
                        else
                                return -EOPNOTSUPP;
                        continue;
                }

                if (is_tcf_vlan(a)) {
                        if (tcf_vlan_action(a) == VLAN_F_POP) {
                                attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
                        } else if (tcf_vlan_action(a) == VLAN_F_PUSH) {
                                if (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q))
                                        return -EOPNOTSUPP;

                                attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
                                attr->vlan = tcf_vlan_push_vid(a);
                        }
                        continue;
                }

                if (is_tcf_tunnel_release(a)) {
                        attr->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
                        continue;
                }

                return -EINVAL;
        }
        return 0;
}

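/* Add or replace a flower classifier offload. The flow is keyed by the
 * TC cookie and goes to the eswitch FDB in SRIOV offloads mode, or to
 * the NIC TC flow table otherwise.
 */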
int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
                           struct tc_cls_flower_offload *f)
{
        struct mlx5e_tc_table *tc = &priv->fs.tc;
        int err = 0;
        bool fdb_flow = false;
        u32 flow_tag, action;
        struct mlx5e_tc_flow *flow;
        struct mlx5_flow_spec *spec;
        struct mlx5_flow_handle *old = NULL;
        struct mlx5_esw_flow_attr *old_attr = NULL;
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

        if (esw && esw->mode == SRIOV_OFFLOADS)
                fdb_flow = true;

        flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
                                      tc->ht_params);
        if (flow) {
                old = flow->rule;
                old_attr = flow->attr;
        } else {
                if (fdb_flow)
                        flow = kzalloc(sizeof(*flow) + sizeof(struct mlx5_esw_flow_attr),
                                       GFP_KERNEL);
                else
                        flow = kzalloc(sizeof(*flow), GFP_KERNEL);
        }

        spec = mlx5_vzalloc(sizeof(*spec));
        if (!spec || !flow) {
                err = -ENOMEM;
                goto err_free;
        }

        flow->cookie = f->cookie;

        err = parse_cls_flower(priv, spec, f);
        if (err < 0)
                goto err_free;

        if (fdb_flow) {
                flow->attr = (struct mlx5_esw_flow_attr *)(flow + 1);
                err = parse_tc_fdb_actions(priv, f->exts, flow);
                if (err < 0)
                        goto err_free;
                flow->rule = mlx5e_tc_add_fdb_flow(priv, spec, flow->attr);
        } else {
                err = parse_tc_nic_actions(priv, f->exts, &action, &flow_tag);
                if (err < 0)
                        goto err_free;
                flow->rule = mlx5e_tc_add_nic_flow(priv, spec, action, flow_tag);
        }

        if (IS_ERR(flow->rule)) {
                err = PTR_ERR(flow->rule);
                goto err_free;
        }

        err = rhashtable_insert_fast(&tc->ht, &flow->node,
                                     tc->ht_params);
        if (err)
                goto err_del_rule;

        if (old)
                mlx5e_tc_del_flow(priv, old, old_attr);

        goto out;

err_del_rule:
        mlx5_del_flow_rules(flow->rule);

err_free:
        if (!old)
                kfree(flow);
out:
        kvfree(spec);
        return err;
}

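/* Drop the flow from its encap entry; once the last flow using the
 * entry is gone, free the device encap_id and the entry itself.
 */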
static void mlx5e_detach_encap(struct mlx5e_priv *priv,
                               struct mlx5e_tc_flow *flow)
{
        struct list_head *next = flow->encap.next;

        list_del(&flow->encap);
        if (list_empty(next)) {
                struct mlx5_encap_entry *e;

                e = list_entry(next, struct mlx5_encap_entry, flows);
                if (e->n) {
                        mlx5_encap_dealloc(priv->mdev, e->encap_id);
                        neigh_release(e->n);
                }
                hlist_del_rcu(&e->encap_hlist);
                kfree(e);
        }
}

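/* Remove a flower classifier offload and free its resources */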
int mlx5e_delete_flower(struct mlx5e_priv *priv,
                        struct tc_cls_flower_offload *f)
{
        struct mlx5e_tc_flow *flow;
        struct mlx5e_tc_table *tc = &priv->fs.tc;

        flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
                                      tc->ht_params);
        if (!flow)
                return -EINVAL;

        rhashtable_remove_fast(&tc->ht, &flow->node, tc->ht_params);

        mlx5e_tc_del_flow(priv, flow->rule, flow->attr);

        /* NIC flows carry no eswitch attributes, guard against NULL attr */
        if (flow->attr &&
            (flow->attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP))
                mlx5e_detach_encap(priv, flow);

        kfree(flow);

        return 0;
}

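/* Report the cached hardware counter of the flow back to the TC actions */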
int mlx5e_stats_flower(struct mlx5e_priv *priv,
                       struct tc_cls_flower_offload *f)
{
        struct mlx5e_tc_table *tc = &priv->fs.tc;
        struct mlx5e_tc_flow *flow;
        struct tc_action *a;
        struct mlx5_fc *counter;
        LIST_HEAD(actions);
        u64 bytes;
        u64 packets;
        u64 lastuse;

        flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
                                      tc->ht_params);
        if (!flow)
                return -EINVAL;

        counter = mlx5_flow_rule_counter(flow->rule);
        if (!counter)
                return 0;

        mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);

        tcf_exts_to_list(f->exts, &actions);
        list_for_each_entry(a, &actions, list)
                tcf_action_stats_update(a, bytes, packets, lastuse);

        return 0;
}

static const struct rhashtable_params mlx5e_tc_flow_ht_params = {
        .head_offset = offsetof(struct mlx5e_tc_flow, node),
        .key_offset = offsetof(struct mlx5e_tc_flow, cookie),
        .key_len = sizeof(((struct mlx5e_tc_flow *)0)->cookie),
        .automatic_shrinking = true,
};

int mlx5e_tc_init(struct mlx5e_priv *priv)
{
        struct mlx5e_tc_table *tc = &priv->fs.tc;

        tc->ht_params = mlx5e_tc_flow_ht_params;
        return rhashtable_init(&tc->ht, &tc->ht_params);
}

static void _mlx5e_tc_del_flow(void *ptr, void *arg)
{
        struct mlx5e_tc_flow *flow = ptr;
        struct mlx5e_priv *priv = arg;

        mlx5e_tc_del_flow(priv, flow->rule, flow->attr);
        kfree(flow);
}

void mlx5e_tc_cleanup(struct mlx5e_priv *priv)
{
        struct mlx5e_tc_table *tc = &priv->fs.tc;

        rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, priv);

        if (!IS_ERR_OR_NULL(tc->t)) {
                mlx5_destroy_flow_table(tc->t);
                tc->t = NULL;
        }
}