/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <net/flow_dissector.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_skbedit.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/device.h>
#include <linux/rhashtable.h>
#include "en.h"
#include "en_tc.h"

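/* One offloaded flower filter: hashed by the cls_flower cookie so the
 * replace/destroy/stats callbacks can look up the installed HW rule.
 */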
struct mlx5e_tc_flow {
	struct rhash_head	node;
	u64			cookie;
	struct mlx5_flow_rule	*rule;
};

#define MLX5E_TC_TABLE_NUM_ENTRIES 1024
#define MLX5E_TC_TABLE_NUM_GROUPS 4

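/* Install a rule in the HW TC flow table, creating the table on first use.
 * Forwarding rules steer matched packets to the vlan flow table; other
 * (drop) rules get a flow counter as their destination so drop statistics
 * can be read back for TC.
 */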
static struct mlx5_flow_rule *mlx5e_tc_add_flow(struct mlx5e_priv *priv,
						u32 *match_c, u32 *match_v,
						u32 action, u32 flow_tag)
{
	struct mlx5_core_dev *dev = priv->mdev;
	struct mlx5_flow_destination dest = { 0 };
	struct mlx5_fc *counter = NULL;
	struct mlx5_flow_rule *rule;
	bool table_created = false;

	if (action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		dest.ft = priv->fs.vlan.ft.t;
	} else {
		counter = mlx5_fc_create(dev, true);
		if (IS_ERR(counter))
			return ERR_CAST(counter);

		dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest.counter = counter;
	}

	if (IS_ERR_OR_NULL(priv->fs.tc.t)) {
		priv->fs.tc.t =
			mlx5_create_auto_grouped_flow_table(priv->fs.ns,
							    MLX5E_TC_PRIO,
							    MLX5E_TC_TABLE_NUM_ENTRIES,
							    MLX5E_TC_TABLE_NUM_GROUPS,
							    0);
		if (IS_ERR(priv->fs.tc.t)) {
			netdev_err(priv->netdev,
				   "Failed to create tc offload table\n");
			rule = ERR_CAST(priv->fs.tc.t);
			goto err_create_ft;
		}

		table_created = true;
	}

	rule = mlx5_add_flow_rule(priv->fs.tc.t, MLX5_MATCH_OUTER_HEADERS,
				  match_c, match_v,
				  action, flow_tag,
				  &dest);
	if (IS_ERR(rule))
		goto err_add_rule;

	return rule;

err_add_rule:
	if (table_created) {
		mlx5_destroy_flow_table(priv->fs.tc.t);
		priv->fs.tc.t = NULL;
	}
err_create_ft:
	mlx5_fc_destroy(dev, counter);

	return rule;
}

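/* Release a HW rule and its counter; the TC flow table itself is torn
 * down once the last offloaded filter is removed.
 */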
static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
			      struct mlx5_flow_rule *rule)
{
	struct mlx5_fc *counter = NULL;

	counter = mlx5_flow_rule_counter(rule);

	mlx5_del_flow_rule(rule);

	mlx5_fc_destroy(priv->mdev, counter);

	if (!mlx5e_tc_num_filters(priv)) {
		mlx5_destroy_flow_table(priv->fs.tc.t);
		priv->fs.tc.t = NULL;
	}
}

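/* Translate a cls_flower match into mlx5 match criteria (match_c) and
 * match values (match_v). Only L2-L4 outer header keys are supported;
 * filters using any other dissector key are not offloaded.
 */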
static int parse_cls_flower(struct mlx5e_priv *priv,
			    u32 *match_c, u32 *match_v,
			    struct tc_cls_flower_offload *f)
{
	void *headers_c = MLX5_ADDR_OF(fte_match_param, match_c, outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, match_v, outer_headers);
	u16 addr_type = 0;
	u8 ip_proto = 0;

	if (f->dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS))) {
		netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n",
			    f->dissector->used_keys);
		return -EOPNOTSUPP;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_dissector_key_control *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_CONTROL,
						  f->key);
		addr_type = key->addr_type;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_dissector_key_basic *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->key);
		struct flow_dissector_key_basic *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->mask);
		ip_proto = key->ip_proto;

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
			 ntohs(mask->n_proto));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
			 ntohs(key->n_proto));

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
			 mask->ip_proto);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
			 key->ip_proto);
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_dissector_key_eth_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
						  f->key);
		struct flow_dissector_key_eth_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
						  f->mask);

		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
					     dmac_47_16),
				mask->dst);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
					     dmac_47_16),
				key->dst);

		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
					     smac_47_16),
				mask->src);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
					     smac_47_16),
				key->src);
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_dissector_key_ipv4_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv4_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						  f->mask);

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &mask->src, sizeof(mask->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &key->src, sizeof(key->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &mask->dst, sizeof(mask->dst));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &key->dst, sizeof(key->dst));
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_dissector_key_ipv6_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv6_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
						  f->mask);

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &mask->src, sizeof(mask->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &key->src, sizeof(key->src));

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &mask->dst, sizeof(mask->dst));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &key->dst, sizeof(key->dst));
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_dissector_key_ports *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_PORTS,
						  f->key);
		struct flow_dissector_key_ports *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_PORTS,
						  f->mask);
		switch (ip_proto) {
		case IPPROTO_TCP:
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 tcp_sport, ntohs(mask->src));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 tcp_sport, ntohs(key->src));

			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 tcp_dport, ntohs(mask->dst));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 tcp_dport, ntohs(key->dst));
			break;

		case IPPROTO_UDP:
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 udp_sport, ntohs(mask->src));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 udp_sport, ntohs(key->src));

			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 udp_dport, ntohs(mask->dst));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 udp_dport, ntohs(key->dst));
			break;

		default:
			netdev_err(priv->netdev,
				   "Only UDP and TCP transport are supported\n");
			return -EINVAL;
		}
	}

	return 0;
}

static int parse_tc_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
			    u32 *action, u32 *flow_tag)
{
	const struct tc_action *a;

	if (tc_no_actions(exts))
		return -EINVAL;

	*flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
	*action = 0;

	tc_for_each_action(a, exts) {
		/* Only support a single action per rule */
		if (*action)
			return -EINVAL;

		if (is_tcf_gact_shot(a)) {
			*action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
			if (MLX5_CAP_FLOWTABLE(priv->mdev,
					       flow_table_properties_nic_receive.flow_counter))
				*action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
			continue;
		}

		if (is_tcf_skbedit_mark(a)) {
			u32 mark = tcf_skbedit_mark(a);

			if (mark & ~MLX5E_TC_FLOW_ID_MASK) {
				netdev_warn(priv->netdev, "Bad flow mark - only 16 bit is supported: 0x%x\n",
					    mark);
				return -EINVAL;
			}

			*flow_tag = mark;
			*action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
			continue;
		}

		return -EINVAL;
	}

	return 0;
}

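/* Offload (add or replace) a flower filter. The filter cookie keys the
 * lookup: on replace, the new HW rule is installed before the old one is
 * deleted.
 */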
int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
			   struct tc_cls_flower_offload *f)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;
	int err = 0;
	u32 flow_tag;
	u32 action;
	u32 *match_c;
	u32 *match_v;
	struct mlx5e_tc_flow *flow;
	struct mlx5_flow_rule *old = NULL;

	flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
				      tc->ht_params);
	if (flow)
		old = flow->rule;
	else
		flow = kzalloc(sizeof(*flow), GFP_KERNEL);

	match_c = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
	match_v = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
	if (!match_c || !match_v || !flow) {
		err = -ENOMEM;
		goto err_free;
	}

	flow->cookie = f->cookie;

	err = parse_cls_flower(priv, match_c, match_v, f);
	if (err < 0)
		goto err_free;

	err = parse_tc_actions(priv, f->exts, &action, &flow_tag);
	if (err < 0)
		goto err_free;

	err = rhashtable_insert_fast(&tc->ht, &flow->node,
				     tc->ht_params);
	if (err)
		goto err_free;

	flow->rule = mlx5e_tc_add_flow(priv, match_c, match_v, action,
				       flow_tag);
	if (IS_ERR(flow->rule)) {
		err = PTR_ERR(flow->rule);
		goto err_hash_del;
	}

	if (old)
		mlx5e_tc_del_flow(priv, old);

	goto out;

err_hash_del:
	rhashtable_remove_fast(&tc->ht, &flow->node, tc->ht_params);

err_free:
	if (!old)
		kfree(flow);
out:
	kfree(match_c);
	kfree(match_v);
	return err;
}

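/* Remove an offloaded flower filter: drop it from the hash table, then
 * tear down its HW rule.
 */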
int mlx5e_delete_flower(struct mlx5e_priv *priv,
			struct tc_cls_flower_offload *f)
{
	struct mlx5e_tc_flow *flow;
	struct mlx5e_tc_table *tc = &priv->fs.tc;

	flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
				      tc->ht_params);
	if (!flow)
		return -EINVAL;

	rhashtable_remove_fast(&tc->ht, &flow->node, tc->ht_params);

	mlx5e_tc_del_flow(priv, flow->rule);

	kfree(flow);

	return 0;
}

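/* Report HW stats for a flower filter: read the cached counter values and
 * feed them back into the TC action stats.
 */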
int mlx5e_stats_flower(struct mlx5e_priv *priv,
		       struct tc_cls_flower_offload *f)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;
	struct mlx5e_tc_flow *flow;
	struct tc_action *a;
	struct mlx5_fc *counter;
	u64 bytes;
	u64 packets;
	u64 lastuse;

	flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
				      tc->ht_params);
	if (!flow)
		return -EINVAL;

	counter = mlx5_flow_rule_counter(flow->rule);
	if (!counter)
		return 0;

	mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);

	tc_for_each_action(a, f->exts)
		tcf_action_stats_update(a, bytes, packets, lastuse);

	return 0;
}

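/* Flows are hashed by the u64 cls_flower cookie. */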
static const struct rhashtable_params mlx5e_tc_flow_ht_params = {
	.head_offset = offsetof(struct mlx5e_tc_flow, node),
	.key_offset = offsetof(struct mlx5e_tc_flow, cookie),
	.key_len = sizeof(((struct mlx5e_tc_flow *)0)->cookie),
	.automatic_shrinking = true,
};

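/* Set up the filter hash table; the HW flow table itself is created
 * lazily by mlx5e_tc_add_flow() on the first offloaded filter.
 */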
int mlx5e_tc_init(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;

	tc->ht_params = mlx5e_tc_flow_ht_params;
	return rhashtable_init(&tc->ht, &tc->ht_params);
}

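/* rhashtable_free_and_destroy() callback: release the HW rule behind each
 * remaining flow entry.
 */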
static void _mlx5e_tc_del_flow(void *ptr, void *arg)
{
	struct mlx5e_tc_flow *flow = ptr;
	struct mlx5e_priv *priv = arg;

	mlx5e_tc_del_flow(priv, flow->rule);
	kfree(flow);
}

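/* Free all offloaded flows and, if it was ever created, the HW flow
 * table.
 */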
void mlx5e_tc_cleanup(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;

	rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, priv);

	if (!IS_ERR_OR_NULL(tc->t)) {
		mlx5_destroy_flow_table(tc->t);
		tc->t = NULL;
	}
}