/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
33 #include <linux/etherdevice.h>
34 #include <linux/mlx5/driver.h>
35 #include <linux/mlx5/mlx5_ifc.h>
36 #include <linux/mlx5/vport.h>
37 #include <linux/mlx5/fs.h>
38 #include "mlx5_core.h"
41 static struct mlx5_flow_rule *
42 mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn)
44 struct mlx5_flow_destination dest;
45 struct mlx5_flow_rule *flow_rule;
46 int match_header = MLX5_MATCH_MISC_PARAMETERS;
47 u32 *match_v, *match_c;
50 match_v = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
51 match_c = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
52 if (!match_v || !match_c) {
53 esw_warn(esw->dev, "FDB: Failed to alloc match parameters\n");
54 flow_rule = ERR_PTR(-ENOMEM);
58 misc = MLX5_ADDR_OF(fte_match_param, match_v, misc_parameters);
59 MLX5_SET(fte_match_set_misc, misc, source_sqn, sqn);
60 MLX5_SET(fte_match_set_misc, misc, source_port, 0x0); /* source vport is 0 */
62 misc = MLX5_ADDR_OF(fte_match_param, match_c, misc_parameters);
63 MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_sqn);
64 MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
66 dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
67 dest.vport_num = vport;
69 flow_rule = mlx5_add_flow_rule(esw->fdb_table.fdb, match_header, match_c,
70 match_v, MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
72 if (IS_ERR(flow_rule))
73 esw_warn(esw->dev, "FDB: Failed to add send to vport rule err %ld\n", PTR_ERR(flow_rule));
80 void mlx5_eswitch_sqs2vport_stop(struct mlx5_eswitch *esw,
81 struct mlx5_eswitch_rep *rep)
83 struct mlx5_esw_sq *esw_sq, *tmp;
85 if (esw->mode != SRIOV_OFFLOADS)
88 list_for_each_entry_safe(esw_sq, tmp, &rep->vport_sqs_list, list) {
89 mlx5_del_flow_rule(esw_sq->send_to_vport_rule);
90 list_del(&esw_sq->list);
95 int mlx5_eswitch_sqs2vport_start(struct mlx5_eswitch *esw,
96 struct mlx5_eswitch_rep *rep,
97 u16 *sqns_array, int sqns_num)
99 struct mlx5_flow_rule *flow_rule;
100 struct mlx5_esw_sq *esw_sq;
105 if (esw->mode != SRIOV_OFFLOADS)
108 vport = rep->vport == 0 ?
109 FDB_UPLINK_VPORT : rep->vport;
111 for (i = 0; i < sqns_num; i++) {
112 esw_sq = kzalloc(sizeof(*esw_sq), GFP_KERNEL);
118 /* Add re-inject rule to the PF/representor sqs */
119 flow_rule = mlx5_eswitch_add_send_to_vport_rule(esw,
122 if (IS_ERR(flow_rule)) {
123 err = PTR_ERR(flow_rule);
127 esw_sq->send_to_vport_rule = flow_rule;
128 list_add(&esw_sq->list, &rep->vport_sqs_list);
133 mlx5_eswitch_sqs2vport_stop(esw, rep);
137 static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
139 struct mlx5_flow_destination dest;
140 struct mlx5_flow_rule *flow_rule = NULL;
141 u32 *match_v, *match_c;
144 match_v = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
145 match_c = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
146 if (!match_v || !match_c) {
147 esw_warn(esw->dev, "FDB: Failed to alloc match parameters\n");
152 dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
155 flow_rule = mlx5_add_flow_rule(esw->fdb_table.fdb, 0, match_c, match_v,
156 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, 0, &dest);
157 if (IS_ERR(flow_rule)) {
158 err = PTR_ERR(flow_rule);
159 esw_warn(esw->dev, "FDB: Failed to add miss flow rule err %d\n", err);
163 esw->fdb_table.offloads.miss_rule = flow_rule;
170 #define MAX_PF_SQ 256
172 static int esw_create_offloads_fdb_table(struct mlx5_eswitch *esw, int nvports)
174 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
175 struct mlx5_core_dev *dev = esw->dev;
176 struct mlx5_flow_namespace *root_ns;
177 struct mlx5_flow_table *fdb = NULL;
178 struct mlx5_flow_group *g;
180 void *match_criteria;
181 int table_size, ix, err = 0;
183 flow_group_in = mlx5_vzalloc(inlen);
187 root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
189 esw_warn(dev, "Failed to get FDB flow namespace\n");
193 esw_debug(dev, "Create offloads FDB table, log_max_size(%d)\n",
194 MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));
196 table_size = nvports + MAX_PF_SQ + 1;
197 fdb = mlx5_create_flow_table(root_ns, 0, table_size, 0);
200 esw_warn(dev, "Failed to create FDB Table err %d\n", err);
203 esw->fdb_table.fdb = fdb;
205 /* create send-to-vport group */
206 memset(flow_group_in, 0, inlen);
207 MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
208 MLX5_MATCH_MISC_PARAMETERS);
210 match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
212 MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_sqn);
213 MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);
215 ix = nvports + MAX_PF_SQ;
216 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
217 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix - 1);
219 g = mlx5_create_flow_group(fdb, flow_group_in);
222 esw_warn(dev, "Failed to create send-to-vport flow group err(%d)\n", err);
225 esw->fdb_table.offloads.send_to_vport_grp = g;
227 /* create miss group */
228 memset(flow_group_in, 0, inlen);
229 MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, 0);
231 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
232 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix + 1);
234 g = mlx5_create_flow_group(fdb, flow_group_in);
237 esw_warn(dev, "Failed to create miss flow group err(%d)\n", err);
240 esw->fdb_table.offloads.miss_grp = g;
242 err = esw_add_fdb_miss_rule(esw);
249 mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
251 mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
253 mlx5_destroy_flow_table(fdb);
256 kvfree(flow_group_in);
260 static void esw_destroy_offloads_fdb_table(struct mlx5_eswitch *esw)
262 if (!esw->fdb_table.fdb)
265 esw_debug(esw->dev, "Destroy offloads FDB Table\n");
266 mlx5_del_flow_rule(esw->fdb_table.offloads.miss_rule);
267 mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
268 mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
270 mlx5_destroy_flow_table(esw->fdb_table.fdb);
273 static int esw_create_offloads_table(struct mlx5_eswitch *esw)
275 struct mlx5_flow_namespace *ns;
276 struct mlx5_flow_table *ft_offloads;
277 struct mlx5_core_dev *dev = esw->dev;
280 ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
282 esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
286 ft_offloads = mlx5_create_flow_table(ns, 0, dev->priv.sriov.num_vfs + 2, 0);
287 if (IS_ERR(ft_offloads)) {
288 err = PTR_ERR(ft_offloads);
289 esw_warn(esw->dev, "Failed to create offloads table, err %d\n", err);
293 esw->offloads.ft_offloads = ft_offloads;
297 static void esw_destroy_offloads_table(struct mlx5_eswitch *esw)
299 struct mlx5_esw_offload *offloads = &esw->offloads;
301 mlx5_destroy_flow_table(offloads->ft_offloads);
304 static int esw_create_vport_rx_group(struct mlx5_eswitch *esw)
306 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
307 struct mlx5_flow_group *g;
308 struct mlx5_priv *priv = &esw->dev->priv;
310 void *match_criteria, *misc;
312 int nvports = priv->sriov.num_vfs + 2;
314 flow_group_in = mlx5_vzalloc(inlen);
318 /* create vport rx group */
319 memset(flow_group_in, 0, inlen);
320 MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
321 MLX5_MATCH_MISC_PARAMETERS);
323 match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
324 misc = MLX5_ADDR_OF(fte_match_param, match_criteria, misc_parameters);
325 MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
327 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
328 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, nvports - 1);
330 g = mlx5_create_flow_group(esw->offloads.ft_offloads, flow_group_in);
334 mlx5_core_warn(esw->dev, "Failed to create vport rx group err %d\n", err);
338 esw->offloads.vport_rx_group = g;
340 kfree(flow_group_in);
344 static void esw_destroy_vport_rx_group(struct mlx5_eswitch *esw)
346 mlx5_destroy_flow_group(esw->offloads.vport_rx_group);
349 struct mlx5_flow_rule *
350 mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, u32 tirn)
352 struct mlx5_flow_destination dest;
353 struct mlx5_flow_rule *flow_rule;
354 int match_header = MLX5_MATCH_MISC_PARAMETERS;
355 u32 *match_v, *match_c;
358 match_v = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
359 match_c = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
360 if (!match_v || !match_c) {
361 esw_warn(esw->dev, "Failed to alloc match parameters\n");
362 flow_rule = ERR_PTR(-ENOMEM);
366 misc = MLX5_ADDR_OF(fte_match_param, match_v, misc_parameters);
367 MLX5_SET(fte_match_set_misc, misc, source_port, vport);
369 misc = MLX5_ADDR_OF(fte_match_param, match_c, misc_parameters);
370 MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
372 dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
375 flow_rule = mlx5_add_flow_rule(esw->offloads.ft_offloads, match_header, match_c,
376 match_v, MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
378 if (IS_ERR(flow_rule)) {
379 esw_warn(esw->dev, "fs offloads: Failed to add vport rx rule err %ld\n", PTR_ERR(flow_rule));
389 static int esw_offloads_start(struct mlx5_eswitch *esw)
391 int err, num_vfs = esw->dev->priv.sriov.num_vfs;
393 if (esw->mode != SRIOV_LEGACY) {
394 esw_warn(esw->dev, "Can't set offloads mode, SRIOV legacy not enabled\n");
398 mlx5_eswitch_disable_sriov(esw);
399 err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
401 esw_warn(esw->dev, "Failed set eswitch to offloads, err %d\n", err);
405 int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
407 struct mlx5_eswitch_rep *rep;
411 err = esw_create_offloads_fdb_table(esw, nvports);
415 err = esw_create_offloads_table(esw);
419 err = esw_create_vport_rx_group(esw);
423 for (vport = 0; vport < nvports; vport++) {
424 rep = &esw->offloads.vport_reps[vport];
428 err = rep->load(esw, rep);
435 for (vport--; vport >= 0; vport--) {
436 rep = &esw->offloads.vport_reps[vport];
439 rep->unload(esw, rep);
441 esw_destroy_vport_rx_group(esw);
444 esw_destroy_offloads_table(esw);
447 esw_destroy_offloads_fdb_table(esw);
451 static int esw_offloads_stop(struct mlx5_eswitch *esw)
453 int err, num_vfs = esw->dev->priv.sriov.num_vfs;
455 mlx5_eswitch_disable_sriov(esw);
456 err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
458 esw_warn(esw->dev, "Failed set eswitch legacy mode. err %d\n", err);
463 void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports)
465 struct mlx5_eswitch_rep *rep;
468 for (vport = 0; vport < nvports; vport++) {
469 rep = &esw->offloads.vport_reps[vport];
472 rep->unload(esw, rep);
475 esw_destroy_vport_rx_group(esw);
476 esw_destroy_offloads_table(esw);
477 esw_destroy_offloads_fdb_table(esw);
480 static int mlx5_esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
483 case DEVLINK_ESWITCH_MODE_LEGACY:
484 *mlx5_mode = SRIOV_LEGACY;
486 case DEVLINK_ESWITCH_MODE_SWITCHDEV:
487 *mlx5_mode = SRIOV_OFFLOADS;
496 int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode)
498 struct mlx5_core_dev *dev;
499 u16 cur_mlx5_mode, mlx5_mode = 0;
501 dev = devlink_priv(devlink);
503 if (!MLX5_CAP_GEN(dev, vport_group_manager))
506 cur_mlx5_mode = dev->priv.eswitch->mode;
508 if (cur_mlx5_mode == SRIOV_NONE)
511 if (mlx5_esw_mode_from_devlink(mode, &mlx5_mode))
514 if (cur_mlx5_mode == mlx5_mode)
517 if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
518 return esw_offloads_start(dev->priv.eswitch);
519 else if (mode == DEVLINK_ESWITCH_MODE_LEGACY)
520 return esw_offloads_stop(dev->priv.eswitch);
525 int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
527 struct mlx5_core_dev *dev;
529 dev = devlink_priv(devlink);
531 if (!MLX5_CAP_GEN(dev, vport_group_manager))
534 if (dev->priv.eswitch->mode == SRIOV_NONE)
537 *mode = dev->priv.eswitch->mode;
542 void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw,
543 struct mlx5_eswitch_rep *rep)
545 struct mlx5_esw_offload *offloads = &esw->offloads;
547 memcpy(&offloads->vport_reps[rep->vport], rep,
548 sizeof(struct mlx5_eswitch_rep));
550 INIT_LIST_HEAD(&offloads->vport_reps[rep->vport].vport_sqs_list);
551 offloads->vport_reps[rep->vport].valid = true;
554 void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw,
557 struct mlx5_esw_offload *offloads = &esw->offloads;
558 struct mlx5_eswitch_rep *rep;
560 rep = &offloads->vport_reps[vport];
562 if (esw->mode == SRIOV_OFFLOADS && esw->vports[vport].enabled)
563 rep->unload(esw, rep);
565 offloads->vport_reps[vport].valid = false;