/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/mlx5/driver.h>
#include <linux/mlx5/device.h>
#include <linux/mlx5/mlx5_ifc.h>

#include "fs_core.h"
#include "fs_cmd.h"
#include "mlx5_core.h"
41 int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev,
42 struct mlx5_flow_table *ft)
44 u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)] = {0};
45 u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)] = {0};
47 MLX5_SET(set_flow_table_root_in, in, opcode,
48 MLX5_CMD_OP_SET_FLOW_TABLE_ROOT);
49 MLX5_SET(set_flow_table_root_in, in, table_type, ft->type);
50 MLX5_SET(set_flow_table_root_in, in, table_id, ft->id);
52 MLX5_SET(set_flow_table_root_in, in, vport_number, ft->vport);
53 MLX5_SET(set_flow_table_root_in, in, other_vport, 1);
56 return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
59 int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev,
61 enum fs_flow_table_type type, unsigned int level,
62 unsigned int log_size, struct mlx5_flow_table
63 *next_ft, unsigned int *table_id)
65 u32 out[MLX5_ST_SZ_DW(create_flow_table_out)] = {0};
66 u32 in[MLX5_ST_SZ_DW(create_flow_table_in)] = {0};
69 MLX5_SET(create_flow_table_in, in, opcode,
70 MLX5_CMD_OP_CREATE_FLOW_TABLE);
73 MLX5_SET(create_flow_table_in, in, table_miss_mode, 1);
74 MLX5_SET(create_flow_table_in, in, table_miss_id, next_ft->id);
76 MLX5_SET(create_flow_table_in, in, table_type, type);
77 MLX5_SET(create_flow_table_in, in, level, level);
78 MLX5_SET(create_flow_table_in, in, log_size, log_size);
80 MLX5_SET(create_flow_table_in, in, vport_number, vport);
81 MLX5_SET(create_flow_table_in, in, other_vport, 1);
84 err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
86 *table_id = MLX5_GET(create_flow_table_out, out,
91 int mlx5_cmd_destroy_flow_table(struct mlx5_core_dev *dev,
92 struct mlx5_flow_table *ft)
94 u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)] = {0};
95 u32 out[MLX5_ST_SZ_DW(destroy_flow_table_out)] = {0};
97 MLX5_SET(destroy_flow_table_in, in, opcode,
98 MLX5_CMD_OP_DESTROY_FLOW_TABLE);
99 MLX5_SET(destroy_flow_table_in, in, table_type, ft->type);
100 MLX5_SET(destroy_flow_table_in, in, table_id, ft->id);
102 MLX5_SET(destroy_flow_table_in, in, vport_number, ft->vport);
103 MLX5_SET(destroy_flow_table_in, in, other_vport, 1);
106 return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
109 int mlx5_cmd_modify_flow_table(struct mlx5_core_dev *dev,
110 struct mlx5_flow_table *ft,
111 struct mlx5_flow_table *next_ft)
113 u32 in[MLX5_ST_SZ_DW(modify_flow_table_in)] = {0};
114 u32 out[MLX5_ST_SZ_DW(modify_flow_table_out)] = {0};
116 MLX5_SET(modify_flow_table_in, in, opcode,
117 MLX5_CMD_OP_MODIFY_FLOW_TABLE);
118 MLX5_SET(modify_flow_table_in, in, table_type, ft->type);
119 MLX5_SET(modify_flow_table_in, in, table_id, ft->id);
121 MLX5_SET(modify_flow_table_in, in, vport_number, ft->vport);
122 MLX5_SET(modify_flow_table_in, in, other_vport, 1);
124 MLX5_SET(modify_flow_table_in, in, modify_field_select,
125 MLX5_MODIFY_FLOW_TABLE_MISS_TABLE_ID);
127 MLX5_SET(modify_flow_table_in, in, table_miss_mode, 1);
128 MLX5_SET(modify_flow_table_in, in, table_miss_id, next_ft->id);
130 MLX5_SET(modify_flow_table_in, in, table_miss_mode, 0);
133 return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
136 int mlx5_cmd_create_flow_group(struct mlx5_core_dev *dev,
137 struct mlx5_flow_table *ft,
139 unsigned int *group_id)
141 u32 out[MLX5_ST_SZ_DW(create_flow_group_out)] = {0};
142 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
145 MLX5_SET(create_flow_group_in, in, opcode,
146 MLX5_CMD_OP_CREATE_FLOW_GROUP);
147 MLX5_SET(create_flow_group_in, in, table_type, ft->type);
148 MLX5_SET(create_flow_group_in, in, table_id, ft->id);
150 MLX5_SET(create_flow_group_in, in, vport_number, ft->vport);
151 MLX5_SET(create_flow_group_in, in, other_vport, 1);
154 err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
156 *group_id = MLX5_GET(create_flow_group_out, out,
161 int mlx5_cmd_destroy_flow_group(struct mlx5_core_dev *dev,
162 struct mlx5_flow_table *ft,
163 unsigned int group_id)
165 u32 out[MLX5_ST_SZ_DW(destroy_flow_group_out)] = {0};
166 u32 in[MLX5_ST_SZ_DW(destroy_flow_group_in)] = {0};
168 MLX5_SET(destroy_flow_group_in, in, opcode,
169 MLX5_CMD_OP_DESTROY_FLOW_GROUP);
170 MLX5_SET(destroy_flow_group_in, in, table_type, ft->type);
171 MLX5_SET(destroy_flow_group_in, in, table_id, ft->id);
172 MLX5_SET(destroy_flow_group_in, in, group_id, group_id);
174 MLX5_SET(destroy_flow_group_in, in, vport_number, ft->vport);
175 MLX5_SET(destroy_flow_group_in, in, other_vport, 1);
178 return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
181 static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
182 int opmod, int modify_mask,
183 struct mlx5_flow_table *ft,
187 unsigned int inlen = MLX5_ST_SZ_BYTES(set_fte_in) +
188 fte->dests_size * MLX5_ST_SZ_BYTES(dest_format_struct);
189 u32 out[MLX5_ST_SZ_DW(set_fte_out)] = {0};
190 struct mlx5_flow_rule *dst;
191 void *in_flow_context;
192 void *in_match_value;
197 in = mlx5_vzalloc(inlen);
199 mlx5_core_warn(dev, "failed to allocate inbox\n");
203 MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY);
204 MLX5_SET(set_fte_in, in, op_mod, opmod);
205 MLX5_SET(set_fte_in, in, modify_enable_mask, modify_mask);
206 MLX5_SET(set_fte_in, in, table_type, ft->type);
207 MLX5_SET(set_fte_in, in, table_id, ft->id);
208 MLX5_SET(set_fte_in, in, flow_index, fte->index);
210 MLX5_SET(set_fte_in, in, vport_number, ft->vport);
211 MLX5_SET(set_fte_in, in, other_vport, 1);
214 in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
215 MLX5_SET(flow_context, in_flow_context, group_id, group_id);
216 MLX5_SET(flow_context, in_flow_context, flow_tag, fte->flow_tag);
217 MLX5_SET(flow_context, in_flow_context, action, fte->action);
218 in_match_value = MLX5_ADDR_OF(flow_context, in_flow_context,
220 memcpy(in_match_value, &fte->val, MLX5_ST_SZ_BYTES(fte_match_param));
222 in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination);
223 if (fte->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
226 list_for_each_entry(dst, &fte->node.children, node.list) {
229 if (dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
232 MLX5_SET(dest_format_struct, in_dests, destination_type,
233 dst->dest_attr.type);
234 if (dst->dest_attr.type ==
235 MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE) {
236 id = dst->dest_attr.ft->id;
238 id = dst->dest_attr.tir_num;
240 MLX5_SET(dest_format_struct, in_dests, destination_id, id);
241 in_dests += MLX5_ST_SZ_BYTES(dest_format_struct);
245 MLX5_SET(flow_context, in_flow_context, destination_list_size,
249 if (fte->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
252 list_for_each_entry(dst, &fte->node.children, node.list) {
253 if (dst->dest_attr.type !=
254 MLX5_FLOW_DESTINATION_TYPE_COUNTER)
257 MLX5_SET(flow_counter_list, in_dests, flow_counter_id,
258 dst->dest_attr.counter->id);
259 in_dests += MLX5_ST_SZ_BYTES(dest_format_struct);
263 MLX5_SET(flow_context, in_flow_context, flow_counter_list_size,
267 err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
/* Create a new flow table entry: op_mod 0, no modify mask. */
int mlx5_cmd_create_fte(struct mlx5_core_dev *dev,
			struct mlx5_flow_table *ft,
			unsigned group_id,
			struct fs_fte *fte)
{
	return mlx5_cmd_set_fte(dev, 0, 0, ft, group_id, fte);
}
280 int mlx5_cmd_update_fte(struct mlx5_core_dev *dev,
281 struct mlx5_flow_table *ft,
287 int atomic_mod_cap = MLX5_CAP_FLOWTABLE(dev,
288 flow_table_properties_nic_receive.
294 return mlx5_cmd_set_fte(dev, opmod, modify_mask, ft, group_id, fte);
297 int mlx5_cmd_delete_fte(struct mlx5_core_dev *dev,
298 struct mlx5_flow_table *ft,
301 u32 out[MLX5_ST_SZ_DW(delete_fte_out)] = {0};
302 u32 in[MLX5_ST_SZ_DW(delete_fte_in)] = {0};
304 MLX5_SET(delete_fte_in, in, opcode, MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
305 MLX5_SET(delete_fte_in, in, table_type, ft->type);
306 MLX5_SET(delete_fte_in, in, table_id, ft->id);
307 MLX5_SET(delete_fte_in, in, flow_index, index);
309 MLX5_SET(delete_fte_in, in, vport_number, ft->vport);
310 MLX5_SET(delete_fte_in, in, other_vport, 1);
313 return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
316 int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u16 *id)
318 u32 in[MLX5_ST_SZ_DW(alloc_flow_counter_in)] = {0};
319 u32 out[MLX5_ST_SZ_DW(alloc_flow_counter_out)] = {0};
322 MLX5_SET(alloc_flow_counter_in, in, opcode,
323 MLX5_CMD_OP_ALLOC_FLOW_COUNTER);
325 err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
327 *id = MLX5_GET(alloc_flow_counter_out, out, flow_counter_id);
331 int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u16 id)
333 u32 in[MLX5_ST_SZ_DW(dealloc_flow_counter_in)] = {0};
334 u32 out[MLX5_ST_SZ_DW(dealloc_flow_counter_out)] = {0};
336 MLX5_SET(dealloc_flow_counter_in, in, opcode,
337 MLX5_CMD_OP_DEALLOC_FLOW_COUNTER);
338 MLX5_SET(dealloc_flow_counter_in, in, flow_counter_id, id);
339 return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
342 int mlx5_cmd_fc_query(struct mlx5_core_dev *dev, u16 id,
343 u64 *packets, u64 *bytes)
345 u32 out[MLX5_ST_SZ_BYTES(query_flow_counter_out) +
346 MLX5_ST_SZ_BYTES(traffic_counter)] = {0};
347 u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {0};
351 MLX5_SET(query_flow_counter_in, in, opcode,
352 MLX5_CMD_OP_QUERY_FLOW_COUNTER);
353 MLX5_SET(query_flow_counter_in, in, op_mod, 0);
354 MLX5_SET(query_flow_counter_in, in, flow_counter_id, id);
355 err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
359 stats = MLX5_ADDR_OF(query_flow_counter_out, out, flow_statistics);
360 *packets = MLX5_GET64(traffic_counter, stats, packets);
361 *bytes = MLX5_GET64(traffic_counter, stats, octets);
365 struct mlx5_cmd_fc_bulk {
372 struct mlx5_cmd_fc_bulk *
373 mlx5_cmd_fc_bulk_alloc(struct mlx5_core_dev *dev, u16 id, int num)
375 struct mlx5_cmd_fc_bulk *b;
376 int outlen = sizeof(*b) +
377 MLX5_ST_SZ_BYTES(query_flow_counter_out) +
378 MLX5_ST_SZ_BYTES(traffic_counter) * num;
380 b = kzalloc(outlen, GFP_KERNEL);
/* Free a bulk-query context allocated by mlx5_cmd_fc_bulk_alloc(). */
void mlx5_cmd_fc_bulk_free(struct mlx5_cmd_fc_bulk *b)
{
	kfree(b);
}
397 mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, struct mlx5_cmd_fc_bulk *b)
399 u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {0};
401 MLX5_SET(query_flow_counter_in, in, opcode,
402 MLX5_CMD_OP_QUERY_FLOW_COUNTER);
403 MLX5_SET(query_flow_counter_in, in, op_mod, 0);
404 MLX5_SET(query_flow_counter_in, in, flow_counter_id, b->id);
405 MLX5_SET(query_flow_counter_in, in, num_of_counters, b->num);
406 return mlx5_cmd_exec(dev, in, sizeof(in), b->out, b->outlen);
409 void mlx5_cmd_fc_bulk_get(struct mlx5_core_dev *dev,
410 struct mlx5_cmd_fc_bulk *b, u16 id,
411 u64 *packets, u64 *bytes)
413 int index = id - b->id;
416 if (index < 0 || index >= b->num) {
417 mlx5_core_warn(dev, "Flow counter id (0x%x) out of range (0x%x..0x%x). Counter ignored.\n",
418 id, b->id, b->id + b->num - 1);
422 stats = MLX5_ADDR_OF(query_flow_counter_out, b->out,
423 flow_statistics[index]);
424 *packets = MLX5_GET64(traffic_counter, stats, packets);
425 *bytes = MLX5_GET64(traffic_counter, stats, octets);
428 #define MAX_ENCAP_SIZE (128)
430 int mlx5_cmd_alloc_encap(struct mlx5_core_dev *dev,
436 u32 out[MLX5_ST_SZ_DW(alloc_encap_header_out)];
437 u32 in[MLX5_ST_SZ_DW(alloc_encap_header_in) +
438 (MAX_ENCAP_SIZE / sizeof(u32))];
439 void *encap_header_in = MLX5_ADDR_OF(alloc_encap_header_in, in,
441 void *header = MLX5_ADDR_OF(encap_header_in, encap_header_in,
443 int inlen = header - (void *)in + size;
446 if (size > MAX_ENCAP_SIZE)
449 memset(in, 0, inlen);
450 MLX5_SET(alloc_encap_header_in, in, opcode,
451 MLX5_CMD_OP_ALLOC_ENCAP_HEADER);
452 MLX5_SET(encap_header_in, encap_header_in, encap_header_size, size);
453 MLX5_SET(encap_header_in, encap_header_in, header_type, header_type);
454 memcpy(header, encap_header, size);
456 memset(out, 0, sizeof(out));
457 err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
459 *encap_id = MLX5_GET(alloc_encap_header_out, out, encap_id);
463 void mlx5_cmd_dealloc_encap(struct mlx5_core_dev *dev, u32 encap_id)
465 u32 in[MLX5_ST_SZ_DW(dealloc_encap_header_in)];
466 u32 out[MLX5_ST_SZ_DW(dealloc_encap_header_out)];
468 memset(in, 0, sizeof(in));
469 MLX5_SET(dealloc_encap_header_in, in, opcode,
470 MLX5_CMD_OP_DEALLOC_ENCAP_HEADER);
471 MLX5_SET(dealloc_encap_header_in, in, encap_id, encap_id);
473 mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));