/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/dcbnl.h>
#include <linux/if_ether.h>
#include <linux/list.h>

#include "spectrum.h"
#include "core.h"
#include "port.h"
#include "reg.h"

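/* All of the state programmed through the SBPR/SBCM/SBPM registers is
 * mirrored in mlxsw_sp->sb so it can be reported back through devlink
 * without querying the hardware. The helpers below look up the cached
 * descriptors by direction and pool / PG index.
 */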
static struct mlxsw_sp_sb_pr *mlxsw_sp_sb_pr_get(struct mlxsw_sp *mlxsw_sp,
                                                 u8 pool,
                                                 enum mlxsw_reg_sbxx_dir dir)
{
        return &mlxsw_sp->sb.prs[dir][pool];
}

static struct mlxsw_sp_sb_cm *mlxsw_sp_sb_cm_get(struct mlxsw_sp *mlxsw_sp,
                                                 u8 local_port, u8 pg_buff,
                                                 enum mlxsw_reg_sbxx_dir dir)
{
        return &mlxsw_sp->sb.ports[local_port].cms[dir][pg_buff];
}

static struct mlxsw_sp_sb_pm *mlxsw_sp_sb_pm_get(struct mlxsw_sp *mlxsw_sp,
                                                 u8 local_port, u8 pool,
                                                 enum mlxsw_reg_sbxx_dir dir)
{
        return &mlxsw_sp->sb.ports[local_port].pms[dir][pool];
}

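/* The *_write() helpers issue the corresponding register write and, on
 * success, update the cached copy so it always reflects what was last
 * programmed into the hardware.
 */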
static int mlxsw_sp_sb_pr_write(struct mlxsw_sp *mlxsw_sp, u8 pool,
                                enum mlxsw_reg_sbxx_dir dir,
                                enum mlxsw_reg_sbpr_mode mode, u32 size)
{
        char sbpr_pl[MLXSW_REG_SBPR_LEN];
        struct mlxsw_sp_sb_pr *pr;
        int err;

        mlxsw_reg_sbpr_pack(sbpr_pl, pool, dir, mode, size);
        err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbpr), sbpr_pl);
        if (err)
                return err;

        pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool, dir);
        pr->mode = mode;
        pr->size = size;
        return 0;
}

static int mlxsw_sp_sb_cm_write(struct mlxsw_sp *mlxsw_sp, u8 local_port,
                                u8 pg_buff, enum mlxsw_reg_sbxx_dir dir,
                                u32 min_buff, u32 max_buff, u8 pool)
{
        char sbcm_pl[MLXSW_REG_SBCM_LEN];
        int err;

        mlxsw_reg_sbcm_pack(sbcm_pl, local_port, pg_buff, dir,
                            min_buff, max_buff, pool);
        err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbcm), sbcm_pl);
        if (err)
                return err;
        if (pg_buff < MLXSW_SP_SB_TC_COUNT) {
                struct mlxsw_sp_sb_cm *cm;

                cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, pg_buff, dir);
                cm->min_buff = min_buff;
                cm->max_buff = max_buff;
                cm->pool = pool;
        }
        return 0;
}

static int mlxsw_sp_sb_pm_write(struct mlxsw_sp *mlxsw_sp, u8 local_port,
                                u8 pool, enum mlxsw_reg_sbxx_dir dir,
                                u32 min_buff, u32 max_buff)
{
        char sbpm_pl[MLXSW_REG_SBPM_LEN];
        struct mlxsw_sp_sb_pm *pm;
        int err;

        mlxsw_reg_sbpm_pack(sbpm_pl, local_port, pool, dir, false,
                            min_buff, max_buff);
        err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl);
        if (err)
                return err;

        pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port, pool, dir);
        pm->min_buff = min_buff;
        pm->max_buff = max_buff;
        return 0;
}

static int mlxsw_sp_sb_pm_occ_clear(struct mlxsw_sp *mlxsw_sp, u8 local_port,
                                    u8 pool, enum mlxsw_reg_sbxx_dir dir,
                                    struct list_head *bulk_list)
{
        char sbpm_pl[MLXSW_REG_SBPM_LEN];

        mlxsw_reg_sbpm_pack(sbpm_pl, local_port, pool, dir, true, 0, 0);
        return mlxsw_reg_trans_query(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl,
                                     bulk_list, NULL, 0);
}

static void mlxsw_sp_sb_pm_occ_query_cb(struct mlxsw_core *mlxsw_core,
                                        char *sbpm_pl, size_t sbpm_pl_len,
                                        unsigned long cb_priv)
{
        struct mlxsw_sp_sb_pm *pm = (struct mlxsw_sp_sb_pm *) cb_priv;

        mlxsw_reg_sbpm_unpack(sbpm_pl, &pm->occ.cur, &pm->occ.max);
}

static int mlxsw_sp_sb_pm_occ_query(struct mlxsw_sp *mlxsw_sp, u8 local_port,
                                    u8 pool, enum mlxsw_reg_sbxx_dir dir,
                                    struct list_head *bulk_list)
{
        char sbpm_pl[MLXSW_REG_SBPM_LEN];
        struct mlxsw_sp_sb_pm *pm;

        pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port, pool, dir);
        mlxsw_reg_sbpm_pack(sbpm_pl, local_port, pool, dir, false, 0, 0);
        return mlxsw_reg_trans_query(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl,
                                     bulk_list,
                                     mlxsw_sp_sb_pm_occ_query_cb,
                                     (unsigned long) pm);
}

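/* Per-port headroom (PB) buffer sizes, in cells. Buffer 0 is sized for two
 * maximum-length Ethernet frames and buffer 9 for two frames of the maximum
 * supported MTU; the remaining buffers stay at zero. Buffer 9 presumably
 * serves high-priority / management traffic, matching the ingress PG 9
 * quota bound to the management pool further down.
 */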
static const u16 mlxsw_sp_pbs[] = {
        [0] = 2 * MLXSW_SP_BYTES_TO_CELLS(ETH_FRAME_LEN),
        [9] = 2 * MLXSW_SP_BYTES_TO_CELLS(MLXSW_PORT_MAX_MTU),
};

#define MLXSW_SP_PBS_LEN ARRAY_SIZE(mlxsw_sp_pbs)
#define MLXSW_SP_PB_UNUSED 8

static int mlxsw_sp_port_pb_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
        char pbmc_pl[MLXSW_REG_PBMC_LEN];
        int i;

        mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port,
                            0xffff, 0xffff / 2);
        for (i = 0; i < MLXSW_SP_PBS_LEN; i++) {
                if (i == MLXSW_SP_PB_UNUSED)
                        continue;
                mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, i, mlxsw_sp_pbs[i]);
        }
        mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl,
                                         MLXSW_REG_PBMC_PORT_SHARED_BUF_IDX, 0);
        return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core,
                               MLXSW_REG(pbmc), pbmc_pl);
}

static int mlxsw_sp_port_pb_prio_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
        char pptb_pl[MLXSW_REG_PPTB_LEN];
        int i;

        mlxsw_reg_pptb_pack(pptb_pl, mlxsw_sp_port->local_port);
        for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
                mlxsw_reg_pptb_prio_to_buff_pack(pptb_pl, i, 0);
        return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pptb),
                               pptb_pl);
}

static int mlxsw_sp_port_headroom_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
        int err;

        err = mlxsw_sp_port_pb_init(mlxsw_sp_port);
        if (err)
                return err;
        return mlxsw_sp_port_pb_prio_init(mlxsw_sp_port);
}

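/* Shared buffer pool sizes, in bytes. Ingress pool 0 and egress pool 0 start
 * from a base figure with a per-port amount subtracted (2 * 20000 bytes per
 * port on ingress, 8 * 1500 bytes per port on egress), which appears to
 * leave room for the per-port minimum quotas configured below; pool 3 on
 * ingress is a small dedicated management pool.
 */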
#define MLXSW_SP_SB_PR_INGRESS_SIZE                             \
        (15000000 - (2 * 20000 * MLXSW_PORT_MAX_PORTS))
#define MLXSW_SP_SB_PR_INGRESS_MNG_SIZE (200 * 1000)
#define MLXSW_SP_SB_PR_EGRESS_SIZE                              \
        (14000000 - (8 * 1500 * MLXSW_PORT_MAX_PORTS))

#define MLXSW_SP_SB_PR(_mode, _size)    \
        {                               \
                .mode = _mode,          \
                .size = _size,          \
        }

static const struct mlxsw_sp_sb_pr mlxsw_sp_sb_prs_ingress[] = {
        MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC,
                       MLXSW_SP_BYTES_TO_CELLS(MLXSW_SP_SB_PR_INGRESS_SIZE)),
        MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
        MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
        MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC,
                       MLXSW_SP_BYTES_TO_CELLS(MLXSW_SP_SB_PR_INGRESS_MNG_SIZE)),
};

#define MLXSW_SP_SB_PRS_INGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_prs_ingress)

static const struct mlxsw_sp_sb_pr mlxsw_sp_sb_prs_egress[] = {
        MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC,
                       MLXSW_SP_BYTES_TO_CELLS(MLXSW_SP_SB_PR_EGRESS_SIZE)),
        MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
        MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
        MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
};

#define MLXSW_SP_SB_PRS_EGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_prs_egress)

static int __mlxsw_sp_sb_prs_init(struct mlxsw_sp *mlxsw_sp,
                                  enum mlxsw_reg_sbxx_dir dir,
                                  const struct mlxsw_sp_sb_pr *prs,
                                  size_t prs_len)
{
        int i;
        int err;

        for (i = 0; i < prs_len; i++) {
                const struct mlxsw_sp_sb_pr *pr;

                pr = &prs[i];
                err = mlxsw_sp_sb_pr_write(mlxsw_sp, i, dir,
                                           pr->mode, pr->size);
                if (err)
                        return err;
        }
        return 0;
}

static int mlxsw_sp_sb_prs_init(struct mlxsw_sp *mlxsw_sp)
{
        int err;

        err = __mlxsw_sp_sb_prs_init(mlxsw_sp, MLXSW_REG_SBXX_DIR_INGRESS,
                                     mlxsw_sp_sb_prs_ingress,
                                     MLXSW_SP_SB_PRS_INGRESS_LEN);
        if (err)
                return err;
        return __mlxsw_sp_sb_prs_init(mlxsw_sp, MLXSW_REG_SBXX_DIR_EGRESS,
                                      mlxsw_sp_sb_prs_egress,
                                      MLXSW_SP_SB_PRS_EGRESS_LEN);
}

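/* Default shared buffer quotas per {port, PG/TC} (CMs): the index is an
 * ingress priority group buffer or an egress traffic class. min_buff is
 * given in cells; for CMs bound to a dynamic pool, max_buff is typically a
 * dynamic threshold (alpha) index rather than a cell count.
 */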
#define MLXSW_SP_SB_CM(_min_buff, _max_buff, _pool)     \
        {                                               \
                .min_buff = _min_buff,                  \
                .max_buff = _max_buff,                  \
                .pool = _pool,                          \
        }

static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms_ingress[] = {
        MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(10000), 8, 0),
        MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
        MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
        MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
        MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
        MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
        MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
        MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
        MLXSW_SP_SB_CM(0, 0, 0), /* dummy, this PG does not exist */
        MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(20000), 1, 3),
};

#define MLXSW_SP_SB_CMS_INGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_cms_ingress)

static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms_egress[] = {
        MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0),
        MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0),
        MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0),
        MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0),
        MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0),
        MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0),
        MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0),
        MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0),
        MLXSW_SP_SB_CM(0, 0, 0),
        MLXSW_SP_SB_CM(0, 0, 0),
        MLXSW_SP_SB_CM(0, 0, 0),
        MLXSW_SP_SB_CM(0, 0, 0),
        MLXSW_SP_SB_CM(0, 0, 0),
        MLXSW_SP_SB_CM(0, 0, 0),
        MLXSW_SP_SB_CM(0, 0, 0),
        MLXSW_SP_SB_CM(0, 0, 0),
        MLXSW_SP_SB_CM(1, 0xff, 0),
};

#define MLXSW_SP_SB_CMS_EGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_cms_egress)

#define MLXSW_SP_CPU_PORT_SB_CM MLXSW_SP_SB_CM(0, 0, 0)

static const struct mlxsw_sp_sb_cm mlxsw_sp_cpu_port_sb_cms[] = {
        MLXSW_SP_CPU_PORT_SB_CM,
        MLXSW_SP_CPU_PORT_SB_CM,
        MLXSW_SP_CPU_PORT_SB_CM,
        MLXSW_SP_CPU_PORT_SB_CM,
        MLXSW_SP_CPU_PORT_SB_CM,
        MLXSW_SP_CPU_PORT_SB_CM,
        MLXSW_SP_CPU_PORT_SB_CM,
        MLXSW_SP_CPU_PORT_SB_CM,
        MLXSW_SP_CPU_PORT_SB_CM,
        MLXSW_SP_CPU_PORT_SB_CM,
        MLXSW_SP_CPU_PORT_SB_CM,
        MLXSW_SP_CPU_PORT_SB_CM,
        MLXSW_SP_CPU_PORT_SB_CM,
        MLXSW_SP_CPU_PORT_SB_CM,
        MLXSW_SP_CPU_PORT_SB_CM,
        MLXSW_SP_CPU_PORT_SB_CM,
        MLXSW_SP_CPU_PORT_SB_CM,
        MLXSW_SP_CPU_PORT_SB_CM,
        MLXSW_SP_CPU_PORT_SB_CM,
        MLXSW_SP_CPU_PORT_SB_CM,
        MLXSW_SP_CPU_PORT_SB_CM,
        MLXSW_SP_CPU_PORT_SB_CM,
        MLXSW_SP_CPU_PORT_SB_CM,
        MLXSW_SP_CPU_PORT_SB_CM,
        MLXSW_SP_CPU_PORT_SB_CM,
        MLXSW_SP_CPU_PORT_SB_CM,
        MLXSW_SP_CPU_PORT_SB_CM,
        MLXSW_SP_CPU_PORT_SB_CM,
        MLXSW_SP_CPU_PORT_SB_CM,
        MLXSW_SP_CPU_PORT_SB_CM,
        MLXSW_SP_CPU_PORT_SB_CM,
        MLXSW_SP_CPU_PORT_SB_CM,
};

#define MLXSW_SP_CPU_PORT_SB_MCS_LEN \
        ARRAY_SIZE(mlxsw_sp_cpu_port_sb_cms)

static int __mlxsw_sp_sb_cms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port,
                                  enum mlxsw_reg_sbxx_dir dir,
                                  const struct mlxsw_sp_sb_cm *cms,
                                  size_t cms_len)
{
        int i;
        int err;

        for (i = 0; i < cms_len; i++) {
                const struct mlxsw_sp_sb_cm *cm;

                if (i == 8 && dir == MLXSW_REG_SBXX_DIR_INGRESS)
                        continue; /* PG number 8 does not exist, skip it */
                cm = &cms[i];
                err = mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, i, dir,
                                           cm->min_buff, cm->max_buff,
                                           cm->pool);
                if (err)
                        return err;
        }
        return 0;
}

static int mlxsw_sp_port_sb_cms_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
        int err;

        err = __mlxsw_sp_sb_cms_init(mlxsw_sp_port->mlxsw_sp,
                                     mlxsw_sp_port->local_port,
                                     MLXSW_REG_SBXX_DIR_INGRESS,
                                     mlxsw_sp_sb_cms_ingress,
                                     MLXSW_SP_SB_CMS_INGRESS_LEN);
        if (err)
                return err;
        return __mlxsw_sp_sb_cms_init(mlxsw_sp_port->mlxsw_sp,
                                      mlxsw_sp_port->local_port,
                                      MLXSW_REG_SBXX_DIR_EGRESS,
                                      mlxsw_sp_sb_cms_egress,
                                      MLXSW_SP_SB_CMS_EGRESS_LEN);
}

static int mlxsw_sp_cpu_port_sb_cms_init(struct mlxsw_sp *mlxsw_sp)
{
        return __mlxsw_sp_sb_cms_init(mlxsw_sp, 0, MLXSW_REG_SBXX_DIR_EGRESS,
                                      mlxsw_sp_cpu_port_sb_cms,
                                      MLXSW_SP_CPU_PORT_SB_MCS_LEN);
}

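/* Default per-{port, pool} quotas (PMs). min_buff and max_buff follow the
 * same convention as the CMs above: static values are cell counts, while
 * for dynamic pools max_buff is a dynamic threshold (alpha) index.
 */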
#define MLXSW_SP_SB_PM(_min_buff, _max_buff)    \
        {                                       \
                .min_buff = _min_buff,          \
                .max_buff = _max_buff,          \
        }

static const struct mlxsw_sp_sb_pm mlxsw_sp_sb_pms_ingress[] = {
        MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX),
        MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
        MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
        MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX),
};

#define MLXSW_SP_SB_PMS_INGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_pms_ingress)

static const struct mlxsw_sp_sb_pm mlxsw_sp_sb_pms_egress[] = {
        MLXSW_SP_SB_PM(0, 7),
        MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
        MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
        MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
};

#define MLXSW_SP_SB_PMS_EGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_pms_egress)

static int __mlxsw_sp_port_sb_pms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port,
                                       enum mlxsw_reg_sbxx_dir dir,
                                       const struct mlxsw_sp_sb_pm *pms,
                                       size_t pms_len)
{
        int i;
        int err;

        for (i = 0; i < pms_len; i++) {
                const struct mlxsw_sp_sb_pm *pm;

                pm = &pms[i];
                err = mlxsw_sp_sb_pm_write(mlxsw_sp, local_port, i, dir,
                                           pm->min_buff, pm->max_buff);
                if (err)
                        return err;
        }
        return 0;
}

static int mlxsw_sp_port_sb_pms_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
        int err;

        err = __mlxsw_sp_port_sb_pms_init(mlxsw_sp_port->mlxsw_sp,
                                          mlxsw_sp_port->local_port,
                                          MLXSW_REG_SBXX_DIR_INGRESS,
                                          mlxsw_sp_sb_pms_ingress,
                                          MLXSW_SP_SB_PMS_INGRESS_LEN);
        if (err)
                return err;
        return __mlxsw_sp_port_sb_pms_init(mlxsw_sp_port->mlxsw_sp,
                                           mlxsw_sp_port->local_port,
                                           MLXSW_REG_SBXX_DIR_EGRESS,
                                           mlxsw_sp_sb_pms_egress,
                                           MLXSW_SP_SB_PMS_EGRESS_LEN);
}

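/* Shared buffer quotas for multicast packets, keyed by switch priority and
 * programmed through the SBMM register.
 */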
struct mlxsw_sp_sb_mm {
        u32 min_buff;
        u32 max_buff;
        u8 pool;
};

#define MLXSW_SP_SB_MM(_min_buff, _max_buff, _pool)     \
        {                                               \
                .min_buff = _min_buff,                  \
                .max_buff = _max_buff,                  \
                .pool = _pool,                          \
        }

static const struct mlxsw_sp_sb_mm mlxsw_sp_sb_mms[] = {
        MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
        MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
        MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
        MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
        MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
        MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
        MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
        MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
        MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
        MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
        MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
        MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
        MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
        MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
        MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
};

#define MLXSW_SP_SB_MMS_LEN ARRAY_SIZE(mlxsw_sp_sb_mms)

static int mlxsw_sp_sb_mms_init(struct mlxsw_sp *mlxsw_sp)
{
        char sbmm_pl[MLXSW_REG_SBMM_LEN];
        int i;
        int err;

        for (i = 0; i < MLXSW_SP_SB_MMS_LEN; i++) {
                const struct mlxsw_sp_sb_mm *mc;

                mc = &mlxsw_sp_sb_mms[i];
                mlxsw_reg_sbmm_pack(sbmm_pl, i, mc->min_buff,
                                    mc->max_buff, mc->pool);
                err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbmm), sbmm_pl);
                if (err)
                        return err;
        }
        return 0;
}

#define MLXSW_SP_SB_SIZE (16 * 1024 * 1024)

int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp)
{
        int err;

        err = mlxsw_sp_sb_prs_init(mlxsw_sp);
        if (err)
                return err;
        err = mlxsw_sp_cpu_port_sb_cms_init(mlxsw_sp);
        if (err)
                return err;
        err = mlxsw_sp_sb_mms_init(mlxsw_sp);
        if (err)
                return err;
        return devlink_sb_register(priv_to_devlink(mlxsw_sp->core), 0,
                                   MLXSW_SP_SB_SIZE,
                                   MLXSW_SP_SB_POOL_COUNT,
                                   MLXSW_SP_SB_POOL_COUNT,
                                   MLXSW_SP_SB_TC_COUNT,
                                   MLXSW_SP_SB_TC_COUNT);
}

void mlxsw_sp_buffers_fini(struct mlxsw_sp *mlxsw_sp)
{
        devlink_sb_unregister(priv_to_devlink(mlxsw_sp->core), 0);
}

int mlxsw_sp_port_buffers_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
        int err;

        err = mlxsw_sp_port_headroom_init(mlxsw_sp_port);
        if (err)
                return err;
        err = mlxsw_sp_port_sb_cms_init(mlxsw_sp_port);
        if (err)
                return err;
        err = mlxsw_sp_port_sb_pms_init(mlxsw_sp_port);

        return err;
}

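/* devlink presents ingress and egress pools as one flat index space:
 * indexes 0..MLXSW_SP_SB_POOL_COUNT - 1 are the ingress pools and the next
 * MLXSW_SP_SB_POOL_COUNT indexes are the egress pools. The helpers below
 * convert between that flat index and the (pool, direction) pair used by
 * the hardware registers.
 */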
static u8 pool_get(u16 pool_index)
{
        return pool_index % MLXSW_SP_SB_POOL_COUNT;
}

static u16 pool_index_get(u8 pool, enum mlxsw_reg_sbxx_dir dir)
{
        u16 pool_index;

        pool_index = pool;
        if (dir == MLXSW_REG_SBXX_DIR_EGRESS)
                pool_index += MLXSW_SP_SB_POOL_COUNT;
        return pool_index;
}

static enum mlxsw_reg_sbxx_dir dir_get(u16 pool_index)
{
        return pool_index < MLXSW_SP_SB_POOL_COUNT ?
               MLXSW_REG_SBXX_DIR_INGRESS : MLXSW_REG_SBXX_DIR_EGRESS;
}

int mlxsw_sp_sb_pool_get(struct mlxsw_core *mlxsw_core,
                         unsigned int sb_index, u16 pool_index,
                         struct devlink_sb_pool_info *pool_info)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
        u8 pool = pool_get(pool_index);
        enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index);
        struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool, dir);

        pool_info->pool_type = dir;
        pool_info->size = MLXSW_SP_CELLS_TO_BYTES(pr->size);
        pool_info->threshold_type = pr->mode;
        return 0;
}

int mlxsw_sp_sb_pool_set(struct mlxsw_core *mlxsw_core,
                         unsigned int sb_index, u16 pool_index, u32 size,
                         enum devlink_sb_threshold_type threshold_type)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
        u8 pool = pool_get(pool_index);
        enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index);
        enum mlxsw_reg_sbpr_mode mode = threshold_type;
        u32 pool_size = MLXSW_SP_BYTES_TO_CELLS(size);

        return mlxsw_sp_sb_pr_write(mlxsw_sp, pool, dir, mode, pool_size);
}

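/* For dynamic pools, devlink thresholds are offset by a constant from the
 * hardware dynamic ("alpha") max_buff range
 * MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN..MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX. For
 * static pools, thresholds are plain byte counts converted to and from
 * cells.
 */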
#define MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET (-2) /* 3->1, 16->14 */

static u32 mlxsw_sp_sb_threshold_out(struct mlxsw_sp *mlxsw_sp, u8 pool,
                                     enum mlxsw_reg_sbxx_dir dir, u32 max_buff)
{
        struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool, dir);

        if (pr->mode == MLXSW_REG_SBPR_MODE_DYNAMIC)
                return max_buff - MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET;
        return MLXSW_SP_CELLS_TO_BYTES(max_buff);
}

static int mlxsw_sp_sb_threshold_in(struct mlxsw_sp *mlxsw_sp, u8 pool,
                                    enum mlxsw_reg_sbxx_dir dir, u32 threshold,
                                    u32 *p_max_buff)
{
        struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool, dir);

        if (pr->mode == MLXSW_REG_SBPR_MODE_DYNAMIC) {
                int val;

                val = threshold + MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET;
                if (val < MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN ||
                    val > MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX)
                        return -EINVAL;
                *p_max_buff = val;
        } else {
                *p_max_buff = MLXSW_SP_BYTES_TO_CELLS(threshold);
        }
        return 0;
}

int mlxsw_sp_sb_port_pool_get(struct mlxsw_core_port *mlxsw_core_port,
                              unsigned int sb_index, u16 pool_index,
                              u32 *p_threshold)
{
        struct mlxsw_sp_port *mlxsw_sp_port =
                        mlxsw_core_port_driver_priv(mlxsw_core_port);
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        u8 local_port = mlxsw_sp_port->local_port;
        u8 pool = pool_get(pool_index);
        enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index);
        struct mlxsw_sp_sb_pm *pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port,
                                                       pool, dir);

        *p_threshold = mlxsw_sp_sb_threshold_out(mlxsw_sp, pool, dir,
                                                 pm->max_buff);
        return 0;
}

int mlxsw_sp_sb_port_pool_set(struct mlxsw_core_port *mlxsw_core_port,
                              unsigned int sb_index, u16 pool_index,
                              u32 threshold)
{
        struct mlxsw_sp_port *mlxsw_sp_port =
                        mlxsw_core_port_driver_priv(mlxsw_core_port);
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        u8 local_port = mlxsw_sp_port->local_port;
        u8 pool = pool_get(pool_index);
        enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index);
        u32 max_buff;
        int err;

        err = mlxsw_sp_sb_threshold_in(mlxsw_sp, pool, dir,
                                       threshold, &max_buff);
        if (err)
                return err;

        return mlxsw_sp_sb_pm_write(mlxsw_sp, local_port, pool, dir,
                                    0, max_buff);
}

int mlxsw_sp_sb_tc_pool_bind_get(struct mlxsw_core_port *mlxsw_core_port,
                                 unsigned int sb_index, u16 tc_index,
                                 enum devlink_sb_pool_type pool_type,
                                 u16 *p_pool_index, u32 *p_threshold)
{
        struct mlxsw_sp_port *mlxsw_sp_port =
                        mlxsw_core_port_driver_priv(mlxsw_core_port);
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        u8 local_port = mlxsw_sp_port->local_port;
        u8 pg_buff = tc_index;
        enum mlxsw_reg_sbxx_dir dir = pool_type;
        struct mlxsw_sp_sb_cm *cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port,
                                                       pg_buff, dir);

        *p_threshold = mlxsw_sp_sb_threshold_out(mlxsw_sp, cm->pool, dir,
                                                 cm->max_buff);
        *p_pool_index = pool_index_get(cm->pool, pool_type);
        return 0;
}

int mlxsw_sp_sb_tc_pool_bind_set(struct mlxsw_core_port *mlxsw_core_port,
                                 unsigned int sb_index, u16 tc_index,
                                 enum devlink_sb_pool_type pool_type,
                                 u16 pool_index, u32 threshold)
{
        struct mlxsw_sp_port *mlxsw_sp_port =
                        mlxsw_core_port_driver_priv(mlxsw_core_port);
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        u8 local_port = mlxsw_sp_port->local_port;
        u8 pg_buff = tc_index;
        enum mlxsw_reg_sbxx_dir dir = pool_type;
        u8 pool = pool_index;
        u32 max_buff;
        int err;

        err = mlxsw_sp_sb_threshold_in(mlxsw_sp, pool, dir,
                                       threshold, &max_buff);
        if (err)
                return err;

        if (pool_type == DEVLINK_SB_POOL_TYPE_EGRESS) {
                if (pool < MLXSW_SP_SB_POOL_COUNT)
                        return -EINVAL;
                pool -= MLXSW_SP_SB_POOL_COUNT;
        } else if (pool >= MLXSW_SP_SB_POOL_COUNT) {
                return -EINVAL;
        }
        return mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, pg_buff, dir,
                                    0, max_buff, pool);
}

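/* An SBSR query returns at most MLXSW_REG_SBSR_REC_MAX_COUNT occupancy
 * records, and each selected port contributes MLXSW_SP_SB_TC_COUNT ingress
 * plus MLXSW_SP_SB_TC_COUNT egress records, so ports are processed in
 * batches of at most MASKED_COUNT_MAX at a time.
 */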
#define MASKED_COUNT_MAX \
        (MLXSW_REG_SBSR_REC_MAX_COUNT / (MLXSW_SP_SB_TC_COUNT * 2))

struct mlxsw_sp_sb_sr_occ_query_cb_ctx {
        u8 masked_count;
        u8 local_port_1;
};

static void mlxsw_sp_sb_sr_occ_query_cb(struct mlxsw_core *mlxsw_core,
                                        char *sbsr_pl, size_t sbsr_pl_len,
                                        unsigned long cb_priv)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
        struct mlxsw_sp_sb_sr_occ_query_cb_ctx cb_ctx;
        u8 masked_count;
        u8 local_port;
        int rec_index = 0;
        struct mlxsw_sp_sb_cm *cm;
        int i;

        memcpy(&cb_ctx, &cb_priv, sizeof(cb_ctx));

        masked_count = 0;
        for (local_port = cb_ctx.local_port_1;
             local_port < MLXSW_PORT_MAX_PORTS; local_port++) {
                if (!mlxsw_sp->ports[local_port])
                        continue;
                for (i = 0; i < MLXSW_SP_SB_TC_COUNT; i++) {
                        cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, i,
                                                MLXSW_REG_SBXX_DIR_INGRESS);
                        mlxsw_reg_sbsr_rec_unpack(sbsr_pl, rec_index++,
                                                  &cm->occ.cur, &cm->occ.max);
                }
                if (++masked_count == cb_ctx.masked_count)
                        break;
        }
        masked_count = 0;
        for (local_port = cb_ctx.local_port_1;
             local_port < MLXSW_PORT_MAX_PORTS; local_port++) {
                if (!mlxsw_sp->ports[local_port])
                        continue;
                for (i = 0; i < MLXSW_SP_SB_TC_COUNT; i++) {
                        cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, i,
                                                MLXSW_REG_SBXX_DIR_EGRESS);
                        mlxsw_reg_sbsr_rec_unpack(sbsr_pl, rec_index++,
                                                  &cm->occ.cur, &cm->occ.max);
                }
                if (++masked_count == cb_ctx.masked_count)
                        break;
        }
}

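/* Take an occupancy snapshot: walk the ports in batches, issue one SBSR
 * query per batch (the callback above copies the per-{port, TC} records
 * into the CM cache) together with per-{port, pool} SBPM queries, and wait
 * for the whole bulk list to complete before returning.
 */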
int mlxsw_sp_sb_occ_snapshot(struct mlxsw_core *mlxsw_core,
                             unsigned int sb_index)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
        struct mlxsw_sp_sb_sr_occ_query_cb_ctx cb_ctx;
        unsigned long cb_priv;
        LIST_HEAD(bulk_list);
        char *sbsr_pl;
        u8 masked_count;
        u8 local_port_1;
        u8 local_port = 0;
        int i;
        int err;
        int err2;

        sbsr_pl = kmalloc(MLXSW_REG_SBSR_LEN, GFP_KERNEL);
        if (!sbsr_pl)
                return -ENOMEM;

next_batch:
        local_port++;
        local_port_1 = local_port;
        masked_count = 0;
        mlxsw_reg_sbsr_pack(sbsr_pl, false);
        for (i = 0; i < MLXSW_SP_SB_TC_COUNT; i++) {
                mlxsw_reg_sbsr_pg_buff_mask_set(sbsr_pl, i, 1);
                mlxsw_reg_sbsr_tclass_mask_set(sbsr_pl, i, 1);
        }
        for (; local_port < MLXSW_PORT_MAX_PORTS; local_port++) {
                if (!mlxsw_sp->ports[local_port])
                        continue;
                mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl, local_port, 1);
                mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl, local_port, 1);
                for (i = 0; i < MLXSW_SP_SB_POOL_COUNT; i++) {
                        err = mlxsw_sp_sb_pm_occ_query(mlxsw_sp, local_port, i,
                                                       MLXSW_REG_SBXX_DIR_INGRESS,
                                                       &bulk_list);
                        if (err)
                                goto out;
                        err = mlxsw_sp_sb_pm_occ_query(mlxsw_sp, local_port, i,
                                                       MLXSW_REG_SBXX_DIR_EGRESS,
                                                       &bulk_list);
                        if (err)
                                goto out;
                }
                if (++masked_count == MASKED_COUNT_MAX)
                        goto do_query;
        }

do_query:
        cb_ctx.masked_count = masked_count;
        cb_ctx.local_port_1 = local_port_1;
        memcpy(&cb_priv, &cb_ctx, sizeof(cb_ctx));
        err = mlxsw_reg_trans_query(mlxsw_core, MLXSW_REG(sbsr), sbsr_pl,
                                    &bulk_list, mlxsw_sp_sb_sr_occ_query_cb,
                                    cb_priv);
        if (err)
                goto out;
        if (local_port < MLXSW_PORT_MAX_PORTS)
                goto next_batch;

out:
        err2 = mlxsw_reg_trans_bulk_wait(&bulk_list);
        if (!err)
                err = err2;
        kfree(sbsr_pl);
        return err;
}

int mlxsw_sp_sb_occ_max_clear(struct mlxsw_core *mlxsw_core,
                              unsigned int sb_index)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
        LIST_HEAD(bulk_list);
        char *sbsr_pl;
        unsigned int masked_count;
        u8 local_port = 0;
        int i;
        int err;
        int err2;

        sbsr_pl = kmalloc(MLXSW_REG_SBSR_LEN, GFP_KERNEL);
        if (!sbsr_pl)
                return -ENOMEM;

next_batch:
        local_port++;
        masked_count = 0;
        mlxsw_reg_sbsr_pack(sbsr_pl, true);
        for (i = 0; i < MLXSW_SP_SB_TC_COUNT; i++) {
                mlxsw_reg_sbsr_pg_buff_mask_set(sbsr_pl, i, 1);
                mlxsw_reg_sbsr_tclass_mask_set(sbsr_pl, i, 1);
        }
        for (; local_port < MLXSW_PORT_MAX_PORTS; local_port++) {
                if (!mlxsw_sp->ports[local_port])
                        continue;
                mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl, local_port, 1);
                mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl, local_port, 1);
                for (i = 0; i < MLXSW_SP_SB_POOL_COUNT; i++) {
                        err = mlxsw_sp_sb_pm_occ_clear(mlxsw_sp, local_port, i,
                                                       MLXSW_REG_SBXX_DIR_INGRESS,
                                                       &bulk_list);
                        if (err)
                                goto out;
                        err = mlxsw_sp_sb_pm_occ_clear(mlxsw_sp, local_port, i,
                                                       MLXSW_REG_SBXX_DIR_EGRESS,
                                                       &bulk_list);
                        if (err)
                                goto out;
                }
                if (++masked_count == MASKED_COUNT_MAX)
                        goto do_query;
        }

do_query:
        err = mlxsw_reg_trans_query(mlxsw_core, MLXSW_REG(sbsr), sbsr_pl,
                                    &bulk_list, NULL, 0);
        if (err)
                goto out;
        if (local_port < MLXSW_PORT_MAX_PORTS)
                goto next_batch;

out:
        err2 = mlxsw_reg_trans_bulk_wait(&bulk_list);
        if (!err)
                err = err2;
        kfree(sbsr_pl);
        return err;
}

int mlxsw_sp_sb_occ_port_pool_get(struct mlxsw_core_port *mlxsw_core_port,
                                  unsigned int sb_index, u16 pool_index,
                                  u32 *p_cur, u32 *p_max)
{
        struct mlxsw_sp_port *mlxsw_sp_port =
                        mlxsw_core_port_driver_priv(mlxsw_core_port);
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        u8 local_port = mlxsw_sp_port->local_port;
        u8 pool = pool_get(pool_index);
        enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index);
        struct mlxsw_sp_sb_pm *pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port,
                                                       pool, dir);

        *p_cur = MLXSW_SP_CELLS_TO_BYTES(pm->occ.cur);
        *p_max = MLXSW_SP_CELLS_TO_BYTES(pm->occ.max);
        return 0;
}

int mlxsw_sp_sb_occ_tc_port_bind_get(struct mlxsw_core_port *mlxsw_core_port,
                                     unsigned int sb_index, u16 tc_index,
                                     enum devlink_sb_pool_type pool_type,
                                     u32 *p_cur, u32 *p_max)
{
        struct mlxsw_sp_port *mlxsw_sp_port =
                        mlxsw_core_port_driver_priv(mlxsw_core_port);
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        u8 local_port = mlxsw_sp_port->local_port;
        u8 pg_buff = tc_index;
        enum mlxsw_reg_sbxx_dir dir = pool_type;
        struct mlxsw_sp_sb_cm *cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port,
                                                       pg_buff, dir);

        *p_cur = MLXSW_SP_CELLS_TO_BYTES(cm->occ.cur);
        *p_max = MLXSW_SP_CELLS_TO_BYTES(cm->occ.max);
        return 0;
}