/*
 * Driver for Marvell PPv2 network controller for Armada 375 SoC.
 *
 * Copyright (C) 2014 Marvell
 *
 * Marcin Wojtas <mw@semihalf.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <linux/inetdevice.h>
#include <linux/mbus.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/cpumask.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/phy.h>
#include <linux/clk.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <uapi/linux/ppp_defs.h>
#include <net/ip.h>
#include <net/ipv6.h>
/* RX Fifo Registers */
#define MVPP2_RX_DATA_FIFO_SIZE_REG(port) (0x00 + 4 * (port))
#define MVPP2_RX_ATTR_FIFO_SIZE_REG(port) (0x20 + 4 * (port))
#define MVPP2_RX_MIN_PKT_SIZE_REG 0x60
#define MVPP2_RX_FIFO_INIT_REG 0x64

/* RX DMA Top Registers */
#define MVPP2_RX_CTRL_REG(port) (0x140 + 4 * (port))
#define MVPP2_RX_LOW_LATENCY_PKT_SIZE(s) (((s) & 0xfff) << 16)
#define MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK BIT(31)
#define MVPP2_POOL_BUF_SIZE_REG(pool) (0x180 + 4 * (pool))
#define MVPP2_POOL_BUF_SIZE_OFFSET 5
#define MVPP2_RXQ_CONFIG_REG(rxq) (0x800 + 4 * (rxq))
#define MVPP2_SNOOP_PKT_SIZE_MASK 0x1ff
#define MVPP2_SNOOP_BUF_HDR_MASK BIT(9)
#define MVPP2_RXQ_POOL_SHORT_OFFS 20
#define MVPP21_RXQ_POOL_SHORT_MASK 0x700000
#define MVPP22_RXQ_POOL_SHORT_MASK 0xf00000
#define MVPP2_RXQ_POOL_LONG_OFFS 24
#define MVPP21_RXQ_POOL_LONG_MASK 0x7000000
#define MVPP22_RXQ_POOL_LONG_MASK 0xf000000
#define MVPP2_RXQ_PACKET_OFFSET_OFFS 28
#define MVPP2_RXQ_PACKET_OFFSET_MASK 0x70000000
#define MVPP2_RXQ_DISABLE_MASK BIT(31)
/* Parser Registers */
#define MVPP2_PRS_INIT_LOOKUP_REG 0x1000
#define MVPP2_PRS_PORT_LU_MAX 0xf
#define MVPP2_PRS_PORT_LU_MASK(port) (0xff << ((port) * 4))
#define MVPP2_PRS_PORT_LU_VAL(port, val) ((val) << ((port) * 4))
#define MVPP2_PRS_INIT_OFFS_REG(port) (0x1004 + ((port) & 4))
#define MVPP2_PRS_INIT_OFF_MASK(port) (0x3f << (((port) % 4) * 8))
#define MVPP2_PRS_INIT_OFF_VAL(port, val) ((val) << (((port) % 4) * 8))
#define MVPP2_PRS_MAX_LOOP_REG(port) (0x100c + ((port) & 4))
#define MVPP2_PRS_MAX_LOOP_MASK(port) (0xff << (((port) % 4) * 8))
#define MVPP2_PRS_MAX_LOOP_VAL(port, val) ((val) << (((port) % 4) * 8))
#define MVPP2_PRS_TCAM_IDX_REG 0x1100
#define MVPP2_PRS_TCAM_DATA_REG(idx) (0x1104 + (idx) * 4)
#define MVPP2_PRS_TCAM_INV_MASK BIT(31)
#define MVPP2_PRS_SRAM_IDX_REG 0x1200
#define MVPP2_PRS_SRAM_DATA_REG(idx) (0x1204 + (idx) * 4)
#define MVPP2_PRS_TCAM_CTRL_REG 0x1230
#define MVPP2_PRS_TCAM_EN_MASK BIT(0)
/* Classifier Registers */
#define MVPP2_CLS_MODE_REG 0x1800
#define MVPP2_CLS_MODE_ACTIVE_MASK BIT(0)
#define MVPP2_CLS_PORT_WAY_REG 0x1810
#define MVPP2_CLS_PORT_WAY_MASK(port) (1 << (port))
#define MVPP2_CLS_LKP_INDEX_REG 0x1814
#define MVPP2_CLS_LKP_INDEX_WAY_OFFS 6
#define MVPP2_CLS_LKP_TBL_REG 0x1818
#define MVPP2_CLS_LKP_TBL_RXQ_MASK 0xff
#define MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK BIT(25)
#define MVPP2_CLS_FLOW_INDEX_REG 0x1820
#define MVPP2_CLS_FLOW_TBL0_REG 0x1824
#define MVPP2_CLS_FLOW_TBL1_REG 0x1828
#define MVPP2_CLS_FLOW_TBL2_REG 0x182c
#define MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port) (0x1980 + ((port) * 4))
#define MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS 3
#define MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK 0x7
#define MVPP2_CLS_SWFWD_P2HQ_REG(port) (0x19b0 + ((port) * 4))
#define MVPP2_CLS_SWFWD_PCTRL_REG 0x19d0
#define MVPP2_CLS_SWFWD_PCTRL_MASK(port) (1 << (port))
/* Descriptor Manager Top Registers */
#define MVPP2_RXQ_NUM_REG 0x2040
#define MVPP2_RXQ_DESC_ADDR_REG 0x2044
#define MVPP22_DESC_ADDR_OFFS 8
#define MVPP2_RXQ_DESC_SIZE_REG 0x2048
#define MVPP2_RXQ_DESC_SIZE_MASK 0x3ff0
#define MVPP2_RXQ_STATUS_UPDATE_REG(rxq) (0x3000 + 4 * (rxq))
#define MVPP2_RXQ_NUM_PROCESSED_OFFSET 0
#define MVPP2_RXQ_NUM_NEW_OFFSET 16
#define MVPP2_RXQ_STATUS_REG(rxq) (0x3400 + 4 * (rxq))
#define MVPP2_RXQ_OCCUPIED_MASK 0x3fff
#define MVPP2_RXQ_NON_OCCUPIED_OFFSET 16
#define MVPP2_RXQ_NON_OCCUPIED_MASK 0x3fff0000
#define MVPP2_RXQ_THRESH_REG 0x204c
#define MVPP2_OCCUPIED_THRESH_OFFSET 0
#define MVPP2_OCCUPIED_THRESH_MASK 0x3fff
#define MVPP2_RXQ_INDEX_REG 0x2050
#define MVPP2_TXQ_NUM_REG 0x2080
#define MVPP2_TXQ_DESC_ADDR_REG 0x2084
#define MVPP2_TXQ_DESC_SIZE_REG 0x2088
#define MVPP2_TXQ_DESC_SIZE_MASK 0x3ff0
#define MVPP2_AGGR_TXQ_UPDATE_REG 0x2090
#define MVPP2_TXQ_INDEX_REG 0x2098
#define MVPP2_TXQ_PREF_BUF_REG 0x209c
#define MVPP2_PREF_BUF_PTR(desc) ((desc) & 0xfff)
#define MVPP2_PREF_BUF_SIZE_4 (BIT(12) | BIT(13))
#define MVPP2_PREF_BUF_SIZE_16 (BIT(12) | BIT(14))
#define MVPP2_PREF_BUF_THRESH(val) ((val) << 17)
#define MVPP2_TXQ_DRAIN_EN_MASK BIT(31)
#define MVPP2_TXQ_PENDING_REG 0x20a0
#define MVPP2_TXQ_PENDING_MASK 0x3fff
#define MVPP2_TXQ_INT_STATUS_REG 0x20a4
#define MVPP2_TXQ_SENT_REG(txq) (0x3c00 + 4 * (txq))
#define MVPP2_TRANSMITTED_COUNT_OFFSET 16
#define MVPP2_TRANSMITTED_COUNT_MASK 0x3fff0000
#define MVPP2_TXQ_RSVD_REQ_REG 0x20b0
#define MVPP2_TXQ_RSVD_REQ_Q_OFFSET 16
#define MVPP2_TXQ_RSVD_RSLT_REG 0x20b4
#define MVPP2_TXQ_RSVD_RSLT_MASK 0x3fff
#define MVPP2_TXQ_RSVD_CLR_REG 0x20b8
#define MVPP2_TXQ_RSVD_CLR_OFFSET 16
#define MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu) (0x2100 + 4 * (cpu))
#define MVPP22_AGGR_TXQ_DESC_ADDR_OFFS 8
#define MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu) (0x2140 + 4 * (cpu))
#define MVPP2_AGGR_TXQ_DESC_SIZE_MASK 0x3ff0
#define MVPP2_AGGR_TXQ_STATUS_REG(cpu) (0x2180 + 4 * (cpu))
#define MVPP2_AGGR_TXQ_PENDING_MASK 0x3fff
#define MVPP2_AGGR_TXQ_INDEX_REG(cpu) (0x21c0 + 4 * (cpu))
/* MBUS bridge registers */
#define MVPP2_WIN_BASE(w) (0x4000 + ((w) << 2))
#define MVPP2_WIN_SIZE(w) (0x4020 + ((w) << 2))
#define MVPP2_WIN_REMAP(w) (0x4040 + ((w) << 2))
#define MVPP2_BASE_ADDR_ENABLE 0x4060
/* Interrupt Cause and Mask registers */
#define MVPP2_ISR_RX_THRESHOLD_REG(rxq) (0x5200 + 4 * (rxq))
#define MVPP2_MAX_ISR_RX_THRESHOLD 0xfffff0
#define MVPP2_ISR_RXQ_GROUP_REG(rxq) (0x5400 + 4 * (rxq))
#define MVPP2_ISR_ENABLE_REG(port) (0x5420 + 4 * (port))
#define MVPP2_ISR_ENABLE_INTERRUPT(mask) ((mask) & 0xffff)
#define MVPP2_ISR_DISABLE_INTERRUPT(mask) (((mask) << 16) & 0xffff0000)
#define MVPP2_ISR_RX_TX_CAUSE_REG(port) (0x5480 + 4 * (port))
#define MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK 0xffff
#define MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK 0xff0000
#define MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK BIT(24)
#define MVPP2_CAUSE_FCS_ERR_MASK BIT(25)
#define MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK BIT(26)
#define MVPP2_CAUSE_TX_EXCEPTION_SUM_MASK BIT(29)
#define MVPP2_CAUSE_RX_EXCEPTION_SUM_MASK BIT(30)
#define MVPP2_CAUSE_MISC_SUM_MASK BIT(31)
#define MVPP2_ISR_RX_TX_MASK_REG(port) (0x54a0 + 4 * (port))
#define MVPP2_ISR_PON_RX_TX_MASK_REG 0x54bc
#define MVPP2_PON_CAUSE_RXQ_OCCUP_DESC_ALL_MASK 0xffff
#define MVPP2_PON_CAUSE_TXP_OCCUP_DESC_ALL_MASK 0x3fc00000
#define MVPP2_PON_CAUSE_MISC_SUM_MASK BIT(31)
#define MVPP2_ISR_MISC_CAUSE_REG 0x55b0
/* Buffer Manager registers */
#define MVPP2_BM_POOL_BASE_REG(pool) (0x6000 + ((pool) * 4))
#define MVPP2_BM_POOL_BASE_ADDR_MASK 0xfffff80
#define MVPP2_BM_POOL_SIZE_REG(pool) (0x6040 + ((pool) * 4))
#define MVPP2_BM_POOL_SIZE_MASK 0xfff0
#define MVPP2_BM_POOL_READ_PTR_REG(pool) (0x6080 + ((pool) * 4))
#define MVPP2_BM_POOL_GET_READ_PTR_MASK 0xfff0
#define MVPP2_BM_POOL_PTRS_NUM_REG(pool) (0x60c0 + ((pool) * 4))
#define MVPP2_BM_POOL_PTRS_NUM_MASK 0xfff0
#define MVPP2_BM_BPPI_READ_PTR_REG(pool) (0x6100 + ((pool) * 4))
#define MVPP2_BM_BPPI_PTRS_NUM_REG(pool) (0x6140 + ((pool) * 4))
#define MVPP2_BM_BPPI_PTR_NUM_MASK 0x7ff
#define MVPP2_BM_BPPI_PREFETCH_FULL_MASK BIT(16)
#define MVPP2_BM_POOL_CTRL_REG(pool) (0x6200 + ((pool) * 4))
#define MVPP2_BM_START_MASK BIT(0)
#define MVPP2_BM_STOP_MASK BIT(1)
#define MVPP2_BM_STATE_MASK BIT(4)
#define MVPP2_BM_LOW_THRESH_OFFS 8
#define MVPP2_BM_LOW_THRESH_MASK 0x7f00
#define MVPP2_BM_LOW_THRESH_VALUE(val) ((val) << \
					MVPP2_BM_LOW_THRESH_OFFS)
#define MVPP2_BM_HIGH_THRESH_OFFS 16
#define MVPP2_BM_HIGH_THRESH_MASK 0x7f0000
#define MVPP2_BM_HIGH_THRESH_VALUE(val) ((val) << \
					MVPP2_BM_HIGH_THRESH_OFFS)
#define MVPP2_BM_INTR_CAUSE_REG(pool) (0x6240 + ((pool) * 4))
#define MVPP2_BM_RELEASED_DELAY_MASK BIT(0)
#define MVPP2_BM_ALLOC_FAILED_MASK BIT(1)
#define MVPP2_BM_BPPE_EMPTY_MASK BIT(2)
#define MVPP2_BM_BPPE_FULL_MASK BIT(3)
#define MVPP2_BM_AVAILABLE_BP_LOW_MASK BIT(4)
#define MVPP2_BM_INTR_MASK_REG(pool) (0x6280 + ((pool) * 4))
#define MVPP2_BM_PHY_ALLOC_REG(pool) (0x6400 + ((pool) * 4))
#define MVPP2_BM_PHY_ALLOC_GRNTD_MASK BIT(0)
#define MVPP2_BM_VIRT_ALLOC_REG 0x6440
#define MVPP22_BM_ADDR_HIGH_ALLOC 0x6444
#define MVPP22_BM_ADDR_HIGH_PHYS_MASK 0xff
#define MVPP22_BM_ADDR_HIGH_VIRT_MASK 0xff00
#define MVPP22_BM_ADDR_HIGH_VIRT_SHIFT 8
#define MVPP2_BM_PHY_RLS_REG(pool) (0x6480 + ((pool) * 4))
#define MVPP2_BM_PHY_RLS_MC_BUFF_MASK BIT(0)
#define MVPP2_BM_PHY_RLS_PRIO_EN_MASK BIT(1)
#define MVPP2_BM_PHY_RLS_GRNTD_MASK BIT(2)
#define MVPP2_BM_VIRT_RLS_REG 0x64c0
#define MVPP22_BM_ADDR_HIGH_RLS_REG 0x64c4
#define MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK 0xff
#define MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK 0xff00
#define MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT 8
/* TX Scheduler registers */
#define MVPP2_TXP_SCHED_PORT_INDEX_REG 0x8000
#define MVPP2_TXP_SCHED_Q_CMD_REG 0x8004
#define MVPP2_TXP_SCHED_ENQ_MASK 0xff
#define MVPP2_TXP_SCHED_DISQ_OFFSET 8
#define MVPP2_TXP_SCHED_CMD_1_REG 0x8010
#define MVPP2_TXP_SCHED_PERIOD_REG 0x8018
#define MVPP2_TXP_SCHED_MTU_REG 0x801c
#define MVPP2_TXP_MTU_MAX 0x7FFFF
#define MVPP2_TXP_SCHED_REFILL_REG 0x8020
#define MVPP2_TXP_REFILL_TOKENS_ALL_MASK 0x7ffff
#define MVPP2_TXP_REFILL_PERIOD_ALL_MASK 0x3ff00000
#define MVPP2_TXP_REFILL_PERIOD_MASK(v) ((v) << 20)
#define MVPP2_TXP_SCHED_TOKEN_SIZE_REG 0x8024
#define MVPP2_TXP_TOKEN_SIZE_MAX 0xffffffff
#define MVPP2_TXQ_SCHED_REFILL_REG(q) (0x8040 + ((q) << 2))
#define MVPP2_TXQ_REFILL_TOKENS_ALL_MASK 0x7ffff
#define MVPP2_TXQ_REFILL_PERIOD_ALL_MASK 0x3ff00000
#define MVPP2_TXQ_REFILL_PERIOD_MASK(v) ((v) << 20)
#define MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(q) (0x8060 + ((q) << 2))
#define MVPP2_TXQ_TOKEN_SIZE_MAX 0x7fffffff
#define MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(q) (0x8080 + ((q) << 2))
#define MVPP2_TXQ_TOKEN_CNTR_MAX 0xffffffff
/* TX general registers */
#define MVPP2_TX_SNOOP_REG 0x8800
#define MVPP2_TX_PORT_FLUSH_REG 0x8810
#define MVPP2_TX_PORT_FLUSH_MASK(port) (1 << (port))

/* LMS registers */
#define MVPP2_SRC_ADDR_MIDDLE 0x24
#define MVPP2_SRC_ADDR_HIGH 0x28
#define MVPP2_PHY_AN_CFG0_REG 0x34
#define MVPP2_PHY_AN_STOP_SMI0_MASK BIT(7)
#define MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG 0x305c
#define MVPP2_EXT_GLOBAL_CTRL_DEFAULT 0x27
/* Per-port registers */
#define MVPP2_GMAC_CTRL_0_REG 0x0
#define MVPP2_GMAC_PORT_EN_MASK BIT(0)
#define MVPP2_GMAC_MAX_RX_SIZE_OFFS 2
#define MVPP2_GMAC_MAX_RX_SIZE_MASK 0x7ffc
#define MVPP2_GMAC_MIB_CNTR_EN_MASK BIT(15)
#define MVPP2_GMAC_CTRL_1_REG 0x4
#define MVPP2_GMAC_PERIODIC_XON_EN_MASK BIT(1)
#define MVPP2_GMAC_GMII_LB_EN_MASK BIT(5)
#define MVPP2_GMAC_PCS_LB_EN_BIT 6
#define MVPP2_GMAC_PCS_LB_EN_MASK BIT(6)
#define MVPP2_GMAC_SA_LOW_OFFS 7
#define MVPP2_GMAC_CTRL_2_REG 0x8
#define MVPP2_GMAC_INBAND_AN_MASK BIT(0)
#define MVPP2_GMAC_PCS_ENABLE_MASK BIT(3)
#define MVPP2_GMAC_PORT_RGMII_MASK BIT(4)
#define MVPP2_GMAC_PORT_RESET_MASK BIT(6)
#define MVPP2_GMAC_AUTONEG_CONFIG 0xc
#define MVPP2_GMAC_FORCE_LINK_DOWN BIT(0)
#define MVPP2_GMAC_FORCE_LINK_PASS BIT(1)
#define MVPP2_GMAC_CONFIG_MII_SPEED BIT(5)
#define MVPP2_GMAC_CONFIG_GMII_SPEED BIT(6)
#define MVPP2_GMAC_AN_SPEED_EN BIT(7)
#define MVPP2_GMAC_FC_ADV_EN BIT(9)
#define MVPP2_GMAC_CONFIG_FULL_DUPLEX BIT(12)
#define MVPP2_GMAC_AN_DUPLEX_EN BIT(13)
#define MVPP2_GMAC_PORT_FIFO_CFG_1_REG 0x1c
#define MVPP2_GMAC_TX_FIFO_MIN_TH_OFFS 6
#define MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK 0x1fc0
#define MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(v) (((v) << 6) & \
					MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK)
#define MVPP22_GMAC_CTRL_4_REG 0x90
#define MVPP22_CTRL4_EXT_PIN_GMII_SEL BIT(0)
#define MVPP22_CTRL4_DP_CLK_SEL BIT(5)
#define MVPP22_CTRL4_SYNC_BYPASS BIT(6)
#define MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE BIT(7)
/* Per-port XGMAC registers. PPv2.2 only, only for GOP port 0,
 * relative to port->base.
 */
#define MVPP22_XLG_CTRL3_REG 0x11c
#define MVPP22_XLG_CTRL3_MACMODESELECT_MASK (7 << 13)
#define MVPP22_XLG_CTRL3_MACMODESELECT_GMAC (0 << 13)
#define MVPP22_XLG_CTRL3_MACMODESELECT_10G (1 << 13)

/* SMI registers. PPv2.2 only, relative to priv->iface_base. */
#define MVPP22_SMI_MISC_CFG_REG 0x1204
#define MVPP22_SMI_POLLING_EN BIT(10)
#define MVPP22_GMAC_BASE(port) (0x7000 + (port) * 0x1000 + 0xe00)

#define MVPP2_CAUSE_TXQ_SENT_DESC_ALL_MASK 0xff

/* Descriptor ring Macros */
#define MVPP2_QUEUE_NEXT_DESC(q, index) \
	(((index) < (q)->last_desc) ? ((index) + 1) : 0)
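/* For example, with a 4-descriptor ring last_desc is 3, so the macro
 * yields 0 -> 1 -> 2 -> 3 -> 0: the descriptor array is used as a
 * circular ring.
 */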
/* Various constants */

/* Coalescing */
#define MVPP2_TXDONE_COAL_PKTS_THRESH 15
#define MVPP2_TXDONE_HRTIMER_PERIOD_NS 1000000UL
#define MVPP2_RX_COAL_PKTS 32
#define MVPP2_RX_COAL_USEC 100
/* The two bytes Marvell header. Either contains a special value used
 * by Marvell switches when a specific hardware mode is enabled (not
 * supported by this driver) or is filled automatically by zeroes on
 * the RX side. Those two bytes being at the front of the Ethernet
 * header, they allow the IP header to be aligned on a 4-byte
 * boundary automatically: the hardware skips those two bytes on its
 * own.
 */
#define MVPP2_MH_SIZE 2
#define MVPP2_ETH_TYPE_LEN 2
#define MVPP2_PPPOE_HDR_SIZE 8
#define MVPP2_VLAN_TAG_LEN 4
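/* A worked example of the alignment property described above: with the
 * two-byte Marvell header in front, software sees an L2 header of
 * MVPP2_MH_SIZE + ETH_HLEN = 2 + 14 = 16 bytes, a multiple of 4, so an
 * IP header that follows lands on a 4-byte boundary with no extra
 * headroom adjustment.
 */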
/* Lbtd 802.3 type */
#define MVPP2_IP_LBDT_TYPE 0xfffa

#define MVPP2_TX_CSUM_MAX_SIZE 9800

/* Timeout constants */
#define MVPP2_TX_DISABLE_TIMEOUT_MSEC 1000
#define MVPP2_TX_PENDING_TIMEOUT_MSEC 1000

#define MVPP2_TX_MTU_MAX 0x7ffff

/* Maximum number of T-CONTs of PON port */
#define MVPP2_MAX_TCONT 16

/* Maximum number of supported ports */
#define MVPP2_MAX_PORTS 4

/* Maximum number of TXQs used by single port */
#define MVPP2_MAX_TXQ 8

/* Maximum number of RXQs used by single port */
#define MVPP2_MAX_RXQ 8

/* Default number of RXQs in use */
#define MVPP2_DEFAULT_RXQ 4

/* Total number of RXQs available to all ports */
#define MVPP2_RXQ_TOTAL_NUM (MVPP2_MAX_PORTS * MVPP2_MAX_RXQ)

/* Max number of Rx descriptors */
#define MVPP2_MAX_RXD 128

/* Max number of Tx descriptors */
#define MVPP2_MAX_TXD 1024

/* Amount of Tx descriptors that can be reserved at once by CPU */
#define MVPP2_CPU_DESC_CHUNK 64

/* Max number of Tx descriptors in each aggregated queue */
#define MVPP2_AGGR_TXQ_SIZE 256

/* Descriptor aligned size */
#define MVPP2_DESC_ALIGNED_SIZE 32

/* Descriptor alignment mask */
#define MVPP2_TX_DESC_ALIGN (MVPP2_DESC_ALIGNED_SIZE - 1)

/* RX FIFO constants */
#define MVPP2_RX_FIFO_PORT_DATA_SIZE 0x2000
#define MVPP2_RX_FIFO_PORT_ATTR_SIZE 0x80
#define MVPP2_RX_FIFO_PORT_MIN_PKT 0x80

/* RX buffer constants */
#define MVPP2_SKB_SHINFO_SIZE \
	SKB_DATA_ALIGN(sizeof(struct skb_shared_info))

#define MVPP2_RX_PKT_SIZE(mtu) \
	ALIGN((mtu) + MVPP2_MH_SIZE + MVPP2_VLAN_TAG_LEN + \
	      ETH_HLEN + ETH_FCS_LEN, cache_line_size())

#define MVPP2_RX_BUF_SIZE(pkt_size) ((pkt_size) + NET_SKB_PAD)
#define MVPP2_RX_TOTAL_SIZE(buf_size) ((buf_size) + MVPP2_SKB_SHINFO_SIZE)
#define MVPP2_RX_MAX_PKT_SIZE(total_size) \
	((total_size) - NET_SKB_PAD - MVPP2_SKB_SHINFO_SIZE)
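/* For instance, with an MTU of 1500 and a 64-byte cache line (both
 * machine-dependent), MVPP2_RX_PKT_SIZE(1500) = ALIGN(1500 + 2 + 4 +
 * 14 + 4, 64) = 1536; MVPP2_RX_BUF_SIZE() then adds NET_SKB_PAD of
 * headroom and MVPP2_RX_TOTAL_SIZE() the skb_shared_info tail.
 */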
#define MVPP2_BIT_TO_BYTE(bit) ((bit) / 8)

/* IPv6 max L3 address size */
#define MVPP2_MAX_L3_ADDR_SIZE 16

/* Port flags */
#define MVPP2_F_LOOPBACK BIT(0)

/* Marvell tag types */
enum mvpp2_tag_type {
	MVPP2_TAG_TYPE_NONE = 0,
	MVPP2_TAG_TYPE_MH = 1,
	MVPP2_TAG_TYPE_DSA = 2,
	MVPP2_TAG_TYPE_EDSA = 3,
	MVPP2_TAG_TYPE_VLAN = 4,
	MVPP2_TAG_TYPE_LAST = 5
};
/* Parser constants */
#define MVPP2_PRS_TCAM_SRAM_SIZE 256
#define MVPP2_PRS_TCAM_WORDS 6
#define MVPP2_PRS_SRAM_WORDS 4
#define MVPP2_PRS_FLOW_ID_SIZE 64
#define MVPP2_PRS_FLOW_ID_MASK 0x3f
#define MVPP2_PRS_TCAM_ENTRY_INVALID 1
#define MVPP2_PRS_TCAM_DSA_TAGGED_BIT BIT(5)
#define MVPP2_PRS_IPV4_HEAD 0x40
#define MVPP2_PRS_IPV4_HEAD_MASK 0xf0
#define MVPP2_PRS_IPV4_MC 0xe0
#define MVPP2_PRS_IPV4_MC_MASK 0xf0
#define MVPP2_PRS_IPV4_BC_MASK 0xff
#define MVPP2_PRS_IPV4_IHL 0x5
#define MVPP2_PRS_IPV4_IHL_MASK 0xf
#define MVPP2_PRS_IPV6_MC 0xff
#define MVPP2_PRS_IPV6_MC_MASK 0xff
#define MVPP2_PRS_IPV6_HOP_MASK 0xff
#define MVPP2_PRS_TCAM_PROTO_MASK 0xff
#define MVPP2_PRS_TCAM_PROTO_MASK_L 0x3f
#define MVPP2_PRS_DBL_VLANS_MAX 100
/* Tcam structure:
 * - lookup ID - 4 bits
 * - port ID - 1 byte
 * - additional information - 1 byte
 * - header data - 8 bytes
 * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(5)->(0).
 */
#define MVPP2_PRS_AI_BITS 8
#define MVPP2_PRS_PORT_MASK 0xff
#define MVPP2_PRS_LU_MASK 0xf
#define MVPP2_PRS_TCAM_DATA_BYTE(offs) \
	(((offs) - ((offs) % 2)) * 2 + ((offs) % 2))
#define MVPP2_PRS_TCAM_DATA_BYTE_EN(offs) \
	(((offs) * 2) - ((offs) % 2) + 2)
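/* Each 32-bit TCAM data register holds two data bytes (bytes 0-1 of
 * the word) followed by their two enable bytes (bytes 2-3), which is
 * what the two macros above encode. For example: offs 0 -> data byte
 * 0/enable byte 2, offs 1 -> 1/3, offs 2 -> 4/6, offs 3 -> 5/7, and
 * so on.
 */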
#define MVPP2_PRS_TCAM_AI_BYTE 16
#define MVPP2_PRS_TCAM_PORT_BYTE 17
#define MVPP2_PRS_TCAM_LU_BYTE 20
#define MVPP2_PRS_TCAM_EN_OFFS(offs) ((offs) + 2)
#define MVPP2_PRS_TCAM_INV_WORD 5
/* Tcam entries ID */
#define MVPP2_PE_DROP_ALL 0
#define MVPP2_PE_FIRST_FREE_TID 1
#define MVPP2_PE_LAST_FREE_TID (MVPP2_PRS_TCAM_SRAM_SIZE - 31)
#define MVPP2_PE_IP6_EXT_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 30)
#define MVPP2_PE_MAC_MC_IP6 (MVPP2_PRS_TCAM_SRAM_SIZE - 29)
#define MVPP2_PE_IP6_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 28)
#define MVPP2_PE_IP4_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 27)
#define MVPP2_PE_LAST_DEFAULT_FLOW (MVPP2_PRS_TCAM_SRAM_SIZE - 26)
#define MVPP2_PE_FIRST_DEFAULT_FLOW (MVPP2_PRS_TCAM_SRAM_SIZE - 19)
#define MVPP2_PE_EDSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 18)
#define MVPP2_PE_EDSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 17)
#define MVPP2_PE_DSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 16)
#define MVPP2_PE_DSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 15)
#define MVPP2_PE_ETYPE_EDSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 14)
#define MVPP2_PE_ETYPE_EDSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 13)
#define MVPP2_PE_ETYPE_DSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 12)
#define MVPP2_PE_ETYPE_DSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 11)
#define MVPP2_PE_MH_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 10)
#define MVPP2_PE_DSA_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 9)
#define MVPP2_PE_IP6_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 8)
#define MVPP2_PE_IP4_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 7)
#define MVPP2_PE_ETH_TYPE_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 6)
#define MVPP2_PE_VLAN_DBL (MVPP2_PRS_TCAM_SRAM_SIZE - 5)
#define MVPP2_PE_VLAN_NONE (MVPP2_PRS_TCAM_SRAM_SIZE - 4)
#define MVPP2_PE_MAC_MC_ALL (MVPP2_PRS_TCAM_SRAM_SIZE - 3)
#define MVPP2_PE_MAC_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 2)
#define MVPP2_PE_MAC_NON_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 1)
/* Sram structure
 * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(3)->(0).
 */
#define MVPP2_PRS_SRAM_RI_OFFS 0
#define MVPP2_PRS_SRAM_RI_WORD 0
#define MVPP2_PRS_SRAM_RI_CTRL_OFFS 32
#define MVPP2_PRS_SRAM_RI_CTRL_WORD 1
#define MVPP2_PRS_SRAM_RI_CTRL_BITS 32
#define MVPP2_PRS_SRAM_SHIFT_OFFS 64
#define MVPP2_PRS_SRAM_SHIFT_SIGN_BIT 72
#define MVPP2_PRS_SRAM_UDF_OFFS 73
#define MVPP2_PRS_SRAM_UDF_BITS 8
#define MVPP2_PRS_SRAM_UDF_MASK 0xff
#define MVPP2_PRS_SRAM_UDF_SIGN_BIT 81
#define MVPP2_PRS_SRAM_UDF_TYPE_OFFS 82
#define MVPP2_PRS_SRAM_UDF_TYPE_MASK 0x7
#define MVPP2_PRS_SRAM_UDF_TYPE_L3 1
#define MVPP2_PRS_SRAM_UDF_TYPE_L4 4
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS 85
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK 0x3
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD 1
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP4_ADD 2
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP6_ADD 3
#define MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS 87
#define MVPP2_PRS_SRAM_OP_SEL_UDF_BITS 2
#define MVPP2_PRS_SRAM_OP_SEL_UDF_MASK 0x3
#define MVPP2_PRS_SRAM_OP_SEL_UDF_ADD 0
#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP4_ADD 2
#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP6_ADD 3
#define MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS 89
#define MVPP2_PRS_SRAM_AI_OFFS 90
#define MVPP2_PRS_SRAM_AI_CTRL_OFFS 98
#define MVPP2_PRS_SRAM_AI_CTRL_BITS 8
#define MVPP2_PRS_SRAM_AI_MASK 0xff
#define MVPP2_PRS_SRAM_NEXT_LU_OFFS 106
#define MVPP2_PRS_SRAM_NEXT_LU_MASK 0xf
#define MVPP2_PRS_SRAM_LU_DONE_BIT 110
#define MVPP2_PRS_SRAM_LU_GEN_BIT 111
/* Sram result info bits assignment */
#define MVPP2_PRS_RI_MAC_ME_MASK 0x1
#define MVPP2_PRS_RI_DSA_MASK 0x2
#define MVPP2_PRS_RI_VLAN_MASK (BIT(2) | BIT(3))
#define MVPP2_PRS_RI_VLAN_NONE 0x0
#define MVPP2_PRS_RI_VLAN_SINGLE BIT(2)
#define MVPP2_PRS_RI_VLAN_DOUBLE BIT(3)
#define MVPP2_PRS_RI_VLAN_TRIPLE (BIT(2) | BIT(3))
#define MVPP2_PRS_RI_CPU_CODE_MASK 0x70
#define MVPP2_PRS_RI_CPU_CODE_RX_SPEC BIT(4)
#define MVPP2_PRS_RI_L2_CAST_MASK (BIT(9) | BIT(10))
#define MVPP2_PRS_RI_L2_UCAST 0x0
#define MVPP2_PRS_RI_L2_MCAST BIT(9)
#define MVPP2_PRS_RI_L2_BCAST BIT(10)
#define MVPP2_PRS_RI_PPPOE_MASK 0x800
#define MVPP2_PRS_RI_L3_PROTO_MASK (BIT(12) | BIT(13) | BIT(14))
#define MVPP2_PRS_RI_L3_UN 0x0
#define MVPP2_PRS_RI_L3_IP4 BIT(12)
#define MVPP2_PRS_RI_L3_IP4_OPT BIT(13)
#define MVPP2_PRS_RI_L3_IP4_OTHER (BIT(12) | BIT(13))
#define MVPP2_PRS_RI_L3_IP6 BIT(14)
#define MVPP2_PRS_RI_L3_IP6_EXT (BIT(12) | BIT(14))
#define MVPP2_PRS_RI_L3_ARP (BIT(13) | BIT(14))
#define MVPP2_PRS_RI_L3_ADDR_MASK (BIT(15) | BIT(16))
#define MVPP2_PRS_RI_L3_UCAST 0x0
#define MVPP2_PRS_RI_L3_MCAST BIT(15)
#define MVPP2_PRS_RI_L3_BCAST (BIT(15) | BIT(16))
#define MVPP2_PRS_RI_IP_FRAG_MASK 0x20000
#define MVPP2_PRS_RI_UDF3_MASK 0x300000
#define MVPP2_PRS_RI_UDF3_RX_SPECIAL BIT(21)
#define MVPP2_PRS_RI_L4_PROTO_MASK 0x1c00000
#define MVPP2_PRS_RI_L4_TCP BIT(22)
#define MVPP2_PRS_RI_L4_UDP BIT(23)
#define MVPP2_PRS_RI_L4_OTHER (BIT(22) | BIT(23))
#define MVPP2_PRS_RI_UDF7_MASK 0x60000000
#define MVPP2_PRS_RI_UDF7_IP6_LITE BIT(29)
#define MVPP2_PRS_RI_DROP_MASK 0x80000000
/* Sram additional info bits assignment */
#define MVPP2_PRS_IPV4_DIP_AI_BIT BIT(0)
#define MVPP2_PRS_IPV6_NO_EXT_AI_BIT BIT(0)
#define MVPP2_PRS_IPV6_EXT_AI_BIT BIT(1)
#define MVPP2_PRS_IPV6_EXT_AH_AI_BIT BIT(2)
#define MVPP2_PRS_IPV6_EXT_AH_LEN_AI_BIT BIT(3)
#define MVPP2_PRS_IPV6_EXT_AH_L4_AI_BIT BIT(4)
#define MVPP2_PRS_SINGLE_VLAN_AI 0
#define MVPP2_PRS_DBL_VLAN_AI_BIT BIT(7)

/* DSA/EDSA type */
#define MVPP2_PRS_TAGGED true
#define MVPP2_PRS_UNTAGGED false
#define MVPP2_PRS_EDSA true
#define MVPP2_PRS_DSA false
/* MAC entries, shadow udf */
enum mvpp2_prs_udf {
	MVPP2_PRS_UDF_MAC_DEF,
	MVPP2_PRS_UDF_MAC_RANGE,
	MVPP2_PRS_UDF_L2_DEF,
	MVPP2_PRS_UDF_L2_DEF_COPY,
	MVPP2_PRS_UDF_L2_USER,
};

/* Lookup ID */
enum mvpp2_prs_lookup {
	MVPP2_PRS_LU_MH,
	MVPP2_PRS_LU_MAC,
	MVPP2_PRS_LU_DSA,
	MVPP2_PRS_LU_VLAN,
	MVPP2_PRS_LU_L2,
	MVPP2_PRS_LU_PPPOE,
	MVPP2_PRS_LU_IP4,
	MVPP2_PRS_LU_IP6,
	MVPP2_PRS_LU_FLOWS,
	MVPP2_PRS_LU_LAST,
};

enum mvpp2_prs_l3_cast {
	MVPP2_PRS_L3_UNI_CAST,
	MVPP2_PRS_L3_MULTI_CAST,
	MVPP2_PRS_L3_BROAD_CAST
};
/* Classifier constants */
#define MVPP2_CLS_FLOWS_TBL_SIZE 512
#define MVPP2_CLS_FLOWS_TBL_DATA_WORDS 3
#define MVPP2_CLS_LKP_TBL_SIZE 64

/* BM constants */
#define MVPP2_BM_POOLS_NUM 8
#define MVPP2_BM_LONG_BUF_NUM 1024
#define MVPP2_BM_SHORT_BUF_NUM 2048
#define MVPP2_BM_POOL_SIZE_MAX (16*1024 - MVPP2_BM_POOL_PTR_ALIGN/4)
#define MVPP2_BM_POOL_PTR_ALIGN 128
#define MVPP2_BM_SWF_LONG_POOL(port) ((port > 2) ? 2 : port)
#define MVPP2_BM_SWF_SHORT_POOL 3
/* BM cookie (32 bits) definition */
#define MVPP2_BM_COOKIE_POOL_OFFS 8
#define MVPP2_BM_COOKIE_CPU_OFFS 24

/* BM short pool packet size
 * These values ensure that for SWF the total number
 * of bytes allocated for each buffer will be 512
 */
#define MVPP2_BM_SHORT_PKT_SIZE MVPP2_RX_MAX_PKT_SIZE(512)
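/* Expanded, MVPP2_BM_SHORT_PKT_SIZE is 512 - NET_SKB_PAD -
 * SKB_DATA_ALIGN(sizeof(struct skb_shared_info)): the largest packet
 * that still fits a 512-byte buffer once headroom and the
 * skb_shared_info tail are accounted for. The exact value therefore
 * depends on the architecture's NET_SKB_PAD and cache line size.
 */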
#define MVPP21_ADDR_SPACE_SZ 0
#define MVPP22_ADDR_SPACE_SZ SZ_64K

#define MVPP2_MAX_CPUS 4

enum mvpp2_bm_type {
	MVPP2_BM_FREE,
	MVPP2_BM_SWF_LONG,
	MVPP2_BM_SWF_SHORT
};

/* Definitions */
/* Shared Packet Processor resources */
struct mvpp2 {
	/* Shared registers' base addresses */
	void __iomem *lms_base;
	void __iomem *iface_base;

	/* On PPv2.2, each CPU can access the base register through a
	 * separate address space, each 64 KB apart from each
	 * other.
	 */
	void __iomem *cpu_base[MVPP2_MAX_CPUS];

	/* List of pointers to port structures */
	struct mvpp2_port **port_list;

	/* Aggregated TXQs */
	struct mvpp2_tx_queue *aggr_txqs;

	/* BM pools */
	struct mvpp2_bm_pool *bm_pools;

	/* PRS shadow table */
	struct mvpp2_prs_shadow *prs_shadow;
	/* PRS auxiliary table for double vlan entries control */
	bool *prs_double_vlans;

	/* HW version */
	enum { MVPP21, MVPP22 } hw_version;
};
struct mvpp2_pcpu_stats {
	struct u64_stats_sync syncp;
	u64 rx_packets;
	u64 rx_bytes;
	u64 tx_packets;
	u64 tx_bytes;
};

/* Per-CPU port control */
struct mvpp2_port_pcpu {
	struct hrtimer tx_done_timer;
	bool timer_scheduled;
	/* Tasklet for egress finalization */
	struct tasklet_struct tx_done_tasklet;
};
struct mvpp2_port {
	u8 id;

	/* Index of the port from the "group of ports" complex point
	 * of view
	 */
	int gop_id;

	struct mvpp2 *priv;

	/* Per-port registers' base address */
	void __iomem *base;

	struct mvpp2_rx_queue **rxqs;
	struct mvpp2_tx_queue **txqs;
	struct net_device *dev;

	u32 pending_cause_rx;
	struct napi_struct napi;

	/* Per-CPU port control */
	struct mvpp2_port_pcpu __percpu *pcpu;

	struct mvpp2_pcpu_stats __percpu *stats;

	phy_interface_t phy_interface;
	struct device_node *phy_node;

	struct mvpp2_bm_pool *pool_long;
	struct mvpp2_bm_pool *pool_short;

	/* Index of first port's physical RXQ */
	u8 first_rxq;
};
/* The mvpp2_tx_desc and mvpp2_rx_desc structures describe the
 * layout of the transmit and reception DMA descriptors, and their
 * layout is therefore defined by the hardware design
 */

#define MVPP2_TXD_L3_OFF_SHIFT 0
#define MVPP2_TXD_IP_HLEN_SHIFT 8
#define MVPP2_TXD_L4_CSUM_FRAG BIT(13)
#define MVPP2_TXD_L4_CSUM_NOT BIT(14)
#define MVPP2_TXD_IP_CSUM_DISABLE BIT(15)
#define MVPP2_TXD_PADDING_DISABLE BIT(23)
#define MVPP2_TXD_L4_UDP BIT(24)
#define MVPP2_TXD_L3_IP6 BIT(26)
#define MVPP2_TXD_L_DESC BIT(28)
#define MVPP2_TXD_F_DESC BIT(29)

#define MVPP2_RXD_ERR_SUMMARY BIT(15)
#define MVPP2_RXD_ERR_CODE_MASK (BIT(13) | BIT(14))
#define MVPP2_RXD_ERR_CRC 0x0
#define MVPP2_RXD_ERR_OVERRUN BIT(13)
#define MVPP2_RXD_ERR_RESOURCE (BIT(13) | BIT(14))
#define MVPP2_RXD_BM_POOL_ID_OFFS 16
#define MVPP2_RXD_BM_POOL_ID_MASK (BIT(16) | BIT(17) | BIT(18))
#define MVPP2_RXD_HWF_SYNC BIT(21)
#define MVPP2_RXD_L4_CSUM_OK BIT(22)
#define MVPP2_RXD_IP4_HEADER_ERR BIT(24)
#define MVPP2_RXD_L4_TCP BIT(25)
#define MVPP2_RXD_L4_UDP BIT(26)
#define MVPP2_RXD_L3_IP4 BIT(28)
#define MVPP2_RXD_L3_IP6 BIT(30)
#define MVPP2_RXD_BUF_HDR BIT(31)
/* HW TX descriptor for PPv2.1 */
struct mvpp21_tx_desc {
	u32 command;		/* Options used by HW for packet transmitting.*/
	u8 packet_offset;	/* the offset from the buffer beginning */
	u8 phys_txq;		/* destination queue ID */
	u16 data_size;		/* data size of transmitted packet in bytes */
	u32 buf_dma_addr;	/* physical addr of transmitted buffer */
	u32 buf_cookie;		/* cookie for access to TX buffer in tx path */
	u32 reserved1[3];	/* hw_cmd (for future use, BM, PON, PNC) */
	u32 reserved2;		/* reserved (for future use) */
};

/* HW RX descriptor for PPv2.1 */
struct mvpp21_rx_desc {
	u32 status;		/* info about received packet */
	u16 reserved1;		/* parser_info (for future use, PnC) */
	u16 data_size;		/* size of received packet in bytes */
	u32 buf_dma_addr;	/* physical address of the buffer */
	u32 buf_cookie;		/* cookie for access to RX buffer in rx path */
	u16 reserved2;		/* gem_port_id (for future use, PON) */
	u16 reserved3;		/* csum_l4 (for future use, PnC) */
	u8 reserved4;		/* bm_qset (for future use, BM) */
	u8 reserved5;
	u16 reserved6;		/* classify_info (for future use, PnC) */
	u32 reserved7;		/* flow_id (for future use, PnC) */
	u32 reserved8;
};

/* HW TX descriptor for PPv2.2 */
struct mvpp22_tx_desc {
	u32 command;
	u8 packet_offset;
	u8 phys_txq;
	u16 data_size;
	u64 reserved1;
	u64 buf_dma_addr_ptp;
	u64 buf_cookie_misc;
};

/* HW RX descriptor for PPv2.2 */
struct mvpp22_rx_desc {
	u32 status;
	u16 reserved1;
	u16 data_size;
	u32 reserved2;
	u32 reserved3;
	u64 buf_dma_addr_key_hash;
	u64 buf_cookie_misc;
};
/* Opaque type used by the driver to manipulate the HW TX and RX
 * descriptors
 */
struct mvpp2_tx_desc {
	union {
		struct mvpp21_tx_desc pp21;
		struct mvpp22_tx_desc pp22;
	};
};

struct mvpp2_rx_desc {
	union {
		struct mvpp21_rx_desc pp21;
		struct mvpp22_rx_desc pp22;
	};
};
struct mvpp2_txq_pcpu_buf {
	/* Transmitted SKB */
	struct sk_buff *skb;

	/* Physical address of transmitted buffer */
	dma_addr_t dma;

	/* Size transmitted */
	size_t size;
};

/* Per-CPU Tx queue control */
struct mvpp2_txq_pcpu {
	int cpu;

	/* Number of Tx DMA descriptors in the descriptor ring */
	int size;

	/* Number of currently used Tx DMA descriptor in the
	 * descriptor ring
	 */
	int count;

	/* Number of Tx DMA descriptors reserved for each CPU */
	int reserved_num;

	/* Infos about transmitted buffers */
	struct mvpp2_txq_pcpu_buf *buffs;

	/* Index of last TX DMA descriptor that was inserted */
	int txq_put_index;

	/* Index of the TX DMA descriptor to be cleaned up */
	int txq_get_index;
};
struct mvpp2_tx_queue {
	/* Physical number of this Tx queue */
	u8 id;

	/* Logical number of this Tx queue */
	u8 log_id;

	/* Number of Tx DMA descriptors in the descriptor ring */
	int size;

	/* Number of currently used Tx DMA descriptor in the descriptor ring */
	int count;

	/* Per-CPU control of physical Tx queues */
	struct mvpp2_txq_pcpu __percpu *pcpu;

	/* Virtual address of the Tx DMA descriptors array */
	struct mvpp2_tx_desc *descs;

	/* DMA address of the Tx DMA descriptors array */
	dma_addr_t descs_dma;

	/* Index of the last Tx DMA descriptor */
	int last_desc;

	/* Index of the next Tx DMA descriptor to process */
	int next_desc_to_proc;
};
struct mvpp2_rx_queue {
	/* RX queue number, in the range 0-31 for physical RXQs */
	u8 id;

	/* Num of rx descriptors in the rx descriptor ring */
	int size;

	/* Virtual address of the RX DMA descriptors array */
	struct mvpp2_rx_desc *descs;

	/* DMA address of the RX DMA descriptors array */
	dma_addr_t descs_dma;

	/* Index of the last RX DMA descriptor */
	int last_desc;

	/* Index of the next RX DMA descriptor to process */
	int next_desc_to_proc;

	/* ID of port to which physical RXQ is mapped */
	int port;

	/* Port's logic RXQ number to which physical RXQ is mapped */
	int logic_rxq;
};
union mvpp2_prs_tcam_entry {
	u32 word[MVPP2_PRS_TCAM_WORDS];
	u8 byte[MVPP2_PRS_TCAM_WORDS * 4];
};

union mvpp2_prs_sram_entry {
	u32 word[MVPP2_PRS_SRAM_WORDS];
	u8 byte[MVPP2_PRS_SRAM_WORDS * 4];
};

struct mvpp2_prs_entry {
	u32 index;
	union mvpp2_prs_tcam_entry tcam;
	union mvpp2_prs_sram_entry sram;
};

struct mvpp2_prs_shadow {
	bool valid;
	bool finish;

	/* Lookup ID */
	int lu;

	/* User defined offset */
	unsigned char udf;

	/* Result info */
	u32 ri;
	u32 ri_mask;
};

struct mvpp2_cls_flow_entry {
	u32 index;
	u32 data[MVPP2_CLS_FLOWS_TBL_DATA_WORDS];
};

struct mvpp2_cls_lookup_entry {
	u32 lkpid;
	u32 way;
	u32 data;
};

struct mvpp2_bm_pool {
	/* Pool number in the range 0-7 */
	int id;
	enum mvpp2_bm_type type;

	/* Buffer Pointers Pool External (BPPE) size */
	int size;
	/* BPPE size in bytes */
	int size_bytes;
	/* Number of buffers for this pool */
	int buf_num;
	/* Pool buffer size */
	int buf_size;

	/* BPPE virtual base address */
	u32 *virt_addr;
	/* BPPE DMA base address */
	dma_addr_t dma_addr;

	/* Ports using BM pool */
	u32 port_map;
};
/* Static declarations */

/* Number of RXQs used by single port */
static int rxq_number = MVPP2_DEFAULT_RXQ;
/* Number of TXQs used by single port */
static int txq_number = MVPP2_MAX_TXQ;

#define MVPP2_DRIVER_NAME "mvpp2"
#define MVPP2_DRIVER_VERSION "1.0"
/* Utility/helper methods */

static void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data)
{
	writel(data, priv->cpu_base[0] + offset);
}

static u32 mvpp2_read(struct mvpp2 *priv, u32 offset)
{
	return readl(priv->cpu_base[0] + offset);
}
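/* Note that mvpp2_write()/mvpp2_read() always go through address
 * space 0. That is correct for truly global registers; registers with
 * per-CPU semantics must use the mvpp2_percpu_*() accessors described
 * below.
 */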
/* These accessors should be used to access:
 *
 * - per-CPU registers, where each CPU has its own copy of the
 *   register.
 *
 *   MVPP2_BM_VIRT_ALLOC_REG
 *   MVPP22_BM_ADDR_HIGH_ALLOC
 *   MVPP22_BM_ADDR_HIGH_RLS_REG
 *   MVPP2_BM_VIRT_RLS_REG
 *   MVPP2_ISR_RX_TX_CAUSE_REG
 *   MVPP2_ISR_RX_TX_MASK_REG
 *   MVPP2_AGGR_TXQ_UPDATE_REG
 *   MVPP2_TXQ_RSVD_REQ_REG
 *   MVPP2_TXQ_RSVD_RSLT_REG
 *   MVPP2_TXQ_SENT_REG
 *
 * - global registers that must be accessed through a specific CPU
 *   window, because they are related to an access to a per-CPU
 *   register
 *
 *   MVPP2_BM_PHY_ALLOC_REG (related to MVPP2_BM_VIRT_ALLOC_REG)
 *   MVPP2_BM_PHY_RLS_REG (related to MVPP2_BM_VIRT_RLS_REG)
 *   MVPP2_RXQ_THRESH_REG (related to MVPP2_RXQ_NUM_REG)
 *   MVPP2_RXQ_DESC_ADDR_REG (related to MVPP2_RXQ_NUM_REG)
 *   MVPP2_RXQ_DESC_SIZE_REG (related to MVPP2_RXQ_NUM_REG)
 *   MVPP2_RXQ_INDEX_REG (related to MVPP2_RXQ_NUM_REG)
 *   MVPP2_TXQ_PENDING_REG (related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_DESC_ADDR_REG (related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_DESC_SIZE_REG (related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_INDEX_REG (related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_PREF_BUF_REG (related to MVPP2_TXQ_NUM_REG)
 */
static void mvpp2_percpu_write(struct mvpp2 *priv, int cpu,
			       u32 offset, u32 data)
{
	writel(data, priv->cpu_base[cpu] + offset);
}

static u32 mvpp2_percpu_read(struct mvpp2 *priv, int cpu,
			     u32 offset)
{
	return readl(priv->cpu_base[cpu] + offset);
}
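/* A sketch of the indirect-access pattern implied by the list above:
 * select the queue through a CPU window, then touch its related
 * registers through the same window, e.g.
 *
 *	cpu = get_cpu();
 *	mvpp2_percpu_write(priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id);
 *	mvpp2_percpu_write(priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, addr);
 *	mvpp2_percpu_write(priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, size);
 *	put_cpu();
 *
 * (rxq, addr and size here are illustrative locals, not names defined
 * in this file.)
 */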
static dma_addr_t mvpp2_txdesc_dma_addr_get(struct mvpp2_port *port,
					    struct mvpp2_tx_desc *tx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return tx_desc->pp21.buf_dma_addr;
	else
		return tx_desc->pp22.buf_dma_addr_ptp & GENMASK_ULL(40, 0);
}

static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port,
				      struct mvpp2_tx_desc *tx_desc,
				      dma_addr_t dma_addr)
{
	if (port->priv->hw_version == MVPP21) {
		tx_desc->pp21.buf_dma_addr = dma_addr;
	} else {
		u64 val = (u64)dma_addr;

		tx_desc->pp22.buf_dma_addr_ptp &= ~GENMASK_ULL(40, 0);
		tx_desc->pp22.buf_dma_addr_ptp |= val;
	}
}

static size_t mvpp2_txdesc_size_get(struct mvpp2_port *port,
				    struct mvpp2_tx_desc *tx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return tx_desc->pp21.data_size;
	else
		return tx_desc->pp22.data_size;
}

static void mvpp2_txdesc_size_set(struct mvpp2_port *port,
				  struct mvpp2_tx_desc *tx_desc,
				  size_t size)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.data_size = size;
	else
		tx_desc->pp22.data_size = size;
}

static void mvpp2_txdesc_txq_set(struct mvpp2_port *port,
				 struct mvpp2_tx_desc *tx_desc,
				 unsigned int txq)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.phys_txq = txq;
	else
		tx_desc->pp22.phys_txq = txq;
}

static void mvpp2_txdesc_cmd_set(struct mvpp2_port *port,
				 struct mvpp2_tx_desc *tx_desc,
				 unsigned int command)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.command = command;
	else
		tx_desc->pp22.command = command;
}

static void mvpp2_txdesc_offset_set(struct mvpp2_port *port,
				    struct mvpp2_tx_desc *tx_desc,
				    unsigned int offset)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.packet_offset = offset;
	else
		tx_desc->pp22.packet_offset = offset;
}

static unsigned int mvpp2_txdesc_offset_get(struct mvpp2_port *port,
					    struct mvpp2_tx_desc *tx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return tx_desc->pp21.packet_offset;
	else
		return tx_desc->pp22.packet_offset;
}

static dma_addr_t mvpp2_rxdesc_dma_addr_get(struct mvpp2_port *port,
					    struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return rx_desc->pp21.buf_dma_addr;
	else
		return rx_desc->pp22.buf_dma_addr_key_hash & GENMASK_ULL(40, 0);
}

static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port,
					     struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return rx_desc->pp21.buf_cookie;
	else
		return rx_desc->pp22.buf_cookie_misc & GENMASK_ULL(40, 0);
}

static size_t mvpp2_rxdesc_size_get(struct mvpp2_port *port,
				    struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return rx_desc->pp21.data_size;
	else
		return rx_desc->pp22.data_size;
}

static u32 mvpp2_rxdesc_status_get(struct mvpp2_port *port,
				   struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return rx_desc->pp21.status;
	else
		return rx_desc->pp22.status;
}
static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu)
{
	txq_pcpu->txq_get_index++;
	if (txq_pcpu->txq_get_index == txq_pcpu->size)
		txq_pcpu->txq_get_index = 0;
}

static void mvpp2_txq_inc_put(struct mvpp2_port *port,
			      struct mvpp2_txq_pcpu *txq_pcpu,
			      struct sk_buff *skb,
			      struct mvpp2_tx_desc *tx_desc)
{
	struct mvpp2_txq_pcpu_buf *tx_buf =
		txq_pcpu->buffs + txq_pcpu->txq_put_index;
	tx_buf->skb = skb;
	tx_buf->size = mvpp2_txdesc_size_get(port, tx_desc);
	tx_buf->dma = mvpp2_txdesc_dma_addr_get(port, tx_desc) +
		mvpp2_txdesc_offset_get(port, tx_desc);
	txq_pcpu->txq_put_index++;
	if (txq_pcpu->txq_put_index == txq_pcpu->size)
		txq_pcpu->txq_put_index = 0;
}
/* Get number of physical egress port */
static inline int mvpp2_egress_port(struct mvpp2_port *port)
{
	return MVPP2_MAX_TCONT + port->id;
}

/* Get number of physical TXQ */
static inline int mvpp2_txq_phys(int port, int txq)
{
	return (MVPP2_MAX_TCONT + port) * MVPP2_MAX_TXQ + txq;
}
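/* For example, TXQ 0 of port 1 maps to physical TXQ
 * (MVPP2_MAX_TCONT + 1) * MVPP2_MAX_TXQ + 0 = 136: the first
 * MVPP2_MAX_TCONT entries of the egress numbering space are reserved
 * for the PON T-CONTs, and the ports come after them.
 */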
/* Parser configuration routines */

/* Update parser tcam and sram hw entries */
static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
{
	int i;

	if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
		return -EINVAL;

	/* Clear entry invalidation bit */
	pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK;

	/* Write tcam index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
		mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam.word[i]);

	/* Write sram index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
		mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram.word[i]);

	return 0;
}

/* Read tcam entry from hw */
static int mvpp2_prs_hw_read(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
{
	int i;

	if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
		return -EINVAL;

	/* Write tcam index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);

	pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] = mvpp2_read(priv,
			MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD));
	if (pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] & MVPP2_PRS_TCAM_INV_MASK)
		return MVPP2_PRS_TCAM_ENTRY_INVALID;

	for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
		pe->tcam.word[i] = mvpp2_read(priv, MVPP2_PRS_TCAM_DATA_REG(i));

	/* Write sram index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
		pe->sram.word[i] = mvpp2_read(priv, MVPP2_PRS_SRAM_DATA_REG(i));

	return 0;
}
/* Invalidate tcam hw entry */
static void mvpp2_prs_hw_inv(struct mvpp2 *priv, int index)
{
	/* Write index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
	mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD),
		    MVPP2_PRS_TCAM_INV_MASK);
}

/* Enable shadow table entry and set its lookup ID */
static void mvpp2_prs_shadow_set(struct mvpp2 *priv, int index, int lu)
{
	priv->prs_shadow[index].valid = true;
	priv->prs_shadow[index].lu = lu;
}

/* Update ri fields in shadow table entry */
static void mvpp2_prs_shadow_ri_set(struct mvpp2 *priv, int index,
				    unsigned int ri, unsigned int ri_mask)
{
	priv->prs_shadow[index].ri_mask = ri_mask;
	priv->prs_shadow[index].ri = ri;
}

/* Update lookup field in tcam sw entry */
static void mvpp2_prs_tcam_lu_set(struct mvpp2_prs_entry *pe, unsigned int lu)
{
	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_LU_BYTE);

	pe->tcam.byte[MVPP2_PRS_TCAM_LU_BYTE] = lu;
	pe->tcam.byte[enable_off] = MVPP2_PRS_LU_MASK;
}
/* Update mask for single port in tcam sw entry */
static void mvpp2_prs_tcam_port_set(struct mvpp2_prs_entry *pe,
				    unsigned int port, bool add)
{
	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);

	if (add)
		pe->tcam.byte[enable_off] &= ~(1 << port);
	else
		pe->tcam.byte[enable_off] |= 1 << port;
}

/* Update port map in tcam sw entry */
static void mvpp2_prs_tcam_port_map_set(struct mvpp2_prs_entry *pe,
					unsigned int ports)
{
	unsigned char port_mask = MVPP2_PRS_PORT_MASK;
	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);

	pe->tcam.byte[MVPP2_PRS_TCAM_PORT_BYTE] = 0;
	pe->tcam.byte[enable_off] &= ~port_mask;
	pe->tcam.byte[enable_off] |= ~ports & MVPP2_PRS_PORT_MASK;
}

/* Obtain port map from tcam sw entry */
static unsigned int mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe)
{
	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);

	return ~(pe->tcam.byte[enable_off]) & MVPP2_PRS_PORT_MASK;
}
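/* The port map uses inverted encoding: a port takes part in the match
 * when its bit in the enable byte is cleared. Mapping ports 0 and 1
 * (ports = 0x3) therefore stores ~0x3 & 0xff = 0xfc in the enable
 * byte, and mvpp2_prs_tcam_port_map_get() inverts it back to 0x3.
 */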
/* Set byte of data and its enable bits in tcam sw entry */
static void mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry *pe,
					 unsigned int offs, unsigned char byte,
					 unsigned char enable)
{
	pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)] = byte;
	pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)] = enable;
}

/* Get byte of data and its enable bits from tcam sw entry */
static void mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe,
					 unsigned int offs, unsigned char *byte,
					 unsigned char *enable)
{
	*byte = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)];
	*enable = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)];
}
/* Compare tcam data bytes with a pattern */
static bool mvpp2_prs_tcam_data_cmp(struct mvpp2_prs_entry *pe, int offs,
				    u16 data)
{
	int off = MVPP2_PRS_TCAM_DATA_BYTE(offs);
	u16 tcam_data;

	tcam_data = (pe->tcam.byte[off + 1] << 8) | pe->tcam.byte[off];
	if (tcam_data != data)
		return false;
	return true;
}
1391 static void mvpp2_prs_tcam_ai_update(struct mvpp2_prs_entry *pe,
1392 unsigned int bits, unsigned int enable)
1394 int i, ai_idx = MVPP2_PRS_TCAM_AI_BYTE;
1396 for (i = 0; i < MVPP2_PRS_AI_BITS; i++) {
1398 if (!(enable & BIT(i)))
1402 pe->tcam.byte[ai_idx] |= 1 << i;
1404 pe->tcam.byte[ai_idx] &= ~(1 << i);
1407 pe->tcam.byte[MVPP2_PRS_TCAM_EN_OFFS(ai_idx)] |= enable;
1410 /* Get ai bits from tcam sw entry */
1411 static int mvpp2_prs_tcam_ai_get(struct mvpp2_prs_entry *pe)
1413 return pe->tcam.byte[MVPP2_PRS_TCAM_AI_BYTE];
1416 /* Set ethertype in tcam sw entry */
1417 static void mvpp2_prs_match_etype(struct mvpp2_prs_entry *pe, int offset,
1418 unsigned short ethertype)
1420 mvpp2_prs_tcam_data_byte_set(pe, offset + 0, ethertype >> 8, 0xff);
1421 mvpp2_prs_tcam_data_byte_set(pe, offset + 1, ethertype & 0xff, 0xff);
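/* For example, matching ETH_P_IP (0x0800) at offset 0 sets data byte 0
 * to 0x08 and data byte 1 to 0x00, each with a full 0xff enable mask:
 * the ethertype is matched big-endian, as it appears on the wire.
 */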
/* Set bits in sram sw entry */
static void mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry *pe, int bit_num,
				    int val)
{
	pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] |= (val << (bit_num % 8));
}

/* Clear bits in sram sw entry */
static void mvpp2_prs_sram_bits_clear(struct mvpp2_prs_entry *pe, int bit_num,
				      int val)
{
	pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] &= ~(val << (bit_num % 8));
}

/* Update ri bits in sram sw entry */
static void mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *pe,
				     unsigned int bits, unsigned int mask)
{
	unsigned int i;

	for (i = 0; i < MVPP2_PRS_SRAM_RI_CTRL_BITS; i++) {
		int ri_off = MVPP2_PRS_SRAM_RI_OFFS;

		if (!(mask & BIT(i)))
			continue;

		if (bits & BIT(i))
			mvpp2_prs_sram_bits_set(pe, ri_off + i, 1);
		else
			mvpp2_prs_sram_bits_clear(pe, ri_off + i, 1);

		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_CTRL_OFFS + i, 1);
	}
}

/* Obtain ri bits from sram sw entry */
static int mvpp2_prs_sram_ri_get(struct mvpp2_prs_entry *pe)
{
	return pe->sram.word[MVPP2_PRS_SRAM_RI_WORD];
}
/* Update ai bits in sram sw entry */
static void mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *pe,
				     unsigned int bits, unsigned int mask)
{
	unsigned int i;
	int ai_off = MVPP2_PRS_SRAM_AI_OFFS;

	for (i = 0; i < MVPP2_PRS_SRAM_AI_CTRL_BITS; i++) {
		if (!(mask & BIT(i)))
			continue;

		if (bits & BIT(i))
			mvpp2_prs_sram_bits_set(pe, ai_off + i, 1);
		else
			mvpp2_prs_sram_bits_clear(pe, ai_off + i, 1);

		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_CTRL_OFFS + i, 1);
	}
}

/* Read ai bits from sram sw entry */
static int mvpp2_prs_sram_ai_get(struct mvpp2_prs_entry *pe)
{
	u8 bits;
	int ai_off = MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_AI_OFFS);
	int ai_en_off = ai_off + 1;
	int ai_shift = MVPP2_PRS_SRAM_AI_OFFS % 8;

	bits = (pe->sram.byte[ai_off] >> ai_shift) |
	       (pe->sram.byte[ai_en_off] << (8 - ai_shift));

	return bits;
}
/* In sram sw entry set lookup ID field of the tcam key to be used in the next
 * lookup iteration
 */
static void mvpp2_prs_sram_next_lu_set(struct mvpp2_prs_entry *pe,
				       unsigned int lu)
{
	int sram_next_off = MVPP2_PRS_SRAM_NEXT_LU_OFFS;

	mvpp2_prs_sram_bits_clear(pe, sram_next_off,
				  MVPP2_PRS_SRAM_NEXT_LU_MASK);
	mvpp2_prs_sram_bits_set(pe, sram_next_off, lu);
}

/* In the sram sw entry set sign and value of the next lookup offset
 * and the offset value generated to the classifier
 */
static void mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *pe, int shift,
				     unsigned int op)
{
	/* Set sign */
	if (shift < 0) {
		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
		shift = 0 - shift;
	} else {
		mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
	}

	/* Set value */
	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_SHIFT_OFFS)] =
		(unsigned char)shift;

	/* Reset and set operation */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS,
				  MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, op);

	/* Set base offset as current */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
}
/* In the sram sw entry set sign and value of the user defined offset
 * generated to the classifier
 */
static void mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *pe,
				      unsigned int type, int offset,
				      unsigned int op)
{
	/* Set sign */
	if (offset < 0) {
		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
		offset = 0 - offset;
	} else {
		mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
	}

	/* Set value */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_OFFS,
				  MVPP2_PRS_SRAM_UDF_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_OFFS, offset);
	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
					MVPP2_PRS_SRAM_UDF_BITS)] &=
	      ~(MVPP2_PRS_SRAM_UDF_MASK >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));
	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
					MVPP2_PRS_SRAM_UDF_BITS)] |=
				(offset >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));

	/* Set offset type */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS,
				  MVPP2_PRS_SRAM_UDF_TYPE_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, type);

	/* Set offset operation */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS, op);

	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
					MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] &=
					~(MVPP2_PRS_SRAM_OP_SEL_UDF_MASK >>
				(8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));

	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
					MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] |=
			(op >> (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));

	/* Set base offset as current */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
}
/* Find parser flow entry */
static struct mvpp2_prs_entry *mvpp2_prs_flow_find(struct mvpp2 *priv, int flow)
{
	struct mvpp2_prs_entry *pe;
	int tid;

	pe = kzalloc(sizeof(*pe), GFP_KERNEL);
	if (!pe)
		return NULL;
	mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS);

	/* Go through all entries with MVPP2_PRS_LU_FLOWS */
	for (tid = MVPP2_PRS_TCAM_SRAM_SIZE - 1; tid >= 0; tid--) {
		u8 bits;

		if (!priv->prs_shadow[tid].valid ||
		    priv->prs_shadow[tid].lu != MVPP2_PRS_LU_FLOWS)
			continue;

		pe->index = tid;
		mvpp2_prs_hw_read(priv, pe);
		bits = mvpp2_prs_sram_ai_get(pe);

		/* Sram store classification lookup ID in AI bits [5:0] */
		if ((bits & MVPP2_PRS_FLOW_ID_MASK) == flow)
			return pe;
	}
	kfree(pe);

	return NULL;
}
/* Return first free tcam index, seeking from start to end */
static int mvpp2_prs_tcam_first_free(struct mvpp2 *priv, unsigned char start,
				     unsigned char end)
{
	int tid;

	if (start > end)
		swap(start, end);

	if (end >= MVPP2_PRS_TCAM_SRAM_SIZE)
		end = MVPP2_PRS_TCAM_SRAM_SIZE - 1;

	for (tid = start; tid <= end; tid++) {
		if (!priv->prs_shadow[tid].valid)
			return tid;
	}

	return -EINVAL;
}
/* Enable/disable dropping all mac da's */
static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add)
{
	struct mvpp2_prs_entry pe;

	if (priv->prs_shadow[MVPP2_PE_DROP_ALL].valid) {
		/* Entry exists - update port only */
		pe.index = MVPP2_PE_DROP_ALL;
		mvpp2_prs_hw_read(priv, &pe);
	} else {
		/* Entry doesn't exist - create new */
		memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
		pe.index = MVPP2_PE_DROP_ALL;

		/* Non-promiscuous mode for all ports - DROP unknown packets */
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
					 MVPP2_PRS_RI_DROP_MASK);

		mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}
/* Set port to promiscuous mode */
static void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port, bool add)
{
	struct mvpp2_prs_entry pe;

	/* Promiscuous mode - Accept unknown packets */

	if (priv->prs_shadow[MVPP2_PE_MAC_PROMISCUOUS].valid) {
		/* Entry exists - update port only */
		pe.index = MVPP2_PE_MAC_PROMISCUOUS;
		mvpp2_prs_hw_read(priv, &pe);
	} else {
		/* Entry doesn't exist - create new */
		memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
		pe.index = MVPP2_PE_MAC_PROMISCUOUS;

		/* Continue - set next lookup */
		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);

		/* Set result info bits */
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_UCAST,
					 MVPP2_PRS_RI_L2_CAST_MASK);

		/* Shift to ethertype */
		mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}
/* Accept multicast */
static void mvpp2_prs_mac_multi_set(struct mvpp2 *priv, int port, int index,
				    bool add)
{
	struct mvpp2_prs_entry pe;
	unsigned char da_mc;

	/* Ethernet multicast address first byte is
	 * 0x01 for IPv4 and 0x33 for IPv6
	 */
	da_mc = (index == MVPP2_PE_MAC_MC_ALL) ? 0x01 : 0x33;

	if (priv->prs_shadow[index].valid) {
		/* Entry exists - update port only */
		pe.index = index;
		mvpp2_prs_hw_read(priv, &pe);
	} else {
		/* Entry doesn't exist - create new */
		memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
		pe.index = index;

		/* Continue - set next lookup */
		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);

		/* Set result info bits */
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_MCAST,
					 MVPP2_PRS_RI_L2_CAST_MASK);

		/* Update tcam entry data first byte */
		mvpp2_prs_tcam_data_byte_set(&pe, 0, da_mc, 0xff);

		/* Shift to ethertype */
		mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}
/* Set entry for dsa packets */
static void mvpp2_prs_dsa_tag_set(struct mvpp2 *priv, int port, bool add,
				  bool tagged, bool extend)
{
	struct mvpp2_prs_entry pe;
	int tid, shift;

	if (extend) {
		tid = tagged ? MVPP2_PE_EDSA_TAGGED : MVPP2_PE_EDSA_UNTAGGED;
		shift = 8;
	} else {
		tid = tagged ? MVPP2_PE_DSA_TAGGED : MVPP2_PE_DSA_UNTAGGED;
		shift = 4;
	}

	if (priv->prs_shadow[tid].valid) {
		/* Entry exists - update port only */
		pe.index = tid;
		mvpp2_prs_hw_read(priv, &pe);
	} else {
		/* Entry doesn't exist - create new */
		memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
		pe.index = tid;

		/* Shift 4 bytes if DSA tag or 8 bytes in case of EDSA tag */
		mvpp2_prs_sram_shift_set(&pe, shift,
					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA);

		if (tagged) {
			/* Set tagged bit in DSA tag */
			mvpp2_prs_tcam_data_byte_set(&pe, 0,
						MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
						MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
			/* Clear all ai bits for next iteration */
			mvpp2_prs_sram_ai_update(&pe, 0,
						 MVPP2_PRS_SRAM_AI_MASK);
			/* If packet is tagged continue check vlans */
			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
		} else {
			/* Set result info bits to 'no vlans' */
			mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
						 MVPP2_PRS_RI_VLAN_MASK);
			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
		}

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}
/* Set entry for dsa ethertype */
static void mvpp2_prs_dsa_tag_ethertype_set(struct mvpp2 *priv, int port,
					    bool add, bool tagged, bool extend)
{
	struct mvpp2_prs_entry pe;
	int tid, shift, port_mask;

	if (extend) {
		tid = tagged ? MVPP2_PE_ETYPE_EDSA_TAGGED :
		      MVPP2_PE_ETYPE_EDSA_UNTAGGED;
		port_mask = 0;
		shift = 8;
	} else {
		tid = tagged ? MVPP2_PE_ETYPE_DSA_TAGGED :
		      MVPP2_PE_ETYPE_DSA_UNTAGGED;
		port_mask = MVPP2_PRS_PORT_MASK;
		shift = 4;
	}

	if (priv->prs_shadow[tid].valid) {
		/* Entry exists - update port only */
		pe.index = tid;
		mvpp2_prs_hw_read(priv, &pe);
	} else {
		/* Entry doesn't exist - create new */
		memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
		pe.index = tid;

		/* Set ethertype */
		mvpp2_prs_match_etype(&pe, 0, ETH_P_EDSA);
		mvpp2_prs_match_etype(&pe, 2, 0);

		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DSA_MASK,
					 MVPP2_PRS_RI_DSA_MASK);
		/* Shift ethertype + 2 byte reserved + tag */
		mvpp2_prs_sram_shift_set(&pe, 2 + MVPP2_ETH_TYPE_LEN + shift,
					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA);

		if (tagged) {
			/* Set tagged bit in DSA tag */
			mvpp2_prs_tcam_data_byte_set(&pe,
						MVPP2_ETH_TYPE_LEN + 2 + 3,
						MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
						MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
			/* Clear all ai bits for next iteration */
			mvpp2_prs_sram_ai_update(&pe, 0,
						 MVPP2_PRS_SRAM_AI_MASK);
			/* If packet is tagged continue check vlans */
			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
		} else {
			/* Set result info bits to 'no vlans' */
			mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
						 MVPP2_PRS_RI_VLAN_MASK);
			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
		}
		/* Mask/unmask all ports, depending on dsa type */
		mvpp2_prs_tcam_port_map_set(&pe, port_mask);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}
1894 /* Search for existing single/triple vlan entry */
1895 static struct mvpp2_prs_entry *mvpp2_prs_vlan_find(struct mvpp2 *priv,
1896 unsigned short tpid, int ai)
1898 struct mvpp2_prs_entry *pe;
1901 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
1904 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
1906	/* Go through all entries with MVPP2_PRS_LU_VLAN */
1907 for (tid = MVPP2_PE_FIRST_FREE_TID;
1908 tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
1909 unsigned int ri_bits, ai_bits;
1912 if (!priv->prs_shadow[tid].valid ||
1913 priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
1918 mvpp2_prs_hw_read(priv, pe);
1919 match = mvpp2_prs_tcam_data_cmp(pe, 0, swab16(tpid));
1924 ri_bits = mvpp2_prs_sram_ri_get(pe);
1925 ri_bits &= MVPP2_PRS_RI_VLAN_MASK;
1927 /* Get current ai value from tcam */
1928 ai_bits = mvpp2_prs_tcam_ai_get(pe);
1929 /* Clear double vlan bit */
1930 ai_bits &= ~MVPP2_PRS_DBL_VLAN_AI_BIT;
1935 if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
1936 ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
1944 /* Add/update single/triple vlan entry */
1945 static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai,
1946 unsigned int port_map)
1948 struct mvpp2_prs_entry *pe;
1952 pe = mvpp2_prs_vlan_find(priv, tpid, ai);
1955 /* Create new tcam entry */
1956 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_LAST_FREE_TID,
1957 MVPP2_PE_FIRST_FREE_TID);
1961 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
1965 /* Get last double vlan tid */
1966 for (tid_aux = MVPP2_PE_LAST_FREE_TID;
1967 tid_aux >= MVPP2_PE_FIRST_FREE_TID; tid_aux--) {
1968 unsigned int ri_bits;
1970 if (!priv->prs_shadow[tid_aux].valid ||
1971 priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
1974 pe->index = tid_aux;
1975 mvpp2_prs_hw_read(priv, pe);
1976 ri_bits = mvpp2_prs_sram_ri_get(pe);
1977 if ((ri_bits & MVPP2_PRS_RI_VLAN_MASK) ==
1978 MVPP2_PRS_RI_VLAN_DOUBLE)
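		/* The new single/triple VLAN entry must be placed after the
		 * last double VLAN entry found above
		 */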
1982 if (tid <= tid_aux) {
1987	memset(pe, 0, sizeof(struct mvpp2_prs_entry));
1988 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
1991 mvpp2_prs_match_etype(pe, 0, tpid);
1993 mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_L2);
1994 /* Shift 4 bytes - skip 1 vlan tag */
1995 mvpp2_prs_sram_shift_set(pe, MVPP2_VLAN_TAG_LEN,
1996 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1997 /* Clear all ai bits for next iteration */
1998 mvpp2_prs_sram_ai_update(pe, 0, MVPP2_PRS_SRAM_AI_MASK);
2000 if (ai == MVPP2_PRS_SINGLE_VLAN_AI) {
2001 mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_SINGLE,
2002 MVPP2_PRS_RI_VLAN_MASK);
2003	} else {
2004		ai |= MVPP2_PRS_DBL_VLAN_AI_BIT;
2005 mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_TRIPLE,
2006 MVPP2_PRS_RI_VLAN_MASK);
2008 mvpp2_prs_tcam_ai_update(pe, ai, MVPP2_PRS_SRAM_AI_MASK);
2010 mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_VLAN);
2012 /* Update ports' mask */
2013 mvpp2_prs_tcam_port_map_set(pe, port_map);
2015 mvpp2_prs_hw_write(priv, pe);
2023 /* Get first free double vlan ai number */
2024 static int mvpp2_prs_double_vlan_ai_free_get(struct mvpp2 *priv)
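	/* ai 0 is never handed out - scanning starts from 1 */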
2028 for (i = 1; i < MVPP2_PRS_DBL_VLANS_MAX; i++) {
2029 if (!priv->prs_double_vlans[i])
2036 /* Search for existing double vlan entry */
2037 static struct mvpp2_prs_entry *mvpp2_prs_double_vlan_find(struct mvpp2 *priv,
2038 unsigned short tpid1,
2039 unsigned short tpid2)
2041 struct mvpp2_prs_entry *pe;
2044 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
2047 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
2049	/* Go through all entries with MVPP2_PRS_LU_VLAN */
2050 for (tid = MVPP2_PE_FIRST_FREE_TID;
2051 tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
2052 unsigned int ri_mask;
2055 if (!priv->prs_shadow[tid].valid ||
2056 priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
2060 mvpp2_prs_hw_read(priv, pe);
2062		match = mvpp2_prs_tcam_data_cmp(pe, 0, swab16(tpid1)) &&
2063			mvpp2_prs_tcam_data_cmp(pe, 4, swab16(tpid2));
2068 ri_mask = mvpp2_prs_sram_ri_get(pe) & MVPP2_PRS_RI_VLAN_MASK;
2069 if (ri_mask == MVPP2_PRS_RI_VLAN_DOUBLE)
2077 /* Add or update double vlan entry */
2078 static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1,
2079 unsigned short tpid2,
2080 unsigned int port_map)
2082 struct mvpp2_prs_entry *pe;
2083 int tid_aux, tid, ai, ret = 0;
2085 pe = mvpp2_prs_double_vlan_find(priv, tpid1, tpid2);
2088 /* Create new tcam entry */
2089 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2090 MVPP2_PE_LAST_FREE_TID);
2094 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
2098 /* Set ai value for new double vlan entry */
2099 ai = mvpp2_prs_double_vlan_ai_free_get(priv);
2105 /* Get first single/triple vlan tid */
2106 for (tid_aux = MVPP2_PE_FIRST_FREE_TID;
2107 tid_aux <= MVPP2_PE_LAST_FREE_TID; tid_aux++) {
2108 unsigned int ri_bits;
2110 if (!priv->prs_shadow[tid_aux].valid ||
2111 priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
2114 pe->index = tid_aux;
2115 mvpp2_prs_hw_read(priv, pe);
2116 ri_bits = mvpp2_prs_sram_ri_get(pe);
2117 ri_bits &= MVPP2_PRS_RI_VLAN_MASK;
2118 if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
2119 ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
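		/* The new double VLAN entry must be placed before the first
		 * single/triple VLAN entry found above
		 */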
2123 if (tid >= tid_aux) {
2128 memset(pe, 0, sizeof(struct mvpp2_prs_entry));
2129 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
2132 priv->prs_double_vlans[ai] = true;
2134 mvpp2_prs_match_etype(pe, 0, tpid1);
2135 mvpp2_prs_match_etype(pe, 4, tpid2);
2137 mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_VLAN);
2138 /* Shift 8 bytes - skip 2 vlan tags */
2139 mvpp2_prs_sram_shift_set(pe, 2 * MVPP2_VLAN_TAG_LEN,
2140 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2141 mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_DOUBLE,
2142 MVPP2_PRS_RI_VLAN_MASK);
2143 mvpp2_prs_sram_ai_update(pe, ai | MVPP2_PRS_DBL_VLAN_AI_BIT,
2144 MVPP2_PRS_SRAM_AI_MASK);
2146 mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_VLAN);
2149 /* Update ports' mask */
2150 mvpp2_prs_tcam_port_map_set(pe, port_map);
2151 mvpp2_prs_hw_write(priv, pe);
2158 /* IPv4 header parsing for fragmentation and L4 offset */
2159 static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto,
2160 unsigned int ri, unsigned int ri_mask)
2162 struct mvpp2_prs_entry pe;
2165 if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
2166 (proto != IPPROTO_IGMP))
2169 /* Fragmented packet */
2170 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2171 MVPP2_PE_LAST_FREE_TID);
2175 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2176 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
2179 /* Set next lu to IPv4 */
2180 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
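	/* Shift 12 bytes: from 4 bytes into the IPv4 header up to the
	 * destination address at offset 16
	 */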
2181 mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
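	/* Set L4 offset */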
2183 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
2184 sizeof(struct iphdr) - 4,
2185 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2186 mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
2187 MVPP2_PRS_IPV4_DIP_AI_BIT);
2188 mvpp2_prs_sram_ri_update(&pe, ri | MVPP2_PRS_RI_IP_FRAG_MASK,
2189 ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK);
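	/* TCAM data byte 5 lines up with the IPv4 protocol field: the match
	 * window starts 4 bytes into the header, so byte 5 is header offset 9
	 */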
2191 mvpp2_prs_tcam_data_byte_set(&pe, 5, proto, MVPP2_PRS_TCAM_PROTO_MASK);
2192 mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
2193 /* Unmask all ports */
2194 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2196 /* Update shadow table and hw entry */
2197 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
2198 mvpp2_prs_hw_write(priv, &pe);
2200 /* Not fragmented packet */
2201 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2202 MVPP2_PE_LAST_FREE_TID);
2207 /* Clear ri before updating */
2208 pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
2209 pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
2210 mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
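	/* Match the IPv4 flags/fragment-offset bytes as zero - not a fragment */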
2212 mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00, MVPP2_PRS_TCAM_PROTO_MASK_L);
2213 mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00, MVPP2_PRS_TCAM_PROTO_MASK);
2215 /* Update shadow table and hw entry */
2216 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
2217 mvpp2_prs_hw_write(priv, &pe);
2222 /* IPv4 L3 multicast or broadcast */
2223 static int mvpp2_prs_ip4_cast(struct mvpp2 *priv, unsigned short l3_cast)
2225 struct mvpp2_prs_entry pe;
2228 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2229 MVPP2_PE_LAST_FREE_TID);
2233 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2234 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
2238 case MVPP2_PRS_L3_MULTI_CAST:
2239 mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV4_MC,
2240 MVPP2_PRS_IPV4_MC_MASK);
2241 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
2242 MVPP2_PRS_RI_L3_ADDR_MASK);
2244 case MVPP2_PRS_L3_BROAD_CAST:
2245 mask = MVPP2_PRS_IPV4_BC_MASK;
2246 mvpp2_prs_tcam_data_byte_set(&pe, 0, mask, mask);
2247 mvpp2_prs_tcam_data_byte_set(&pe, 1, mask, mask);
2248 mvpp2_prs_tcam_data_byte_set(&pe, 2, mask, mask);
2249 mvpp2_prs_tcam_data_byte_set(&pe, 3, mask, mask);
2250 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_BCAST,
2251 MVPP2_PRS_RI_L3_ADDR_MASK);
2257 /* Finished: go to flowid generation */
2258 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2259 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2261 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
2262 MVPP2_PRS_IPV4_DIP_AI_BIT);
2263 /* Unmask all ports */
2264 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2266 /* Update shadow table and hw entry */
2267 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
2268 mvpp2_prs_hw_write(priv, &pe);
2273 /* Set entries for protocols over IPv6 */
2274 static int mvpp2_prs_ip6_proto(struct mvpp2 *priv, unsigned short proto,
2275 unsigned int ri, unsigned int ri_mask)
2277 struct mvpp2_prs_entry pe;
2280 if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
2281 (proto != IPPROTO_ICMPV6) && (proto != IPPROTO_IPIP))
2284 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2285 MVPP2_PE_LAST_FREE_TID);
2289 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2290 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
2293 /* Finished: go to flowid generation */
2294 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2295 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2296 mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
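	/* Set L4 offset relative to our current place */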
2297 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
2298 sizeof(struct ipv6hdr) - 6,
2299 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2301 mvpp2_prs_tcam_data_byte_set(&pe, 0, proto, MVPP2_PRS_TCAM_PROTO_MASK);
2302 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
2303 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
2304 /* Unmask all ports */
2305 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2308 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
2309 mvpp2_prs_hw_write(priv, &pe);
2314 /* IPv6 L3 multicast entry */
2315 static int mvpp2_prs_ip6_cast(struct mvpp2 *priv, unsigned short l3_cast)
2317 struct mvpp2_prs_entry pe;
2320 if (l3_cast != MVPP2_PRS_L3_MULTI_CAST)
2323 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2324 MVPP2_PE_LAST_FREE_TID);
2328 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2329 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
2332	/* Continue the IPv6 lookup to parse the next header */
2333 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
2334 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
2335 MVPP2_PRS_RI_L3_ADDR_MASK);
2336 mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
2337 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
2338	/* Shift back from the DIP to the IPv6 Next Header field (24 - 18 = offset 6) */
2339 mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2341 mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV6_MC,
2342 MVPP2_PRS_IPV6_MC_MASK);
2343 mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
2344 /* Unmask all ports */
2345 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2347 /* Update shadow table and hw entry */
2348 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
2349 mvpp2_prs_hw_write(priv, &pe);
2354 /* Parser per-port initialization */
2355 static void mvpp2_prs_hw_port_init(struct mvpp2 *priv, int port, int lu_first,
2356 int lu_max, int offset)
2361 val = mvpp2_read(priv, MVPP2_PRS_INIT_LOOKUP_REG);
2362 val &= ~MVPP2_PRS_PORT_LU_MASK(port);
2363 val |= MVPP2_PRS_PORT_LU_VAL(port, lu_first);
2364 mvpp2_write(priv, MVPP2_PRS_INIT_LOOKUP_REG, val);
2366 /* Set maximum number of loops for packet received from port */
2367 val = mvpp2_read(priv, MVPP2_PRS_MAX_LOOP_REG(port));
2368 val &= ~MVPP2_PRS_MAX_LOOP_MASK(port);
2369 val |= MVPP2_PRS_MAX_LOOP_VAL(port, lu_max);
2370 mvpp2_write(priv, MVPP2_PRS_MAX_LOOP_REG(port), val);
2372	/* Set initial offset for packet header extraction for the first
2373	 * searched segment
2374	 */
2375 val = mvpp2_read(priv, MVPP2_PRS_INIT_OFFS_REG(port));
2376 val &= ~MVPP2_PRS_INIT_OFF_MASK(port);
2377 val |= MVPP2_PRS_INIT_OFF_VAL(port, offset);
2378 mvpp2_write(priv, MVPP2_PRS_INIT_OFFS_REG(port), val);
2381 /* Default flow entries initialization for all ports */
2382 static void mvpp2_prs_def_flow_init(struct mvpp2 *priv)
2384 struct mvpp2_prs_entry pe;
2387 for (port = 0; port < MVPP2_MAX_PORTS; port++) {
2388 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2389 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2390 pe.index = MVPP2_PE_FIRST_DEFAULT_FLOW - port;
2392 /* Mask all ports */
2393 mvpp2_prs_tcam_port_map_set(&pe, 0);
2396 mvpp2_prs_sram_ai_update(&pe, port, MVPP2_PRS_FLOW_ID_MASK);
2397 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
2399 /* Update shadow table and hw entry */
2400 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_FLOWS);
2401 mvpp2_prs_hw_write(priv, &pe);
2405 /* Set default entry for Marvell Header field */
2406 static void mvpp2_prs_mh_init(struct mvpp2 *priv)
2408 struct mvpp2_prs_entry pe;
2410 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2412 pe.index = MVPP2_PE_MH_DEFAULT;
2413 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MH);
2414 mvpp2_prs_sram_shift_set(&pe, MVPP2_MH_SIZE,
2415 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2416 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_MAC);
2418 /* Unmask all ports */
2419 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2421 /* Update shadow table and hw entry */
2422 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MH);
2423 mvpp2_prs_hw_write(priv, &pe);
2426 /* Set default entries (place holder) for promiscuous, non-promiscuous and
2427  * multicast MAC addresses
2428  */
2429 static void mvpp2_prs_mac_init(struct mvpp2 *priv)
2431 struct mvpp2_prs_entry pe;
2433 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2435 /* Non-promiscuous mode for all ports - DROP unknown packets */
2436 pe.index = MVPP2_PE_MAC_NON_PROMISCUOUS;
2437 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
2439 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
2440 MVPP2_PRS_RI_DROP_MASK);
2441 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2442 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2444 /* Unmask all ports */
2445 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2447 /* Update shadow table and hw entry */
2448 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
2449 mvpp2_prs_hw_write(priv, &pe);
2451	/* Place holders only - no ports */
2452 mvpp2_prs_mac_drop_all_set(priv, 0, false);
2453 mvpp2_prs_mac_promisc_set(priv, 0, false);
2454 mvpp2_prs_mac_multi_set(priv, MVPP2_PE_MAC_MC_ALL, 0, false);
2455 mvpp2_prs_mac_multi_set(priv, MVPP2_PE_MAC_MC_IP6, 0, false);
2458 /* Set default entries for various types of dsa packets */
2459 static void mvpp2_prs_dsa_init(struct mvpp2 *priv)
2461 struct mvpp2_prs_entry pe;
2463	/* Untagged EDSA entry - place holder */
2464	mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED,
2465			      MVPP2_PRS_EDSA);
2467 /* Tagged EDSA entry - place holder */
2468 mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
2470	/* Untagged DSA entry - place holder */
2471	mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED,
2472			      MVPP2_PRS_DSA);
2474 /* Tagged DSA entry - place holder */
2475 mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
2477	/* Untagged EDSA ethertype entry - place holder */
2478 mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false,
2479 MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
2481	/* Tagged EDSA ethertype entry - place holder */
2482 mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false,
2483 MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
2485	/* Untagged DSA ethertype entry */
2486 mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true,
2487 MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
2489 /* Tagged DSA ethertype entry */
2490 mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true,
2491 MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
2493	/* Set default entry, in case the DSA or EDSA tag is not found */
2494 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2495 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
2496 pe.index = MVPP2_PE_DSA_DEFAULT;
2497 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
2500 mvpp2_prs_sram_shift_set(&pe, 0, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2501 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
2503 /* Clear all sram ai bits for next iteration */
2504 mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
2506 /* Unmask all ports */
2507 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2509 mvpp2_prs_hw_write(priv, &pe);
2512 /* Match basic ethertypes */
2513 static int mvpp2_prs_etype_init(struct mvpp2 *priv)
2515 struct mvpp2_prs_entry pe;
2518 /* Ethertype: PPPoE */
2519 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2520 MVPP2_PE_LAST_FREE_TID);
2524 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2525 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2528 mvpp2_prs_match_etype(&pe, 0, ETH_P_PPP_SES);
2530 mvpp2_prs_sram_shift_set(&pe, MVPP2_PPPOE_HDR_SIZE,
2531 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2532 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
2533 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_PPPOE_MASK,
2534 MVPP2_PRS_RI_PPPOE_MASK);
2536 /* Update shadow table and hw entry */
2537 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2538 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2539 priv->prs_shadow[pe.index].finish = false;
2540 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_PPPOE_MASK,
2541 MVPP2_PRS_RI_PPPOE_MASK);
2542 mvpp2_prs_hw_write(priv, &pe);
2544 /* Ethertype: ARP */
2545 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2546 MVPP2_PE_LAST_FREE_TID);
2550 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2551 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2554 mvpp2_prs_match_etype(&pe, 0, ETH_P_ARP);
2556	/* Generate flow in the next iteration */
2557 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2558 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2559 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_ARP,
2560 MVPP2_PRS_RI_L3_PROTO_MASK);
2562	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2563				  MVPP2_ETH_TYPE_LEN,
2564				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2566 /* Update shadow table and hw entry */
2567 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2568 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2569 priv->prs_shadow[pe.index].finish = true;
2570 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_ARP,
2571 MVPP2_PRS_RI_L3_PROTO_MASK);
2572 mvpp2_prs_hw_write(priv, &pe);
2574 /* Ethertype: LBTD */
2575 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2576 MVPP2_PE_LAST_FREE_TID);
2580 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2581 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2584 mvpp2_prs_match_etype(&pe, 0, MVPP2_IP_LBDT_TYPE);
2586	/* Generate flow in the next iteration */
2587 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2588 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2589 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
2590 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
2591 MVPP2_PRS_RI_CPU_CODE_MASK |
2592 MVPP2_PRS_RI_UDF3_MASK);
2594	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2595				  MVPP2_ETH_TYPE_LEN,
2596				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2598 /* Update shadow table and hw entry */
2599 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2600 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2601 priv->prs_shadow[pe.index].finish = true;
2602 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
2603 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
2604 MVPP2_PRS_RI_CPU_CODE_MASK |
2605 MVPP2_PRS_RI_UDF3_MASK);
2606 mvpp2_prs_hw_write(priv, &pe);
2608 /* Ethertype: IPv4 without options */
2609 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2610 MVPP2_PE_LAST_FREE_TID);
2614 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2615 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2618 mvpp2_prs_match_etype(&pe, 0, ETH_P_IP);
2619 mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
2620 MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
2621 MVPP2_PRS_IPV4_HEAD_MASK |
2622 MVPP2_PRS_IPV4_IHL_MASK);
2624 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
2625 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
2626 MVPP2_PRS_RI_L3_PROTO_MASK);
2627 /* Skip eth_type + 4 bytes of IP header */
2628 mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
2629 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2631	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2632				  MVPP2_ETH_TYPE_LEN,
2633				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2635 /* Update shadow table and hw entry */
2636 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2637 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2638 priv->prs_shadow[pe.index].finish = false;
2639 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4,
2640 MVPP2_PRS_RI_L3_PROTO_MASK);
2641 mvpp2_prs_hw_write(priv, &pe);
2643 /* Ethertype: IPv4 with options */
2644 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2645 MVPP2_PE_LAST_FREE_TID);
2651 /* Clear tcam data before updating */
2652 pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(MVPP2_ETH_TYPE_LEN)] = 0x0;
2653 pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(MVPP2_ETH_TYPE_LEN)] = 0x0;
2655 mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
2656 MVPP2_PRS_IPV4_HEAD,
2657 MVPP2_PRS_IPV4_HEAD_MASK);
2659 /* Clear ri before updating */
2660 pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
2661 pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
2662 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
2663 MVPP2_PRS_RI_L3_PROTO_MASK);
2665 /* Update shadow table and hw entry */
2666 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2667 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2668 priv->prs_shadow[pe.index].finish = false;
2669 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4_OPT,
2670 MVPP2_PRS_RI_L3_PROTO_MASK);
2671 mvpp2_prs_hw_write(priv, &pe);
2673 /* Ethertype: IPv6 without options */
2674 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2675 MVPP2_PE_LAST_FREE_TID);
2679 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2680 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2683 mvpp2_prs_match_etype(&pe, 0, ETH_P_IPV6);
2685	/* Skip the DIP of the IPv6 header */
2686 mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 +
2687 MVPP2_MAX_L3_ADDR_SIZE,
2688 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2689 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
2690 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
2691 MVPP2_PRS_RI_L3_PROTO_MASK);
2693	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2694				  MVPP2_ETH_TYPE_LEN,
2695				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2697 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2698 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2699 priv->prs_shadow[pe.index].finish = false;
2700 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP6,
2701 MVPP2_PRS_RI_L3_PROTO_MASK);
2702 mvpp2_prs_hw_write(priv, &pe);
2704 /* Default entry for MVPP2_PRS_LU_L2 - Unknown ethtype */
2705 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2706 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2707 pe.index = MVPP2_PE_ETH_TYPE_UN;
2709 /* Unmask all ports */
2710 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2712	/* Generate flow in the next iteration */
2713 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2714 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2715 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
2716 MVPP2_PRS_RI_L3_PROTO_MASK);
2717	/* Set L3 offset even if it's unknown L3 */
2718	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2719				  MVPP2_ETH_TYPE_LEN,
2720				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2722 /* Update shadow table and hw entry */
2723 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2724 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2725 priv->prs_shadow[pe.index].finish = true;
2726 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_UN,
2727 MVPP2_PRS_RI_L3_PROTO_MASK);
2728 mvpp2_prs_hw_write(priv, &pe);
2733 /* Configure vlan entries and detect up to 2 successive VLAN tags.
2734  * Possible options:
2735  * 0x8100, 0x88a8
2736  * 0x8100, 0x8100
2737  * 0x8100
2738  * 0x88a8
2739  */
2740 static int mvpp2_prs_vlan_init(struct platform_device *pdev, struct mvpp2 *priv)
2742 struct mvpp2_prs_entry pe;
2745 priv->prs_double_vlans = devm_kcalloc(&pdev->dev, sizeof(bool),
2746					      MVPP2_PRS_DBL_VLANS_MAX,
2747					      GFP_KERNEL);
2748 if (!priv->prs_double_vlans)
2751 /* Double VLAN: 0x8100, 0x88A8 */
2752 err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021AD,
2753 MVPP2_PRS_PORT_MASK);
2757 /* Double VLAN: 0x8100, 0x8100 */
2758 err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021Q,
2759 MVPP2_PRS_PORT_MASK);
2763 /* Single VLAN: 0x88a8 */
2764 err = mvpp2_prs_vlan_add(priv, ETH_P_8021AD, MVPP2_PRS_SINGLE_VLAN_AI,
2765 MVPP2_PRS_PORT_MASK);
2769 /* Single VLAN: 0x8100 */
2770 err = mvpp2_prs_vlan_add(priv, ETH_P_8021Q, MVPP2_PRS_SINGLE_VLAN_AI,
2771 MVPP2_PRS_PORT_MASK);
2775 /* Set default double vlan entry */
2776 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2777 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
2778 pe.index = MVPP2_PE_VLAN_DBL;
2780 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
2781 /* Clear ai for next iterations */
2782 mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
2783 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_DOUBLE,
2784 MVPP2_PRS_RI_VLAN_MASK);
2786 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_DBL_VLAN_AI_BIT,
2787 MVPP2_PRS_DBL_VLAN_AI_BIT);
2788 /* Unmask all ports */
2789 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2791 /* Update shadow table and hw entry */
2792 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
2793 mvpp2_prs_hw_write(priv, &pe);
2795 /* Set default vlan none entry */
2796 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2797 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
2798 pe.index = MVPP2_PE_VLAN_NONE;
2800 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
2801 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
2802 MVPP2_PRS_RI_VLAN_MASK);
2804 /* Unmask all ports */
2805 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2807 /* Update shadow table and hw entry */
2808 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
2809 mvpp2_prs_hw_write(priv, &pe);
2814 /* Set entries for PPPoE ethertype */
2815 static int mvpp2_prs_pppoe_init(struct mvpp2 *priv)
2817 struct mvpp2_prs_entry pe;
2820 /* IPv4 over PPPoE with options */
2821 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2822 MVPP2_PE_LAST_FREE_TID);
2826 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2827 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
2830 mvpp2_prs_match_etype(&pe, 0, PPP_IP);
2832 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
2833 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
2834 MVPP2_PRS_RI_L3_PROTO_MASK);
2835 /* Skip eth_type + 4 bytes of IP header */
2836 mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
2837 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2839	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2840				  MVPP2_ETH_TYPE_LEN,
2841				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2843 /* Update shadow table and hw entry */
2844 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
2845 mvpp2_prs_hw_write(priv, &pe);
2847 /* IPv4 over PPPoE without options */
2848 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2849 MVPP2_PE_LAST_FREE_TID);
2855 mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
2856 MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
2857 MVPP2_PRS_IPV4_HEAD_MASK |
2858 MVPP2_PRS_IPV4_IHL_MASK);
2860 /* Clear ri before updating */
2861 pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
2862 pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
2863 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
2864 MVPP2_PRS_RI_L3_PROTO_MASK);
2866 /* Update shadow table and hw entry */
2867 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
2868 mvpp2_prs_hw_write(priv, &pe);
2870 /* IPv6 over PPPoE */
2871 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2872 MVPP2_PE_LAST_FREE_TID);
2876 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2877 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
2880 mvpp2_prs_match_etype(&pe, 0, PPP_IPV6);
2882 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
2883 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
2884 MVPP2_PRS_RI_L3_PROTO_MASK);
2885 /* Skip eth_type + 4 bytes of IPv6 header */
2886 mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
2887 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2889	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2890				  MVPP2_ETH_TYPE_LEN,
2891				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2893 /* Update shadow table and hw entry */
2894 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
2895 mvpp2_prs_hw_write(priv, &pe);
2897 /* Non-IP over PPPoE */
2898 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2899 MVPP2_PE_LAST_FREE_TID);
2903 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2904 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
2907 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
2908 MVPP2_PRS_RI_L3_PROTO_MASK);
2910 /* Finished: go to flowid generation */
2911 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2912 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2913 /* Set L3 offset even if it's unknown L3 */
2914	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2915				  MVPP2_ETH_TYPE_LEN,
2916				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2918 /* Update shadow table and hw entry */
2919 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
2920 mvpp2_prs_hw_write(priv, &pe);
2925 /* Initialize entries for IPv4 */
2926 static int mvpp2_prs_ip4_init(struct mvpp2 *priv)
2928 struct mvpp2_prs_entry pe;
2931 /* Set entries for TCP, UDP and IGMP over IPv4 */
2932 err = mvpp2_prs_ip4_proto(priv, IPPROTO_TCP, MVPP2_PRS_RI_L4_TCP,
2933 MVPP2_PRS_RI_L4_PROTO_MASK);
2937 err = mvpp2_prs_ip4_proto(priv, IPPROTO_UDP, MVPP2_PRS_RI_L4_UDP,
2938 MVPP2_PRS_RI_L4_PROTO_MASK);
2942 err = mvpp2_prs_ip4_proto(priv, IPPROTO_IGMP,
2943 MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
2944 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
2945 MVPP2_PRS_RI_CPU_CODE_MASK |
2946 MVPP2_PRS_RI_UDF3_MASK);
2950 /* IPv4 Broadcast */
2951 err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_BROAD_CAST);
2955 /* IPv4 Multicast */
2956 err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_MULTI_CAST);
2960 /* Default IPv4 entry for unknown protocols */
2961 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2962 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
2963 pe.index = MVPP2_PE_IP4_PROTO_UN;
2965 /* Set next lu to IPv4 */
2966 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
2967 mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2969 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
2970 sizeof(struct iphdr) - 4,
2971 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2972 mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
2973 MVPP2_PRS_IPV4_DIP_AI_BIT);
2974 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
2975 MVPP2_PRS_RI_L4_PROTO_MASK);
2977 mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
2978 /* Unmask all ports */
2979 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2981 /* Update shadow table and hw entry */
2982 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
2983 mvpp2_prs_hw_write(priv, &pe);
2985 /* Default IPv4 entry for unicast address */
2986 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2987 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
2988 pe.index = MVPP2_PE_IP4_ADDR_UN;
2990 /* Finished: go to flowid generation */
2991 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2992 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2993 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
2994 MVPP2_PRS_RI_L3_ADDR_MASK);
2996 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
2997 MVPP2_PRS_IPV4_DIP_AI_BIT);
2998 /* Unmask all ports */
2999 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
3001 /* Update shadow table and hw entry */
3002 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
3003 mvpp2_prs_hw_write(priv, &pe);
3008 /* Initialize entries for IPv6 */
3009 static int mvpp2_prs_ip6_init(struct mvpp2 *priv)
3011 struct mvpp2_prs_entry pe;
3014 /* Set entries for TCP, UDP and ICMP over IPv6 */
3015 err = mvpp2_prs_ip6_proto(priv, IPPROTO_TCP,
3016 MVPP2_PRS_RI_L4_TCP,
3017 MVPP2_PRS_RI_L4_PROTO_MASK);
3021 err = mvpp2_prs_ip6_proto(priv, IPPROTO_UDP,
3022 MVPP2_PRS_RI_L4_UDP,
3023 MVPP2_PRS_RI_L4_PROTO_MASK);
3027 err = mvpp2_prs_ip6_proto(priv, IPPROTO_ICMPV6,
3028 MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
3029 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
3030 MVPP2_PRS_RI_CPU_CODE_MASK |
3031 MVPP2_PRS_RI_UDF3_MASK);
3035	/* IPv4 is the last header - a case similar to 6-TCP or 17-UDP */
3036 /* Result Info: UDF7=1, DS lite */
3037 err = mvpp2_prs_ip6_proto(priv, IPPROTO_IPIP,
3038 MVPP2_PRS_RI_UDF7_IP6_LITE,
3039 MVPP2_PRS_RI_UDF7_MASK);
3043 /* IPv6 multicast */
3044 err = mvpp2_prs_ip6_cast(priv, MVPP2_PRS_L3_MULTI_CAST);
3048 /* Entry for checking hop limit */
3049 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
3050 MVPP2_PE_LAST_FREE_TID);
3054 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
3055 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
3058 /* Finished: go to flowid generation */
3059 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
3060 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
3061 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN |
3062 MVPP2_PRS_RI_DROP_MASK,
3063 MVPP2_PRS_RI_L3_PROTO_MASK |
3064 MVPP2_PRS_RI_DROP_MASK);
3066 mvpp2_prs_tcam_data_byte_set(&pe, 1, 0x00, MVPP2_PRS_IPV6_HOP_MASK);
3067 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
3068 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
3070 /* Update shadow table and hw entry */
3071	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
3072 mvpp2_prs_hw_write(priv, &pe);
3074 /* Default IPv6 entry for unknown protocols */
3075 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
3076 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
3077 pe.index = MVPP2_PE_IP6_PROTO_UN;
3079 /* Finished: go to flowid generation */
3080 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
3081 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
3082 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
3083 MVPP2_PRS_RI_L4_PROTO_MASK);
3084	/* Set L4 offset relative to our current place */
3085 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
3086 sizeof(struct ipv6hdr) - 4,
3087 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
3089 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
3090 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
3091 /* Unmask all ports */
3092 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
3094 /* Update shadow table and hw entry */
3095	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
3096 mvpp2_prs_hw_write(priv, &pe);
3098 /* Default IPv6 entry for unknown ext protocols */
3099 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
3100 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
3101 pe.index = MVPP2_PE_IP6_EXT_PROTO_UN;
3103 /* Finished: go to flowid generation */
3104 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
3105 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
3106 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
3107 MVPP2_PRS_RI_L4_PROTO_MASK);
3109 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_EXT_AI_BIT,
3110 MVPP2_PRS_IPV6_EXT_AI_BIT);
3111 /* Unmask all ports */
3112 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
3114 /* Update shadow table and hw entry */
3115	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
3116 mvpp2_prs_hw_write(priv, &pe);
3118 /* Default IPv6 entry for unicast address */
3119 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
3120 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
3121 pe.index = MVPP2_PE_IP6_ADDR_UN;
3123	/* Go back to the IPv6 lookup to parse the next header */
3124 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
3125 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
3126 MVPP2_PRS_RI_L3_ADDR_MASK);
3127 mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
3128 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
3129	/* Shift back from the DIP to the IPv6 Next Header field (24 - 18 = offset 6) */
3130 mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
3132 mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
3133 /* Unmask all ports */
3134 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
3136 /* Update shadow table and hw entry */
3137 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
3138 mvpp2_prs_hw_write(priv, &pe);
3143 /* Parser default initialization */
3144 static int mvpp2_prs_default_init(struct platform_device *pdev,
3145				   struct mvpp2 *priv)
3149 /* Enable tcam table */
3150 mvpp2_write(priv, MVPP2_PRS_TCAM_CTRL_REG, MVPP2_PRS_TCAM_EN_MASK);
3152 /* Clear all tcam and sram entries */
3153 for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++) {
3154 mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
3155 for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
3156 mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), 0);
3158 mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, index);
3159 for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
3160 mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), 0);
3163 /* Invalidate all tcam entries */
3164 for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++)
3165 mvpp2_prs_hw_inv(priv, index);
3167 priv->prs_shadow = devm_kcalloc(&pdev->dev, MVPP2_PRS_TCAM_SRAM_SIZE,
3168					sizeof(struct mvpp2_prs_shadow),
3169					GFP_KERNEL);
3170 if (!priv->prs_shadow)
3173 /* Always start from lookup = 0 */
3174 for (index = 0; index < MVPP2_MAX_PORTS; index++)
3175 mvpp2_prs_hw_port_init(priv, index, MVPP2_PRS_LU_MH,
3176 MVPP2_PRS_PORT_LU_MAX, 0);
3178 mvpp2_prs_def_flow_init(priv);
3180 mvpp2_prs_mh_init(priv);
3182 mvpp2_prs_mac_init(priv);
3184 mvpp2_prs_dsa_init(priv);
3186 err = mvpp2_prs_etype_init(priv);
3190 err = mvpp2_prs_vlan_init(pdev, priv);
3194 err = mvpp2_prs_pppoe_init(priv);
3198 err = mvpp2_prs_ip6_init(priv);
3202 err = mvpp2_prs_ip4_init(priv);
3209 /* Compare MAC DA with tcam entry data */
3210 static bool mvpp2_prs_mac_range_equals(struct mvpp2_prs_entry *pe,
3211 const u8 *da, unsigned char *mask)
3213 unsigned char tcam_byte, tcam_mask;
3216 for (index = 0; index < ETH_ALEN; index++) {
3217 mvpp2_prs_tcam_data_byte_get(pe, index, &tcam_byte, &tcam_mask);
3218 if (tcam_mask != mask[index])
3221 if ((tcam_mask & tcam_byte) != (da[index] & mask[index]))
3228 /* Find tcam entry with matched pair <MAC DA, port> */
3229 static struct mvpp2_prs_entry *
3230 mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da,
3231 unsigned char *mask, int udf_type)
3233 struct mvpp2_prs_entry *pe;
3236 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
3239 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);
3241	/* Go through all entries with MVPP2_PRS_LU_MAC */
3242 for (tid = MVPP2_PE_FIRST_FREE_TID;
3243 tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
3244 unsigned int entry_pmap;
3246 if (!priv->prs_shadow[tid].valid ||
3247 (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
3248 (priv->prs_shadow[tid].udf != udf_type))
3252 mvpp2_prs_hw_read(priv, pe);
3253 entry_pmap = mvpp2_prs_tcam_port_map_get(pe);
3255 if (mvpp2_prs_mac_range_equals(pe, da, mask) &&
3264 /* Update parser's mac da entry */
3265 static int mvpp2_prs_mac_da_accept(struct mvpp2 *priv, int port,
3266 const u8 *da, bool add)
3268 struct mvpp2_prs_entry *pe;
3269 unsigned int pmap, len, ri;
3270 unsigned char mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
3273	/* Scan TCAM and see if entry with this <MAC DA, port> already exists */
3274 pe = mvpp2_prs_mac_da_range_find(priv, (1 << port), da, mask,
3275 MVPP2_PRS_UDF_MAC_DEF);
3282 /* Create new TCAM entry */
3283 /* Find first range mac entry*/
3284 for (tid = MVPP2_PE_FIRST_FREE_TID;
3285 tid <= MVPP2_PE_LAST_FREE_TID; tid++)
3286 if (priv->prs_shadow[tid].valid &&
3287 (priv->prs_shadow[tid].lu == MVPP2_PRS_LU_MAC) &&
3288 (priv->prs_shadow[tid].udf ==
3289 MVPP2_PRS_UDF_MAC_RANGE))
3292		/* Go through all entries from first to last */
3293		tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
3294						tid - 1);
3298 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
3301 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);
3304 /* Mask all ports */
3305 mvpp2_prs_tcam_port_map_set(pe, 0);
3308 /* Update port mask */
3309 mvpp2_prs_tcam_port_set(pe, port, add);
3311 /* Invalidate the entry if no ports are left enabled */
3312 pmap = mvpp2_prs_tcam_port_map_get(pe);
3318 mvpp2_prs_hw_inv(priv, pe->index);
3319 priv->prs_shadow[pe->index].valid = false;
3324 /* Continue - set next lookup */
3325 mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_DSA);
3327 /* Set match on DA */
3330 mvpp2_prs_tcam_data_byte_set(pe, len, da[len], 0xff);
3332 /* Set result info bits */
3333 if (is_broadcast_ether_addr(da))
3334 ri = MVPP2_PRS_RI_L2_BCAST;
3335 else if (is_multicast_ether_addr(da))
3336 ri = MVPP2_PRS_RI_L2_MCAST;
3338 ri = MVPP2_PRS_RI_L2_UCAST | MVPP2_PRS_RI_MAC_ME_MASK;
3340 mvpp2_prs_sram_ri_update(pe, ri, MVPP2_PRS_RI_L2_CAST_MASK |
3341 MVPP2_PRS_RI_MAC_ME_MASK);
3342 mvpp2_prs_shadow_ri_set(priv, pe->index, ri, MVPP2_PRS_RI_L2_CAST_MASK |
3343 MVPP2_PRS_RI_MAC_ME_MASK);
3345 /* Shift to ethertype */
3346 mvpp2_prs_sram_shift_set(pe, 2 * ETH_ALEN,
3347 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
3349 /* Update shadow table and hw entry */
3350 priv->prs_shadow[pe->index].udf = MVPP2_PRS_UDF_MAC_DEF;
3351 mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_MAC);
3352 mvpp2_prs_hw_write(priv, pe);
3359 static int mvpp2_prs_update_mac_da(struct net_device *dev, const u8 *da)
3361 struct mvpp2_port *port = netdev_priv(dev);
3364 /* Remove old parser entry */
3365	err = mvpp2_prs_mac_da_accept(port->priv, port->id, dev->dev_addr,
3366				      false);
3370 /* Add new parser entry */
3371 err = mvpp2_prs_mac_da_accept(port->priv, port->id, da, true);
3375 /* Set addr in the device */
3376 ether_addr_copy(dev->dev_addr, da);
3381 /* Delete all the port's simple (not range) multicast entries */
3382 static void mvpp2_prs_mcast_del_all(struct mvpp2 *priv, int port)
3384 struct mvpp2_prs_entry pe;
3387 for (tid = MVPP2_PE_FIRST_FREE_TID;
3388 tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
3389 unsigned char da[ETH_ALEN], da_mask[ETH_ALEN];
3391 if (!priv->prs_shadow[tid].valid ||
3392 (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
3393 (priv->prs_shadow[tid].udf != MVPP2_PRS_UDF_MAC_DEF))
3396 /* Only simple mac entries */
3398 mvpp2_prs_hw_read(priv, &pe);
3400 /* Read mac addr from entry */
3401 for (index = 0; index < ETH_ALEN; index++)
3402			mvpp2_prs_tcam_data_byte_get(&pe, index, &da[index],
3403						     &da_mask[index]);
3405 if (is_multicast_ether_addr(da) && !is_broadcast_ether_addr(da))
3406 /* Delete this entry */
3407 mvpp2_prs_mac_da_accept(priv, port, da, false);
3411 static int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type)
3414 case MVPP2_TAG_TYPE_EDSA:
3415 /* Add port to EDSA entries */
3416 mvpp2_prs_dsa_tag_set(priv, port, true,
3417 MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
3418 mvpp2_prs_dsa_tag_set(priv, port, true,
3419 MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
3420 /* Remove port from DSA entries */
3421 mvpp2_prs_dsa_tag_set(priv, port, false,
3422 MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
3423 mvpp2_prs_dsa_tag_set(priv, port, false,
3424 MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
3427 case MVPP2_TAG_TYPE_DSA:
3428 /* Add port to DSA entries */
3429 mvpp2_prs_dsa_tag_set(priv, port, true,
3430 MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
3431 mvpp2_prs_dsa_tag_set(priv, port, true,
3432 MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
3433 /* Remove port from EDSA entries */
3434 mvpp2_prs_dsa_tag_set(priv, port, false,
3435 MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
3436 mvpp2_prs_dsa_tag_set(priv, port, false,
3437 MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
3440 case MVPP2_TAG_TYPE_MH:
3441 case MVPP2_TAG_TYPE_NONE:
3442		/* Remove port from EDSA and DSA entries */
3443 mvpp2_prs_dsa_tag_set(priv, port, false,
3444 MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
3445 mvpp2_prs_dsa_tag_set(priv, port, false,
3446 MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
3447 mvpp2_prs_dsa_tag_set(priv, port, false,
3448 MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
3449 mvpp2_prs_dsa_tag_set(priv, port, false,
3450 MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
3454 if ((type < 0) || (type > MVPP2_TAG_TYPE_EDSA))
3461 /* Set prs flow for the port */
3462 static int mvpp2_prs_def_flow(struct mvpp2_port *port)
3464 struct mvpp2_prs_entry *pe;
3467 pe = mvpp2_prs_flow_find(port->priv, port->id);
3469	/* No such entry - create a new one */
3471		/* Go through all entries from last to first */
3472 tid = mvpp2_prs_tcam_first_free(port->priv,
3473 MVPP2_PE_LAST_FREE_TID,
3474 MVPP2_PE_FIRST_FREE_TID);
3478 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
3482 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS);
3486 mvpp2_prs_sram_ai_update(pe, port->id, MVPP2_PRS_FLOW_ID_MASK);
3487 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
3489 /* Update shadow table */
3490 mvpp2_prs_shadow_set(port->priv, pe->index, MVPP2_PRS_LU_FLOWS);
3493 mvpp2_prs_tcam_port_map_set(pe, (1 << port->id));
3494 mvpp2_prs_hw_write(port->priv, pe);
3500 /* Classifier configuration routines */
3502 /* Update classification flow table registers */
3503 static void mvpp2_cls_flow_write(struct mvpp2 *priv,
3504 struct mvpp2_cls_flow_entry *fe)
3506 mvpp2_write(priv, MVPP2_CLS_FLOW_INDEX_REG, fe->index);
3507 mvpp2_write(priv, MVPP2_CLS_FLOW_TBL0_REG, fe->data[0]);
3508 mvpp2_write(priv, MVPP2_CLS_FLOW_TBL1_REG, fe->data[1]);
3509 mvpp2_write(priv, MVPP2_CLS_FLOW_TBL2_REG, fe->data[2]);
3512 /* Update classification lookup table register */
3513 static void mvpp2_cls_lookup_write(struct mvpp2 *priv,
3514 struct mvpp2_cls_lookup_entry *le)
3518 val = (le->way << MVPP2_CLS_LKP_INDEX_WAY_OFFS) | le->lkpid;
3519 mvpp2_write(priv, MVPP2_CLS_LKP_INDEX_REG, val);
3520 mvpp2_write(priv, MVPP2_CLS_LKP_TBL_REG, le->data);
3523 /* Classifier default initialization */
3524 static void mvpp2_cls_init(struct mvpp2 *priv)
3526 struct mvpp2_cls_lookup_entry le;
3527 struct mvpp2_cls_flow_entry fe;
3530 /* Enable classifier */
3531 mvpp2_write(priv, MVPP2_CLS_MODE_REG, MVPP2_CLS_MODE_ACTIVE_MASK);
3533 /* Clear classifier flow table */
3534 memset(&fe.data, 0, sizeof(fe.data));
3535 for (index = 0; index < MVPP2_CLS_FLOWS_TBL_SIZE; index++) {
3537 mvpp2_cls_flow_write(priv, &fe);
3540 /* Clear classifier lookup table */
3542 for (index = 0; index < MVPP2_CLS_LKP_TBL_SIZE; index++) {
3545 mvpp2_cls_lookup_write(priv, &le);
3548 mvpp2_cls_lookup_write(priv, &le);
3552 static void mvpp2_cls_port_config(struct mvpp2_port *port)
3554 struct mvpp2_cls_lookup_entry le;
3557 /* Set way for the port */
3558 val = mvpp2_read(port->priv, MVPP2_CLS_PORT_WAY_REG);
3559 val &= ~MVPP2_CLS_PORT_WAY_MASK(port->id);
3560 mvpp2_write(port->priv, MVPP2_CLS_PORT_WAY_REG, val);
3562 /* Pick the entry to be accessed in lookup ID decoding table
3563	 * according to the way and lkpid.
3564	 */
3565 le.lkpid = port->id;
3569 /* Set initial CPU queue for receiving packets */
3570 le.data &= ~MVPP2_CLS_LKP_TBL_RXQ_MASK;
3571 le.data |= port->first_rxq;
3573 /* Disable classification engines */
3574 le.data &= ~MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK;
3576 /* Update lookup ID table entry */
3577 mvpp2_cls_lookup_write(port->priv, &le);
3580 /* Set CPU queue number for oversize packets */
3581 static void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port)
3585 mvpp2_write(port->priv, MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port->id),
3586 port->first_rxq & MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK);
3588 mvpp2_write(port->priv, MVPP2_CLS_SWFWD_P2HQ_REG(port->id),
3589 (port->first_rxq >> MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS));
3591 val = mvpp2_read(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG);
3592 val |= MVPP2_CLS_SWFWD_PCTRL_MASK(port->id);
3593 mvpp2_write(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG, val);
3596 static void *mvpp2_frag_alloc(const struct mvpp2_bm_pool *pool)
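	/* Sub-page buffers come from the page-fragment allocator, anything
	 * larger falls back to kmalloc()
	 */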
3598 if (likely(pool->frag_size <= PAGE_SIZE))
3599 return netdev_alloc_frag(pool->frag_size);
3601 return kmalloc(pool->frag_size, GFP_ATOMIC);
3604 static void mvpp2_frag_free(const struct mvpp2_bm_pool *pool, void *data)
3606 if (likely(pool->frag_size <= PAGE_SIZE))
3607 skb_free_frag(data);
3612 /* Buffer Manager configuration routines */
3615 static int mvpp2_bm_pool_create(struct platform_device *pdev,
3616				 struct mvpp2 *priv,
3617				 struct mvpp2_bm_pool *bm_pool, int size)
3621 /* Number of buffer pointers must be a multiple of 16, as per
3622	 * hardware constraints
3623	 */
3624 if (!IS_ALIGNED(size, 16))
3627 /* PPv2.1 needs 8 bytes per buffer pointer, PPv2.2 needs 16
3628	 * bytes per buffer pointer
3629	 */
3630 if (priv->hw_version == MVPP21)
3631 bm_pool->size_bytes = 2 * sizeof(u32) * size;
3633 bm_pool->size_bytes = 2 * sizeof(u64) * size;
3635	bm_pool->virt_addr = dma_alloc_coherent(&pdev->dev, bm_pool->size_bytes,
3636						&bm_pool->dma_addr,
3637						GFP_KERNEL);
3638 if (!bm_pool->virt_addr)
3641 if (!IS_ALIGNED((unsigned long)bm_pool->virt_addr,
3642 MVPP2_BM_POOL_PTR_ALIGN)) {
3643 dma_free_coherent(&pdev->dev, bm_pool->size_bytes,
3644 bm_pool->virt_addr, bm_pool->dma_addr);
3645 dev_err(&pdev->dev, "BM pool %d is not %d bytes aligned\n",
3646 bm_pool->id, MVPP2_BM_POOL_PTR_ALIGN);
3650 mvpp2_write(priv, MVPP2_BM_POOL_BASE_REG(bm_pool->id),
3651 lower_32_bits(bm_pool->dma_addr));
3652 mvpp2_write(priv, MVPP2_BM_POOL_SIZE_REG(bm_pool->id), size);
3654 val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
3655 val |= MVPP2_BM_START_MASK;
3656 mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);
3658 bm_pool->type = MVPP2_BM_FREE;
3659 bm_pool->size = size;
3660 bm_pool->pkt_size = 0;
3661 bm_pool->buf_num = 0;
3666 /* Set pool buffer size */
3667 static void mvpp2_bm_pool_bufsize_set(struct mvpp2 *priv,
3668 struct mvpp2_bm_pool *bm_pool,
3673 bm_pool->buf_size = buf_size;
3675 val = ALIGN(buf_size, 1 << MVPP2_POOL_BUF_SIZE_OFFSET);
3676 mvpp2_write(priv, MVPP2_POOL_BUF_SIZE_REG(bm_pool->id), val);
3679 static void mvpp2_bm_bufs_get_addrs(struct device *dev, struct mvpp2 *priv,
3680 struct mvpp2_bm_pool *bm_pool,
3681 dma_addr_t *dma_addr,
3682 phys_addr_t *phys_addr)
3684 int cpu = smp_processor_id();
3686 *dma_addr = mvpp2_percpu_read(priv, cpu,
3687 MVPP2_BM_PHY_ALLOC_REG(bm_pool->id));
3688 *phys_addr = mvpp2_percpu_read(priv, cpu, MVPP2_BM_VIRT_ALLOC_REG);
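	/* The alloc registers return only 32 bits; on PPv2.2 the high bits
	 * of both addresses are read back from MVPP22_BM_ADDR_HIGH_ALLOC
	 */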
3690 if (priv->hw_version == MVPP22) {
3692 u32 dma_addr_highbits, phys_addr_highbits;
3694 val = mvpp2_percpu_read(priv, cpu, MVPP22_BM_ADDR_HIGH_ALLOC);
3695 dma_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_PHYS_MASK);
3696 phys_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_VIRT_MASK) >>
3697 MVPP22_BM_ADDR_HIGH_VIRT_SHIFT;
3699 if (sizeof(dma_addr_t) == 8)
3700 *dma_addr |= (u64)dma_addr_highbits << 32;
3702 if (sizeof(phys_addr_t) == 8)
3703 *phys_addr |= (u64)phys_addr_highbits << 32;
3707 /* Free all buffers from the pool */
3708 static void mvpp2_bm_bufs_free(struct device *dev, struct mvpp2 *priv,
3709 struct mvpp2_bm_pool *bm_pool)
3713 for (i = 0; i < bm_pool->buf_num; i++) {
3714 dma_addr_t buf_dma_addr;
3715 phys_addr_t buf_phys_addr;
3718 mvpp2_bm_bufs_get_addrs(dev, priv, bm_pool,
3719 &buf_dma_addr, &buf_phys_addr);
3721 dma_unmap_single(dev, buf_dma_addr,
3722 bm_pool->buf_size, DMA_FROM_DEVICE);
3724 data = (void *)phys_to_virt(buf_phys_addr);
3728 mvpp2_frag_free(bm_pool, data);
3731 /* Update BM driver with number of buffers removed from pool */
3732 bm_pool->buf_num -= i;
3736 static int mvpp2_bm_pool_destroy(struct platform_device *pdev,
3738 struct mvpp2_bm_pool *bm_pool)
3742 mvpp2_bm_bufs_free(&pdev->dev, priv, bm_pool);
3743 if (bm_pool->buf_num) {
3744 WARN(1, "cannot free all buffers in pool %d\n", bm_pool->id);
3748 val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
3749 val |= MVPP2_BM_STOP_MASK;
3750 mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);
3752	dma_free_coherent(&pdev->dev, bm_pool->size_bytes,
3753			  bm_pool->virt_addr, bm_pool->dma_addr);
3758 static int mvpp2_bm_pools_init(struct platform_device *pdev,
3762 struct mvpp2_bm_pool *bm_pool;
3764 /* Create all pools with maximum size */
3765 size = MVPP2_BM_POOL_SIZE_MAX;
3766 for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
3767 bm_pool = &priv->bm_pools[i];
3769 err = mvpp2_bm_pool_create(pdev, priv, bm_pool, size);
3771 goto err_unroll_pools;
3772 mvpp2_bm_pool_bufsize_set(priv, bm_pool, 0);
3777 dev_err(&pdev->dev, "failed to create BM pool %d, size %d\n", i, size);
3778 for (i = i - 1; i >= 0; i--)
3779 mvpp2_bm_pool_destroy(pdev, priv, &priv->bm_pools[i]);
3783 static int mvpp2_bm_init(struct platform_device *pdev, struct mvpp2 *priv)
3787 for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
3788 /* Mask BM all interrupts */
3789 mvpp2_write(priv, MVPP2_BM_INTR_MASK_REG(i), 0);
3790 /* Clear BM cause register */
3791 mvpp2_write(priv, MVPP2_BM_INTR_CAUSE_REG(i), 0);
3794 /* Allocate and initialize BM pools */
3795 priv->bm_pools = devm_kcalloc(&pdev->dev, MVPP2_BM_POOLS_NUM,
3796 sizeof(struct mvpp2_bm_pool), GFP_KERNEL);
3797 if (!priv->bm_pools)
3800 err = mvpp2_bm_pools_init(pdev, priv);
3806 /* Attach long pool to rxq */
3807 static void mvpp2_rxq_long_pool_set(struct mvpp2_port *port,
3808 int lrxq, int long_pool)
3813 /* Get queue physical ID */
3814 prxq = port->rxqs[lrxq]->id;
3816 if (port->priv->hw_version == MVPP21)
3817 mask = MVPP21_RXQ_POOL_LONG_MASK;
3819 mask = MVPP22_RXQ_POOL_LONG_MASK;
3821	val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
3822	val &= ~mask;
3823 val |= (long_pool << MVPP2_RXQ_POOL_LONG_OFFS) & mask;
3824 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
3827 /* Attach short pool to rxq */
3828 static void mvpp2_rxq_short_pool_set(struct mvpp2_port *port,
3829 int lrxq, int short_pool)
3834 /* Get queue physical ID */
3835 prxq = port->rxqs[lrxq]->id;
3837 if (port->priv->hw_version == MVPP21)
3838 mask = MVPP21_RXQ_POOL_SHORT_MASK;
3840 mask = MVPP22_RXQ_POOL_SHORT_MASK;
3842	val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
3843	val &= ~mask;
3844 val |= (short_pool << MVPP2_RXQ_POOL_SHORT_OFFS) & mask;
3845 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
3848 static void *mvpp2_buf_alloc(struct mvpp2_port *port,
3849 struct mvpp2_bm_pool *bm_pool,
3850 dma_addr_t *buf_dma_addr,
3851			     phys_addr_t *buf_phys_addr,
3852			     gfp_t gfp_mask)
3854 dma_addr_t dma_addr;
3857 data = mvpp2_frag_alloc(bm_pool);
3861 dma_addr = dma_map_single(port->dev->dev.parent, data,
3862				   MVPP2_RX_BUF_SIZE(bm_pool->pkt_size),
3863				   DMA_FROM_DEVICE);
3864 if (unlikely(dma_mapping_error(port->dev->dev.parent, dma_addr))) {
3865 mvpp2_frag_free(bm_pool, data);
3868 *buf_dma_addr = dma_addr;
3869 *buf_phys_addr = virt_to_phys(data);
3874 /* Set pool number in a BM cookie */
3875 static inline u32 mvpp2_bm_cookie_pool_set(u32 cookie, int pool)
3879 bm = cookie & ~(0xFF << MVPP2_BM_COOKIE_POOL_OFFS);
3880 bm |= ((pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS);
3885 /* Get pool number from a BM cookie */
3886 static inline int mvpp2_bm_cookie_pool_get(unsigned long cookie)
3888 return (cookie >> MVPP2_BM_COOKIE_POOL_OFFS) & 0xFF;
3891 /* Release buffer to BM */
3892 static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
3893 dma_addr_t buf_dma_addr,
3894 phys_addr_t buf_phys_addr)
3896 int cpu = smp_processor_id();
3898 if (port->priv->hw_version == MVPP22) {
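		/* Stage the high address bits first; the release itself is
		 * triggered by the MVPP2_BM_PHY_RLS_REG write below
		 */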
3901 if (sizeof(dma_addr_t) == 8)
3902 val |= upper_32_bits(buf_dma_addr) &
3903 MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK;
3905 if (sizeof(phys_addr_t) == 8)
3906 val |= (upper_32_bits(buf_phys_addr)
3907 << MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT) &
3908 MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK;
3910 mvpp2_percpu_write(port->priv, cpu,
3911 MVPP22_BM_ADDR_HIGH_RLS_REG, val);
3914 /* MVPP2_BM_VIRT_RLS_REG is not interpreted by HW, and simply
3915 * returned in the "cookie" field of the RX
3916 * descriptor. Instead of storing the virtual address, we
3917	 * store the physical address.
3918	 */
3919 mvpp2_percpu_write(port->priv, cpu,
3920 MVPP2_BM_VIRT_RLS_REG, buf_phys_addr);
3921 mvpp2_percpu_write(port->priv, cpu,
3922 MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr);
3925 /* Refill BM pool */
3926 static void mvpp2_pool_refill(struct mvpp2_port *port, u32 bm,
3927 dma_addr_t dma_addr,
3928 phys_addr_t phys_addr)
3930 int pool = mvpp2_bm_cookie_pool_get(bm);
3932 mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
3935 /* Allocate buffers for the pool */
3936 static int mvpp2_bm_bufs_add(struct mvpp2_port *port,
3937 struct mvpp2_bm_pool *bm_pool, int buf_num)
3939 int i, buf_size, total_size;
3940 dma_addr_t dma_addr;
3941 phys_addr_t phys_addr;
3944 buf_size = MVPP2_RX_BUF_SIZE(bm_pool->pkt_size);
3945 total_size = MVPP2_RX_TOTAL_SIZE(buf_size);
3947	if (buf_num < 0 ||
3948	    (buf_num + bm_pool->buf_num > bm_pool->size)) {
3949 netdev_err(port->dev,
3950 "cannot allocate %d buffers for pool %d\n",
3951 buf_num, bm_pool->id);
3955 for (i = 0; i < buf_num; i++) {
3956 buf = mvpp2_buf_alloc(port, bm_pool, &dma_addr,
3957 &phys_addr, GFP_KERNEL);
3961		mvpp2_bm_pool_put(port, bm_pool->id, dma_addr,
3962				  phys_addr);
3965 /* Update BM driver with number of buffers added to pool */
3966 bm_pool->buf_num += i;
3968 netdev_dbg(port->dev,
3969 "%s pool %d: pkt_size=%4d, buf_size=%4d, total_size=%4d\n",
3970 bm_pool->type == MVPP2_BM_SWF_SHORT ? "short" : " long",
3971 bm_pool->id, bm_pool->pkt_size, buf_size, total_size);
3973 netdev_dbg(port->dev,
3974 "%s pool %d: %d of %d buffers added\n",
3975 bm_pool->type == MVPP2_BM_SWF_SHORT ? "short" : " long",
3976 bm_pool->id, i, buf_num);
/* Notify the driver that BM pool is being used as specific type and return the
 * pool pointer on success
 */
static struct mvpp2_bm_pool *
mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
		  int pkt_size)
{
	struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
	int num;

	if (new_pool->type != MVPP2_BM_FREE && new_pool->type != type) {
		netdev_err(port->dev, "mixing pool types is forbidden\n");
		return NULL;
	}

	if (new_pool->type == MVPP2_BM_FREE)
		new_pool->type = type;

	/* Allocate buffers in case BM pool is used as long pool, but packet
	 * size doesn't match MTU or BM pool hasn't been used yet
	 */
	if (((type == MVPP2_BM_SWF_LONG) && (pkt_size > new_pool->pkt_size)) ||
	    (new_pool->pkt_size == 0)) {
		int pkts_num;

		/* Set default buffer number or free all the buffers in case
		 * the pool is not empty
		 */
		pkts_num = new_pool->buf_num;
		if (pkts_num == 0)
			pkts_num = type == MVPP2_BM_SWF_LONG ?
				   MVPP2_BM_LONG_BUF_NUM :
				   MVPP2_BM_SHORT_BUF_NUM;
		else
			mvpp2_bm_bufs_free(port->dev->dev.parent,
					   port->priv, new_pool);

		new_pool->pkt_size = pkt_size;
		new_pool->frag_size =
			SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) +
			MVPP2_SKB_SHINFO_SIZE;

		/* Allocate buffers for this pool */
		num = mvpp2_bm_bufs_add(port, new_pool, pkts_num);
		if (num != pkts_num) {
			WARN(1, "pool %d: %d of %d allocated\n",
			     new_pool->id, num, pkts_num);
			return NULL;
		}
	}

	mvpp2_bm_pool_bufsize_set(port->priv, new_pool,
				  MVPP2_RX_BUF_SIZE(new_pool->pkt_size));

	return new_pool;
}
/* Initialize pools for swf */
static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
{
	int rxq;

	if (!port->pool_long) {
		port->pool_long =
			mvpp2_bm_pool_use(port, MVPP2_BM_SWF_LONG_POOL(port->id),
					  MVPP2_BM_SWF_LONG,
					  port->pkt_size);
		if (!port->pool_long)
			return -ENOMEM;

		port->pool_long->port_map |= (1 << port->id);

		for (rxq = 0; rxq < rxq_number; rxq++)
			mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id);
	}

	if (!port->pool_short) {
		port->pool_short =
			mvpp2_bm_pool_use(port, MVPP2_BM_SWF_SHORT_POOL,
					  MVPP2_BM_SWF_SHORT,
					  MVPP2_BM_SHORT_PKT_SIZE);
		if (!port->pool_short)
			return -ENOMEM;

		port->pool_short->port_map |= (1 << port->id);

		for (rxq = 0; rxq < rxq_number; rxq++)
			mvpp2_rxq_short_pool_set(port, rxq,
						 port->pool_short->id);
	}

	return 0;
}
static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2_bm_pool *port_pool = port->pool_long;
	int num, pkts_num = port_pool->buf_num;
	int pkt_size = MVPP2_RX_PKT_SIZE(mtu);

	/* Update BM pool with new buffer size */
	mvpp2_bm_bufs_free(dev->dev.parent, port->priv, port_pool);
	if (port_pool->buf_num) {
		WARN(1, "cannot free all buffers in pool %d\n", port_pool->id);
		return -EIO;
	}

	port_pool->pkt_size = pkt_size;
	port_pool->frag_size = SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) +
		MVPP2_SKB_SHINFO_SIZE;
	num = mvpp2_bm_bufs_add(port, port_pool, pkts_num);
	if (num != pkts_num) {
		WARN(1, "pool %d: %d of %d allocated\n",
		     port_pool->id, num, pkts_num);
		return -EIO;
	}

	mvpp2_bm_pool_bufsize_set(port->priv, port_pool,
				  MVPP2_RX_BUF_SIZE(port_pool->pkt_size));
	dev->mtu = mtu;
	netdev_update_features(dev);
	return 0;
}
static inline void mvpp2_interrupts_enable(struct mvpp2_port *port)
{
	int cpu, cpu_mask = 0;

	for_each_present_cpu(cpu)
		cpu_mask |= 1 << cpu;
	mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
		    MVPP2_ISR_ENABLE_INTERRUPT(cpu_mask));
}

static inline void mvpp2_interrupts_disable(struct mvpp2_port *port)
{
	int cpu, cpu_mask = 0;

	for_each_present_cpu(cpu)
		cpu_mask |= 1 << cpu;
	mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
		    MVPP2_ISR_DISABLE_INTERRUPT(cpu_mask));
}
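/* Example: with four present CPUs the loops above build cpu_mask = 0xf,
 * so a single register write enables (or disables) the port interrupt
 * for every CPU at once.
 */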
/* Mask the current CPU's Rx/Tx interrupts */
static void mvpp2_interrupts_mask(void *arg)
{
	struct mvpp2_port *port = arg;

	mvpp2_percpu_write(port->priv, smp_processor_id(),
			   MVPP2_ISR_RX_TX_MASK_REG(port->id), 0);
}

/* Unmask the current CPU's Rx/Tx interrupts */
static void mvpp2_interrupts_unmask(void *arg)
{
	struct mvpp2_port *port = arg;

	mvpp2_percpu_write(port->priv, smp_processor_id(),
			   MVPP2_ISR_RX_TX_MASK_REG(port->id),
			   (MVPP2_CAUSE_MISC_SUM_MASK |
			    MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK));
}
/* Port configuration routines */

static void mvpp22_port_mii_set(struct mvpp2_port *port)
{
	u32 val;

	/* Only GOP port 0 has an XLG MAC */
	if (port->gop_id == 0) {
		val = readl(port->base + MVPP22_XLG_CTRL3_REG);
		val &= ~MVPP22_XLG_CTRL3_MACMODESELECT_MASK;
		val |= MVPP22_XLG_CTRL3_MACMODESELECT_GMAC;
		writel(val, port->base + MVPP22_XLG_CTRL3_REG);
	}

	val = readl(port->base + MVPP22_GMAC_CTRL_4_REG);
	if (port->phy_interface == PHY_INTERFACE_MODE_RGMII)
		val |= MVPP22_CTRL4_EXT_PIN_GMII_SEL;
	else
		val &= ~MVPP22_CTRL4_EXT_PIN_GMII_SEL;
	val &= ~MVPP22_CTRL4_DP_CLK_SEL;
	val |= MVPP22_CTRL4_SYNC_BYPASS;
	val |= MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
	writel(val, port->base + MVPP22_GMAC_CTRL_4_REG);
}

static void mvpp2_port_mii_set(struct mvpp2_port *port)
{
	u32 val;

	if (port->priv->hw_version == MVPP22)
		mvpp22_port_mii_set(port);

	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);

	switch (port->phy_interface) {
	case PHY_INTERFACE_MODE_SGMII:
		val |= MVPP2_GMAC_INBAND_AN_MASK;
		break;
	case PHY_INTERFACE_MODE_RGMII:
		val |= MVPP2_GMAC_PORT_RGMII_MASK;
	default:
		val &= ~MVPP2_GMAC_PCS_ENABLE_MASK;
	}

	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
}
static void mvpp2_port_fc_adv_enable(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
	val |= MVPP2_GMAC_FC_ADV_EN;
	writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
}

static void mvpp2_port_enable(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	val |= MVPP2_GMAC_PORT_EN_MASK;
	val |= MVPP2_GMAC_MIB_CNTR_EN_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
}

static void mvpp2_port_disable(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	val &= ~(MVPP2_GMAC_PORT_EN_MASK);
	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
}

/* Set IEEE 802.3x Flow Control Xon Packet Transmission Mode */
static void mvpp2_port_periodic_xon_disable(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_1_REG) &
		    ~MVPP2_GMAC_PERIODIC_XON_EN_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
}
/* Configure loopback port */
static void mvpp2_port_loopback_set(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_1_REG);

	if (port->speed == 1000)
		val |= MVPP2_GMAC_GMII_LB_EN_MASK;
	else
		val &= ~MVPP2_GMAC_GMII_LB_EN_MASK;

	if (port->phy_interface == PHY_INTERFACE_MODE_SGMII)
		val |= MVPP2_GMAC_PCS_LB_EN_MASK;
	else
		val &= ~MVPP2_GMAC_PCS_LB_EN_MASK;

	writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
}

static void mvpp2_port_reset(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
		    ~MVPP2_GMAC_PORT_RESET_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);

	while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
	       MVPP2_GMAC_PORT_RESET_MASK)
		continue;
}
/* Change maximum receive size of the port */
static inline void mvpp2_gmac_max_rx_size_set(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	val &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK;
	val |= (((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
		MVPP2_GMAC_MAX_RX_SIZE_OFFS);
	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
}
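/* Example: the MAX_RX_SIZE field is programmed in units of two bytes.
 * Assuming pkt_size = 1520 (a 1518-byte frame plus the 2-byte Marvell
 * header), the value written is (1520 - MVPP2_MH_SIZE) / 2 = 759.
 */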
/* Set defaults to the MVPP2 port */
static void mvpp2_defaults_set(struct mvpp2_port *port)
{
	int tx_port_num, val, queue, ptxq, lrxq;

	if (port->priv->hw_version == MVPP21) {
		/* Configure port to loopback if needed */
		if (port->flags & MVPP2_F_LOOPBACK)
			mvpp2_port_loopback_set(port);

		/* Update TX FIFO MIN Threshold */
		val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
		val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
		/* Min. TX threshold must be less than minimal packet length */
		val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(64 - 4 - 2);
		writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
	}

	/* Disable Legacy WRR, Disable EJP, Release from reset */
	tx_port_num = mvpp2_egress_port(port);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG,
		    tx_port_num);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_CMD_1_REG, 0);

	/* Close bandwidth for all queues */
	for (queue = 0; queue < MVPP2_MAX_TXQ; queue++) {
		ptxq = mvpp2_txq_phys(port->id, queue);
		mvpp2_write(port->priv,
			    MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(ptxq), 0);
	}

	/* Set refill period to 1 usec, refill tokens
	 * and bucket size to maximum
	 */
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PERIOD_REG,
		    port->priv->tclk / USEC_PER_SEC);
	val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_REFILL_REG);
	val &= ~MVPP2_TXP_REFILL_PERIOD_ALL_MASK;
	val |= MVPP2_TXP_REFILL_PERIOD_MASK(1);
	val |= MVPP2_TXP_REFILL_TOKENS_ALL_MASK;
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_REFILL_REG, val);
	val = MVPP2_TXP_TOKEN_SIZE_MAX;
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);

	/* Set MaximumLowLatencyPacketSize value to 256 */
	mvpp2_write(port->priv, MVPP2_RX_CTRL_REG(port->id),
		    MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK |
		    MVPP2_RX_LOW_LATENCY_PKT_SIZE(256));

	/* Enable Rx cache snoop */
	for (lrxq = 0; lrxq < rxq_number; lrxq++) {
		queue = port->rxqs[lrxq]->id;
		val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
		val |= MVPP2_SNOOP_PKT_SIZE_MASK |
		       MVPP2_SNOOP_BUF_HDR_MASK;
		mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
	}

	/* By default, mask all interrupts to all present CPUs */
	mvpp2_interrupts_disable(port);
}
/* Enable/disable receiving packets */
static void mvpp2_ingress_enable(struct mvpp2_port *port)
{
	u32 val;
	int lrxq, queue;

	for (lrxq = 0; lrxq < rxq_number; lrxq++) {
		queue = port->rxqs[lrxq]->id;
		val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
		val &= ~MVPP2_RXQ_DISABLE_MASK;
		mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
	}
}

static void mvpp2_ingress_disable(struct mvpp2_port *port)
{
	u32 val;
	int lrxq, queue;

	for (lrxq = 0; lrxq < rxq_number; lrxq++) {
		queue = port->rxqs[lrxq]->id;
		val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
		val |= MVPP2_RXQ_DISABLE_MASK;
		mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
	}
}
/* Enable transmit via physical egress queue
 * - HW starts taking descriptors from DRAM
 */
static void mvpp2_egress_enable(struct mvpp2_port *port)
{
	u32 qmap;
	int queue;
	int tx_port_num = mvpp2_egress_port(port);

	/* Enable all initialized TXs. */
	qmap = 0;
	for (queue = 0; queue < txq_number; queue++) {
		struct mvpp2_tx_queue *txq = port->txqs[queue];

		if (txq->descs != NULL)
			qmap |= (1 << queue);
	}

	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, qmap);
}

/* Disable transmit via physical egress queue
 * - HW doesn't take descriptors from DRAM
 */
static void mvpp2_egress_disable(struct mvpp2_port *port)
{
	u32 reg_data;
	int delay;
	int tx_port_num = mvpp2_egress_port(port);

	/* Issue stop command for active channels only */
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
	reg_data = (mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG)) &
		    MVPP2_TXP_SCHED_ENQ_MASK;
	if (reg_data != 0)
		mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG,
			    (reg_data << MVPP2_TXP_SCHED_DISQ_OFFSET));

	/* Wait for all Tx activity to terminate. */
	delay = 0;
	do {
		if (delay >= MVPP2_TX_DISABLE_TIMEOUT_MSEC) {
			netdev_warn(port->dev,
				    "Tx stop timed out, status=0x%08x\n",
				    reg_data);
			break;
		}
		mdelay(1);
		delay++;

		/* Check port TX Command register that all
		 * Tx queues are stopped
		 */
		reg_data = mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG);
	} while (reg_data & MVPP2_TXP_SCHED_ENQ_MASK);
}
/* Rx descriptors helper methods */

/* Get number of Rx descriptors occupied by received packets */
static inline int
mvpp2_rxq_received(struct mvpp2_port *port, int rxq_id)
{
	u32 val = mvpp2_read(port->priv, MVPP2_RXQ_STATUS_REG(rxq_id));

	return val & MVPP2_RXQ_OCCUPIED_MASK;
}

/* Update Rx queue status with the number of occupied and available
 * Rx descriptor slots.
 */
static inline void
mvpp2_rxq_status_update(struct mvpp2_port *port, int rxq_id,
			int used_count, int free_count)
{
	/* Decrement the number of used descriptors and increment the
	 * number of free descriptors.
	 */
	u32 val = used_count | (free_count << MVPP2_RXQ_NUM_NEW_OFFSET);

	mvpp2_write(port->priv, MVPP2_RXQ_STATUS_UPDATE_REG(rxq_id), val);
}
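/* Example: after the Rx path has processed 32 descriptors and returned
 * their 32 buffers to the pool, calling this with used_count = 32 and
 * free_count = 32 writes 32 | (32 << MVPP2_RXQ_NUM_NEW_OFFSET), updating
 * both counters with a single register access.
 */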
/* Get pointer to next RX descriptor to be processed by SW */
static inline struct mvpp2_rx_desc *
mvpp2_rxq_next_desc_get(struct mvpp2_rx_queue *rxq)
{
	int rx_desc = rxq->next_desc_to_proc;

	rxq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(rxq, rx_desc);
	prefetch(rxq->descs + rxq->next_desc_to_proc);
	return rxq->descs + rx_desc;
}

/* Set rx queue offset */
static void mvpp2_rxq_offset_set(struct mvpp2_port *port,
				 int prxq, int offset)
{
	u32 val;

	/* Convert offset from bytes to units of 32 bytes */
	offset = offset >> 5;

	val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
	val &= ~MVPP2_RXQ_PACKET_OFFSET_MASK;

	/* Offset is in units of 32 bytes */
	val |= ((offset << MVPP2_RXQ_PACKET_OFFSET_OFFS) &
		MVPP2_RXQ_PACKET_OFFSET_MASK);

	mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
}
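/* Example: the offset is programmed in 32-byte units, so a 64-byte
 * offset (a typical NET_SKB_PAD) is written as 64 >> 5 = 2. An offset
 * that is not a multiple of 32 is silently truncated to the next lower
 * multiple.
 */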
/* Obtain BM cookie information from descriptor */
static u32 mvpp2_bm_cookie_build(struct mvpp2_port *port,
				 struct mvpp2_rx_desc *rx_desc)
{
	int cpu = smp_processor_id();
	int pool;

	pool = (mvpp2_rxdesc_status_get(port, rx_desc) &
		MVPP2_RXD_BM_POOL_ID_MASK) >>
		MVPP2_RXD_BM_POOL_ID_OFFS;

	return ((pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS) |
	       ((cpu & 0xFF) << MVPP2_BM_COOKIE_CPU_OFFS);
}
/* Tx descriptors helper methods */

/* Get pointer to next Tx descriptor to be processed (send) by HW */
static struct mvpp2_tx_desc *
mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq)
{
	int tx_desc = txq->next_desc_to_proc;

	txq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(txq, tx_desc);
	return txq->descs + tx_desc;
}

/* Update HW with number of aggregated Tx descriptors to be sent */
static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending)
{
	/* aggregated access - relevant TXQ number is written in TX desc */
	mvpp2_percpu_write(port->priv, smp_processor_id(),
			   MVPP2_AGGR_TXQ_UPDATE_REG, pending);
}

/* Check if there are enough free descriptors in aggregated txq.
 * If not, update the number of occupied descriptors and repeat the check.
 */
static int mvpp2_aggr_desc_num_check(struct mvpp2 *priv,
				     struct mvpp2_tx_queue *aggr_txq, int num)
{
	if ((aggr_txq->count + num) > aggr_txq->size) {
		/* Update number of occupied aggregated Tx descriptors */
		int cpu = smp_processor_id();
		u32 val = mvpp2_read(priv, MVPP2_AGGR_TXQ_STATUS_REG(cpu));

		aggr_txq->count = val & MVPP2_AGGR_TXQ_PENDING_MASK;
	}

	if ((aggr_txq->count + num) > aggr_txq->size)
		return -ENOMEM;

	return 0;
}
/* Reserved Tx descriptors allocation request */
static int mvpp2_txq_alloc_reserved_desc(struct mvpp2 *priv,
					 struct mvpp2_tx_queue *txq, int num)
{
	u32 val;
	int cpu = smp_processor_id();

	val = (txq->id << MVPP2_TXQ_RSVD_REQ_Q_OFFSET) | num;
	mvpp2_percpu_write(priv, cpu, MVPP2_TXQ_RSVD_REQ_REG, val);

	val = mvpp2_percpu_read(priv, cpu, MVPP2_TXQ_RSVD_RSLT_REG);

	return val & MVPP2_TXQ_RSVD_RSLT_MASK;
}
/* Check if there are enough reserved descriptors for transmission.
 * If not, request chunk of reserved descriptors and check again.
 */
static int mvpp2_txq_reserved_desc_num_proc(struct mvpp2 *priv,
					    struct mvpp2_tx_queue *txq,
					    struct mvpp2_txq_pcpu *txq_pcpu,
					    int num)
{
	int req, cpu, desc_count;

	if (txq_pcpu->reserved_num >= num)
		return 0;

	/* Not enough descriptors reserved! Update the reserved descriptor
	 * count and check again.
	 */

	desc_count = 0;
	/* Compute total of used descriptors */
	for_each_present_cpu(cpu) {
		struct mvpp2_txq_pcpu *txq_pcpu_aux;

		txq_pcpu_aux = per_cpu_ptr(txq->pcpu, cpu);
		desc_count += txq_pcpu_aux->count;
		desc_count += txq_pcpu_aux->reserved_num;
	}

	req = max(MVPP2_CPU_DESC_CHUNK, num - txq_pcpu->reserved_num);
	desc_count += req;

	if (desc_count >
	    (txq->size - (num_present_cpus() * MVPP2_CPU_DESC_CHUNK)))
		return -ENOMEM;

	txq_pcpu->reserved_num += mvpp2_txq_alloc_reserved_desc(priv, txq, req);

	/* OK, the descriptor count has been updated: check again. */
	if (txq_pcpu->reserved_num < num)
		return -ENOMEM;
	return 0;
}
/* Release the last allocated Tx descriptor. Useful to handle DMA
 * mapping failures in the Tx path.
 */
static void mvpp2_txq_desc_put(struct mvpp2_tx_queue *txq)
{
	if (txq->next_desc_to_proc == 0)
		txq->next_desc_to_proc = txq->last_desc - 1;
	else
		txq->next_desc_to_proc--;
}
/* Set Tx descriptors fields relevant for CSUM calculation */
static u32 mvpp2_txq_desc_csum(int l3_offs, int l3_proto,
			       int ip_hdr_len, int l4_proto)
{
	u32 command;

	/* fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk,
	 * G_L4_chk, L4_type - required only for checksum calculation
	 */
	command = (l3_offs << MVPP2_TXD_L3_OFF_SHIFT);
	command |= (ip_hdr_len << MVPP2_TXD_IP_HLEN_SHIFT);
	command |= MVPP2_TXD_IP_CSUM_DISABLE;

	if (l3_proto == swab16(ETH_P_IP)) {
		command &= ~MVPP2_TXD_IP_CSUM_DISABLE;	/* enable IPv4 csum */
		command &= ~MVPP2_TXD_L3_IP6;		/* enable IPv4 */
	} else {
		command |= MVPP2_TXD_L3_IP6;		/* enable IPv6 */
	}

	if (l4_proto == IPPROTO_TCP) {
		command &= ~MVPP2_TXD_L4_UDP;		/* enable TCP */
		command &= ~MVPP2_TXD_L4_CSUM_FRAG;	/* generate L4 csum */
	} else if (l4_proto == IPPROTO_UDP) {
		command |= MVPP2_TXD_L4_UDP;		/* enable UDP */
		command &= ~MVPP2_TXD_L4_CSUM_FRAG;	/* generate L4 csum */
	} else {
		command |= MVPP2_TXD_L4_CSUM_NOT;
	}

	return command;
}
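/* Example: for a TCP segment in an IPv4 packet behind a plain Ethernet
 * header, the caller passes l3_offs = 14, l3_proto matching ETH_P_IP in
 * network byte order, ip_hdr_len = 5 (32-bit words for a 20-byte IP
 * header) and l4_proto = IPPROTO_TCP; both checksum-disable bits are then
 * cleared, so the hardware generates the IPv4 and TCP checksums.
 */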
/* Get number of sent descriptors and decrement counter.
 * The number of sent descriptors is returned.
 * Per-CPU access.
 */
static inline int mvpp2_txq_sent_desc_proc(struct mvpp2_port *port,
					   struct mvpp2_tx_queue *txq)
{
	u32 val;

	/* Reading status reg resets transmitted descriptor counter */
	val = mvpp2_percpu_read(port->priv, smp_processor_id(),
				MVPP2_TXQ_SENT_REG(txq->id));

	return (val & MVPP2_TRANSMITTED_COUNT_MASK) >>
		MVPP2_TRANSMITTED_COUNT_OFFSET;
}

static void mvpp2_txq_sent_counter_clear(void *arg)
{
	struct mvpp2_port *port = arg;
	int queue;

	for (queue = 0; queue < txq_number; queue++) {
		int id = port->txqs[queue]->id;

		mvpp2_percpu_read(port->priv, smp_processor_id(),
				  MVPP2_TXQ_SENT_REG(id));
	}
}
/* Set max sizes for Tx queues */
static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port)
{
	u32 val, size, mtu;
	int txq, tx_port_num;

	mtu = port->pkt_size * 8;
	if (mtu > MVPP2_TXP_MTU_MAX)
		mtu = MVPP2_TXP_MTU_MAX;

	/* WA for wrong Token bucket update: Set MTU value = 3*real MTU value */
	mtu = 3 * mtu;

	/* Indirect access to registers */
	tx_port_num = mvpp2_egress_port(port);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);

	/* Set MTU */
	val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_MTU_REG);
	val &= ~MVPP2_TXP_MTU_MAX;
	val |= mtu;
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_MTU_REG, val);

	/* TXP token size and all TXQs token size must be larger than MTU */
	val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG);
	size = val & MVPP2_TXP_TOKEN_SIZE_MAX;
	if (size < mtu) {
		size = mtu;
		val &= ~MVPP2_TXP_TOKEN_SIZE_MAX;
		val |= size;
		mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
	}

	for (txq = 0; txq < txq_number; txq++) {
		val = mvpp2_read(port->priv,
				 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq));
		size = val & MVPP2_TXQ_TOKEN_SIZE_MAX;

		if (size < mtu) {
			size = mtu;
			val &= ~MVPP2_TXQ_TOKEN_SIZE_MAX;
			val |= size;
			mvpp2_write(port->priv,
				    MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq),
				    val);
		}
	}
}
/* Set the number of packets that will be received before an Rx interrupt
 * is generated by the HW.
 */
static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port,
				   struct mvpp2_rx_queue *rxq)
{
	int cpu = smp_processor_id();

	if (rxq->pkts_coal > MVPP2_OCCUPIED_THRESH_MASK)
		rxq->pkts_coal = MVPP2_OCCUPIED_THRESH_MASK;

	mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id);
	mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_THRESH_REG,
			   rxq->pkts_coal);
}
static u32 mvpp2_usec_to_cycles(u32 usec, unsigned long clk_hz)
{
	u64 tmp = (u64)clk_hz * usec;

	do_div(tmp, USEC_PER_SEC);

	return tmp > U32_MAX ? U32_MAX : tmp;
}

static u32 mvpp2_cycles_to_usec(u32 cycles, unsigned long clk_hz)
{
	u64 tmp = (u64)cycles * USEC_PER_SEC;

	do_div(tmp, clk_hz);

	return tmp > U32_MAX ? U32_MAX : tmp;
}
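/* Worked example (with an illustrative 250 MHz tclk): a 32 usec
 * coalescing delay maps to 250000000 * 32 / USEC_PER_SEC = 8000 clock
 * cycles, and mvpp2_cycles_to_usec(8000, 250000000) maps back to 32.
 * Results that would not fit in a u32 are clamped to U32_MAX.
 */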
/* Set the time delay in usec before Rx interrupt */
static void mvpp2_rx_time_coal_set(struct mvpp2_port *port,
				   struct mvpp2_rx_queue *rxq)
{
	unsigned long freq = port->priv->tclk;
	u32 val = mvpp2_usec_to_cycles(rxq->time_coal, freq);

	if (val > MVPP2_MAX_ISR_RX_THRESHOLD) {
		rxq->time_coal =
			mvpp2_cycles_to_usec(MVPP2_MAX_ISR_RX_THRESHOLD, freq);

		/* re-evaluate to get actual register value */
		val = mvpp2_usec_to_cycles(rxq->time_coal, freq);
	}

	mvpp2_write(port->priv, MVPP2_ISR_RX_THRESHOLD_REG(rxq->id), val);
}
/* Free Tx queue skbuffs */
static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
				struct mvpp2_tx_queue *txq,
				struct mvpp2_txq_pcpu *txq_pcpu, int num)
{
	int i;

	for (i = 0; i < num; i++) {
		struct mvpp2_txq_pcpu_buf *tx_buf =
			txq_pcpu->buffs + txq_pcpu->txq_get_index;

		dma_unmap_single(port->dev->dev.parent, tx_buf->dma,
				 tx_buf->size, DMA_TO_DEVICE);
		if (tx_buf->skb)
			dev_kfree_skb_any(tx_buf->skb);

		mvpp2_txq_inc_get(txq_pcpu);
	}
}
static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port,
							u32 cause)
{
	int queue = fls(cause) - 1;

	return port->rxqs[queue];
}

static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port,
							u32 cause)
{
	int queue = fls(cause) - 1;

	return port->txqs[queue];
}
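/* Example: these helpers service the highest-numbered pending queue
 * first. For cause = 0b0110 (queues 1 and 2 pending), fls(cause) - 1 = 2,
 * so queue 2 is returned; the caller clears bit 2 and iterates.
 */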
/* Handle end of transmission */
static void mvpp2_txq_done(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
			   struct mvpp2_txq_pcpu *txq_pcpu)
{
	struct netdev_queue *nq = netdev_get_tx_queue(port->dev, txq->log_id);
	int tx_done;

	if (txq_pcpu->cpu != smp_processor_id())
		netdev_err(port->dev, "wrong cpu on the end of Tx processing\n");

	tx_done = mvpp2_txq_sent_desc_proc(port, txq);
	if (!tx_done)
		return;
	mvpp2_txq_bufs_free(port, txq, txq_pcpu, tx_done);

	txq_pcpu->count -= tx_done;

	if (netif_tx_queue_stopped(nq))
		if (txq_pcpu->size - txq_pcpu->count >= MAX_SKB_FRAGS + 1)
			netif_tx_wake_queue(nq);
}

static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause)
{
	struct mvpp2_tx_queue *txq;
	struct mvpp2_txq_pcpu *txq_pcpu;
	unsigned int tx_todo = 0;

	while (cause) {
		txq = mvpp2_get_tx_queue(port, cause);
		if (!txq)
			break;

		txq_pcpu = this_cpu_ptr(txq->pcpu);

		if (txq_pcpu->count) {
			mvpp2_txq_done(port, txq, txq_pcpu);
			tx_todo += txq_pcpu->count;
		}

		cause &= ~(1 << txq->log_id);
	}
	return tx_todo;
}
/* Rx/Tx queue initialization/cleanup methods */

/* Allocate and initialize descriptors for aggr TXQ */
static int mvpp2_aggr_txq_init(struct platform_device *pdev,
			       struct mvpp2_tx_queue *aggr_txq,
			       int desc_num, int cpu,
			       struct mvpp2 *priv)
{
	u32 txq_dma;

	/* Allocate memory for TX descriptors */
	aggr_txq->descs = dma_alloc_coherent(&pdev->dev,
				desc_num * MVPP2_DESC_ALIGNED_SIZE,
				&aggr_txq->descs_dma, GFP_KERNEL);
	if (!aggr_txq->descs)
		return -ENOMEM;

	aggr_txq->last_desc = aggr_txq->size - 1;

	/* Aggr TXQ no reset WA */
	aggr_txq->next_desc_to_proc = mvpp2_read(priv,
						 MVPP2_AGGR_TXQ_INDEX_REG(cpu));

	/* Set Tx descriptors queue starting address - indirect access */
	if (priv->hw_version == MVPP21)
		txq_dma = aggr_txq->descs_dma;
	else
		txq_dma = aggr_txq->descs_dma >>
			MVPP22_AGGR_TXQ_DESC_ADDR_OFFS;

	mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu), txq_dma);
	mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu), desc_num);

	return 0;
}
/* Create a specified Rx queue */
static int mvpp2_rxq_init(struct mvpp2_port *port,
			  struct mvpp2_rx_queue *rxq)
{
	u32 rxq_dma;
	int cpu;

	rxq->size = port->rx_ring_size;

	/* Allocate memory for RX descriptors */
	rxq->descs = dma_alloc_coherent(port->dev->dev.parent,
					rxq->size * MVPP2_DESC_ALIGNED_SIZE,
					&rxq->descs_dma, GFP_KERNEL);
	if (!rxq->descs)
		return -ENOMEM;

	rxq->last_desc = rxq->size - 1;

	/* Zero occupied and non-occupied counters - direct access */
	mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);

	/* Set Rx descriptors queue starting address - indirect access */
	cpu = smp_processor_id();
	mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id);
	if (port->priv->hw_version == MVPP21)
		rxq_dma = rxq->descs_dma;
	else
		rxq_dma = rxq->descs_dma >> MVPP22_DESC_ADDR_OFFS;
	mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, rxq_dma);
	mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, rxq->size);
	mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_INDEX_REG, 0);

	/* Set the packet offset within the receive buffer */
	mvpp2_rxq_offset_set(port, rxq->id, NET_SKB_PAD);

	/* Set coalescing pkts and time */
	mvpp2_rx_pkts_coal_set(port, rxq);
	mvpp2_rx_time_coal_set(port, rxq);

	/* Add number of descriptors ready for receiving packets */
	mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size);

	return 0;
}
/* Push packets received by the RXQ back to the BM pool */
static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port,
				struct mvpp2_rx_queue *rxq)
{
	int rx_received, i;

	rx_received = mvpp2_rxq_received(port, rxq->id);
	if (!rx_received)
		return;

	for (i = 0; i < rx_received; i++) {
		struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
		u32 bm = mvpp2_bm_cookie_build(port, rx_desc);

		mvpp2_pool_refill(port, bm,
				  mvpp2_rxdesc_dma_addr_get(port, rx_desc),
				  mvpp2_rxdesc_cookie_get(port, rx_desc));
	}
	mvpp2_rxq_status_update(port, rxq->id, rx_received, rx_received);
}

/* Cleanup Rx queue */
static void mvpp2_rxq_deinit(struct mvpp2_port *port,
			     struct mvpp2_rx_queue *rxq)
{
	int cpu;

	mvpp2_rxq_drop_pkts(port, rxq);

	if (rxq->descs)
		dma_free_coherent(port->dev->dev.parent,
				  rxq->size * MVPP2_DESC_ALIGNED_SIZE,
				  rxq->descs,
				  rxq->descs_dma);

	rxq->descs = NULL;
	rxq->last_desc = 0;
	rxq->next_desc_to_proc = 0;
	rxq->descs_dma = 0;

	/* Clear Rx descriptors queue starting address and size;
	 * free descriptor number
	 */
	mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
	cpu = smp_processor_id();
	mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id);
	mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, 0);
	mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, 0);
}
/* Create and initialize a Tx queue */
static int mvpp2_txq_init(struct mvpp2_port *port,
			  struct mvpp2_tx_queue *txq)
{
	u32 val;
	int cpu, desc, desc_per_txq, tx_port_num;
	struct mvpp2_txq_pcpu *txq_pcpu;

	txq->size = port->tx_ring_size;

	/* Allocate memory for Tx descriptors */
	txq->descs = dma_alloc_coherent(port->dev->dev.parent,
					txq->size * MVPP2_DESC_ALIGNED_SIZE,
					&txq->descs_dma, GFP_KERNEL);
	if (!txq->descs)
		return -ENOMEM;

	txq->last_desc = txq->size - 1;

	/* Set Tx descriptors queue starting address - indirect access */
	cpu = smp_processor_id();
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_ADDR_REG,
			   txq->descs_dma);
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_SIZE_REG,
			   txq->size & MVPP2_TXQ_DESC_SIZE_MASK);
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_INDEX_REG, 0);
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_RSVD_CLR_REG,
			   txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET);
	val = mvpp2_percpu_read(port->priv, cpu, MVPP2_TXQ_PENDING_REG);
	val &= ~MVPP2_TXQ_PENDING_MASK;
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PENDING_REG, val);

	/* Calculate base address in prefetch buffer. We reserve 16 descriptors
	 * for each existing TXQ.
	 * TCONTS for PON port must be continuous from 0 to MVPP2_MAX_TCONT
	 * GBE ports assumed to be continuous from 0 to MVPP2_MAX_PORTS
	 */
	desc_per_txq = 16;
	desc = (port->id * MVPP2_MAX_TXQ * desc_per_txq) +
	       (txq->log_id * desc_per_txq);

	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG,
			   MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 |
			   MVPP2_PREF_BUF_THRESH(desc_per_txq / 2));

	/* WRR / EJP configuration - indirect access */
	tx_port_num = mvpp2_egress_port(port);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);

	val = mvpp2_read(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id));
	val &= ~MVPP2_TXQ_REFILL_PERIOD_ALL_MASK;
	val |= MVPP2_TXQ_REFILL_PERIOD_MASK(1);
	val |= MVPP2_TXQ_REFILL_TOKENS_ALL_MASK;
	mvpp2_write(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id), val);

	val = MVPP2_TXQ_TOKEN_SIZE_MAX;
	mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq->log_id),
		    val);

	for_each_present_cpu(cpu) {
		txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
		txq_pcpu->size = txq->size;
		txq_pcpu->buffs = kmalloc(txq_pcpu->size *
					  sizeof(struct mvpp2_txq_pcpu_buf),
					  GFP_KERNEL);
		if (!txq_pcpu->buffs)
			goto error;

		txq_pcpu->count = 0;
		txq_pcpu->reserved_num = 0;
		txq_pcpu->txq_put_index = 0;
		txq_pcpu->txq_get_index = 0;
	}

	return 0;

error:
	for_each_present_cpu(cpu) {
		txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
		kfree(txq_pcpu->buffs);
	}

	dma_free_coherent(port->dev->dev.parent,
			  txq->size * MVPP2_DESC_ALIGNED_SIZE,
			  txq->descs, txq->descs_dma);

	return -ENOMEM;
}
/* Free allocated TXQ resources */
static void mvpp2_txq_deinit(struct mvpp2_port *port,
			     struct mvpp2_tx_queue *txq)
{
	struct mvpp2_txq_pcpu *txq_pcpu;
	int cpu;

	for_each_present_cpu(cpu) {
		txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
		kfree(txq_pcpu->buffs);
	}

	if (txq->descs)
		dma_free_coherent(port->dev->dev.parent,
				  txq->size * MVPP2_DESC_ALIGNED_SIZE,
				  txq->descs, txq->descs_dma);

	txq->descs = NULL;
	txq->last_desc = 0;
	txq->next_desc_to_proc = 0;
	txq->descs_dma = 0;

	/* Set minimum bandwidth for disabled TXQs */
	mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->id), 0);

	/* Set Tx descriptors queue starting address and size */
	cpu = smp_processor_id();
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_ADDR_REG, 0);
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_SIZE_REG, 0);
}
/* Cleanup Tx ports */
static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq)
{
	struct mvpp2_txq_pcpu *txq_pcpu;
	int delay, pending, cpu;
	u32 val;

	cpu = smp_processor_id();
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
	val = mvpp2_percpu_read(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG);
	val |= MVPP2_TXQ_DRAIN_EN_MASK;
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, val);

	/* The napi queue has been stopped so wait for all packets
	 * to be transmitted.
	 */
	delay = 0;
	do {
		if (delay >= MVPP2_TX_PENDING_TIMEOUT_MSEC) {
			netdev_warn(port->dev,
				    "port %d: cleaning queue %d timed out\n",
				    port->id, txq->log_id);
			break;
		}
		mdelay(1);
		delay++;

		pending = mvpp2_percpu_read(port->priv, cpu,
					    MVPP2_TXQ_PENDING_REG);
		pending &= MVPP2_TXQ_PENDING_MASK;
	} while (pending);

	val &= ~MVPP2_TXQ_DRAIN_EN_MASK;
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, val);

	for_each_present_cpu(cpu) {
		txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);

		/* Release all packets */
		mvpp2_txq_bufs_free(port, txq, txq_pcpu, txq_pcpu->count);

		/* Reset queue */
		txq_pcpu->count = 0;
		txq_pcpu->txq_put_index = 0;
		txq_pcpu->txq_get_index = 0;
	}
}
/* Cleanup all Tx queues */
static void mvpp2_cleanup_txqs(struct mvpp2_port *port)
{
	struct mvpp2_tx_queue *txq;
	int queue;
	u32 val;

	val = mvpp2_read(port->priv, MVPP2_TX_PORT_FLUSH_REG);

	/* Reset Tx ports and delete Tx queues */
	val |= MVPP2_TX_PORT_FLUSH_MASK(port->id);
	mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);

	for (queue = 0; queue < txq_number; queue++) {
		txq = port->txqs[queue];
		mvpp2_txq_clean(port, txq);
		mvpp2_txq_deinit(port, txq);
	}

	on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);

	val &= ~MVPP2_TX_PORT_FLUSH_MASK(port->id);
	mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);
}
/* Cleanup all Rx queues */
static void mvpp2_cleanup_rxqs(struct mvpp2_port *port)
{
	int queue;

	for (queue = 0; queue < rxq_number; queue++)
		mvpp2_rxq_deinit(port, port->rxqs[queue]);
}

/* Init all Rx queues for port */
static int mvpp2_setup_rxqs(struct mvpp2_port *port)
{
	int queue, err;

	for (queue = 0; queue < rxq_number; queue++) {
		err = mvpp2_rxq_init(port, port->rxqs[queue]);
		if (err)
			goto err_cleanup;
	}
	return 0;

err_cleanup:
	mvpp2_cleanup_rxqs(port);
	return err;
}

/* Init all Tx queues for port */
static int mvpp2_setup_txqs(struct mvpp2_port *port)
{
	struct mvpp2_tx_queue *txq;
	int queue, err;

	for (queue = 0; queue < txq_number; queue++) {
		txq = port->txqs[queue];
		err = mvpp2_txq_init(port, txq);
		if (err)
			goto err_cleanup;
	}

	on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);
	return 0;

err_cleanup:
	mvpp2_cleanup_txqs(port);
	return err;
}
/* The callback for per-port interrupt */
static irqreturn_t mvpp2_isr(int irq, void *dev_id)
{
	struct mvpp2_port *port = (struct mvpp2_port *)dev_id;

	mvpp2_interrupts_disable(port);

	napi_schedule(&port->napi);

	return IRQ_HANDLED;
}
static void mvpp2_link_event(struct net_device *dev)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;
	int status_change = 0;
	u32 val;

	if (phydev->link) {
		if ((port->speed != phydev->speed) ||
		    (port->duplex != phydev->duplex)) {
			val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
			val &= ~(MVPP2_GMAC_CONFIG_MII_SPEED |
				 MVPP2_GMAC_CONFIG_GMII_SPEED |
				 MVPP2_GMAC_CONFIG_FULL_DUPLEX |
				 MVPP2_GMAC_AN_SPEED_EN |
				 MVPP2_GMAC_AN_DUPLEX_EN);

			if (phydev->duplex)
				val |= MVPP2_GMAC_CONFIG_FULL_DUPLEX;

			if (phydev->speed == SPEED_1000)
				val |= MVPP2_GMAC_CONFIG_GMII_SPEED;
			else if (phydev->speed == SPEED_100)
				val |= MVPP2_GMAC_CONFIG_MII_SPEED;

			writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);

			port->duplex = phydev->duplex;
			port->speed = phydev->speed;
		}
	}

	if (phydev->link != port->link) {
		if (!phydev->link) {
			port->duplex = -1;
			port->speed = 0;
		}

		port->link = phydev->link;
		status_change = 1;
	}

	if (status_change) {
		if (phydev->link) {
			val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
			val |= (MVPP2_GMAC_FORCE_LINK_PASS |
				MVPP2_GMAC_FORCE_LINK_DOWN);
			writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
			mvpp2_egress_enable(port);
			mvpp2_ingress_enable(port);
		} else {
			mvpp2_ingress_disable(port);
			mvpp2_egress_disable(port);
		}
		phy_print_status(phydev);
	}
}
static void mvpp2_timer_set(struct mvpp2_port_pcpu *port_pcpu)
{
	ktime_t interval;

	if (!port_pcpu->timer_scheduled) {
		port_pcpu->timer_scheduled = true;
		interval = MVPP2_TXDONE_HRTIMER_PERIOD_NS;
		hrtimer_start(&port_pcpu->tx_done_timer, interval,
			      HRTIMER_MODE_REL_PINNED);
	}
}

static void mvpp2_tx_proc_cb(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu);
	unsigned int tx_todo, cause;

	if (!netif_running(dev))
		return;
	port_pcpu->timer_scheduled = false;

	/* Process all the Tx queues */
	cause = (1 << txq_number) - 1;
	tx_todo = mvpp2_tx_done(port, cause);

	/* Set the timer in case not all the packets were processed */
	if (tx_todo)
		mvpp2_timer_set(port_pcpu);
}

static enum hrtimer_restart mvpp2_hr_timer_cb(struct hrtimer *timer)
{
	struct mvpp2_port_pcpu *port_pcpu = container_of(timer,
							 struct mvpp2_port_pcpu,
							 tx_done_timer);

	tasklet_schedule(&port_pcpu->tx_done_tasklet);

	return HRTIMER_NORESTART;
}
/* Main RX/TX processing routines */

/* Display more error info */
static void mvpp2_rx_error(struct mvpp2_port *port,
			   struct mvpp2_rx_desc *rx_desc)
{
	u32 status = mvpp2_rxdesc_status_get(port, rx_desc);
	size_t sz = mvpp2_rxdesc_size_get(port, rx_desc);

	switch (status & MVPP2_RXD_ERR_CODE_MASK) {
	case MVPP2_RXD_ERR_CRC:
		netdev_err(port->dev, "bad rx status %08x (crc error), size=%zu\n",
			   status, sz);
		break;
	case MVPP2_RXD_ERR_OVERRUN:
		netdev_err(port->dev, "bad rx status %08x (overrun error), size=%zu\n",
			   status, sz);
		break;
	case MVPP2_RXD_ERR_RESOURCE:
		netdev_err(port->dev, "bad rx status %08x (resource error), size=%zu\n",
			   status, sz);
		break;
	}
}
/* Handle RX checksum offload */
static void mvpp2_rx_csum(struct mvpp2_port *port, u32 status,
			  struct sk_buff *skb)
{
	if (((status & MVPP2_RXD_L3_IP4) &&
	     !(status & MVPP2_RXD_IP4_HEADER_ERR)) ||
	    (status & MVPP2_RXD_L3_IP6))
		if (((status & MVPP2_RXD_L4_UDP) ||
		     (status & MVPP2_RXD_L4_TCP)) &&
		     (status & MVPP2_RXD_L4_CSUM_OK)) {
			skb->csum = 0;
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			return;
		}

	skb->ip_summed = CHECKSUM_NONE;
}
/* Reuse skb if possible, or allocate a new skb and add it to BM pool */
static int mvpp2_rx_refill(struct mvpp2_port *port,
			   struct mvpp2_bm_pool *bm_pool, u32 bm)
{
	dma_addr_t dma_addr;
	phys_addr_t phys_addr;
	void *buf;

	/* No recycle or too many buffers are in use, so allocate a new skb */
	buf = mvpp2_buf_alloc(port, bm_pool, &dma_addr, &phys_addr,
			      GFP_ATOMIC);
	if (!buf)
		return -ENOMEM;

	mvpp2_pool_refill(port, bm, dma_addr, phys_addr);

	return 0;
}
/* Handle tx checksum */
static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		int ip_hdr_len = 0;
		u8 l4_proto;

		if (skb->protocol == htons(ETH_P_IP)) {
			struct iphdr *ip4h = ip_hdr(skb);

			/* Calculate IPv4 checksum and L4 checksum */
			ip_hdr_len = ip4h->ihl;
			l4_proto = ip4h->protocol;
		} else if (skb->protocol == htons(ETH_P_IPV6)) {
			struct ipv6hdr *ip6h = ipv6_hdr(skb);

			/* Read l4_protocol from one of IPv6 extra headers */
			if (skb_network_header_len(skb) > 0)
				ip_hdr_len = (skb_network_header_len(skb) >> 2);
			l4_proto = ip6h->nexthdr;
		} else {
			return MVPP2_TXD_L4_CSUM_NOT;
		}

		return mvpp2_txq_desc_csum(skb_network_offset(skb),
					   skb->protocol, ip_hdr_len, l4_proto);
	}

	return MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE;
}
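/* Example: for a CHECKSUM_PARTIAL TCP/IPv4 skb with a standard 20-byte IP
 * header, ip4h->ihl is 5, so mvpp2_txq_desc_csum() is called with
 * ip_hdr_len = 5 and l4_proto = IPPROTO_TCP. Note the units match across
 * both branches: ihl is already in 32-bit words, while the IPv6 path
 * converts the byte length with ">> 2".
 */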
/* Main rx processing */
static int mvpp2_rx(struct mvpp2_port *port, int rx_todo,
		    struct mvpp2_rx_queue *rxq)
{
	struct net_device *dev = port->dev;
	int rx_received;
	int rx_done = 0;
	u32 rcvd_pkts = 0;
	u32 rcvd_bytes = 0;

	/* Get number of received packets and clamp the to-do */
	rx_received = mvpp2_rxq_received(port, rxq->id);
	if (rx_todo > rx_received)
		rx_todo = rx_received;

	while (rx_done < rx_todo) {
		struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
		struct mvpp2_bm_pool *bm_pool;
		struct sk_buff *skb;
		unsigned int frag_size;
		dma_addr_t dma_addr;
		phys_addr_t phys_addr;
		u32 bm, rx_status;
		int pool, rx_bytes, err;
		void *data;

		rx_done++;
		rx_status = mvpp2_rxdesc_status_get(port, rx_desc);
		rx_bytes = mvpp2_rxdesc_size_get(port, rx_desc);
		rx_bytes -= MVPP2_MH_SIZE;
		dma_addr = mvpp2_rxdesc_dma_addr_get(port, rx_desc);
		phys_addr = mvpp2_rxdesc_cookie_get(port, rx_desc);
		data = (void *)phys_to_virt(phys_addr);

		bm = mvpp2_bm_cookie_build(port, rx_desc);
		pool = mvpp2_bm_cookie_pool_get(bm);
		bm_pool = &port->priv->bm_pools[pool];

		/* In case of an error, release the requested buffer pointer
		 * to the Buffer Manager. This request process is controlled
		 * by the hardware, and the information about the buffer is
		 * carried in the RX descriptor.
		 */
		if (rx_status & MVPP2_RXD_ERR_SUMMARY) {
err_drop_frame:
			dev->stats.rx_errors++;
			mvpp2_rx_error(port, rx_desc);
			/* Return the buffer to the pool */
			mvpp2_pool_refill(port, bm, dma_addr, phys_addr);
			continue;
		}

		if (bm_pool->frag_size > PAGE_SIZE)
			frag_size = 0;
		else
			frag_size = bm_pool->frag_size;

		skb = build_skb(data, frag_size);
		if (!skb) {
			netdev_warn(port->dev, "skb build failed\n");
			goto err_drop_frame;
		}

		err = mvpp2_rx_refill(port, bm_pool, bm);
		if (err) {
			netdev_err(port->dev, "failed to refill BM pools\n");
			goto err_drop_frame;
		}

		dma_unmap_single(dev->dev.parent, dma_addr,
				 bm_pool->buf_size, DMA_FROM_DEVICE);

		rcvd_pkts++;
		rcvd_bytes += rx_bytes;

		skb_reserve(skb, MVPP2_MH_SIZE + NET_SKB_PAD);
		skb_put(skb, rx_bytes);
		skb->protocol = eth_type_trans(skb, dev);
		mvpp2_rx_csum(port, rx_status, skb);

		napi_gro_receive(&port->napi, skb);
	}

	if (rcvd_pkts) {
		struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);

		u64_stats_update_begin(&stats->syncp);
		stats->rx_packets += rcvd_pkts;
		stats->rx_bytes += rcvd_bytes;
		u64_stats_update_end(&stats->syncp);
	}

	/* Update Rx queue management counters */
	wmb();
	mvpp2_rxq_status_update(port, rxq->id, rx_done, rx_done);

	return rx_todo;
}
static inline void
tx_desc_unmap_put(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
		  struct mvpp2_tx_desc *desc)
{
	dma_addr_t buf_dma_addr =
		mvpp2_txdesc_dma_addr_get(port, desc);
	size_t buf_sz =
		mvpp2_txdesc_size_get(port, desc);

	dma_unmap_single(port->dev->dev.parent, buf_dma_addr,
			 buf_sz, DMA_TO_DEVICE);
	mvpp2_txq_desc_put(txq);
}
/* Handle tx fragmentation processing */
static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb,
				 struct mvpp2_tx_queue *aggr_txq,
				 struct mvpp2_tx_queue *txq)
{
	struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu);
	struct mvpp2_tx_desc *tx_desc;
	int i;
	dma_addr_t buf_dma_addr;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		void *addr = page_address(frag->page.p) + frag->page_offset;

		tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
		mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
		mvpp2_txdesc_size_set(port, tx_desc, frag->size);

		buf_dma_addr = dma_map_single(port->dev->dev.parent, addr,
					      frag->size, DMA_TO_DEVICE);
		if (dma_mapping_error(port->dev->dev.parent, buf_dma_addr)) {
			mvpp2_txq_desc_put(txq);
			goto error;
		}

		mvpp2_txdesc_offset_set(port, tx_desc,
					buf_dma_addr & MVPP2_TX_DESC_ALIGN);
		mvpp2_txdesc_dma_addr_set(port, tx_desc,
					  buf_dma_addr & ~MVPP2_TX_DESC_ALIGN);

		if (i == (skb_shinfo(skb)->nr_frags - 1)) {
			/* Last descriptor */
			mvpp2_txdesc_cmd_set(port, tx_desc,
					     MVPP2_TXD_L_DESC);
			mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc);
		} else {
			/* Descriptor in the middle: Not First, Not Last */
			mvpp2_txdesc_cmd_set(port, tx_desc, 0);
			mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc);
		}
	}

	return 0;

error:
	/* Release all descriptors that were used to map fragments of
	 * this packet, as well as the corresponding DMA mappings
	 */
	for (i = i - 1; i >= 0; i--) {
		tx_desc = txq->descs + i;
		tx_desc_unmap_put(port, txq, tx_desc);
	}

	return -ENOMEM;
}
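/* Example of the offset/address split used above: descriptors carry the
 * buffer address and the intra-buffer offset in separate fields. Assuming
 * MVPP2_TX_DESC_ALIGN is a low-bits mask (e.g. 0x3f for 64-byte
 * alignment), a mapping at 0x10000012 is programmed as offset 0x12 plus
 * the aligned base 0x10000000.
 */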
/* Main tx processing */
static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2_tx_queue *txq, *aggr_txq;
	struct mvpp2_txq_pcpu *txq_pcpu;
	struct mvpp2_tx_desc *tx_desc;
	dma_addr_t buf_dma_addr;
	int frags = 0;
	u16 txq_id;
	u32 tx_cmd;

	txq_id = skb_get_queue_mapping(skb);
	txq = port->txqs[txq_id];
	txq_pcpu = this_cpu_ptr(txq->pcpu);
	aggr_txq = &port->priv->aggr_txqs[smp_processor_id()];

	frags = skb_shinfo(skb)->nr_frags + 1;

	/* Check number of available descriptors */
	if (mvpp2_aggr_desc_num_check(port->priv, aggr_txq, frags) ||
	    mvpp2_txq_reserved_desc_num_proc(port->priv, txq,
					     txq_pcpu, frags)) {
		frags = 0;
		goto out;
	}

	/* Get a descriptor for the first part of the packet */
	tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
	mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
	mvpp2_txdesc_size_set(port, tx_desc, skb_headlen(skb));

	buf_dma_addr = dma_map_single(dev->dev.parent, skb->data,
				      skb_headlen(skb), DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dev.parent, buf_dma_addr))) {
		mvpp2_txq_desc_put(txq);
		frags = 0;
		goto out;
	}

	mvpp2_txdesc_offset_set(port, tx_desc,
				buf_dma_addr & MVPP2_TX_DESC_ALIGN);
	mvpp2_txdesc_dma_addr_set(port, tx_desc,
				  buf_dma_addr & ~MVPP2_TX_DESC_ALIGN);

	tx_cmd = mvpp2_skb_tx_csum(port, skb);

	if (frags == 1) {
		/* First and Last descriptor */
		tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC;
		mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd);
		mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc);
	} else {
		/* First but not Last */
		tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_PADDING_DISABLE;
		mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd);
		mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc);

		/* Continue with other skb fragments */
		if (mvpp2_tx_frag_process(port, skb, aggr_txq, txq)) {
			tx_desc_unmap_put(port, txq, tx_desc);
			frags = 0;
			goto out;
		}
	}

	txq_pcpu->reserved_num -= frags;
	txq_pcpu->count += frags;
	aggr_txq->count += frags;

	/* Enable transmit */
	wmb();
	mvpp2_aggr_txq_pend_desc_add(port, frags);

	if (txq_pcpu->size - txq_pcpu->count < MAX_SKB_FRAGS + 1) {
		struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);

		netif_tx_stop_queue(nq);
	}
out:
	if (frags > 0) {
		struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);

		u64_stats_update_begin(&stats->syncp);
		stats->tx_packets++;
		stats->tx_bytes += skb->len;
		u64_stats_update_end(&stats->syncp);
	} else {
		dev->stats.tx_dropped++;
		dev_kfree_skb_any(skb);
	}

	/* Finalize TX processing */
	if (txq_pcpu->count >= txq->done_pkts_coal)
		mvpp2_txq_done(port, txq, txq_pcpu);

	/* Set the timer in case not all frags were processed */
	if (txq_pcpu->count <= frags && txq_pcpu->count > 0) {
		struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu);

		mvpp2_timer_set(port_pcpu);
	}

	return NETDEV_TX_OK;
}
static inline void mvpp2_cause_error(struct net_device *dev, int cause)
{
	if (cause & MVPP2_CAUSE_FCS_ERR_MASK)
		netdev_err(dev, "FCS error\n");
	if (cause & MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK)
		netdev_err(dev, "rx fifo overrun error\n");
	if (cause & MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK)
		netdev_err(dev, "tx fifo underrun error\n");
}
static int mvpp2_poll(struct napi_struct *napi, int budget)
{
	u32 cause_rx_tx, cause_rx, cause_misc;
	int rx_done = 0;
	struct mvpp2_port *port = netdev_priv(napi->dev);
	int cpu = smp_processor_id();

	/* Rx/Tx cause register
	 *
	 * Bits 0-15: each bit indicates received packets on the Rx queue
	 * (bit 0 is for Rx queue 0).
	 *
	 * Bits 16-23: each bit indicates transmitted packets on the Tx queue
	 * (bit 16 is for Tx queue 0).
	 *
	 * Each CPU has its own Rx/Tx cause register
	 */
	cause_rx_tx = mvpp2_percpu_read(port->priv, cpu,
					MVPP2_ISR_RX_TX_CAUSE_REG(port->id));
	cause_rx_tx &= ~MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
	cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK;

	if (cause_misc) {
		mvpp2_cause_error(port->dev, cause_misc);

		/* Clear the cause register */
		mvpp2_write(port->priv, MVPP2_ISR_MISC_CAUSE_REG, 0);
		mvpp2_percpu_write(port->priv, cpu,
				   MVPP2_ISR_RX_TX_CAUSE_REG(port->id),
				   cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK);
	}

	cause_rx = cause_rx_tx & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK;

	/* Process RX packets */
	cause_rx |= port->pending_cause_rx;
	while (cause_rx && budget > 0) {
		int count;
		struct mvpp2_rx_queue *rxq;

		rxq = mvpp2_get_rx_queue(port, cause_rx);
		if (!rxq)
			break;

		count = mvpp2_rx(port, budget, rxq);
		rx_done += count;
		budget -= count;
		if (budget > 0) {
			/* Clear the bit associated to this Rx queue
			 * so that next iteration will continue from
			 * the next Rx queue.
			 */
			cause_rx &= ~(1 << rxq->logic_rxq);
		}
	}

	if (budget > 0) {
		cause_rx = 0;
		napi_complete_done(napi, rx_done);

		mvpp2_interrupts_enable(port);
	}
	port->pending_cause_rx = cause_rx;
	return rx_done;
}
/* Set hw internals when starting port */
static void mvpp2_start_dev(struct mvpp2_port *port)
{
	struct net_device *ndev = port->dev;

	mvpp2_gmac_max_rx_size_set(port);
	mvpp2_txp_max_tx_size_set(port);

	napi_enable(&port->napi);

	/* Enable interrupts on all CPUs */
	mvpp2_interrupts_enable(port);

	mvpp2_port_enable(port);
	phy_start(ndev->phydev);
	netif_tx_start_all_queues(port->dev);
}
/* Set hw internals when stopping port */
static void mvpp2_stop_dev(struct mvpp2_port *port)
{
	struct net_device *ndev = port->dev;

	/* Stop new packets from arriving to RXQs */
	mvpp2_ingress_disable(port);

	mdelay(10);

	/* Disable interrupts on all CPUs */
	mvpp2_interrupts_disable(port);

	napi_disable(&port->napi);

	netif_carrier_off(port->dev);
	netif_tx_stop_all_queues(port->dev);

	mvpp2_egress_disable(port);
	mvpp2_port_disable(port);
	phy_stop(ndev->phydev);
}
static int mvpp2_check_ringparam_valid(struct net_device *dev,
				       struct ethtool_ringparam *ring)
{
	u16 new_rx_pending = ring->rx_pending;
	u16 new_tx_pending = ring->tx_pending;

	if (ring->rx_pending == 0 || ring->tx_pending == 0)
		return -EINVAL;

	if (ring->rx_pending > MVPP2_MAX_RXD)
		new_rx_pending = MVPP2_MAX_RXD;
	else if (!IS_ALIGNED(ring->rx_pending, 16))
		new_rx_pending = ALIGN(ring->rx_pending, 16);

	if (ring->tx_pending > MVPP2_MAX_TXD)
		new_tx_pending = MVPP2_MAX_TXD;
	else if (!IS_ALIGNED(ring->tx_pending, 32))
		new_tx_pending = ALIGN(ring->tx_pending, 32);

	if (ring->rx_pending != new_rx_pending) {
		netdev_info(dev, "illegal Rx ring size value %d, round to %d\n",
			    ring->rx_pending, new_rx_pending);
		ring->rx_pending = new_rx_pending;
	}

	if (ring->tx_pending != new_tx_pending) {
		netdev_info(dev, "illegal Tx ring size value %d, round to %d\n",
			    ring->tx_pending, new_tx_pending);
		ring->tx_pending = new_tx_pending;
	}

	return 0;
}
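/* Example: a requested rx_pending of 100 is not 16-aligned and is rounded
 * up to 112; a tx_pending of 100 is rounded up to the next multiple of
 * 32, i.e. 128. Requests above MVPP2_MAX_RXD / MVPP2_MAX_TXD are capped
 * at those maxima, and the adjusted values are reported back to ethtool.
 */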
static void mvpp21_get_mac_address(struct mvpp2_port *port, unsigned char *addr)
{
	u32 mac_addr_l, mac_addr_m, mac_addr_h;

	mac_addr_l = readl(port->base + MVPP2_GMAC_CTRL_1_REG);
	mac_addr_m = readl(port->priv->lms_base + MVPP2_SRC_ADDR_MIDDLE);
	mac_addr_h = readl(port->priv->lms_base + MVPP2_SRC_ADDR_HIGH);
	addr[0] = (mac_addr_h >> 24) & 0xFF;
	addr[1] = (mac_addr_h >> 16) & 0xFF;
	addr[2] = (mac_addr_h >> 8) & 0xFF;
	addr[3] = mac_addr_h & 0xFF;
	addr[4] = mac_addr_m & 0xFF;
	addr[5] = (mac_addr_l >> MVPP2_GMAC_SA_LOW_OFFS) & 0xFF;
}
static int mvpp2_phy_connect(struct mvpp2_port *port)
{
	struct phy_device *phy_dev;

	phy_dev = of_phy_connect(port->dev, port->phy_node, mvpp2_link_event, 0,
				 port->phy_interface);
	if (!phy_dev) {
		netdev_err(port->dev, "cannot connect to phy\n");
		return -ENODEV;
	}
	phy_dev->supported &= PHY_GBIT_FEATURES;
	phy_dev->advertising = phy_dev->supported;

	port->link = 0;
	port->duplex = 0;
	port->speed = 0;

	return 0;
}

static void mvpp2_phy_disconnect(struct mvpp2_port *port)
{
	struct net_device *ndev = port->dev;

	phy_disconnect(ndev->phydev);
}
static int mvpp2_open(struct net_device *dev)
{
	struct mvpp2_port *port = netdev_priv(dev);
	unsigned char mac_bcast[ETH_ALEN] = {
			0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	int err;

	err = mvpp2_prs_mac_da_accept(port->priv, port->id, mac_bcast, true);
	if (err) {
		netdev_err(dev, "mvpp2_prs_mac_da_accept BC failed\n");
		return err;
	}
	err = mvpp2_prs_mac_da_accept(port->priv, port->id,
				      dev->dev_addr, true);
	if (err) {
		netdev_err(dev, "mvpp2_prs_mac_da_accept MC failed\n");
		return err;
	}
	err = mvpp2_prs_tag_mode_set(port->priv, port->id, MVPP2_TAG_TYPE_MH);
	if (err) {
		netdev_err(dev, "mvpp2_prs_tag_mode_set failed\n");
		return err;
	}
	err = mvpp2_prs_def_flow(port);
	if (err) {
		netdev_err(dev, "mvpp2_prs_def_flow failed\n");
		return err;
	}

	/* Allocate the Rx/Tx queues */
	err = mvpp2_setup_rxqs(port);
	if (err) {
		netdev_err(port->dev, "cannot allocate Rx queues\n");
		return err;
	}

	err = mvpp2_setup_txqs(port);
	if (err) {
		netdev_err(port->dev, "cannot allocate Tx queues\n");
		goto err_cleanup_rxqs;
	}

	err = request_irq(port->irq, mvpp2_isr, 0, dev->name, port);
	if (err) {
		netdev_err(port->dev, "cannot request IRQ %d\n", port->irq);
		goto err_cleanup_txqs;
	}

	/* By default, the link is down */
	netif_carrier_off(port->dev);

	err = mvpp2_phy_connect(port);
	if (err < 0)
		goto err_free_irq;

	/* Unmask interrupts on all CPUs */
	on_each_cpu(mvpp2_interrupts_unmask, port, 1);

	mvpp2_start_dev(port);

	return 0;

err_free_irq:
	free_irq(port->irq, port);
err_cleanup_txqs:
	mvpp2_cleanup_txqs(port);
err_cleanup_rxqs:
	mvpp2_cleanup_rxqs(port);
	return err;
}
static int mvpp2_stop(struct net_device *dev)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2_port_pcpu *port_pcpu;
	int cpu;

	mvpp2_stop_dev(port);
	mvpp2_phy_disconnect(port);

	/* Mask interrupts on all CPUs */
	on_each_cpu(mvpp2_interrupts_mask, port, 1);

	free_irq(port->irq, port);
	for_each_present_cpu(cpu) {
		port_pcpu = per_cpu_ptr(port->pcpu, cpu);

		hrtimer_cancel(&port_pcpu->tx_done_timer);
		port_pcpu->timer_scheduled = false;
		tasklet_kill(&port_pcpu->tx_done_tasklet);
	}
	mvpp2_cleanup_rxqs(port);
	mvpp2_cleanup_txqs(port);

	return 0;
}
static void mvpp2_set_rx_mode(struct net_device *dev)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2 *priv = port->priv;
	struct netdev_hw_addr *ha;
	int id = port->id;
	bool allmulti = dev->flags & IFF_ALLMULTI;

	mvpp2_prs_mac_promisc_set(priv, id, dev->flags & IFF_PROMISC);
	mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_ALL, allmulti);
	mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_IP6, allmulti);

	/* Remove all port->id's mcast entries */
	mvpp2_prs_mcast_del_all(priv, id);

	if (allmulti && !netdev_mc_empty(dev)) {
		netdev_for_each_mc_addr(ha, dev)
			mvpp2_prs_mac_da_accept(priv, id, ha->addr, true);
	}
}
static int mvpp2_set_mac_address(struct net_device *dev, void *p)
{
	struct mvpp2_port *port = netdev_priv(dev);
	const struct sockaddr *addr = p;
	int err;

	if (!is_valid_ether_addr(addr->sa_data)) {
		err = -EADDRNOTAVAIL;
		goto error;
	}

	if (!netif_running(dev)) {
		err = mvpp2_prs_update_mac_da(dev, addr->sa_data);
		if (!err)
			return 0;
		/* Reconfigure parser to accept the original MAC address */
		err = mvpp2_prs_update_mac_da(dev, dev->dev_addr);
		if (err)
			goto error;
	}

	mvpp2_stop_dev(port);

	err = mvpp2_prs_update_mac_da(dev, addr->sa_data);
	if (!err)
		goto out_start;

	/* Reconfigure parser to accept the original MAC address */
	err = mvpp2_prs_update_mac_da(dev, dev->dev_addr);
	if (err)
		goto error;

out_start:
	mvpp2_start_dev(port);
	mvpp2_egress_enable(port);
	mvpp2_ingress_enable(port);
	return 0;

error:
	netdev_err(dev, "failed to change MAC address\n");
	return err;
}
static int mvpp2_change_mtu(struct net_device *dev, int mtu)
{
	struct mvpp2_port *port = netdev_priv(dev);
	int err;

	if (!IS_ALIGNED(MVPP2_RX_PKT_SIZE(mtu), 8)) {
		netdev_info(dev, "illegal MTU value %d, round to %d\n", mtu,
			    ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8));
		mtu = ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8);
	}

	if (!netif_running(dev)) {
		err = mvpp2_bm_update_mtu(dev, mtu);
		if (!err) {
			port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
			return 0;
		}

		/* Reconfigure BM to the original MTU */
		err = mvpp2_bm_update_mtu(dev, dev->mtu);
		if (err)
			goto error;
	}

	mvpp2_stop_dev(port);

	err = mvpp2_bm_update_mtu(dev, mtu);
	if (!err) {
		port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
		goto out_start;
	}

	/* Reconfigure BM to the original MTU */
	err = mvpp2_bm_update_mtu(dev, dev->mtu);
	if (err)
		goto error;

out_start:
	mvpp2_start_dev(port);
	mvpp2_egress_enable(port);
	mvpp2_ingress_enable(port);

	return 0;

error:
	netdev_err(dev, "failed to change MTU\n");
	return err;
}
static void
mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct mvpp2_port *port = netdev_priv(dev);
	unsigned int start;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct mvpp2_pcpu_stats *cpu_stats;
		u64 rx_packets;
		u64 rx_bytes;
		u64 tx_packets;
		u64 tx_bytes;

		cpu_stats = per_cpu_ptr(port->stats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
			rx_packets = cpu_stats->rx_packets;
			rx_bytes = cpu_stats->rx_bytes;
			tx_packets = cpu_stats->tx_packets;
			tx_bytes = cpu_stats->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes += rx_bytes;
		stats->tx_packets += tx_packets;
		stats->tx_bytes += tx_bytes;
	}

	stats->rx_errors = dev->stats.rx_errors;
	stats->rx_dropped = dev->stats.rx_dropped;
	stats->tx_dropped = dev->stats.tx_dropped;
}
static int mvpp2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	int ret;

	if (!dev->phydev)
		return -ENOTSUPP;

	ret = phy_mii_ioctl(dev->phydev, ifr, cmd);
	if (!ret)
		mvpp2_link_event(dev);

	return ret;
}
/* Ethtool methods */

/* Set interrupt coalescing for ethtool */
static int mvpp2_ethtool_set_coalesce(struct net_device *dev,
				      struct ethtool_coalesce *c)
{
	struct mvpp2_port *port = netdev_priv(dev);
	int queue;

	for (queue = 0; queue < rxq_number; queue++) {
		struct mvpp2_rx_queue *rxq = port->rxqs[queue];

		rxq->time_coal = c->rx_coalesce_usecs;
		rxq->pkts_coal = c->rx_max_coalesced_frames;
		mvpp2_rx_pkts_coal_set(port, rxq);
		mvpp2_rx_time_coal_set(port, rxq);
	}

	for (queue = 0; queue < txq_number; queue++) {
		struct mvpp2_tx_queue *txq = port->txqs[queue];

		txq->done_pkts_coal = c->tx_max_coalesced_frames;
	}

	return 0;
}

/* Get interrupt coalescing for ethtool */
static int mvpp2_ethtool_get_coalesce(struct net_device *dev,
				      struct ethtool_coalesce *c)
{
	struct mvpp2_port *port = netdev_priv(dev);

	c->rx_coalesce_usecs = port->rxqs[0]->time_coal;
	c->rx_max_coalesced_frames = port->rxqs[0]->pkts_coal;
	c->tx_max_coalesced_frames = port->txqs[0]->done_pkts_coal;
	return 0;
}
static void mvpp2_ethtool_get_drvinfo(struct net_device *dev,
				      struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->driver, MVPP2_DRIVER_NAME,
		sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, MVPP2_DRIVER_VERSION,
		sizeof(drvinfo->version));
	strlcpy(drvinfo->bus_info, dev_name(&dev->dev),
		sizeof(drvinfo->bus_info));
}
static void mvpp2_ethtool_get_ringparam(struct net_device *dev,
					struct ethtool_ringparam *ring)
{
	struct mvpp2_port *port = netdev_priv(dev);

	ring->rx_max_pending = MVPP2_MAX_RXD;
	ring->tx_max_pending = MVPP2_MAX_TXD;
	ring->rx_pending = port->rx_ring_size;
	ring->tx_pending = port->tx_ring_size;
}
6220 static int mvpp2_ethtool_set_ringparam(struct net_device *dev,
6221 struct ethtool_ringparam *ring)
6223 struct mvpp2_port *port = netdev_priv(dev);
6224 u16 prev_rx_ring_size = port->rx_ring_size;
6225 u16 prev_tx_ring_size = port->tx_ring_size;
6228 err = mvpp2_check_ringparam_valid(dev, ring);
6232 if (!netif_running(dev)) {
6233 port->rx_ring_size = ring->rx_pending;
6234 port->tx_ring_size = ring->tx_pending;
6238 /* The interface is running, so we have to force a
6239 * reallocation of the queues
6241 mvpp2_stop_dev(port);
6242 mvpp2_cleanup_rxqs(port);
6243 mvpp2_cleanup_txqs(port);
6245 port->rx_ring_size = ring->rx_pending;
6246 port->tx_ring_size = ring->tx_pending;
6248 err = mvpp2_setup_rxqs(port);
6250 /* Reallocate Rx queues with the original ring size */
6251 port->rx_ring_size = prev_rx_ring_size;
6252 ring->rx_pending = prev_rx_ring_size;
6253 err = mvpp2_setup_rxqs(port);
6257 err = mvpp2_setup_txqs(port);
6259 /* Reallocate Tx queues with the original ring size */
6260 port->tx_ring_size = prev_tx_ring_size;
6261 ring->tx_pending = prev_tx_ring_size;
6262 err = mvpp2_setup_txqs(port);
6264 goto err_clean_rxqs;
6267 mvpp2_start_dev(port);
6268 mvpp2_egress_enable(port);
6269 mvpp2_ingress_enable(port);
6274 mvpp2_cleanup_rxqs(port);
6276 netdev_err(dev, "failed to change ring parameters\n");
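/* On a live interface the ring resize is destructive: the port is
 * stopped and all queues are torn down and re-created with the new
 * sizes. If re-creation fails, the code above retries with the previous
 * sizes and writes those back through @ring, so user space sees the
 * values actually in effect.
 */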
6282 static const struct net_device_ops mvpp2_netdev_ops = {
6283 .ndo_open = mvpp2_open,
6284 .ndo_stop = mvpp2_stop,
6285 .ndo_start_xmit = mvpp2_tx,
6286 .ndo_set_rx_mode = mvpp2_set_rx_mode,
6287 .ndo_set_mac_address = mvpp2_set_mac_address,
6288 .ndo_change_mtu = mvpp2_change_mtu,
6289 .ndo_get_stats64 = mvpp2_get_stats64,
6290 .ndo_do_ioctl = mvpp2_ioctl,
6293 static const struct ethtool_ops mvpp2_eth_tool_ops = {
6294 .nway_reset = phy_ethtool_nway_reset,
6295 .get_link = ethtool_op_get_link,
6296 .set_coalesce = mvpp2_ethtool_set_coalesce,
6297 .get_coalesce = mvpp2_ethtool_get_coalesce,
6298 .get_drvinfo = mvpp2_ethtool_get_drvinfo,
6299 .get_ringparam = mvpp2_ethtool_get_ringparam,
6300 .set_ringparam = mvpp2_ethtool_set_ringparam,
6301 .get_link_ksettings = phy_ethtool_get_link_ksettings,
6302 .set_link_ksettings = phy_ethtool_set_link_ksettings,
6305 /* Initialize port HW */
6306 static int mvpp2_port_init(struct mvpp2_port *port)
6308 struct device *dev = port->dev->dev.parent;
6309 struct mvpp2 *priv = port->priv;
6310 struct mvpp2_txq_pcpu *txq_pcpu;
6311 int queue, cpu, err;
6313 if (port->first_rxq + rxq_number > MVPP2_RXQ_TOTAL_NUM)
6317 mvpp2_egress_disable(port);
6318 mvpp2_port_disable(port);
6320 port->txqs = devm_kcalloc(dev, txq_number, sizeof(*port->txqs),
6325 /* Associate physical Tx queues with this port and initialize them.
6326 * The mapping is predefined.
6328 for (queue = 0; queue < txq_number; queue++) {
6329 int queue_phy_id = mvpp2_txq_phys(port->id, queue);
6330 struct mvpp2_tx_queue *txq;
6332 txq = devm_kzalloc(dev, sizeof(*txq), GFP_KERNEL);
6335 goto err_free_percpu;
6338 txq->pcpu = alloc_percpu(struct mvpp2_txq_pcpu);
6341 goto err_free_percpu;
6344 txq->id = queue_phy_id;
6345 txq->log_id = queue;
6346 txq->done_pkts_coal = MVPP2_TXDONE_COAL_PKTS_THRESH;
6347 for_each_present_cpu(cpu) {
6348 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
6349 txq_pcpu->cpu = cpu;
6352 port->txqs[queue] = txq;
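/* Each logical Tx queue carries per-CPU state (txq->pcpu) so every CPU
 * can work on its share of the ring without locking; only the cpu field
 * is seeded here, the remaining fields are presumably filled in when
 * the queue itself is initialized.
 */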
6355 port->rxqs = devm_kcalloc(dev, rxq_number, sizeof(*port->rxqs),
6359 goto err_free_percpu;
6362 /* Allocate and initialize the Rx queues for this port */
6363 for (queue = 0; queue < rxq_number; queue++) {
6364 struct mvpp2_rx_queue *rxq;
6366 /* Map physical Rx queue to port's logical Rx queue */
6367 rxq = devm_kzalloc(dev, sizeof(*rxq), GFP_KERNEL);
6370 goto err_free_percpu;
6372 /* Map this Rx queue to a physical queue */
6373 rxq->id = port->first_rxq + queue;
6374 rxq->port = port->id;
6375 rxq->logic_rxq = queue;
6377 port->rxqs[queue] = rxq;
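/* Physical Rx queues are a global resource (MVPP2_RXQ_TOTAL_NUM in
 * all); each port takes a contiguous block of rxq_number of them
 * starting at first_rxq, which is what the bounds check at the top of
 * this function guards.
 */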
6380 /* Configure Rx queue group interrupt for this port */
6381 mvpp2_write(priv, MVPP2_ISR_RXQ_GROUP_REG(port->id), rxq_number);
6383 /* Create Rx descriptor rings */
6384 for (queue = 0; queue < rxq_number; queue++) {
6385 struct mvpp2_rx_queue *rxq = port->rxqs[queue];
6387 rxq->size = port->rx_ring_size;
6388 rxq->pkts_coal = MVPP2_RX_COAL_PKTS;
6389 rxq->time_coal = MVPP2_RX_COAL_USEC;
6392 mvpp2_ingress_disable(port);
6394 /* Port default configuration */
6395 mvpp2_defaults_set(port);
6397 /* Port's classifier configuration */
6398 mvpp2_cls_oversize_rxq_set(port);
6399 mvpp2_cls_port_config(port);
6401 /* Provide an initial Rx packet size */
6402 port->pkt_size = MVPP2_RX_PKT_SIZE(port->dev->mtu);
6404 /* Initialize BM pools for software forwarding (swf) */
6405 err = mvpp2_swf_bm_pool_init(port);
6407 goto err_free_percpu;
6412 for (queue = 0; queue < txq_number; queue++) {
6413 if (!port->txqs[queue])
6415 free_percpu(port->txqs[queue]->pcpu);
6420 /* Port probe and initialization */
6421 static int mvpp2_port_probe(struct platform_device *pdev,
6422 struct device_node *port_node,
6424 int *next_first_rxq)
6426 struct device_node *phy_node;
6427 struct mvpp2_port *port;
6428 struct mvpp2_port_pcpu *port_pcpu;
6429 struct net_device *dev;
6430 struct resource *res;
6431 const char *dt_mac_addr;
6432 const char *mac_from;
6433 char hw_mac_addr[ETH_ALEN];
6439 dev = alloc_etherdev_mqs(sizeof(struct mvpp2_port), txq_number,
6444 phy_node = of_parse_phandle(port_node, "phy", 0);
6446 dev_err(&pdev->dev, "missing phy\n");
6448 goto err_free_netdev;
6451 phy_mode = of_get_phy_mode(port_node);
6453 dev_err(&pdev->dev, "incorrect phy mode\n");
6455 goto err_free_netdev;
6458 if (of_property_read_u32(port_node, "port-id", &id)) {
6460 dev_err(&pdev->dev, "missing port-id value\n");
6461 goto err_free_netdev;
6464 dev->tx_queue_len = MVPP2_MAX_TXD;
6465 dev->watchdog_timeo = 5 * HZ;
6466 dev->netdev_ops = &mvpp2_netdev_ops;
6467 dev->ethtool_ops = &mvpp2_eth_tool_ops;
6469 port = netdev_priv(dev);
6471 port->irq = irq_of_parse_and_map(port_node, 0);
6472 if (port->irq <= 0) {
6474 goto err_free_netdev;
6477 if (of_property_read_bool(port_node, "marvell,loopback"))
6478 port->flags |= MVPP2_F_LOOPBACK;
6482 port->first_rxq = *next_first_rxq;
6483 port->phy_node = phy_node;
6484 port->phy_interface = phy_mode;
6486 if (priv->hw_version == MVPP21) {
6487 res = platform_get_resource(pdev, IORESOURCE_MEM, 2 + id);
6488 port->base = devm_ioremap_resource(&pdev->dev, res);
6489 if (IS_ERR(port->base)) {
6490 err = PTR_ERR(port->base);
6494 if (of_property_read_u32(port_node, "gop-port-id",
6497 dev_err(&pdev->dev, "missing gop-port-id value\n");
6501 port->base = priv->iface_base + MVPP22_GMAC_BASE(port->gop_id);
6504 /* Allocate per-CPU stats */
6505 port->stats = netdev_alloc_pcpu_stats(struct mvpp2_pcpu_stats);
6511 dt_mac_addr = of_get_mac_address(port_node);
6512 if (dt_mac_addr && is_valid_ether_addr(dt_mac_addr)) {
6513 mac_from = "device tree";
6514 ether_addr_copy(dev->dev_addr, dt_mac_addr);
6516 if (priv->hw_version == MVPP21)
6517 mvpp21_get_mac_address(port, hw_mac_addr);
6518 if (is_valid_ether_addr(hw_mac_addr)) {
6519 mac_from = "hardware";
6520 ether_addr_copy(dev->dev_addr, hw_mac_addr);
6522 mac_from = "random";
6523 eth_hw_addr_random(dev);
6527 port->tx_ring_size = MVPP2_MAX_TXD;
6528 port->rx_ring_size = MVPP2_MAX_RXD;
6530 SET_NETDEV_DEV(dev, &pdev->dev);
6532 err = mvpp2_port_init(port);
6534 dev_err(&pdev->dev, "failed to init port %d\n", id);
6535 goto err_free_stats;
6538 mvpp2_port_mii_set(port);
6539 mvpp2_port_periodic_xon_disable(port);
6541 if (priv->hw_version == MVPP21)
6542 mvpp2_port_fc_adv_enable(port);
6544 mvpp2_port_reset(port);
6546 port->pcpu = alloc_percpu(struct mvpp2_port_pcpu);
6549 goto err_free_txq_pcpu;
6552 for_each_present_cpu(cpu) {
6553 port_pcpu = per_cpu_ptr(port->pcpu, cpu);
6555 hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC,
6556 HRTIMER_MODE_REL_PINNED);
6557 port_pcpu->tx_done_timer.function = mvpp2_hr_timer_cb;
6558 port_pcpu->timer_scheduled = false;
6560 tasklet_init(&port_pcpu->tx_done_tasklet, mvpp2_tx_proc_cb,
6561 (unsigned long)dev);
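/* Deferred Tx completion: when the done-packets coalescing threshold
 * has not fired an interrupt, a pinned per-CPU hrtimer schedules this
 * tasklet, which reclaims completed Tx descriptors outside of hard
 * interrupt context.
 */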
6564 netif_napi_add(dev, &port->napi, mvpp2_poll, NAPI_POLL_WEIGHT);
6565 features = NETIF_F_SG | NETIF_F_IP_CSUM;
6566 dev->features = features | NETIF_F_RXCSUM;
6567 dev->hw_features |= features | NETIF_F_RXCSUM | NETIF_F_GRO;
6568 dev->vlan_features |= features;
6570 /* MTU range: 68 - 9676 */
6571 dev->min_mtu = ETH_MIN_MTU;
6572 /* 9676 == 9700 - 20, rounded to 8 */
6573 dev->max_mtu = 9676;
6575 err = register_netdev(dev);
6577 dev_err(&pdev->dev, "failed to register netdev\n");
6578 goto err_free_port_pcpu;
6580 netdev_info(dev, "Using %s MAC address %pM\n", mac_from, dev->dev_addr);
6582 /* Increment the first Rx queue number to be used by the next port */
6583 *next_first_rxq += rxq_number;
6584 priv->port_list[id] = port;
6588 free_percpu(port->pcpu);
6590 for (i = 0; i < txq_number; i++)
6591 free_percpu(port->txqs[i]->pcpu);
6593 free_percpu(port->stats);
6595 irq_dispose_mapping(port->irq);
6597 of_node_put(phy_node);
6602 /* Port removal routine */
6603 static void mvpp2_port_remove(struct mvpp2_port *port)
6607 unregister_netdev(port->dev);
6608 of_node_put(port->phy_node);
6609 free_percpu(port->pcpu);
6610 free_percpu(port->stats);
6611 for (i = 0; i < txq_number; i++)
6612 free_percpu(port->txqs[i]->pcpu);
6613 irq_dispose_mapping(port->irq);
6614 free_netdev(port->dev);
6617 /* Initialize decoding windows */
6618 static void mvpp2_conf_mbus_windows(const struct mbus_dram_target_info *dram,
6624 for (i = 0; i < 6; i++) {
6625 mvpp2_write(priv, MVPP2_WIN_BASE(i), 0);
6626 mvpp2_write(priv, MVPP2_WIN_SIZE(i), 0);
6629 mvpp2_write(priv, MVPP2_WIN_REMAP(i), 0);
6634 for (i = 0; i < dram->num_cs; i++) {
6635 const struct mbus_dram_window *cs = dram->cs + i;
6637 mvpp2_write(priv, MVPP2_WIN_BASE(i),
6638 (cs->base & 0xffff0000) | (cs->mbus_attr << 8) |
6639 dram->mbus_dram_target_id);
6641 mvpp2_write(priv, MVPP2_WIN_SIZE(i),
6642 (cs->size - 1) & 0xffff0000);
6644 win_enable |= (1 << i);
6647 mvpp2_write(priv, MVPP2_BASE_ADDR_ENABLE, win_enable);
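/* Each enabled DRAM chip-select gets its own decoding window: the base
 * register packs the 64 KiB-aligned base address with the MBus target
 * id and attributes, the size register holds (size - 1) at the same
 * 64 KiB granularity, and MVPP2_BASE_ADDR_ENABLE turns on exactly the
 * windows that were programmed.
 */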
6650 /* Initialize Rx FIFOs */
6651 static void mvpp2_rx_fifo_init(struct mvpp2 *priv)
6655 for (port = 0; port < MVPP2_MAX_PORTS; port++) {
6656 mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port),
6657 MVPP2_RX_FIFO_PORT_DATA_SIZE);
6658 mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
6659 MVPP2_RX_FIFO_PORT_ATTR_SIZE);
6662 mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG,
6663 MVPP2_RX_FIFO_PORT_MIN_PKT);
6664 mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
6667 /* Initialize the common part of the network controller HW */
6668 static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv)
6670 const struct mbus_dram_target_info *dram_target_info;
6674 /* Check hardware constraints */
6675 if (rxq_number % 4 || (rxq_number > MVPP2_MAX_RXQ) ||
6676 (txq_number > MVPP2_MAX_TXQ)) {
6677 dev_err(&pdev->dev, "invalid queue size parameter\n");
6681 /* MBUS windows configuration */
6682 dram_target_info = mv_mbus_dram_info();
6683 if (dram_target_info)
6684 mvpp2_conf_mbus_windows(dram_target_info, priv);
6686 /* Disable HW PHY polling */
6687 if (priv->hw_version == MVPP21) {
6688 val = readl(priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
6689 val |= MVPP2_PHY_AN_STOP_SMI0_MASK;
6690 writel(val, priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
6692 val = readl(priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
6693 val &= ~MVPP22_SMI_POLLING_EN;
6694 writel(val, priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
6697 /* Allocate and initialize aggregated TXQs */
6698 priv->aggr_txqs = devm_kcalloc(&pdev->dev, num_present_cpus(),
6699 sizeof(struct mvpp2_tx_queue),
6701 if (!priv->aggr_txqs)
6704 for_each_present_cpu(i) {
6705 priv->aggr_txqs[i].id = i;
6706 priv->aggr_txqs[i].size = MVPP2_AGGR_TXQ_SIZE;
6707 err = mvpp2_aggr_txq_init(pdev, &priv->aggr_txqs[i],
6708 MVPP2_AGGR_TXQ_SIZE, i, priv);
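/* Aggregated Tx queues are per-CPU: each present CPU owns one
 * (id == cpu) and can push descriptors into it locklessly; the hardware
 * then dispatches them to the per-port physical Tx queues.
 */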
6714 mvpp2_rx_fifo_init(priv);
6716 /* Reset Rx queue group interrupt configuration */
6717 for (i = 0; i < MVPP2_MAX_PORTS; i++)
6718 mvpp2_write(priv, MVPP2_ISR_RXQ_GROUP_REG(i), rxq_number);
6720 if (priv->hw_version == MVPP21)
6721 writel(MVPP2_EXT_GLOBAL_CTRL_DEFAULT,
6722 priv->lms_base + MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG);
6724 /* Allow cache snooping when transmitting packets */
6725 mvpp2_write(priv, MVPP2_TX_SNOOP_REG, 0x1);
6727 /* Buffer Manager initialization */
6728 err = mvpp2_bm_init(pdev, priv);
6732 /* Parser default initialization */
6733 err = mvpp2_prs_default_init(pdev, priv);
6737 /* Classifier default initialization */
6738 mvpp2_cls_init(priv);
6743 static int mvpp2_probe(struct platform_device *pdev)
6745 struct device_node *dn = pdev->dev.of_node;
6746 struct device_node *port_node;
6748 struct resource *res;
6750 int port_count, first_rxq, cpu;
6753 priv = devm_kzalloc(&pdev->dev, sizeof(struct mvpp2), GFP_KERNEL);
6758 (unsigned long)of_device_get_match_data(&pdev->dev);
6760 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
6761 base = devm_ioremap_resource(&pdev->dev, res);
6763 return PTR_ERR(base);
6765 if (priv->hw_version == MVPP21) {
6766 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
6767 priv->lms_base = devm_ioremap_resource(&pdev->dev, res);
6768 if (IS_ERR(priv->lms_base))
6769 return PTR_ERR(priv->lms_base);
6771 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
6772 priv->iface_base = devm_ioremap_resource(&pdev->dev, res);
6773 if (IS_ERR(priv->iface_base))
6774 return PTR_ERR(priv->iface_base);
6777 for_each_present_cpu(cpu) {
6780 addr_space_sz = (priv->hw_version == MVPP21 ?
6781 MVPP21_ADDR_SPACE_SZ : MVPP22_ADDR_SPACE_SZ);
6782 priv->cpu_base[cpu] = base + cpu * addr_space_sz;
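/* Each present CPU gets its own view of the register space, with a
 * stride that depends on the HW revision; distinct per-CPU windows let
 * hot-path registers be accessed without locking. On PPv2.1 the stride
 * is expected to collapse all CPUs onto a single shared window.
 */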
6785 priv->pp_clk = devm_clk_get(&pdev->dev, "pp_clk");
6786 if (IS_ERR(priv->pp_clk))
6787 return PTR_ERR(priv->pp_clk);
6788 err = clk_prepare_enable(priv->pp_clk);
6792 priv->gop_clk = devm_clk_get(&pdev->dev, "gop_clk");
6793 if (IS_ERR(priv->gop_clk)) {
6794 err = PTR_ERR(priv->gop_clk);
6797 err = clk_prepare_enable(priv->gop_clk);
6801 /* Get system's tclk rate */
6802 priv->tclk = clk_get_rate(priv->pp_clk);
6804 /* Initialize network controller */
6805 err = mvpp2_init(pdev, priv);
6807 dev_err(&pdev->dev, "failed to initialize controller\n");
6811 port_count = of_get_available_child_count(dn);
6812 if (port_count == 0) {
6813 dev_err(&pdev->dev, "no ports enabled\n");
6818 priv->port_list = devm_kcalloc(&pdev->dev, port_count,
6819 sizeof(struct mvpp2_port *),
6821 if (!priv->port_list) {
6826 /* Initialize ports */
6828 for_each_available_child_of_node(dn, port_node) {
6829 err = mvpp2_port_probe(pdev, port_node, priv, &first_rxq);
6834 platform_set_drvdata(pdev, priv);
6838 clk_disable_unprepare(priv->gop_clk);
6840 clk_disable_unprepare(priv->pp_clk);
6844 static int mvpp2_remove(struct platform_device *pdev)
6846 struct mvpp2 *priv = platform_get_drvdata(pdev);
6847 struct device_node *dn = pdev->dev.of_node;
6848 struct device_node *port_node;
6851 for_each_available_child_of_node(dn, port_node) {
6852 if (priv->port_list[i])
6853 mvpp2_port_remove(priv->port_list[i]);
6857 for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
6858 struct mvpp2_bm_pool *bm_pool = &priv->bm_pools[i];
6860 mvpp2_bm_pool_destroy(pdev, priv, bm_pool);
6863 for_each_present_cpu(i) {
6864 struct mvpp2_tx_queue *aggr_txq = &priv->aggr_txqs[i];
6866 dma_free_coherent(&pdev->dev,
6867 MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
6869 aggr_txq->descs_dma);
6872 clk_disable_unprepare(priv->pp_clk);
6873 clk_disable_unprepare(priv->gop_clk);
6878 static const struct of_device_id mvpp2_match[] = {
6880 .compatible = "marvell,armada-375-pp2",
6881 .data = (void *)MVPP21,
6885 MODULE_DEVICE_TABLE(of, mvpp2_match);
6887 static struct platform_driver mvpp2_driver = {
6888 .probe = mvpp2_probe,
6889 .remove = mvpp2_remove,
6891 .name = MVPP2_DRIVER_NAME,
6892 .of_match_table = mvpp2_match,
6896 module_platform_driver(mvpp2_driver);
6898 MODULE_DESCRIPTION("Marvell PPv2 Ethernet Driver - www.marvell.com");
6899 MODULE_AUTHOR("Marcin Wojtas <mw@semihalf.com>");
6900 MODULE_LICENSE("GPL v2");