/*
 * Broadcom GENET (Gigabit Ethernet) controller driver
 *
 * Copyright (c) 2014 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt)				"bcmgenet: " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/string.h>
#include <linux/if_ether.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/pm.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_net.h>
#include <linux/of_platform.h>
#include <net/arp.h>

#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/phy.h>
#include <linux/platform_data/bcmgenet.h>

#include <asm/unaligned.h>

#include "bcmgenet.h"

/* Maximum number of hardware queues, downsized if needed */
#define GENET_MAX_MQ_CNT	4

/* Default highest priority queue for multi queue support */
#define GENET_Q0_PRIORITY	0

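/* The default ring (queue 16) is sized with whatever descriptors remain
 * once the hardware priority queues have taken theirs, hence the
 * TOTAL_DESC based expressions below.
 */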
#define GENET_Q16_RX_BD_CNT	\
	(TOTAL_DESC - priv->hw_params->rx_queues * priv->hw_params->rx_bds_per_q)
#define GENET_Q16_TX_BD_CNT	\
	(TOTAL_DESC - priv->hw_params->tx_queues * priv->hw_params->tx_bds_per_q)

#define RX_BUF_LENGTH		2048
#define SKB_ALIGNMENT		32

/* Tx/Rx DMA register offset, skip 256 descriptors */
#define WORDS_PER_BD(p)		(p->hw_params->words_per_bd)
#define DMA_DESC_SIZE		(WORDS_PER_BD(priv) * sizeof(u32))

#define GENET_TDMA_REG_OFF	(priv->hw_params->tdma_offset + \
				TOTAL_DESC * DMA_DESC_SIZE)

#define GENET_RDMA_REG_OFF	(priv->hw_params->rdma_offset + \
				TOTAL_DESC * DMA_DESC_SIZE)

static inline void dmadesc_set_length_status(struct bcmgenet_priv *priv,
					     void __iomem *d, u32 value)
{
	__raw_writel(value, d + DMA_DESC_LENGTH_STATUS);
}

static inline u32 dmadesc_get_length_status(struct bcmgenet_priv *priv,
					    void __iomem *d)
{
	return __raw_readl(d + DMA_DESC_LENGTH_STATUS);
}

static inline void dmadesc_set_addr(struct bcmgenet_priv *priv,
				    void __iomem *d,
				    dma_addr_t addr)
{
	__raw_writel(lower_32_bits(addr), d + DMA_DESC_ADDRESS_LO);

	/* Register writes to the GISB bus can take a couple hundred
	 * nanoseconds and are done for each packet, so save these expensive
	 * writes unless the platform is explicitly configured for
	 * 64-bit/LPAE.
	 */
#ifdef CONFIG_PHYS_ADDR_T_64BIT
	if (priv->hw_params->flags & GENET_HAS_40BITS)
		__raw_writel(upper_32_bits(addr), d + DMA_DESC_ADDRESS_HI);
#endif
}

/* Combined address + length/status setter */
static inline void dmadesc_set(struct bcmgenet_priv *priv,
			       void __iomem *d, dma_addr_t addr, u32 val)
{
	dmadesc_set_length_status(priv, d, val);
	dmadesc_set_addr(priv, d, addr);
}

static inline dma_addr_t dmadesc_get_addr(struct bcmgenet_priv *priv,
					  void __iomem *d)
{
	dma_addr_t addr;

	addr = __raw_readl(d + DMA_DESC_ADDRESS_LO);

	/* Register reads from the GISB bus can take a couple hundred
	 * nanoseconds and are done for each packet, so save these expensive
	 * reads unless the platform is explicitly configured for
	 * 64-bit/LPAE.
	 */
#ifdef CONFIG_PHYS_ADDR_T_64BIT
	if (priv->hw_params->flags & GENET_HAS_40BITS)
		addr |= (u64)__raw_readl(d + DMA_DESC_ADDRESS_HI) << 32;
#endif
	return addr;
}

#define GENET_VER_FMT	"%1d.%1d EPHY: 0x%04x"

#define GENET_MSG_DEFAULT	(NETIF_MSG_DRV | NETIF_MSG_PROBE | \
				NETIF_MSG_LINK)

static inline u32 bcmgenet_rbuf_ctrl_get(struct bcmgenet_priv *priv)
{
	if (GENET_IS_V1(priv))
		return bcmgenet_rbuf_readl(priv, RBUF_FLUSH_CTRL_V1);
	else
		return bcmgenet_sys_readl(priv, SYS_RBUF_FLUSH_CTRL);
}

static inline void bcmgenet_rbuf_ctrl_set(struct bcmgenet_priv *priv, u32 val)
{
	if (GENET_IS_V1(priv))
		bcmgenet_rbuf_writel(priv, val, RBUF_FLUSH_CTRL_V1);
	else
		bcmgenet_sys_writel(priv, val, SYS_RBUF_FLUSH_CTRL);
}

/* These macros are defined to deal with the register map change
 * between GENET1.1 and GENET2. Only those currently in use by the
 * driver are defined.
 */
static inline u32 bcmgenet_tbuf_ctrl_get(struct bcmgenet_priv *priv)
{
	if (GENET_IS_V1(priv))
		return bcmgenet_rbuf_readl(priv, TBUF_CTRL_V1);
	else
		return __raw_readl(priv->base +
				priv->hw_params->tbuf_offset + TBUF_CTRL);
}

static inline void bcmgenet_tbuf_ctrl_set(struct bcmgenet_priv *priv, u32 val)
{
	if (GENET_IS_V1(priv))
		bcmgenet_rbuf_writel(priv, val, TBUF_CTRL_V1);
	else
		__raw_writel(val, priv->base +
				priv->hw_params->tbuf_offset + TBUF_CTRL);
}

static inline u32 bcmgenet_bp_mc_get(struct bcmgenet_priv *priv)
{
	if (GENET_IS_V1(priv))
		return bcmgenet_rbuf_readl(priv, TBUF_BP_MC_V1);
	else
		return __raw_readl(priv->base +
				priv->hw_params->tbuf_offset + TBUF_BP_MC);
}

static inline void bcmgenet_bp_mc_set(struct bcmgenet_priv *priv, u32 val)
{
	if (GENET_IS_V1(priv))
		bcmgenet_rbuf_writel(priv, val, TBUF_BP_MC_V1);
	else
		__raw_writel(val, priv->base +
				priv->hw_params->tbuf_offset + TBUF_BP_MC);
}

/* RX/TX DMA register accessors */
enum dma_reg {
	DMA_RING_CFG = 0,
	DMA_CTRL,
	DMA_STATUS,
	DMA_SCB_BURST_SIZE,
	DMA_ARB_CTRL,
	DMA_PRIORITY_0,
	DMA_PRIORITY_1,
	DMA_PRIORITY_2,
	DMA_INDEX2RING_0,
	DMA_INDEX2RING_1,
	DMA_INDEX2RING_2,
	DMA_INDEX2RING_3,
	DMA_INDEX2RING_4,
	DMA_INDEX2RING_5,
	DMA_INDEX2RING_6,
	DMA_INDEX2RING_7,
};

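/* Per-version register offset tables, indexed by the enum above; note
 * that the older revisions lack the ring configuration and index-to-ring
 * registers, so those enum entries go unused there.
 */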
static const u8 bcmgenet_dma_regs_v3plus[] = {
	[DMA_RING_CFG]		= 0x00,
	[DMA_CTRL]		= 0x04,
	[DMA_STATUS]		= 0x08,
	[DMA_SCB_BURST_SIZE]	= 0x0C,
	[DMA_ARB_CTRL]		= 0x2C,
	[DMA_PRIORITY_0]	= 0x30,
	[DMA_PRIORITY_1]	= 0x34,
	[DMA_PRIORITY_2]	= 0x38,
	[DMA_INDEX2RING_0]	= 0x70,
	[DMA_INDEX2RING_1]	= 0x74,
	[DMA_INDEX2RING_2]	= 0x78,
	[DMA_INDEX2RING_3]	= 0x7C,
	[DMA_INDEX2RING_4]	= 0x80,
	[DMA_INDEX2RING_5]	= 0x84,
	[DMA_INDEX2RING_6]	= 0x88,
	[DMA_INDEX2RING_7]	= 0x8C,
};

static const u8 bcmgenet_dma_regs_v2[] = {
	[DMA_RING_CFG]		= 0x00,
	[DMA_CTRL]		= 0x04,
	[DMA_STATUS]		= 0x08,
	[DMA_SCB_BURST_SIZE]	= 0x0C,
	[DMA_ARB_CTRL]		= 0x30,
	[DMA_PRIORITY_0]	= 0x34,
	[DMA_PRIORITY_1]	= 0x38,
	[DMA_PRIORITY_2]	= 0x3C,
};

static const u8 bcmgenet_dma_regs_v1[] = {
	[DMA_CTRL]		= 0x00,
	[DMA_STATUS]		= 0x04,
	[DMA_SCB_BURST_SIZE]	= 0x0C,
	[DMA_ARB_CTRL]		= 0x30,
	[DMA_PRIORITY_0]	= 0x34,
	[DMA_PRIORITY_1]	= 0x38,
	[DMA_PRIORITY_2]	= 0x3C,
};

/* Set at runtime once bcmgenet version is known */
static const u8 *bcmgenet_dma_regs;

static inline struct bcmgenet_priv *dev_to_priv(struct device *dev)
{
	return netdev_priv(dev_get_drvdata(dev));
}

static inline u32 bcmgenet_tdma_readl(struct bcmgenet_priv *priv,
				      enum dma_reg r)
{
	return __raw_readl(priv->base + GENET_TDMA_REG_OFF +
			DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
}

static inline void bcmgenet_tdma_writel(struct bcmgenet_priv *priv,
					u32 val, enum dma_reg r)
{
	__raw_writel(val, priv->base + GENET_TDMA_REG_OFF +
			DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
}

static inline u32 bcmgenet_rdma_readl(struct bcmgenet_priv *priv,
				      enum dma_reg r)
{
	return __raw_readl(priv->base + GENET_RDMA_REG_OFF +
			DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
}

static inline void bcmgenet_rdma_writel(struct bcmgenet_priv *priv,
					u32 val, enum dma_reg r)
{
	__raw_writel(val, priv->base + GENET_RDMA_REG_OFF +
			DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
}

/* RDMA/TDMA ring registers and accessors: the common fields are merged,
 * and registers whose meaning depends on the direction carry a T (TDMA)
 * or R (RDMA) prefix.
 */
enum dma_ring_reg {
	TDMA_READ_PTR = 0,
	RDMA_WRITE_PTR = TDMA_READ_PTR,
	TDMA_READ_PTR_HI,
	RDMA_WRITE_PTR_HI = TDMA_READ_PTR_HI,
	TDMA_CONS_INDEX,
	RDMA_PROD_INDEX = TDMA_CONS_INDEX,
	TDMA_PROD_INDEX,
	RDMA_CONS_INDEX = TDMA_PROD_INDEX,
	DMA_RING_BUF_SIZE,
	DMA_START_ADDR,
	DMA_START_ADDR_HI,
	DMA_END_ADDR,
	DMA_END_ADDR_HI,
	DMA_MBUF_DONE_THRESH,
	TDMA_FLOW_PERIOD,
	RDMA_XON_XOFF_THRESH = TDMA_FLOW_PERIOD,
	TDMA_WRITE_PTR,
	RDMA_READ_PTR = TDMA_WRITE_PTR,
	TDMA_WRITE_PTR_HI,
	RDMA_READ_PTR_HI = TDMA_WRITE_PTR_HI
};

/* GENET v4 supports 40-bit pointer addressing; for obvious reasons the
 * LO and HI word parts are kept contiguous, which shifts the offsets of
 * the remaining registers.
 */
static const u8 genet_dma_ring_regs_v4[] = {
	[TDMA_READ_PTR]			= 0x00,
	[TDMA_READ_PTR_HI]		= 0x04,
	[TDMA_CONS_INDEX]		= 0x08,
	[TDMA_PROD_INDEX]		= 0x0C,
	[DMA_RING_BUF_SIZE]		= 0x10,
	[DMA_START_ADDR]		= 0x14,
	[DMA_START_ADDR_HI]		= 0x18,
	[DMA_END_ADDR]			= 0x1C,
	[DMA_END_ADDR_HI]		= 0x20,
	[DMA_MBUF_DONE_THRESH]		= 0x24,
	[TDMA_FLOW_PERIOD]		= 0x28,
	[TDMA_WRITE_PTR]		= 0x2C,
	[TDMA_WRITE_PTR_HI]		= 0x30,
};

static const u8 genet_dma_ring_regs_v123[] = {
	[TDMA_READ_PTR]			= 0x00,
	[TDMA_CONS_INDEX]		= 0x04,
	[TDMA_PROD_INDEX]		= 0x08,
	[DMA_RING_BUF_SIZE]		= 0x0C,
	[DMA_START_ADDR]		= 0x10,
	[DMA_END_ADDR]			= 0x14,
	[DMA_MBUF_DONE_THRESH]		= 0x18,
	[TDMA_FLOW_PERIOD]		= 0x1C,
	[TDMA_WRITE_PTR]		= 0x20,
};

/* Set at runtime once GENET version is known */
static const u8 *genet_dma_ring_regs;

static inline u32 bcmgenet_tdma_ring_readl(struct bcmgenet_priv *priv,
					   unsigned int ring,
					   enum dma_ring_reg r)
{
	return __raw_readl(priv->base + GENET_TDMA_REG_OFF +
			(DMA_RING_SIZE * ring) +
			genet_dma_ring_regs[r]);
}

static inline void bcmgenet_tdma_ring_writel(struct bcmgenet_priv *priv,
					     unsigned int ring, u32 val,
					     enum dma_ring_reg r)
{
	__raw_writel(val, priv->base + GENET_TDMA_REG_OFF +
			(DMA_RING_SIZE * ring) +
			genet_dma_ring_regs[r]);
}

static inline u32 bcmgenet_rdma_ring_readl(struct bcmgenet_priv *priv,
					   unsigned int ring,
					   enum dma_ring_reg r)
{
	return __raw_readl(priv->base + GENET_RDMA_REG_OFF +
			(DMA_RING_SIZE * ring) +
			genet_dma_ring_regs[r]);
}

static inline void bcmgenet_rdma_ring_writel(struct bcmgenet_priv *priv,
					     unsigned int ring, u32 val,
					     enum dma_ring_reg r)
{
	__raw_writel(val, priv->base + GENET_RDMA_REG_OFF +
			(DMA_RING_SIZE * ring) +
			genet_dma_ring_regs[r]);
}

static int bcmgenet_get_settings(struct net_device *dev,
				 struct ethtool_cmd *cmd)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	if (!priv->phydev)
		return -ENODEV;

	return phy_ethtool_gset(priv->phydev, cmd);
}

static int bcmgenet_set_settings(struct net_device *dev,
				 struct ethtool_cmd *cmd)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	if (!priv->phydev)
		return -ENODEV;

	return phy_ethtool_sset(priv->phydev, cmd);
}

static int bcmgenet_set_rx_csum(struct net_device *dev,
				netdev_features_t wanted)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	u32 rbuf_chk_ctrl;
	bool rx_csum_en;

	rx_csum_en = !!(wanted & NETIF_F_RXCSUM);

	rbuf_chk_ctrl = bcmgenet_rbuf_readl(priv, RBUF_CHK_CTRL);

	/* enable rx checksumming */
	if (rx_csum_en)
		rbuf_chk_ctrl |= RBUF_RXCHK_EN;
	else
		rbuf_chk_ctrl &= ~RBUF_RXCHK_EN;
	priv->desc_rxchk_en = rx_csum_en;

	/* If UniMAC forwards CRC, we need to skip over it to get
	 * a valid CHK bit to be set in the per-packet status word
	 */
	if (rx_csum_en && priv->crc_fwd_en)
		rbuf_chk_ctrl |= RBUF_SKIP_FCS;
	else
		rbuf_chk_ctrl &= ~RBUF_SKIP_FCS;

	bcmgenet_rbuf_writel(priv, rbuf_chk_ctrl, RBUF_CHK_CTRL);

	return 0;
}

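/* TX checksum offsets are handed to the hardware through the 64-byte
 * per-packet status block (see bcmgenet_put_tx_csum below), so enabling
 * TX checksumming switches both the TBUF and RBUF paths into 64B
 * descriptor mode.
 */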
static int bcmgenet_set_tx_csum(struct net_device *dev,
				netdev_features_t wanted)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	bool desc_64b_en;
	u32 tbuf_ctrl, rbuf_ctrl;

	tbuf_ctrl = bcmgenet_tbuf_ctrl_get(priv);
	rbuf_ctrl = bcmgenet_rbuf_readl(priv, RBUF_CTRL);

	desc_64b_en = !!(wanted & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));

	/* enable 64 bytes descriptor in both directions (RBUF and TBUF) */
	if (desc_64b_en) {
		tbuf_ctrl |= RBUF_64B_EN;
		rbuf_ctrl |= RBUF_64B_EN;
	} else {
		tbuf_ctrl &= ~RBUF_64B_EN;
		rbuf_ctrl &= ~RBUF_64B_EN;
	}
	priv->desc_64b_en = desc_64b_en;

	bcmgenet_tbuf_ctrl_set(priv, tbuf_ctrl);
	bcmgenet_rbuf_writel(priv, rbuf_ctrl, RBUF_CTRL);

	return 0;
}

static int bcmgenet_set_features(struct net_device *dev,
				 netdev_features_t features)
{
	netdev_features_t changed = features ^ dev->features;
	netdev_features_t wanted = dev->wanted_features;
	int ret = 0;

	if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))
		ret = bcmgenet_set_tx_csum(dev, wanted);
	if (changed & (NETIF_F_RXCSUM))
		ret = bcmgenet_set_rx_csum(dev, wanted);

	return ret;
}

static u32 bcmgenet_get_msglevel(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	return priv->msg_enable;
}

static void bcmgenet_set_msglevel(struct net_device *dev, u32 level)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	priv->msg_enable = level;
}

/* standard ethtool support functions. */
enum bcmgenet_stat_type {
	BCMGENET_STAT_NETDEV = -1,
	BCMGENET_STAT_MIB_RX,
	BCMGENET_STAT_MIB_TX,
	BCMGENET_STAT_RUNT,
	BCMGENET_STAT_MISC,
	BCMGENET_STAT_SOFT,
};

struct bcmgenet_stats {
	char stat_string[ETH_GSTRING_LEN];
	int stat_sizeof;
	int stat_offset;
	enum bcmgenet_stat_type type;
	/* reg offset from UMAC base for misc counters */
	u16 reg_offset;
};

#define STAT_NETDEV(m) { \
	.stat_string = __stringify(m), \
	.stat_sizeof = sizeof(((struct net_device_stats *)0)->m), \
	.stat_offset = offsetof(struct net_device_stats, m), \
	.type = BCMGENET_STAT_NETDEV, \
}

#define STAT_GENET_MIB(str, m, _type) { \
	.stat_string = str, \
	.stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->m), \
	.stat_offset = offsetof(struct bcmgenet_priv, m), \
	.type = _type, \
}

#define STAT_GENET_MIB_RX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_RX)
#define STAT_GENET_MIB_TX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_TX)
#define STAT_GENET_RUNT(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_RUNT)
#define STAT_GENET_SOFT_MIB(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_SOFT)

#define STAT_GENET_MISC(str, m, offset) { \
	.stat_string = str, \
	.stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->m), \
	.stat_offset = offsetof(struct bcmgenet_priv, m), \
	.type = BCMGENET_STAT_MISC, \
	.reg_offset = offset, \
}

/* There is a 0xC gap between the end of RX and beginning of TX stats and then
 * between the end of TX stats and the beginning of the RX RUNT
 */
#define BCMGENET_STAT_OFFSET	0xc

/* Hardware counters must be kept in sync because the order/offset
 * is important here (order in structure declaration = order in hardware)
 */
static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = {
	/* general stats */
	STAT_NETDEV(rx_packets),
	STAT_NETDEV(tx_packets),
	STAT_NETDEV(rx_bytes),
	STAT_NETDEV(tx_bytes),
	STAT_NETDEV(rx_errors),
	STAT_NETDEV(tx_errors),
	STAT_NETDEV(rx_dropped),
	STAT_NETDEV(tx_dropped),
	STAT_NETDEV(multicast),
	/* UniMAC RSV counters */
	STAT_GENET_MIB_RX("rx_64_octets", mib.rx.pkt_cnt.cnt_64),
	STAT_GENET_MIB_RX("rx_65_127_oct", mib.rx.pkt_cnt.cnt_127),
	STAT_GENET_MIB_RX("rx_128_255_oct", mib.rx.pkt_cnt.cnt_255),
	STAT_GENET_MIB_RX("rx_256_511_oct", mib.rx.pkt_cnt.cnt_511),
	STAT_GENET_MIB_RX("rx_512_1023_oct", mib.rx.pkt_cnt.cnt_1023),
	STAT_GENET_MIB_RX("rx_1024_1518_oct", mib.rx.pkt_cnt.cnt_1518),
	STAT_GENET_MIB_RX("rx_vlan_1519_1522_oct", mib.rx.pkt_cnt.cnt_mgv),
	STAT_GENET_MIB_RX("rx_1522_2047_oct", mib.rx.pkt_cnt.cnt_2047),
	STAT_GENET_MIB_RX("rx_2048_4095_oct", mib.rx.pkt_cnt.cnt_4095),
	STAT_GENET_MIB_RX("rx_4096_9216_oct", mib.rx.pkt_cnt.cnt_9216),
	STAT_GENET_MIB_RX("rx_pkts", mib.rx.pkt),
	STAT_GENET_MIB_RX("rx_bytes", mib.rx.bytes),
	STAT_GENET_MIB_RX("rx_multicast", mib.rx.mca),
	STAT_GENET_MIB_RX("rx_broadcast", mib.rx.bca),
	STAT_GENET_MIB_RX("rx_fcs", mib.rx.fcs),
	STAT_GENET_MIB_RX("rx_control", mib.rx.cf),
	STAT_GENET_MIB_RX("rx_pause", mib.rx.pf),
	STAT_GENET_MIB_RX("rx_unknown", mib.rx.uo),
	STAT_GENET_MIB_RX("rx_align", mib.rx.aln),
	STAT_GENET_MIB_RX("rx_outrange", mib.rx.flr),
	STAT_GENET_MIB_RX("rx_code", mib.rx.cde),
	STAT_GENET_MIB_RX("rx_carrier", mib.rx.fcr),
	STAT_GENET_MIB_RX("rx_oversize", mib.rx.ovr),
	STAT_GENET_MIB_RX("rx_jabber", mib.rx.jbr),
	STAT_GENET_MIB_RX("rx_mtu_err", mib.rx.mtue),
	STAT_GENET_MIB_RX("rx_good_pkts", mib.rx.pok),
	STAT_GENET_MIB_RX("rx_unicast", mib.rx.uc),
	STAT_GENET_MIB_RX("rx_ppp", mib.rx.ppp),
	STAT_GENET_MIB_RX("rx_crc", mib.rx.rcrc),
	/* UniMAC TSV counters */
	STAT_GENET_MIB_TX("tx_64_octets", mib.tx.pkt_cnt.cnt_64),
	STAT_GENET_MIB_TX("tx_65_127_oct", mib.tx.pkt_cnt.cnt_127),
	STAT_GENET_MIB_TX("tx_128_255_oct", mib.tx.pkt_cnt.cnt_255),
	STAT_GENET_MIB_TX("tx_256_511_oct", mib.tx.pkt_cnt.cnt_511),
	STAT_GENET_MIB_TX("tx_512_1023_oct", mib.tx.pkt_cnt.cnt_1023),
	STAT_GENET_MIB_TX("tx_1024_1518_oct", mib.tx.pkt_cnt.cnt_1518),
	STAT_GENET_MIB_TX("tx_vlan_1519_1522_oct", mib.tx.pkt_cnt.cnt_mgv),
	STAT_GENET_MIB_TX("tx_1522_2047_oct", mib.tx.pkt_cnt.cnt_2047),
	STAT_GENET_MIB_TX("tx_2048_4095_oct", mib.tx.pkt_cnt.cnt_4095),
	STAT_GENET_MIB_TX("tx_4096_9216_oct", mib.tx.pkt_cnt.cnt_9216),
	STAT_GENET_MIB_TX("tx_pkts", mib.tx.pkts),
	STAT_GENET_MIB_TX("tx_multicast", mib.tx.mca),
	STAT_GENET_MIB_TX("tx_broadcast", mib.tx.bca),
	STAT_GENET_MIB_TX("tx_pause", mib.tx.pf),
	STAT_GENET_MIB_TX("tx_control", mib.tx.cf),
	STAT_GENET_MIB_TX("tx_fcs_err", mib.tx.fcs),
	STAT_GENET_MIB_TX("tx_oversize", mib.tx.ovr),
	STAT_GENET_MIB_TX("tx_defer", mib.tx.drf),
	STAT_GENET_MIB_TX("tx_excess_defer", mib.tx.edf),
	STAT_GENET_MIB_TX("tx_single_col", mib.tx.scl),
	STAT_GENET_MIB_TX("tx_multi_col", mib.tx.mcl),
	STAT_GENET_MIB_TX("tx_late_col", mib.tx.lcl),
	STAT_GENET_MIB_TX("tx_excess_col", mib.tx.ecl),
	STAT_GENET_MIB_TX("tx_frags", mib.tx.frg),
	STAT_GENET_MIB_TX("tx_total_col", mib.tx.ncl),
	STAT_GENET_MIB_TX("tx_jabber", mib.tx.jbr),
	STAT_GENET_MIB_TX("tx_bytes", mib.tx.bytes),
	STAT_GENET_MIB_TX("tx_good_pkts", mib.tx.pok),
	STAT_GENET_MIB_TX("tx_unicast", mib.tx.uc),
	/* UniMAC RUNT counters */
	STAT_GENET_RUNT("rx_runt_pkts", mib.rx_runt_cnt),
	STAT_GENET_RUNT("rx_runt_valid_fcs", mib.rx_runt_fcs),
	STAT_GENET_RUNT("rx_runt_inval_fcs_align", mib.rx_runt_fcs_align),
	STAT_GENET_RUNT("rx_runt_bytes", mib.rx_runt_bytes),
	/* Misc UniMAC counters */
	STAT_GENET_MISC("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt,
			UMAC_RBUF_OVFL_CNT),
	STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt, UMAC_RBUF_ERR_CNT),
	STAT_GENET_MISC("mdf_err_cnt", mib.mdf_err_cnt, UMAC_MDF_ERR_CNT),
	STAT_GENET_SOFT_MIB("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
	STAT_GENET_SOFT_MIB("rx_dma_failed", mib.rx_dma_failed),
	STAT_GENET_SOFT_MIB("tx_dma_failed", mib.tx_dma_failed),
};

#define BCMGENET_STATS_LEN	ARRAY_SIZE(bcmgenet_gstrings_stats)

static void bcmgenet_get_drvinfo(struct net_device *dev,
				 struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, "bcmgenet", sizeof(info->driver));
	strlcpy(info->version, "v2.0", sizeof(info->version));
	info->n_stats = BCMGENET_STATS_LEN;
}

static int bcmgenet_get_sset_count(struct net_device *dev, int string_set)
{
	switch (string_set) {
	case ETH_SS_STATS:
		return BCMGENET_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

static void bcmgenet_get_strings(struct net_device *dev, u32 stringset,
				 u8 *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < BCMGENET_STATS_LEN; i++) {
			memcpy(data + i * ETH_GSTRING_LEN,
			       bcmgenet_gstrings_stats[i].stat_string,
			       ETH_GSTRING_LEN);
		}
		break;
	}
}

static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv)
{
	int i, j = 0;

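	/* j tracks the running byte offset of the current counter within
	 * the hardware MIB block; it advances for every hardware-backed
	 * stat (MIB, RUNT, MISC) but not for the netdev and software-only
	 * entries, which bail out early below.
	 */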
	for (i = 0; i < BCMGENET_STATS_LEN; i++) {
		const struct bcmgenet_stats *s;
		u8 offset = 0;
		u32 val = 0;
		char *p;

		s = &bcmgenet_gstrings_stats[i];
		switch (s->type) {
		case BCMGENET_STAT_NETDEV:
		case BCMGENET_STAT_SOFT:
			continue;
		case BCMGENET_STAT_MIB_RX:
		case BCMGENET_STAT_MIB_TX:
		case BCMGENET_STAT_RUNT:
			if (s->type != BCMGENET_STAT_MIB_RX)
				offset = BCMGENET_STAT_OFFSET;
			val = bcmgenet_umac_readl(priv,
						  UMAC_MIB_START + j + offset);
			break;
		case BCMGENET_STAT_MISC:
			val = bcmgenet_umac_readl(priv, s->reg_offset);
			/* clear if overflowed */
			if (val == ~0)
				bcmgenet_umac_writel(priv, 0, s->reg_offset);
			break;
		}

		j += s->stat_sizeof;
		p = (char *)priv + s->stat_offset;
		*(u32 *)p = val;
	}
}

static void bcmgenet_get_ethtool_stats(struct net_device *dev,
				       struct ethtool_stats *stats,
				       u64 *data)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	int i;

	if (netif_running(dev))
		bcmgenet_update_mib_counters(priv);

	for (i = 0; i < BCMGENET_STATS_LEN; i++) {
		const struct bcmgenet_stats *s;
		char *p;

		s = &bcmgenet_gstrings_stats[i];
		if (s->type == BCMGENET_STAT_NETDEV)
			p = (char *)&dev->stats;
		else
			p = (char *)priv;
		p += s->stat_offset;
		data[i] = *(u32 *)p;
	}
}

static void bcmgenet_eee_enable_set(struct net_device *dev, bool enable)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	u32 off = priv->hw_params->tbuf_offset + TBUF_ENERGY_CTRL;
	u32 reg;

	if (enable && !priv->clk_eee_enabled) {
		clk_prepare_enable(priv->clk_eee);
		priv->clk_eee_enabled = true;
	}

	reg = bcmgenet_umac_readl(priv, UMAC_EEE_CTRL);
	if (enable)
		reg |= EEE_EN;
	else
		reg &= ~EEE_EN;
	bcmgenet_umac_writel(priv, reg, UMAC_EEE_CTRL);

	/* Enable EEE and switch to a 27MHz clock automatically */
	reg = __raw_readl(priv->base + off);
	if (enable)
		reg |= TBUF_EEE_EN | TBUF_PM_EN;
	else
		reg &= ~(TBUF_EEE_EN | TBUF_PM_EN);
	__raw_writel(reg, priv->base + off);

	/* Do the same thing for the RBUF */
	reg = bcmgenet_rbuf_readl(priv, RBUF_ENERGY_CTRL);
	if (enable)
		reg |= RBUF_EEE_EN | RBUF_PM_EN;
	else
		reg &= ~(RBUF_EEE_EN | RBUF_PM_EN);
	bcmgenet_rbuf_writel(priv, reg, RBUF_ENERGY_CTRL);

	if (!enable && priv->clk_eee_enabled) {
		clk_disable_unprepare(priv->clk_eee);
		priv->clk_eee_enabled = false;
	}

	priv->eee.eee_enabled = enable;
	priv->eee.eee_active = enable;
}

static int bcmgenet_get_eee(struct net_device *dev, struct ethtool_eee *e)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct ethtool_eee *p = &priv->eee;

	if (GENET_IS_V1(priv))
		return -EOPNOTSUPP;

	e->eee_enabled = p->eee_enabled;
	e->eee_active = p->eee_active;
	e->tx_lpi_timer = bcmgenet_umac_readl(priv, UMAC_EEE_LPI_TIMER);

	return phy_ethtool_get_eee(priv->phydev, e);
}

static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_eee *e)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct ethtool_eee *p = &priv->eee;
	int ret = 0;

	if (GENET_IS_V1(priv))
		return -EOPNOTSUPP;

	p->eee_enabled = e->eee_enabled;

	if (!p->eee_enabled) {
		bcmgenet_eee_enable_set(dev, false);
	} else {
		ret = phy_init_eee(priv->phydev, 0);
		if (ret) {
			netif_err(priv, hw, dev, "EEE initialization failed\n");
			return ret;
		}

		bcmgenet_umac_writel(priv, e->tx_lpi_timer, UMAC_EEE_LPI_TIMER);
		bcmgenet_eee_enable_set(dev, true);
	}

	return phy_ethtool_set_eee(priv->phydev, e);
}

static int bcmgenet_nway_reset(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	return genphy_restart_aneg(priv->phydev);
}

/* standard ethtool support functions. */
static struct ethtool_ops bcmgenet_ethtool_ops = {
	.get_strings		= bcmgenet_get_strings,
	.get_sset_count		= bcmgenet_get_sset_count,
	.get_ethtool_stats	= bcmgenet_get_ethtool_stats,
	.get_settings		= bcmgenet_get_settings,
	.set_settings		= bcmgenet_set_settings,
	.get_drvinfo		= bcmgenet_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_msglevel		= bcmgenet_get_msglevel,
	.set_msglevel		= bcmgenet_set_msglevel,
	.get_wol		= bcmgenet_get_wol,
	.set_wol		= bcmgenet_set_wol,
	.get_eee		= bcmgenet_get_eee,
	.set_eee		= bcmgenet_set_eee,
	.nway_reset		= bcmgenet_nway_reset,
};

/* Power down the unimac, based on mode. */
static int bcmgenet_power_down(struct bcmgenet_priv *priv,
				enum bcmgenet_power_mode mode)
{
	int ret = 0;
	u32 reg;

	switch (mode) {
	case GENET_POWER_CABLE_SENSE:
		phy_detach(priv->phydev);
		break;

	case GENET_POWER_WOL_MAGIC:
		ret = bcmgenet_wol_power_down_cfg(priv, mode);
		break;

	case GENET_POWER_PASSIVE:
		/* Power down LED */
		if (priv->hw_params->flags & GENET_HAS_EXT) {
			reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
			reg |= (EXT_PWR_DOWN_PHY |
				EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS);
			bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);

			bcmgenet_phy_power_set(priv->dev, false);
		}
		break;
	default:
		break;
	}

	return ret;
}

static void bcmgenet_power_up(struct bcmgenet_priv *priv,
			      enum bcmgenet_power_mode mode)
{
	u32 reg;

	if (!(priv->hw_params->flags & GENET_HAS_EXT))
		return;

	reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);

	switch (mode) {
	case GENET_POWER_PASSIVE:
		reg &= ~(EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_PHY |
				EXT_PWR_DOWN_BIAS);
		/* fallthrough */
	case GENET_POWER_CABLE_SENSE:
		/* enable APD */
		reg |= EXT_PWR_DN_EN_LD;
		break;
	case GENET_POWER_WOL_MAGIC:
		bcmgenet_wol_power_up_cfg(priv, mode);
		return;
	default:
		break;
	}

	bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
	if (mode == GENET_POWER_PASSIVE)
		bcmgenet_phy_power_set(priv->dev, true);
}

/* ioctl handles special commands that are not present in ethtool. */
static int bcmgenet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	int val = 0;

	if (!netif_running(dev))
		return -EINVAL;

	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		if (!priv->phydev)
			val = -ENODEV;
		else
			val = phy_mii_ioctl(priv->phydev, rq, cmd);
		break;

	default:
		val = -EINVAL;
		break;
	}

	return val;
}

static struct enet_cb *bcmgenet_get_txcb(struct bcmgenet_priv *priv,
					 struct bcmgenet_tx_ring *ring)
{
	struct enet_cb *tx_cb_ptr;

	tx_cb_ptr = ring->cbs;
	tx_cb_ptr += ring->write_ptr - ring->cb_ptr;

	/* Advancing local write pointer */
	if (ring->write_ptr == ring->end_ptr)
		ring->write_ptr = ring->cb_ptr;
	else
		ring->write_ptr++;

	return tx_cb_ptr;
}

/* Simple helper to free a control block's resources */
static void bcmgenet_free_cb(struct enet_cb *cb)
{
	dev_kfree_skb_any(cb->skb);
	cb->skb = NULL;
	dma_unmap_addr_set(cb, dma_addr, 0);
}

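/* Ring 16 (the default ring) is signalled through the INTRL2_0 controller,
 * while each priority ring owns a bit in INTRL2_1; the helpers below mask
 * or unmask the corresponding interrupt source.
 */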
static inline void bcmgenet_rx_ring16_int_disable(struct bcmgenet_rx_ring *ring)
{
	bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_RXDMA_DONE,
				 INTRL2_CPU_MASK_SET);
}

static inline void bcmgenet_rx_ring16_int_enable(struct bcmgenet_rx_ring *ring)
{
	bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_RXDMA_DONE,
				 INTRL2_CPU_MASK_CLEAR);
}

static inline void bcmgenet_rx_ring_int_disable(struct bcmgenet_rx_ring *ring)
{
	bcmgenet_intrl2_1_writel(ring->priv,
				 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index),
				 INTRL2_CPU_MASK_SET);
}

static inline void bcmgenet_rx_ring_int_enable(struct bcmgenet_rx_ring *ring)
{
	bcmgenet_intrl2_1_writel(ring->priv,
				 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index),
				 INTRL2_CPU_MASK_CLEAR);
}

static inline void bcmgenet_tx_ring16_int_disable(struct bcmgenet_tx_ring *ring)
{
	bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_TXDMA_DONE,
				 INTRL2_CPU_MASK_SET);
}

static inline void bcmgenet_tx_ring16_int_enable(struct bcmgenet_tx_ring *ring)
{
	bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_TXDMA_DONE,
				 INTRL2_CPU_MASK_CLEAR);
}

static inline void bcmgenet_tx_ring_int_enable(struct bcmgenet_tx_ring *ring)
{
	bcmgenet_intrl2_1_writel(ring->priv, 1 << ring->index,
				 INTRL2_CPU_MASK_CLEAR);
}

static inline void bcmgenet_tx_ring_int_disable(struct bcmgenet_tx_ring *ring)
{
	bcmgenet_intrl2_1_writel(ring->priv, 1 << ring->index,
				 INTRL2_CPU_MASK_SET);
}

/* Unlocked version of the reclaim routine */
static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
					  struct bcmgenet_tx_ring *ring)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct enet_cb *tx_cb_ptr;
	struct netdev_queue *txq;
	unsigned int pkts_compl = 0;
	unsigned int c_index;
	unsigned int txbds_ready;
	unsigned int txbds_processed = 0;

	/* Compute how many buffers have been transmitted since the last
	 * xmit call
	 */
	c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX);
	c_index &= DMA_C_INDEX_MASK;

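	/* The hardware consumer index is free-running and masked to
	 * DMA_C_INDEX_MASK; handle the case where it has wrapped past
	 * our cached copy.
	 */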
	if (likely(c_index >= ring->c_index))
		txbds_ready = c_index - ring->c_index;
	else
		txbds_ready = (DMA_C_INDEX_MASK + 1) - ring->c_index + c_index;

	netif_dbg(priv, tx_done, dev,
		  "%s ring=%d old_c_index=%u c_index=%u txbds_ready=%u\n",
		  __func__, ring->index, ring->c_index, c_index, txbds_ready);

	/* Reclaim transmitted buffers */
	while (txbds_processed < txbds_ready) {
		tx_cb_ptr = &priv->tx_cbs[ring->clean_ptr];
		if (tx_cb_ptr->skb) {
			pkts_compl++;
			dev->stats.tx_packets++;
			dev->stats.tx_bytes += tx_cb_ptr->skb->len;
			dma_unmap_single(&dev->dev,
					 dma_unmap_addr(tx_cb_ptr, dma_addr),
					 tx_cb_ptr->skb->len,
					 DMA_TO_DEVICE);
			bcmgenet_free_cb(tx_cb_ptr);
		} else if (dma_unmap_addr(tx_cb_ptr, dma_addr)) {
			dev->stats.tx_bytes +=
				dma_unmap_len(tx_cb_ptr, dma_len);
			dma_unmap_page(&dev->dev,
				       dma_unmap_addr(tx_cb_ptr, dma_addr),
				       dma_unmap_len(tx_cb_ptr, dma_len),
				       DMA_TO_DEVICE);
			dma_unmap_addr_set(tx_cb_ptr, dma_addr, 0);
		}

		txbds_processed++;
		if (likely(ring->clean_ptr < ring->end_ptr))
			ring->clean_ptr++;
		else
			ring->clean_ptr = ring->cb_ptr;
	}

	ring->free_bds += txbds_processed;
	ring->c_index = (ring->c_index + txbds_processed) & DMA_C_INDEX_MASK;

	if (ring->free_bds > (MAX_SKB_FRAGS + 1)) {
		txq = netdev_get_tx_queue(dev, ring->queue);
		if (netif_tx_queue_stopped(txq))
			netif_tx_wake_queue(txq);
	}

	return pkts_compl;
}

static unsigned int bcmgenet_tx_reclaim(struct net_device *dev,
				struct bcmgenet_tx_ring *ring)
{
	unsigned int released;
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	released = __bcmgenet_tx_reclaim(dev, ring);
	spin_unlock_irqrestore(&ring->lock, flags);

	return released;
}

static int bcmgenet_tx_poll(struct napi_struct *napi, int budget)
{
	struct bcmgenet_tx_ring *ring =
		container_of(napi, struct bcmgenet_tx_ring, napi);
	unsigned int work_done = 0;

	work_done = bcmgenet_tx_reclaim(ring->priv->dev, ring);

	if (work_done == 0) {
		napi_complete(napi);
		ring->int_enable(ring);

		return 0;
	}

	return budget;
}

static void bcmgenet_tx_reclaim_all(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	int i;

	if (netif_is_multiqueue(dev)) {
		for (i = 0; i < priv->hw_params->tx_queues; i++)
			bcmgenet_tx_reclaim(dev, &priv->tx_rings[i]);
	}

	bcmgenet_tx_reclaim(dev, &priv->tx_rings[DESC_INDEX]);
}

/* Transmits a single SKB (either the head of a fragment list or a
 * standalone SKB); the caller must hold priv->lock.
 */
static int bcmgenet_xmit_single(struct net_device *dev,
				struct sk_buff *skb,
				u16 dma_desc_flags,
				struct bcmgenet_tx_ring *ring)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct device *kdev = &priv->pdev->dev;
	struct enet_cb *tx_cb_ptr;
	unsigned int skb_len;
	dma_addr_t mapping;
	u32 length_status;
	int ret;

	tx_cb_ptr = bcmgenet_get_txcb(priv, ring);

	if (unlikely(!tx_cb_ptr))
		BUG();

	tx_cb_ptr->skb = skb;

	skb_len = skb_headlen(skb) < ETH_ZLEN ? ETH_ZLEN : skb_headlen(skb);

	mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE);
	ret = dma_mapping_error(kdev, mapping);
	if (ret) {
		priv->mib.tx_dma_failed++;
		netif_err(priv, tx_err, dev, "Tx DMA map failed\n");
		dev_kfree_skb(skb);
		return ret;
	}

	dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping);
	dma_unmap_len_set(tx_cb_ptr, dma_len, skb->len);
	length_status = (skb_len << DMA_BUFLENGTH_SHIFT) | dma_desc_flags |
			(priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT) |
			DMA_TX_APPEND_CRC;

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		length_status |= DMA_TX_DO_CSUM;

	dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping, length_status);

	return 0;
}

/* Transmit a SKB fragment */
static int bcmgenet_xmit_frag(struct net_device *dev,
			      skb_frag_t *frag,
			      u16 dma_desc_flags,
			      struct bcmgenet_tx_ring *ring)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct device *kdev = &priv->pdev->dev;
	struct enet_cb *tx_cb_ptr;
	dma_addr_t mapping;
	int ret;

	tx_cb_ptr = bcmgenet_get_txcb(priv, ring);

	if (unlikely(!tx_cb_ptr))
		BUG();
	tx_cb_ptr->skb = NULL;

	mapping = skb_frag_dma_map(kdev, frag, 0,
				   skb_frag_size(frag), DMA_TO_DEVICE);
	ret = dma_mapping_error(kdev, mapping);
	if (ret) {
		priv->mib.tx_dma_failed++;
		netif_err(priv, tx_err, dev, "%s: Tx DMA map failed\n",
			  __func__);
		return ret;
	}

	dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping);
	dma_unmap_len_set(tx_cb_ptr, dma_len, frag->size);

	dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping,
		    (frag->size << DMA_BUFLENGTH_SHIFT) | dma_desc_flags |
		    (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT));

	return 0;
}

/* Reallocate the SKB to put enough headroom in front of it and insert
 * the transmit checksum offsets in the descriptors
 */
static struct sk_buff *bcmgenet_put_tx_csum(struct net_device *dev,
					    struct sk_buff *skb)
{
	struct status_64 *status = NULL;
	struct sk_buff *new_skb;
	u16 offset;
	u8 ip_proto;
	u16 ip_ver;
	u32 tx_csum_info;

	if (unlikely(skb_headroom(skb) < sizeof(*status))) {
		/* If 64 byte status block enabled, must make sure skb has
		 * enough headroom for us to insert 64B status block.
		 */
		new_skb = skb_realloc_headroom(skb, sizeof(*status));
		dev_kfree_skb(skb);
		if (!new_skb) {
			dev->stats.tx_dropped++;
			return NULL;
		}
		skb = new_skb;
	}

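	/* Prepend the 64-byte transmit status block; the hardware parses
	 * it for the checksum start/offset information filled in below.
	 */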
	skb_push(skb, sizeof(*status));
	status = (struct status_64 *)skb->data;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		ip_ver = ntohs(skb->protocol);
		switch (ip_ver) {
		case ETH_P_IP:
			ip_proto = ip_hdr(skb)->protocol;
			break;
		case ETH_P_IPV6:
			ip_proto = ipv6_hdr(skb)->nexthdr;
			break;
		default:
			return skb;
		}

		offset = skb_checksum_start_offset(skb) - sizeof(*status);
		tx_csum_info = (offset << STATUS_TX_CSUM_START_SHIFT) |
				(offset + skb->csum_offset);

		/* Set the length valid bit for TCP and UDP and just set
		 * the special UDP flag for IPv4, else just set to 0.
		 */
		if (ip_proto == IPPROTO_TCP || ip_proto == IPPROTO_UDP) {
			tx_csum_info |= STATUS_TX_CSUM_LV;
			if (ip_proto == IPPROTO_UDP && ip_ver == ETH_P_IP)
				tx_csum_info |= STATUS_TX_CSUM_PROTO_UDP;
		} else {
			tx_csum_info = 0;
		}

		status->tx_csum_info = tx_csum_info;
	}

	return skb;
}

static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct bcmgenet_tx_ring *ring = NULL;
	struct netdev_queue *txq;
	unsigned long flags = 0;
	int nr_frags, index;
	u16 dma_desc_flags;
	int ret;
	int i;

	index = skb_get_queue_mapping(skb);
	/* Mapping strategy:
	 * queue_mapping = 0, unclassified, packet transmitted through ring 16
	 * queue_mapping = 1, goes to ring 0. (highest priority queue)
	 * queue_mapping = 2, goes to ring 1.
	 * queue_mapping = 3, goes to ring 2.
	 * queue_mapping = 4, goes to ring 3.
	 */
	if (index == 0)
		index = DESC_INDEX;
	else
		index -= 1;

	nr_frags = skb_shinfo(skb)->nr_frags;
	ring = &priv->tx_rings[index];
	txq = netdev_get_tx_queue(dev, ring->queue);

	spin_lock_irqsave(&ring->lock, flags);
	if (ring->free_bds <= nr_frags + 1) {
		netif_tx_stop_queue(txq);
		netdev_err(dev, "%s: tx ring %d full when queue %d awake\n",
			   __func__, index, ring->queue);
		ret = NETDEV_TX_BUSY;
		goto out;
	}

	if (skb_padto(skb, ETH_ZLEN)) {
		ret = NETDEV_TX_OK;
		goto out;
	}

	/* set the SKB transmit checksum */
	if (priv->desc_64b_en) {
		skb = bcmgenet_put_tx_csum(dev, skb);
		if (!skb) {
			ret = NETDEV_TX_OK;
			goto out;
		}
	}

	dma_desc_flags = DMA_SOP;
	if (nr_frags == 0)
		dma_desc_flags |= DMA_EOP;

	/* Transmit single SKB or head of fragment list */
	ret = bcmgenet_xmit_single(dev, skb, dma_desc_flags, ring);
	if (ret) {
		ret = NETDEV_TX_OK;
		goto out;
	}

	/* xmit fragment */
	for (i = 0; i < nr_frags; i++) {
		ret = bcmgenet_xmit_frag(dev,
					 &skb_shinfo(skb)->frags[i],
					 (i == nr_frags - 1) ? DMA_EOP : 0,
					 ring);
		if (ret) {
			ret = NETDEV_TX_OK;
			goto out;
		}
	}

	skb_tx_timestamp(skb);

	/* Decrement total BD count and advance our write pointer */
	ring->free_bds -= nr_frags + 1;
	ring->prod_index += nr_frags + 1;
	ring->prod_index &= DMA_P_INDEX_MASK;

	if (ring->free_bds <= (MAX_SKB_FRAGS + 1))
		netif_tx_stop_queue(txq);

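	/* Defer the doorbell write while the stack indicates more packets
	 * are on the way (xmit_more), unless the queue was just stopped.
	 */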
	if (!skb->xmit_more || netif_xmit_stopped(txq))
		/* Packets are ready, update producer index */
		bcmgenet_tdma_ring_writel(priv, ring->index,
					  ring->prod_index, TDMA_PROD_INDEX);
out:
	spin_unlock_irqrestore(&ring->lock, flags);

	return ret;
}

static struct sk_buff *bcmgenet_rx_refill(struct bcmgenet_priv *priv,
					  struct enet_cb *cb)
{
	struct device *kdev = &priv->pdev->dev;
	struct sk_buff *skb;
	struct sk_buff *rx_skb;
	dma_addr_t mapping;

	/* Allocate a new Rx skb */
	skb = netdev_alloc_skb(priv->dev, priv->rx_buf_len + SKB_ALIGNMENT);
	if (!skb) {
		priv->mib.alloc_rx_buff_failed++;
		netif_err(priv, rx_err, priv->dev,
			  "%s: Rx skb allocation failed\n", __func__);
		return NULL;
	}

	/* DMA-map the new Rx skb */
	mapping = dma_map_single(kdev, skb->data, priv->rx_buf_len,
				 DMA_FROM_DEVICE);
	if (dma_mapping_error(kdev, mapping)) {
		priv->mib.rx_dma_failed++;
		dev_kfree_skb_any(skb);
		netif_err(priv, rx_err, priv->dev,
			  "%s: Rx skb DMA mapping failed\n", __func__);
		return NULL;
	}

	/* Grab the current Rx skb from the ring and DMA-unmap it */
	rx_skb = cb->skb;
	if (likely(rx_skb))
		dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
				 priv->rx_buf_len, DMA_FROM_DEVICE);

	/* Put the new Rx skb on the ring */
	cb->skb = skb;
	dma_unmap_addr_set(cb, dma_addr, mapping);
	dmadesc_set_addr(priv, cb->bd_addr, mapping);

	/* Return the current Rx skb to caller */
	return rx_skb;
}

/* bcmgenet_desc_rx - descriptor-based rx processing.
 * This can be called from the bottom half or from the NAPI polling method.
 */
static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
				     unsigned int budget)
{
	struct bcmgenet_priv *priv = ring->priv;
	struct net_device *dev = priv->dev;
	struct enet_cb *cb;
	struct sk_buff *skb;
	u32 dma_length_status;
	unsigned long dma_flag;
	int len;
	unsigned int rxpktprocessed = 0, rxpkttoprocess;
	unsigned int p_index;
	unsigned int discards;
	unsigned int chksum_ok = 0;

	p_index = bcmgenet_rdma_ring_readl(priv, ring->index, RDMA_PROD_INDEX);

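	/* The producer index register also packs a running discard count
	 * in its upper bits; fold the delta since the last poll into the
	 * netdev error counters.
	 */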
	discards = (p_index >> DMA_P_INDEX_DISCARD_CNT_SHIFT) &
		   DMA_P_INDEX_DISCARD_CNT_MASK;
	if (discards > ring->old_discards) {
		discards = discards - ring->old_discards;
		dev->stats.rx_missed_errors += discards;
		dev->stats.rx_errors += discards;
		ring->old_discards += discards;

		/* Clear HW register when we reach 75% of maximum 0xFFFF */
		if (ring->old_discards >= 0xC000) {
			ring->old_discards = 0;
			bcmgenet_rdma_ring_writel(priv, ring->index, 0,
						  RDMA_PROD_INDEX);
		}
	}

	p_index &= DMA_P_INDEX_MASK;

	if (likely(p_index >= ring->c_index))
		rxpkttoprocess = p_index - ring->c_index;
	else
		rxpkttoprocess = (DMA_C_INDEX_MASK + 1) - ring->c_index +
				 p_index;

	netif_dbg(priv, rx_status, dev,
		  "RDMA: rxpkttoprocess=%d\n", rxpkttoprocess);

	while ((rxpktprocessed < rxpkttoprocess) &&
	       (rxpktprocessed < budget)) {
		cb = &priv->rx_cbs[ring->read_ptr];
		skb = bcmgenet_rx_refill(priv, cb);

		if (unlikely(!skb)) {
			dev->stats.rx_dropped++;
			goto next;
		}

		if (!priv->desc_64b_en) {
			dma_length_status =
				dmadesc_get_length_status(priv, cb->bd_addr);
		} else {
			struct status_64 *status;

			status = (struct status_64 *)skb->data;
			dma_length_status = status->length_status;
		}

		/* DMA flags and length are still valid no matter how
		 * we got the Receive Status Vector (64B RSB or register)
		 */
		dma_flag = dma_length_status & 0xffff;
		len = dma_length_status >> DMA_BUFLENGTH_SHIFT;

		netif_dbg(priv, rx_status, dev,
			  "%s:p_ind=%d c_ind=%d read_ptr=%d len_stat=0x%08x\n",
			  __func__, p_index, ring->c_index,
			  ring->read_ptr, dma_length_status);

		if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) {
			netif_err(priv, rx_status, dev,
				  "dropping fragmented packet!\n");
			dev->stats.rx_errors++;
			dev_kfree_skb_any(skb);
			goto next;
		}

		/* report errors */
		if (unlikely(dma_flag & (DMA_RX_CRC_ERROR |
						DMA_RX_OV |
						DMA_RX_NO |
						DMA_RX_LG |
						DMA_RX_RXER))) {
			netif_err(priv, rx_status, dev, "dma_flag=0x%x\n",
				  (unsigned int)dma_flag);
			if (dma_flag & DMA_RX_CRC_ERROR)
				dev->stats.rx_crc_errors++;
			if (dma_flag & DMA_RX_OV)
				dev->stats.rx_over_errors++;
			if (dma_flag & DMA_RX_NO)
				dev->stats.rx_frame_errors++;
			if (dma_flag & DMA_RX_LG)
				dev->stats.rx_length_errors++;
			dev->stats.rx_errors++;
			dev_kfree_skb_any(skb);
			goto next;
		} /* error packet */

		chksum_ok = (dma_flag & priv->dma_rx_chk_bit) &&
			     priv->desc_rxchk_en;

		skb_put(skb, len);
		if (priv->desc_64b_en) {
			skb_pull(skb, 64);
			len -= 64;
		}

		if (likely(chksum_ok))
			skb->ip_summed = CHECKSUM_UNNECESSARY;

1531                 /* remove the 2 bytes the hardware added for IP alignment */
1532                 skb_pull(skb, 2);
1533                 len -= 2;
1534
1535                 if (priv->crc_fwd_en) {
1536                         skb_trim(skb, len - ETH_FCS_LEN);
1537                         len -= ETH_FCS_LEN;
1538                 }
1539
1540                 /* Finish setting up the received SKB and send it to the kernel */
1541                 skb->protocol = eth_type_trans(skb, priv->dev);
1542                 dev->stats.rx_packets++;
1543                 dev->stats.rx_bytes += len;
1544                 if (dma_flag & DMA_RX_MULT)
1545                         dev->stats.multicast++;
1546
1547                 /* Notify kernel */
1548                 napi_gro_receive(&ring->napi, skb);
1549                 netif_dbg(priv, rx_status, dev, "pushed up to kernel\n");
1550
1551 next:
1552                 rxpktprocessed++;
1553                 if (likely(ring->read_ptr < ring->end_ptr))
1554                         ring->read_ptr++;
1555                 else
1556                         ring->read_ptr = ring->cb_ptr;
1557
1558                 ring->c_index = (ring->c_index + 1) & DMA_C_INDEX_MASK;
1559                 bcmgenet_rdma_ring_writel(priv, ring->index, ring->c_index, RDMA_CONS_INDEX);
1560         }
1561
1562         return rxpktprocessed;
1563 }
1564
1565 /* Rx NAPI polling method */
1566 static int bcmgenet_rx_poll(struct napi_struct *napi, int budget)
1567 {
1568         struct bcmgenet_rx_ring *ring = container_of(napi,
1569                         struct bcmgenet_rx_ring, napi);
1570         unsigned int work_done;
1571
1572         work_done = bcmgenet_desc_rx(ring, budget);
1573
1574         if (work_done < budget) {
1575                 napi_complete(napi);
1576                 ring->int_enable(ring);
1577         }
1578
1579         return work_done;
1580 }
1581
1582 /* Assign skb to RX DMA descriptor. */
1583 static int bcmgenet_alloc_rx_buffers(struct bcmgenet_priv *priv,
1584                                      struct bcmgenet_rx_ring *ring)
1585 {
1586         struct enet_cb *cb;
1587         struct sk_buff *skb;
1588         int i;
1589
1590         netif_dbg(priv, hw, priv->dev, "%s\n", __func__);
1591
1592         /* loop here for each buffer needing assignment */
1593         for (i = 0; i < ring->size; i++) {
1594                 cb = ring->cbs + i;
1595                 skb = bcmgenet_rx_refill(priv, cb);
1596                 if (skb)
1597                         dev_kfree_skb_any(skb);
1598                 if (!cb->skb)
1599                         return -ENOMEM;
1600         }
1601
1602         return 0;
1603 }
1604
1605 static void bcmgenet_free_rx_buffers(struct bcmgenet_priv *priv)
1606 {
1607         struct enet_cb *cb;
1608         int i;
1609
1610         for (i = 0; i < priv->num_rx_bds; i++) {
1611                 cb = &priv->rx_cbs[i];
1612
1613                 if (dma_unmap_addr(cb, dma_addr)) {
1614                         dma_unmap_single(&priv->dev->dev,
1615                                          dma_unmap_addr(cb, dma_addr),
1616                                          priv->rx_buf_len, DMA_FROM_DEVICE);
1617                         dma_unmap_addr_set(cb, dma_addr, 0);
1618                 }
1619
1620                 if (cb->skb)
1621                         bcmgenet_free_cb(cb);
1622         }
1623 }
1624
1625 static void umac_enable_set(struct bcmgenet_priv *priv, u32 mask, bool enable)
1626 {
1627         u32 reg;
1628
1629         reg = bcmgenet_umac_readl(priv, UMAC_CMD);
1630         if (enable)
1631                 reg |= mask;
1632         else
1633                 reg &= ~mask;
1634         bcmgenet_umac_writel(priv, reg, UMAC_CMD);
1635
1636         /* UniMAC stops on a packet boundary, wait for a full-size packet
1637          * to be processed
1638          */
1639         if (!enable)
1640                 usleep_range(1000, 2000);
1641 }
1642
1643 static int reset_umac(struct bcmgenet_priv *priv)
1644 {
1645         struct device *kdev = &priv->pdev->dev;
1646         unsigned int timeout = 0;
1647         u32 reg;
1648
1649         /* 7358a0/7552a0: bad default in RBUF_FLUSH_CTRL.umac_sw_rst */
1650         bcmgenet_rbuf_ctrl_set(priv, 0);
1651         udelay(10);
1652
1653         /* disable MAC while updating its registers */
1654         bcmgenet_umac_writel(priv, 0, UMAC_CMD);
1655
1656         /* issue soft reset, wait for it to complete */
1657         bcmgenet_umac_writel(priv, CMD_SW_RESET, UMAC_CMD);
1658         while (timeout++ < 1000) {
1659                 reg = bcmgenet_umac_readl(priv, UMAC_CMD);
1660                 if (!(reg & CMD_SW_RESET))
1661                         return 0;
1662
1663                 udelay(1);
1664         }
1665
1666         if (timeout >= 1000) {
1667                 dev_err(kdev,
1668                         "timeout waiting for MAC to come out of reset\n");
1669                 return -ETIMEDOUT;
1670         }
1671
1672         return 0;
1673 }
1674
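     /* The INTRL2 controllers use a set/clear register pair: writing a bit
      * to INTRL2_CPU_MASK_SET masks (disables) that interrupt source,
      * writing it to INTRL2_CPU_MASK_CLEAR unmasks it, and writing it to
      * INTRL2_CPU_CLEAR acknowledges a pending status bit.
      */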
1675 static void bcmgenet_intr_disable(struct bcmgenet_priv *priv)
1676 {
1677         /* Mask all interrupts. */
1678         bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET);
1679         bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR);
1680         bcmgenet_intrl2_0_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
1681         bcmgenet_intrl2_1_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET);
1682         bcmgenet_intrl2_1_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR);
1683         bcmgenet_intrl2_1_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
1684 }
1685
1686 static void bcmgenet_link_intr_enable(struct bcmgenet_priv *priv)
1687 {
1688         u32 int0_enable = 0;
1689
1690         /* Monitor cable plug/unplugged event for internal PHY, external PHY
1691          * and MoCA PHY
1692          */
1693         if (priv->internal_phy) {
1694                 int0_enable |= UMAC_IRQ_LINK_EVENT;
1695         } else if (priv->ext_phy) {
1696                 int0_enable |= UMAC_IRQ_LINK_EVENT;
1697         } else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
1698                 if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET)
1699                         int0_enable |= UMAC_IRQ_LINK_EVENT;
1700         }
1701         bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
1702 }
1703
1704 static int init_umac(struct bcmgenet_priv *priv)
1705 {
1706         struct device *kdev = &priv->pdev->dev;
1707         int ret;
1708         u32 reg;
1709         u32 int0_enable = 0;
1710         u32 int1_enable = 0;
1711         int i;
1712
1713         dev_dbg(&priv->pdev->dev, "bcmgenet: init_umac\n");
1714
1715         ret = reset_umac(priv);
1716         if (ret)
1717                 return ret;
1718
1719         bcmgenet_umac_writel(priv, 0, UMAC_CMD);
1720         /* clear tx/rx counter */
1721         bcmgenet_umac_writel(priv,
1722                              MIB_RESET_RX | MIB_RESET_TX | MIB_RESET_RUNT,
1723                              UMAC_MIB_CTRL);
1724         bcmgenet_umac_writel(priv, 0, UMAC_MIB_CTRL);
1725
1726         bcmgenet_umac_writel(priv, ENET_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);
1727
1728         /* init rx registers, enable ip header optimization */
1729         reg = bcmgenet_rbuf_readl(priv, RBUF_CTRL);
1730         reg |= RBUF_ALIGN_2B;
1731         bcmgenet_rbuf_writel(priv, reg, RBUF_CTRL);
1732
1733         if (!GENET_IS_V1(priv) && !GENET_IS_V2(priv))
1734                 bcmgenet_rbuf_writel(priv, 1, RBUF_TBUF_SIZE_CTRL);
1735
1736         bcmgenet_intr_disable(priv);
1737
1738         /* Enable Rx default queue 16 interrupts */
1739         int0_enable |= UMAC_IRQ_RXDMA_DONE;
1740
1741         /* Enable Tx default queue 16 interrupts */
1742         int0_enable |= UMAC_IRQ_TXDMA_DONE;
1743
1744         /* Configure backpressure vectors for MoCA */
1745         if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
1746                 reg = bcmgenet_bp_mc_get(priv);
1747                 reg |= BIT(priv->hw_params->bp_in_en_shift);
1748
1749                 /* bp_mask: back pressure mask */
1750                 if (netif_is_multiqueue(priv->dev))
1751                         reg |= priv->hw_params->bp_in_mask;
1752                 else
1753                         reg &= ~priv->hw_params->bp_in_mask;
1754                 bcmgenet_bp_mc_set(priv, reg);
1755         }
1756
1757         /* Enable MDIO interrupts on GENET v3+ */
1758         if (priv->hw_params->flags & GENET_HAS_MDIO_INTR)
1759                 int0_enable |= (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR);
1760
1761         /* Enable Rx priority queue interrupts */
1762         for (i = 0; i < priv->hw_params->rx_queues; ++i)
1763                 int1_enable |= (1 << (UMAC_IRQ1_RX_INTR_SHIFT + i));
1764
1765         /* Enable Tx priority queue interrupts */
1766         for (i = 0; i < priv->hw_params->tx_queues; ++i)
1767                 int1_enable |= (1 << i);
1768
1769         bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
1770         bcmgenet_intrl2_1_writel(priv, int1_enable, INTRL2_CPU_MASK_CLEAR);
1771
1772         /* MAC init done; the Rx/Tx engines are enabled later via umac_enable_set() */
1773         dev_dbg(kdev, "done init umac\n");
1774
1775         return 0;
1776 }
1777
1778 /* Initialize a Tx ring along with corresponding hardware registers */
1779 static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv,
1780                                   unsigned int index, unsigned int size,
1781                                   unsigned int start_ptr, unsigned int end_ptr)
1782 {
1783         struct bcmgenet_tx_ring *ring = &priv->tx_rings[index];
1784         u32 words_per_bd = WORDS_PER_BD(priv);
1785         u32 flow_period_val = 0;
1786
1787         spin_lock_init(&ring->lock);
1788         ring->priv = priv;
1789         ring->index = index;
1790         if (index == DESC_INDEX) {
1791                 ring->queue = 0;
1792                 ring->int_enable = bcmgenet_tx_ring16_int_enable;
1793                 ring->int_disable = bcmgenet_tx_ring16_int_disable;
1794         } else {
1795                 ring->queue = index + 1;
1796                 ring->int_enable = bcmgenet_tx_ring_int_enable;
1797                 ring->int_disable = bcmgenet_tx_ring_int_disable;
1798         }
1799         ring->cbs = priv->tx_cbs + start_ptr;
1800         ring->size = size;
1801         ring->clean_ptr = start_ptr;
1802         ring->c_index = 0;
1803         ring->free_bds = size;
1804         ring->write_ptr = start_ptr;
1805         ring->cb_ptr = start_ptr;
1806         ring->end_ptr = end_ptr - 1;
1807         ring->prod_index = 0;
1808
1809         /* Set flow period for ring != 16 */
1810         if (index != DESC_INDEX)
1811                 flow_period_val = ENET_MAX_MTU_SIZE << 16;
1812
1813         bcmgenet_tdma_ring_writel(priv, index, 0, TDMA_PROD_INDEX);
1814         bcmgenet_tdma_ring_writel(priv, index, 0, TDMA_CONS_INDEX);
1815         bcmgenet_tdma_ring_writel(priv, index, 1, DMA_MBUF_DONE_THRESH);
1816         /* Disable rate control for now */
1817         bcmgenet_tdma_ring_writel(priv, index, flow_period_val,
1818                                   TDMA_FLOW_PERIOD);
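             /* DMA_RING_BUF_SIZE packs the descriptor count into the upper
              * half of the register and the per-buffer length into the
              * lower half.
              */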
1819         bcmgenet_tdma_ring_writel(priv, index,
1820                                   ((size << DMA_RING_SIZE_SHIFT) |
1821                                    RX_BUF_LENGTH), DMA_RING_BUF_SIZE);
1822
1823         /* Set start and end address, read and write pointers */
1824         bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd,
1825                                   DMA_START_ADDR);
1826         bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd,
1827                                   TDMA_READ_PTR);
1828         bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd,
1829                                   TDMA_WRITE_PTR);
1830         bcmgenet_tdma_ring_writel(priv, index, end_ptr * words_per_bd - 1,
1831                                   DMA_END_ADDR);
1832 }
1833
1834 /* Initialize a RDMA ring */
1835 static int bcmgenet_init_rx_ring(struct bcmgenet_priv *priv,
1836                                  unsigned int index, unsigned int size,
1837                                  unsigned int start_ptr, unsigned int end_ptr)
1838 {
1839         struct bcmgenet_rx_ring *ring = &priv->rx_rings[index];
1840         u32 words_per_bd = WORDS_PER_BD(priv);
1841         int ret;
1842
1843         ring->priv = priv;
1844         ring->index = index;
1845         if (index == DESC_INDEX) {
1846                 ring->int_enable = bcmgenet_rx_ring16_int_enable;
1847                 ring->int_disable = bcmgenet_rx_ring16_int_disable;
1848         } else {
1849                 ring->int_enable = bcmgenet_rx_ring_int_enable;
1850                 ring->int_disable = bcmgenet_rx_ring_int_disable;
1851         }
1852         ring->cbs = priv->rx_cbs + start_ptr;
1853         ring->size = size;
1854         ring->c_index = 0;
1855         ring->read_ptr = start_ptr;
1856         ring->cb_ptr = start_ptr;
1857         ring->end_ptr = end_ptr - 1;
1858
1859         ret = bcmgenet_alloc_rx_buffers(priv, ring);
1860         if (ret)
1861                 return ret;
1862
1863         bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_PROD_INDEX);
1864         bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_CONS_INDEX);
1865         bcmgenet_rdma_ring_writel(priv, index, 1, DMA_MBUF_DONE_THRESH);
1866         bcmgenet_rdma_ring_writel(priv, index,
1867                                   ((size << DMA_RING_SIZE_SHIFT) |
1868                                    RX_BUF_LENGTH), DMA_RING_BUF_SIZE);
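             /* Program the XON/XOFF flow control thresholds for this ring;
              * presumably pause is asserted and released as the ring
              * occupancy crosses these two watermarks.
              */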
1869         bcmgenet_rdma_ring_writel(priv, index,
1870                                   (DMA_FC_THRESH_LO <<
1871                                    DMA_XOFF_THRESHOLD_SHIFT) |
1872                                    DMA_FC_THRESH_HI, RDMA_XON_XOFF_THRESH);
1873
1874         /* Set start and end address, read and write pointers */
1875         bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd,
1876                                   DMA_START_ADDR);
1877         bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd,
1878                                   RDMA_READ_PTR);
1879         bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd,
1880                                   RDMA_WRITE_PTR);
1881         bcmgenet_rdma_ring_writel(priv, index, end_ptr * words_per_bd - 1,
1882                                   DMA_END_ADDR);
1883
1884         return ret;
1885 }
1886
1887 static void bcmgenet_init_tx_napi(struct bcmgenet_priv *priv)
1888 {
1889         unsigned int i;
1890         struct bcmgenet_tx_ring *ring;
1891
1892         for (i = 0; i < priv->hw_params->tx_queues; ++i) {
1893                 ring = &priv->tx_rings[i];
1894                 netif_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll, 64);
1895         }
1896
1897         ring = &priv->tx_rings[DESC_INDEX];
1898         netif_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll, 64);
1899 }
1900
1901 static void bcmgenet_enable_tx_napi(struct bcmgenet_priv *priv)
1902 {
1903         unsigned int i;
1904         struct bcmgenet_tx_ring *ring;
1905
1906         for (i = 0; i < priv->hw_params->tx_queues; ++i) {
1907                 ring = &priv->tx_rings[i];
1908                 napi_enable(&ring->napi);
1909         }
1910
1911         ring = &priv->tx_rings[DESC_INDEX];
1912         napi_enable(&ring->napi);
1913 }
1914
1915 static void bcmgenet_disable_tx_napi(struct bcmgenet_priv *priv)
1916 {
1917         unsigned int i;
1918         struct bcmgenet_tx_ring *ring;
1919
1920         for (i = 0; i < priv->hw_params->tx_queues; ++i) {
1921                 ring = &priv->tx_rings[i];
1922                 napi_disable(&ring->napi);
1923         }
1924
1925         ring = &priv->tx_rings[DESC_INDEX];
1926         napi_disable(&ring->napi);
1927 }
1928
1929 static void bcmgenet_fini_tx_napi(struct bcmgenet_priv *priv)
1930 {
1931         unsigned int i;
1932         struct bcmgenet_tx_ring *ring;
1933
1934         for (i = 0; i < priv->hw_params->tx_queues; ++i) {
1935                 ring = &priv->tx_rings[i];
1936                 netif_napi_del(&ring->napi);
1937         }
1938
1939         ring = &priv->tx_rings[DESC_INDEX];
1940         netif_napi_del(&ring->napi);
1941 }
1942
1943 /* Initialize Tx queues
1944  *
1945  * Queues 0-3 are priority-based, each one has 32 descriptors,
1946  * with queue 0 being the highest priority queue.
1947  *
1948  * Queue 16 is the default Tx queue with
1949  * GENET_Q16_TX_BD_CNT = 256 - 4 * 32 = 128 descriptors.
1950  *
1951  * The transmit control block pool is then partitioned as follows:
1952  * - Tx queue 0 uses tx_cbs[0..31]
1953  * - Tx queue 1 uses tx_cbs[32..63]
1954  * - Tx queue 2 uses tx_cbs[64..95]
1955  * - Tx queue 3 uses tx_cbs[96..127]
1956  * - Tx queue 16 uses tx_cbs[128..255]
1957  */
1958 static void bcmgenet_init_tx_queues(struct net_device *dev)
1959 {
1960         struct bcmgenet_priv *priv = netdev_priv(dev);
1961         u32 i, dma_enable;
1962         u32 dma_ctrl, ring_cfg;
1963         u32 dma_priority[3] = {0, 0, 0};
1964
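             /* Pause Tx DMA while the rings are (re)configured, remembering
              * whether it was enabled so it can be restored at the end of
              * this function.
              */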
1965         dma_ctrl = bcmgenet_tdma_readl(priv, DMA_CTRL);
1966         dma_enable = dma_ctrl & DMA_EN;
1967         dma_ctrl &= ~DMA_EN;
1968         bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL);
1969
1970         dma_ctrl = 0;
1971         ring_cfg = 0;
1972
1973         /* Enable strict priority arbiter mode */
1974         bcmgenet_tdma_writel(priv, DMA_ARBITER_SP, DMA_ARB_CTRL);
1975
1976         /* Initialize Tx priority queues */
1977         for (i = 0; i < priv->hw_params->tx_queues; i++) {
1978                 bcmgenet_init_tx_ring(priv, i, priv->hw_params->tx_bds_per_q,
1979                                       i * priv->hw_params->tx_bds_per_q,
1980                                       (i + 1) * priv->hw_params->tx_bds_per_q);
1981                 ring_cfg |= (1 << i);
1982                 dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
1983                 dma_priority[DMA_PRIO_REG_INDEX(i)] |=
1984                         ((GENET_Q0_PRIORITY + i) << DMA_PRIO_REG_SHIFT(i));
1985         }
1986
1987         /* Initialize Tx default queue 16 */
1988         bcmgenet_init_tx_ring(priv, DESC_INDEX, GENET_Q16_TX_BD_CNT,
1989                               priv->hw_params->tx_queues *
1990                               priv->hw_params->tx_bds_per_q,
1991                               TOTAL_DESC);
1992         ring_cfg |= (1 << DESC_INDEX);
1993         dma_ctrl |= (1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT));
1994         dma_priority[DMA_PRIO_REG_INDEX(DESC_INDEX)] |=
1995                 ((GENET_Q0_PRIORITY + priv->hw_params->tx_queues) <<
1996                  DMA_PRIO_REG_SHIFT(DESC_INDEX));
1997
1998         /* Set Tx queue priorities */
1999         bcmgenet_tdma_writel(priv, dma_priority[0], DMA_PRIORITY_0);
2000         bcmgenet_tdma_writel(priv, dma_priority[1], DMA_PRIORITY_1);
2001         bcmgenet_tdma_writel(priv, dma_priority[2], DMA_PRIORITY_2);
2002
2003         /* Initialize Tx NAPI */
2004         bcmgenet_init_tx_napi(priv);
2005
2006         /* Enable Tx queues */
2007         bcmgenet_tdma_writel(priv, ring_cfg, DMA_RING_CFG);
2008
2009         /* Enable Tx DMA */
2010         if (dma_enable)
2011                 dma_ctrl |= DMA_EN;
2012         bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL);
2013 }
2014
2015 static void bcmgenet_init_rx_napi(struct bcmgenet_priv *priv)
2016 {
2017         unsigned int i;
2018         struct bcmgenet_rx_ring *ring;
2019
2020         for (i = 0; i < priv->hw_params->rx_queues; ++i) {
2021                 ring = &priv->rx_rings[i];
2022                 netif_napi_add(priv->dev, &ring->napi, bcmgenet_rx_poll, 64);
2023         }
2024
2025         ring = &priv->rx_rings[DESC_INDEX];
2026         netif_napi_add(priv->dev, &ring->napi, bcmgenet_rx_poll, 64);
2027 }
2028
2029 static void bcmgenet_enable_rx_napi(struct bcmgenet_priv *priv)
2030 {
2031         unsigned int i;
2032         struct bcmgenet_rx_ring *ring;
2033
2034         for (i = 0; i < priv->hw_params->rx_queues; ++i) {
2035                 ring = &priv->rx_rings[i];
2036                 napi_enable(&ring->napi);
2037         }
2038
2039         ring = &priv->rx_rings[DESC_INDEX];
2040         napi_enable(&ring->napi);
2041 }
2042
2043 static void bcmgenet_disable_rx_napi(struct bcmgenet_priv *priv)
2044 {
2045         unsigned int i;
2046         struct bcmgenet_rx_ring *ring;
2047
2048         for (i = 0; i < priv->hw_params->rx_queues; ++i) {
2049                 ring = &priv->rx_rings[i];
2050                 napi_disable(&ring->napi);
2051         }
2052
2053         ring = &priv->rx_rings[DESC_INDEX];
2054         napi_disable(&ring->napi);
2055 }
2056
2057 static void bcmgenet_fini_rx_napi(struct bcmgenet_priv *priv)
2058 {
2059         unsigned int i;
2060         struct bcmgenet_rx_ring *ring;
2061
2062         for (i = 0; i < priv->hw_params->rx_queues; ++i) {
2063                 ring = &priv->rx_rings[i];
2064                 netif_napi_del(&ring->napi);
2065         }
2066
2067         ring = &priv->rx_rings[DESC_INDEX];
2068         netif_napi_del(&ring->napi);
2069 }
2070
2071 /* Initialize Rx queues
2072  *
2073  * Queues 0-15 are priority queues. Hardware Filtering Block (HFB) can be
2074  * used to direct traffic to these queues.
2075  *
2076  * Queue 16 is the default Rx queue with GENET_Q16_RX_BD_CNT descriptors.
2077  */
2078 static int bcmgenet_init_rx_queues(struct net_device *dev)
2079 {
2080         struct bcmgenet_priv *priv = netdev_priv(dev);
2081         u32 i;
2082         u32 dma_enable;
2083         u32 dma_ctrl;
2084         u32 ring_cfg;
2085         int ret;
2086
2087         dma_ctrl = bcmgenet_rdma_readl(priv, DMA_CTRL);
2088         dma_enable = dma_ctrl & DMA_EN;
2089         dma_ctrl &= ~DMA_EN;
2090         bcmgenet_rdma_writel(priv, dma_ctrl, DMA_CTRL);
2091
2092         dma_ctrl = 0;
2093         ring_cfg = 0;
2094
2095         /* Initialize Rx priority queues */
2096         for (i = 0; i < priv->hw_params->rx_queues; i++) {
2097                 ret = bcmgenet_init_rx_ring(priv, i,
2098                                             priv->hw_params->rx_bds_per_q,
2099                                             i * priv->hw_params->rx_bds_per_q,
2100                                             (i + 1) *
2101                                             priv->hw_params->rx_bds_per_q);
2102                 if (ret)
2103                         return ret;
2104
2105                 ring_cfg |= (1 << i);
2106                 dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
2107         }
2108
2109         /* Initialize Rx default queue 16 */
2110         ret = bcmgenet_init_rx_ring(priv, DESC_INDEX, GENET_Q16_RX_BD_CNT,
2111                                     priv->hw_params->rx_queues *
2112                                     priv->hw_params->rx_bds_per_q,
2113                                     TOTAL_DESC);
2114         if (ret)
2115                 return ret;
2116
2117         ring_cfg |= (1 << DESC_INDEX);
2118         dma_ctrl |= (1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT));
2119
2120         /* Initialize Rx NAPI */
2121         bcmgenet_init_rx_napi(priv);
2122
2123         /* Enable rings */
2124         bcmgenet_rdma_writel(priv, ring_cfg, DMA_RING_CFG);
2125
2126         /* Enable the ring buffers and restore DMA if it was previously enabled */
2127         if (dma_enable)
2128                 dma_ctrl |= DMA_EN;
2129         bcmgenet_rdma_writel(priv, dma_ctrl, DMA_CTRL);
2130
2131         return 0;
2132 }
2133
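     /* Stop both DMA engines: clear the master enable bits, poll DMA_STATUS
      * until the hardware reports the disabled state, then clear the
      * per-ring buffer enable bits as well.
      */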
2134 static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
2135 {
2136         int ret = 0;
2137         int timeout = 0;
2138         u32 reg;
2139         u32 dma_ctrl;
2140         int i;
2141
2142         /* Disable TDMA to stop more frames from being added to the Tx DMA */
2143         reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
2144         reg &= ~DMA_EN;
2145         bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
2146
2147         /* Check TDMA status register to confirm TDMA is disabled */
2148         while (timeout++ < DMA_TIMEOUT_VAL) {
2149                 reg = bcmgenet_tdma_readl(priv, DMA_STATUS);
2150                 if (reg & DMA_DISABLED)
2151                         break;
2152
2153                 udelay(1);
2154         }
2155
2156         if (timeout > DMA_TIMEOUT_VAL) {
2157                 netdev_warn(priv->dev, "Timed out while disabling TX DMA\n");
2158                 ret = -ETIMEDOUT;
2159         }
2160
2161         /* Wait 10ms for packets to drain from both the Tx and Rx DMA */
2162         usleep_range(10000, 20000);
2163
2164         /* Disable RDMA */
2165         reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
2166         reg &= ~DMA_EN;
2167         bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
2168
2169         timeout = 0;
2170         /* Check RDMA status register to confirm RDMA is disabled */
2171         while (timeout++ < DMA_TIMEOUT_VAL) {
2172                 reg = bcmgenet_rdma_readl(priv, DMA_STATUS);
2173                 if (reg & DMA_DISABLED)
2174                         break;
2175
2176                 udelay(1);
2177         }
2178
2179         if (timeout > DMA_TIMEOUT_VAL) {
2180                 netdev_warn(priv->dev, "Timed out while disabling RX DMA\n");
2181                 ret = -ETIMEDOUT;
2182         }
2183
2184         dma_ctrl = 0;
2185         for (i = 0; i < priv->hw_params->rx_queues; i++)
2186                 dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
2187         reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
2188         reg &= ~dma_ctrl;
2189         bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
2190
2191         dma_ctrl = 0;
2192         for (i = 0; i < priv->hw_params->tx_queues; i++)
2193                 dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
2194         reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
2195         reg &= ~dma_ctrl;
2196         bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
2197
2198         return ret;
2199 }
2200
2201 static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
2202 {
2203         int i;
2204
2205         bcmgenet_fini_rx_napi(priv);
2206         bcmgenet_fini_tx_napi(priv);
2207
2208         /* disable DMA */
2209         bcmgenet_dma_teardown(priv);
2210
2211         for (i = 0; i < priv->num_tx_bds; i++) {
2212                 if (priv->tx_cbs[i].skb != NULL) {
2213                         dev_kfree_skb(priv->tx_cbs[i].skb);
2214                         priv->tx_cbs[i].skb = NULL;
2215                 }
2216         }
2217
2218         bcmgenet_free_rx_buffers(priv);
2219         kfree(priv->rx_cbs);
2220         kfree(priv->tx_cbs);
2221 }
2222
2223 /* init_edma: Initialize DMA control register */
2224 static int bcmgenet_init_dma(struct bcmgenet_priv *priv)
2225 {
2226         int ret;
2227         unsigned int i;
2228         struct enet_cb *cb;
2229
2230         netif_dbg(priv, hw, priv->dev, "%s\n", __func__);
2231
2232         /* Initialize common Rx ring structures */
2233         priv->rx_bds = priv->base + priv->hw_params->rdma_offset;
2234         priv->num_rx_bds = TOTAL_DESC;
2235         priv->rx_cbs = kcalloc(priv->num_rx_bds, sizeof(struct enet_cb),
2236                                GFP_KERNEL);
2237         if (!priv->rx_cbs)
2238                 return -ENOMEM;
2239
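             /* Each control block caches the address of its buffer
              * descriptor, computed from the RDMA block offset within the
              * device's mapped register space.
              */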
2240         for (i = 0; i < priv->num_rx_bds; i++) {
2241                 cb = priv->rx_cbs + i;
2242                 cb->bd_addr = priv->rx_bds + i * DMA_DESC_SIZE;
2243         }
2244
2245         /* Initialize common TX ring structures */
2246         priv->tx_bds = priv->base + priv->hw_params->tdma_offset;
2247         priv->num_tx_bds = TOTAL_DESC;
2248         priv->tx_cbs = kcalloc(priv->num_tx_bds, sizeof(struct enet_cb),
2249                                GFP_KERNEL);
2250         if (!priv->tx_cbs) {
2251                 kfree(priv->rx_cbs);
2252                 return -ENOMEM;
2253         }
2254
2255         for (i = 0; i < priv->num_tx_bds; i++) {
2256                 cb = priv->tx_cbs + i;
2257                 cb->bd_addr = priv->tx_bds + i * DMA_DESC_SIZE;
2258         }
2259
2260         /* Init rDma */
2261         bcmgenet_rdma_writel(priv, DMA_MAX_BURST_LENGTH, DMA_SCB_BURST_SIZE);
2262
2263         /* Initialize Rx queues */
2264         ret = bcmgenet_init_rx_queues(priv->dev);
2265         if (ret) {
2266                 netdev_err(priv->dev, "failed to initialize Rx queues\n");
2267                 bcmgenet_free_rx_buffers(priv);
2268                 kfree(priv->rx_cbs);
2269                 kfree(priv->tx_cbs);
2270                 return ret;
2271         }
2272
2273         /* Init tDma */
2274         bcmgenet_tdma_writel(priv, DMA_MAX_BURST_LENGTH, DMA_SCB_BURST_SIZE);
2275
2276         /* Initialize Tx queues */
2277         bcmgenet_init_tx_queues(priv->dev);
2278
2279         return 0;
2280 }
2281
2282 /* Interrupt bottom half */
2283 static void bcmgenet_irq_task(struct work_struct *work)
2284 {
2285         struct bcmgenet_priv *priv = container_of(
2286                         work, struct bcmgenet_priv, bcmgenet_irq_work);
2287
2288         netif_dbg(priv, intr, priv->dev, "%s\n", __func__);
2289
2290         if (priv->irq0_stat & UMAC_IRQ_MPD_R) {
2291                 priv->irq0_stat &= ~UMAC_IRQ_MPD_R;
2292                 netif_dbg(priv, wol, priv->dev,
2293                           "magic packet detected, waking up\n");
2294                 bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC);
2295         }
2296
2297         /* Link UP/DOWN event */
2298         if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) &&
2299             (priv->irq0_stat & UMAC_IRQ_LINK_EVENT)) {
2300                 phy_mac_interrupt(priv->phydev,
2301                                   !!(priv->irq0_stat & UMAC_IRQ_LINK_UP));
2302                 priv->irq0_stat &= ~UMAC_IRQ_LINK_EVENT;
2303         }
2304 }
2305
2306 /* bcmgenet_isr1: handle Rx and Tx priority queues */
2307 static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
2308 {
2309         struct bcmgenet_priv *priv = dev_id;
2310         struct bcmgenet_rx_ring *rx_ring;
2311         struct bcmgenet_tx_ring *tx_ring;
2312         unsigned int index;
2313
2314         /* Save irq status for bottom-half processing. */
2315         priv->irq1_stat =
2316                 bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) &
2317                 ~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
2318
2319         /* clear interrupts */
2320         bcmgenet_intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR);
2321
2322         netif_dbg(priv, intr, priv->dev,
2323                   "%s: IRQ=0x%x\n", __func__, priv->irq1_stat);
2324
2325         /* Check Rx priority queue interrupts */
2326         for (index = 0; index < priv->hw_params->rx_queues; index++) {
2327                 if (!(priv->irq1_stat & BIT(UMAC_IRQ1_RX_INTR_SHIFT + index)))
2328                         continue;
2329
2330                 rx_ring = &priv->rx_rings[index];
2331
2332                 if (likely(napi_schedule_prep(&rx_ring->napi))) {
2333                         rx_ring->int_disable(rx_ring);
2334                         __napi_schedule(&rx_ring->napi);
2335                 }
2336         }
2337
2338         /* Check Tx priority queue interrupts */
2339         for (index = 0; index < priv->hw_params->tx_queues; index++) {
2340                 if (!(priv->irq1_stat & BIT(index)))
2341                         continue;
2342
2343                 tx_ring = &priv->tx_rings[index];
2344
2345                 if (likely(napi_schedule_prep(&tx_ring->napi))) {
2346                         tx_ring->int_disable(tx_ring);
2347                         __napi_schedule(&tx_ring->napi);
2348                 }
2349         }
2350
2351         return IRQ_HANDLED;
2352 }
2353
2354 /* bcmgenet_isr0: handle Rx and Tx default queues, plus link/MDIO/WoL events */
2355 static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
2356 {
2357         struct bcmgenet_priv *priv = dev_id;
2358         struct bcmgenet_rx_ring *rx_ring;
2359         struct bcmgenet_tx_ring *tx_ring;
2360
2361         /* Save irq status for bottom-half processing. */
2362         priv->irq0_stat =
2363                 bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT) &
2364                 ~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
2365
2366         /* clear interrupts */
2367         bcmgenet_intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);
2368
2369         netif_dbg(priv, intr, priv->dev,
2370                   "IRQ=0x%x\n", priv->irq0_stat);
2371
2372         if (priv->irq0_stat & UMAC_IRQ_RXDMA_DONE) {
2373                 rx_ring = &priv->rx_rings[DESC_INDEX];
2374
2375                 if (likely(napi_schedule_prep(&rx_ring->napi))) {
2376                         rx_ring->int_disable(rx_ring);
2377                         __napi_schedule(&rx_ring->napi);
2378                 }
2379         }
2380
2381         if (priv->irq0_stat & UMAC_IRQ_TXDMA_DONE) {
2382                 tx_ring = &priv->tx_rings[DESC_INDEX];
2383
2384                 if (likely(napi_schedule_prep(&tx_ring->napi))) {
2385                         tx_ring->int_disable(tx_ring);
2386                         __napi_schedule(&tx_ring->napi);
2387                 }
2388         }
2389
2390         if (priv->irq0_stat & (UMAC_IRQ_PHY_DET_R |
2391                                 UMAC_IRQ_PHY_DET_F |
2392                                 UMAC_IRQ_LINK_EVENT |
2393                                 UMAC_IRQ_HFB_SM |
2394                                 UMAC_IRQ_HFB_MM |
2395                                 UMAC_IRQ_MPD_R)) {
2396                 /* all other interrupts of interest are handled in the bottom half */
2397                 schedule_work(&priv->bcmgenet_irq_work);
2398         }
2399
2400         if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) &&
2401             priv->irq0_stat & (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR)) {
2402                 priv->irq0_stat &= ~(UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR);
2403                 wake_up(&priv->wq);
2404         }
2405
2406         return IRQ_HANDLED;
2407 }
2408
2409 static irqreturn_t bcmgenet_wol_isr(int irq, void *dev_id)
2410 {
2411         struct bcmgenet_priv *priv = dev_id;
2412
2413         pm_wakeup_event(&priv->pdev->dev, 0);
2414
2415         return IRQ_HANDLED;
2416 }
2417
2418 #ifdef CONFIG_NET_POLL_CONTROLLER
2419 static void bcmgenet_poll_controller(struct net_device *dev)
2420 {
2421         struct bcmgenet_priv *priv = netdev_priv(dev);
2422
2423         /* Invoke the main RX/TX interrupt handler */
2424         disable_irq(priv->irq0);
2425         bcmgenet_isr0(priv->irq0, priv);
2426         enable_irq(priv->irq0);
2427
2428         /* And the interrupt handler for RX/TX priority queues */
2429         disable_irq(priv->irq1);
2430         bcmgenet_isr1(priv->irq1, priv);
2431         enable_irq(priv->irq1);
2432 }
2433 #endif
2434
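     /* Pulse BIT(1) of the RBUF flush control register, with settling
      * delays, to bring the UniMAC into a sane state (see also the
      * workaround note in reset_umac()).
      */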
2435 static void bcmgenet_umac_reset(struct bcmgenet_priv *priv)
2436 {
2437         u32 reg;
2438
2439         reg = bcmgenet_rbuf_ctrl_get(priv);
2440         reg |= BIT(1);
2441         bcmgenet_rbuf_ctrl_set(priv, reg);
2442         udelay(10);
2443
2444         reg &= ~BIT(1);
2445         bcmgenet_rbuf_ctrl_set(priv, reg);
2446         udelay(10);
2447 }
2448
2449 static void bcmgenet_set_hw_addr(struct bcmgenet_priv *priv,
2450                                  unsigned char *addr)
2451 {
2452         bcmgenet_umac_writel(priv, (addr[0] << 24) | (addr[1] << 16) |
2453                         (addr[2] << 8) | addr[3], UMAC_MAC0);
2454         bcmgenet_umac_writel(priv, (addr[4] << 8) | addr[5], UMAC_MAC1);
2455 }
2456
2457 /* Returns a reusable dma control register value */
2458 static u32 bcmgenet_dma_disable(struct bcmgenet_priv *priv)
2459 {
2460         u32 reg;
2461         u32 dma_ctrl;
2462
2463         /* disable DMA */
2464         dma_ctrl = 1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT) | DMA_EN;
2465         reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
2466         reg &= ~dma_ctrl;
2467         bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
2468
2469         reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
2470         reg &= ~dma_ctrl;
2471         bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
2472
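             /* Flush the UniMAC Tx path by pulsing UMAC_TX_FLUSH */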
2473         bcmgenet_umac_writel(priv, 1, UMAC_TX_FLUSH);
2474         udelay(10);
2475         bcmgenet_umac_writel(priv, 0, UMAC_TX_FLUSH);
2476
2477         return dma_ctrl;
2478 }
2479
2480 static void bcmgenet_enable_dma(struct bcmgenet_priv *priv, u32 dma_ctrl)
2481 {
2482         u32 reg;
2483
2484         reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
2485         reg |= dma_ctrl;
2486         bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
2487
2488         reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
2489         reg |= dma_ctrl;
2490         bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
2491 }
2492
2493 static bool bcmgenet_hfb_is_filter_enabled(struct bcmgenet_priv *priv,
2494                                            u32 f_index)
2495 {
2496         u32 offset;
2497         u32 reg;
2498
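             /* Enable bits for filters 0-31 live in the second
              * HFB_FLT_ENABLE word; higher-numbered filters use the first.
              */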
2499         offset = HFB_FLT_ENABLE_V3PLUS + (f_index < 32) * sizeof(u32);
2500         reg = bcmgenet_hfb_reg_readl(priv, offset);
2501         return !!(reg & (1 << (f_index % 32)));
2502 }
2503
2504 static void bcmgenet_hfb_enable_filter(struct bcmgenet_priv *priv, u32 f_index)
2505 {
2506         u32 offset;
2507         u32 reg;
2508
2509         offset = HFB_FLT_ENABLE_V3PLUS + (f_index < 32) * sizeof(u32);
2510         reg = bcmgenet_hfb_reg_readl(priv, offset);
2511         reg |= (1 << (f_index % 32));
2512         bcmgenet_hfb_reg_writel(priv, reg, offset);
2513 }
2514
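     /* Each DMA_INDEX2RING register packs eight 4-bit Rx queue numbers, so
      * the register word and the nibble within it are both derived from
      * f_index.
      */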
2515 static void bcmgenet_hfb_set_filter_rx_queue_mapping(struct bcmgenet_priv *priv,
2516                                                      u32 f_index, u32 rx_queue)
2517 {
2518         u32 offset;
2519         u32 reg;
2520
2521         offset = f_index / 8;
2522         reg = bcmgenet_rdma_readl(priv, DMA_INDEX2RING_0 + offset);
2523         reg &= ~(0xF << (4 * (f_index % 8)));
2524         reg |= ((rx_queue & 0xF) << (4 * (f_index % 8)));
2525         bcmgenet_rdma_writel(priv, reg, DMA_INDEX2RING_0 + offset);
2526 }
2527
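     /* Filter lengths are stored one byte per filter, four per register
      * word, with the words indexed from the highest-numbered filter
      * downward.
      */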
2528 static void bcmgenet_hfb_set_filter_length(struct bcmgenet_priv *priv,
2529                                            u32 f_index, u32 f_length)
2530 {
2531         u32 offset;
2532         u32 reg;
2533
2534         offset = HFB_FLT_LEN_V3PLUS +
2535                  ((priv->hw_params->hfb_filter_cnt - 1 - f_index) / 4) *
2536                  sizeof(u32);
2537         reg = bcmgenet_hfb_reg_readl(priv, offset);
2538         reg &= ~(0xFF << (8 * (f_index % 4)));
2539         reg |= ((f_length & 0xFF) << (8 * (f_index % 4)));
2540         bcmgenet_hfb_reg_writel(priv, reg, offset);
2541 }
2542
2543 static int bcmgenet_hfb_find_unused_filter(struct bcmgenet_priv *priv)
2544 {
2545         u32 f_index;
2546
2547         for (f_index = 0; f_index < priv->hw_params->hfb_filter_cnt; f_index++)
2548                 if (!bcmgenet_hfb_is_filter_enabled(priv, f_index))
2549                         return f_index;
2550
2551         return -ENOMEM;
2552 }
2553
2554 /* bcmgenet_hfb_add_filter
2555  *
2556  * Add new filter to Hardware Filter Block to match and direct Rx traffic to
2557  * desired Rx queue.
2558  *
2559  * f_data is an array of unsigned 32-bit integers where each 32-bit integer
2560  * provides filter data for 2 bytes (4 nibbles) of Rx frame:
2561  *
2562  * bits 31:20 - unused
2563  * bit  19    - nibble 0 match enable
2564  * bit  18    - nibble 1 match enable
2565  * bit  17    - nibble 2 match enable
2566  * bit  16    - nibble 3 match enable
2567  * bits 15:12 - nibble 0 data
2568  * bits 11:8  - nibble 1 data
2569  * bits 7:4   - nibble 2 data
2570  * bits 3:0   - nibble 3 data
2571  *
2572  * Example:
2573  * In order to match:
2574  * - Ethernet frame type = 0x0800 (IP)
2575  * - IP version field = 4
2576  * - IP protocol field = 0x11 (UDP)
2577  *
2578  * The following filter is needed:
2579  * u32 hfb_filter_ipv4_udp[] = {
2580  *   Rx frame offset 0x00: 0x00000000, 0x00000000, 0x00000000, 0x00000000,
2581  *   Rx frame offset 0x08: 0x00000000, 0x00000000, 0x000F0800, 0x00084000,
2582  *   Rx frame offset 0x10: 0x00000000, 0x00000000, 0x00000000, 0x00030011,
2583  * };
2584  *
2585  * To add the filter to HFB and direct the traffic to Rx queue 0, call:
2586  * bcmgenet_hfb_add_filter(priv, hfb_filter_ipv4_udp,
2587  *                         ARRAY_SIZE(hfb_filter_ipv4_udp), 0);
2588  */
2589 int bcmgenet_hfb_add_filter(struct bcmgenet_priv *priv, u32 *f_data,
2590                             u32 f_length, u32 rx_queue)
2591 {
2592         int f_index;
2593         u32 i;
2594
2595         f_index = bcmgenet_hfb_find_unused_filter(priv);
2596         if (f_index < 0)
2597                 return -ENOMEM;
2598
2599         if (f_length > priv->hw_params->hfb_filter_size)
2600                 return -EINVAL;
2601
2602         for (i = 0; i < f_length; i++)
2603                 bcmgenet_hfb_writel(priv, f_data[i],
2604                         (f_index * priv->hw_params->hfb_filter_size + i) *
2605                         sizeof(u32));
2606
2607         bcmgenet_hfb_set_filter_length(priv, f_index, 2 * f_length);
2608         bcmgenet_hfb_set_filter_rx_queue_mapping(priv, f_index, rx_queue);
2609         bcmgenet_hfb_enable_filter(priv, f_index);
2610         bcmgenet_hfb_reg_writel(priv, 0x1, HFB_CTRL);
2611
2612         return 0;
2613 }
2614
2615 /* bcmgenet_hfb_clear
2616  *
2617  * Clear Hardware Filter Block and disable all filtering.
2618  */
2619 static void bcmgenet_hfb_clear(struct bcmgenet_priv *priv)
2620 {
2621         u32 i;
2622
2623         bcmgenet_hfb_reg_writel(priv, 0x0, HFB_CTRL);
2624         bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS);
2625         bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS + 4);
2626
2627         for (i = DMA_INDEX2RING_0; i <= DMA_INDEX2RING_7; i++)
2628                 bcmgenet_rdma_writel(priv, 0x0, i);
2629
2630         for (i = 0; i < (priv->hw_params->hfb_filter_cnt / 4); i++)
2631                 bcmgenet_hfb_reg_writel(priv, 0x0,
2632                                         HFB_FLT_LEN_V3PLUS + i * sizeof(u32));
2633
2634         for (i = 0; i < priv->hw_params->hfb_filter_cnt *
2635                         priv->hw_params->hfb_filter_size; i++)
2636                 bcmgenet_hfb_writel(priv, 0x0, i * sizeof(u32));
2637 }
2638
2639 static void bcmgenet_hfb_init(struct bcmgenet_priv *priv)
2640 {
2641         if (GENET_IS_V1(priv) || GENET_IS_V2(priv))
2642                 return;
2643
2644         bcmgenet_hfb_clear(priv);
2645 }
2646
2647 static void bcmgenet_netif_start(struct net_device *dev)
2648 {
2649         struct bcmgenet_priv *priv = netdev_priv(dev);
2650
2651         /* Start the network engine */
2652         bcmgenet_enable_rx_napi(priv);
2653         bcmgenet_enable_tx_napi(priv);
2654
2655         umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, true);
2656
2657         netif_tx_start_all_queues(dev);
2658
2659         /* Monitor link interrupts now */
2660         bcmgenet_link_intr_enable(priv);
2661
2662         phy_start(priv->phydev);
2663 }
2664
2665 static int bcmgenet_open(struct net_device *dev)
2666 {
2667         struct bcmgenet_priv *priv = netdev_priv(dev);
2668         unsigned long dma_ctrl;
2669         u32 reg;
2670         int ret;
2671
2672         netif_dbg(priv, ifup, dev, "bcmgenet_open\n");
2673
2674         /* Turn on the clock */
2675         clk_prepare_enable(priv->clk);
2676
2677         /* If this is an internal GPHY, power it back on now, before UniMAC is
2678          * brought out of reset, as absolutely no UniMAC activity is allowed
2679          */
2680         if (priv->internal_phy)
2681                 bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
2682
2683         /* take MAC out of reset */
2684         bcmgenet_umac_reset(priv);
2685
2686         ret = init_umac(priv);
2687         if (ret)
2688                 goto err_clk_disable;
2689
2690         /* disable ethernet MAC while updating its registers */
2691         umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, false);
2692
2693         /* Make sure we reflect the value of CMD_CRC_FWD */
2694         reg = bcmgenet_umac_readl(priv, UMAC_CMD);
2695         priv->crc_fwd_en = !!(reg & CMD_CRC_FWD);
2696
2697         bcmgenet_set_hw_addr(priv, dev->dev_addr);
2698
2699         if (priv->internal_phy) {
2700                 reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
2701                 reg |= EXT_ENERGY_DET_MASK;
2702                 bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
2703         }
2704
2705         /* Disable RX/TX DMA and flush TX queues */
2706         dma_ctrl = bcmgenet_dma_disable(priv);
2707
2708         /* Reinitialize TDMA and RDMA and SW housekeeping */
2709         ret = bcmgenet_init_dma(priv);
2710         if (ret) {
2711                 netdev_err(dev, "failed to initialize DMA\n");
2712                 goto err_clk_disable;
2713         }
2714
2715         /* Always enable ring 16 - descriptor ring */
2716         bcmgenet_enable_dma(priv, dma_ctrl);
2717
2718         /* HFB init */
2719         bcmgenet_hfb_init(priv);
2720
2721         ret = request_irq(priv->irq0, bcmgenet_isr0, IRQF_SHARED,
2722                           dev->name, priv);
2723         if (ret < 0) {
2724                 netdev_err(dev, "can't request IRQ %d\n", priv->irq0);
2725                 goto err_fini_dma;
2726         }
2727
2728         ret = request_irq(priv->irq1, bcmgenet_isr1, IRQF_SHARED,
2729                           dev->name, priv);
2730         if (ret < 0) {
2731                 netdev_err(dev, "can't request IRQ %d\n", priv->irq1);
2732                 goto err_irq0;
2733         }
2734
2735         ret = bcmgenet_mii_probe(dev);
2736         if (ret) {
2737                 netdev_err(dev, "failed to connect to PHY\n");
2738                 goto err_irq1;
2739         }
2740
2741         bcmgenet_netif_start(dev);
2742
2743         return 0;
2744
2745 err_irq1:
2746         free_irq(priv->irq1, priv);
2747 err_irq0:
2748         free_irq(priv->irq0, priv);
2749 err_fini_dma:
2750         bcmgenet_fini_dma(priv);
2751 err_clk_disable:
2752         clk_disable_unprepare(priv->clk);
2753         return ret;
2754 }
2755
2756 static void bcmgenet_netif_stop(struct net_device *dev)
2757 {
2758         struct bcmgenet_priv *priv = netdev_priv(dev);
2759
2760         netif_tx_stop_all_queues(dev);
2761         phy_stop(priv->phydev);
2762         bcmgenet_intr_disable(priv);
2763         bcmgenet_disable_rx_napi(priv);
2764         bcmgenet_disable_tx_napi(priv);
2765
2766         /* Wait for pending work items to complete. Since interrupts are
2767          * disabled no new work will be scheduled.
2768          */
2769         cancel_work_sync(&priv->bcmgenet_irq_work);
2770
2771         priv->old_link = -1;
2772         priv->old_speed = -1;
2773         priv->old_duplex = -1;
2774         priv->old_pause = -1;
2775 }
2776
2777 static int bcmgenet_close(struct net_device *dev)
2778 {
2779         struct bcmgenet_priv *priv = netdev_priv(dev);
2780         int ret;
2781
2782         netif_dbg(priv, ifdown, dev, "bcmgenet_close\n");
2783
2784         bcmgenet_netif_stop(dev);
2785
2786         /* Really kill the PHY state machine and disconnect from it */
2787         phy_disconnect(priv->phydev);
2788
2789         /* Disable MAC receive */
2790         umac_enable_set(priv, CMD_RX_EN, false);
2791
2792         ret = bcmgenet_dma_teardown(priv);
2793         if (ret)
2794                 return ret;
2795
2796         /* Disable MAC transmit; Tx DMA must be disabled before this */
2797         umac_enable_set(priv, CMD_TX_EN, false);
2798
2799         /* tx reclaim */
2800         bcmgenet_tx_reclaim_all(dev);
2801         bcmgenet_fini_dma(priv);
2802
2803         free_irq(priv->irq0, priv);
2804         free_irq(priv->irq1, priv);
2805
2806         if (priv->internal_phy)
2807                 ret = bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
2808
2809         clk_disable_unprepare(priv->clk);
2810
2811         return ret;
2812 }
2813
2814 static void bcmgenet_dump_tx_queue(struct bcmgenet_tx_ring *ring)
2815 {
2816         struct bcmgenet_priv *priv = ring->priv;
2817         u32 p_index, c_index, intsts, intmsk;
2818         struct netdev_queue *txq;
2819         unsigned int free_bds;
2820         unsigned long flags;
2821         bool txq_stopped;
2822
2823         if (!netif_msg_tx_err(priv))
2824                 return;
2825
2826         txq = netdev_get_tx_queue(priv->dev, ring->queue);
2827
2828         spin_lock_irqsave(&ring->lock, flags);
2829         if (ring->index == DESC_INDEX) {
2830                 intsts = ~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
2831                 intmsk = UMAC_IRQ_TXDMA_DONE | UMAC_IRQ_TXDMA_MBDONE;
2832         } else {
2833                 intsts = ~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
2834                 intmsk = 1 << ring->index;
2835         }
2836         c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX);
2837         p_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_PROD_INDEX);
2838         txq_stopped = netif_tx_queue_stopped(txq);
2839         free_bds = ring->free_bds;
2840         spin_unlock_irqrestore(&ring->lock, flags);
2841
2842         netif_err(priv, tx_err, priv->dev, "Ring %d queue %d status summary\n"
2843                   "TX queue status: %s, interrupts: %s\n"
2844                   "(sw)free_bds: %d (sw)size: %d\n"
2845                   "(sw)p_index: %d (hw)p_index: %d\n"
2846                   "(sw)c_index: %d (hw)c_index: %d\n"
2847                   "(sw)clean_p: %d (sw)write_p: %d\n"
2848                   "(sw)cb_ptr: %d (sw)end_ptr: %d\n",
2849                   ring->index, ring->queue,
2850                   txq_stopped ? "stopped" : "active",
2851                   intsts & intmsk ? "enabled" : "disabled",
2852                   free_bds, ring->size,
2853                   ring->prod_index, p_index & DMA_P_INDEX_MASK,
2854                   ring->c_index, c_index & DMA_C_INDEX_MASK,
2855                   ring->clean_ptr, ring->write_ptr,
2856                   ring->cb_ptr, ring->end_ptr);
2857 }
2858
2859 static void bcmgenet_timeout(struct net_device *dev)
2860 {
2861         struct bcmgenet_priv *priv = netdev_priv(dev);
2862         u32 int0_enable = 0;
2863         u32 int1_enable = 0;
2864         unsigned int q;
2865
2866         netif_dbg(priv, tx_err, dev, "bcmgenet_timeout\n");
2867
2868         for (q = 0; q < priv->hw_params->tx_queues; q++)
2869                 bcmgenet_dump_tx_queue(&priv->tx_rings[q]);
2870         bcmgenet_dump_tx_queue(&priv->tx_rings[DESC_INDEX]);
2871
2872         bcmgenet_tx_reclaim_all(dev);
2873
2874         for (q = 0; q < priv->hw_params->tx_queues; q++)
2875                 int1_enable |= (1 << q);
2876
2877         int0_enable = UMAC_IRQ_TXDMA_DONE;
2878
2879         /* Re-enable TX interrupts if disabled */
2880         bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
2881         bcmgenet_intrl2_1_writel(priv, int1_enable, INTRL2_CPU_MASK_CLEAR);
2882
2883         dev->trans_start = jiffies;
2884
2885         dev->stats.tx_errors++;
2886
2887         netif_tx_wake_all_queues(dev);
2888 }
2889
2890 #define MAX_MC_COUNT    16
2891
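     /* Write one MAC address into the next pair of UMAC_MDF_ADDR register
      * slots and set the matching enable bit in UMAC_MDF_CTRL; *i advances
      * by two register words and *mc counts the programmed filter entries.
      */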
2892 static inline void bcmgenet_set_mdf_addr(struct bcmgenet_priv *priv,
2893                                          unsigned char *addr,
2894                                          int *i,
2895                                          int *mc)
2896 {
2897         u32 reg;
2898
2899         bcmgenet_umac_writel(priv, addr[0] << 8 | addr[1],
2900                              UMAC_MDF_ADDR + (*i * 4));
2901         bcmgenet_umac_writel(priv, addr[2] << 24 | addr[3] << 16 |
2902                              addr[4] << 8 | addr[5],
2903                              UMAC_MDF_ADDR + ((*i + 1) * 4));
2904         reg = bcmgenet_umac_readl(priv, UMAC_MDF_CTRL);
2905         reg |= (1 << (MAX_MC_COUNT - *mc));
2906         bcmgenet_umac_writel(priv, reg, UMAC_MDF_CTRL);
2907         *i += 2;
2908         (*mc)++;
2909 }
2910
2911 static void bcmgenet_set_rx_mode(struct net_device *dev)
2912 {
2913         struct bcmgenet_priv *priv = netdev_priv(dev);
2914         struct netdev_hw_addr *ha;
2915         int i, mc;
2916         u32 reg;
2917
2918         netif_dbg(priv, hw, dev, "%s: %08X\n", __func__, dev->flags);
2919
2920         /* Promiscuous mode */
2921         reg = bcmgenet_umac_readl(priv, UMAC_CMD);
2922         if (dev->flags & IFF_PROMISC) {
2923                 reg |= CMD_PROMISC;
2924                 bcmgenet_umac_writel(priv, reg, UMAC_CMD);
2925                 bcmgenet_umac_writel(priv, 0, UMAC_MDF_CTRL);
2926                 return;
2927         } else {
2928                 reg &= ~CMD_PROMISC;
2929                 bcmgenet_umac_writel(priv, reg, UMAC_CMD);
2930         }
2931
2932         /* UniMAC doesn't support ALLMULTI */
2933         if (dev->flags & IFF_ALLMULTI) {
2934                 netdev_warn(dev, "ALLMULTI is not supported\n");
2935                 return;
2936         }
2937
2938         /* update MDF filter */
2939         i = 0;
2940         mc = 0;
2941         /* Broadcast */
2942         bcmgenet_set_mdf_addr(priv, dev->broadcast, &i, &mc);
2943         /* Our own address */
2944         bcmgenet_set_mdf_addr(priv, dev->dev_addr, &i, &mc);
2945         /* Unicast list */
2946         if (netdev_uc_count(dev) > (MAX_MC_COUNT - mc))
2947                 return;
2948
2949         if (!netdev_uc_empty(dev))
2950                 netdev_for_each_uc_addr(ha, dev)
2951                         bcmgenet_set_mdf_addr(priv, ha->addr, &i, &mc);
2952         /* Multicast */
2953         if (netdev_mc_empty(dev) || netdev_mc_count(dev) >= (MAX_MC_COUNT - mc))
2954                 return;
2955
2956         netdev_for_each_mc_addr(ha, dev)
2957                 bcmgenet_set_mdf_addr(priv, ha->addr, &i, &mc);
2958 }
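
/* Editor's note: with MAX_MC_COUNT == 16, the broadcast and device MAC
 * addresses always occupy the first two MDF entries, leaving at most
 * 16 - 2 = 14 entries to be shared by the unicast and multicast lists;
 * when a list would overflow that budget, bcmgenet_set_rx_mode() above
 * returns early and leaves the filter as programmed so far.
 */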
2959
2960 /* Set the hardware MAC address. */
2961 static int bcmgenet_set_mac_addr(struct net_device *dev, void *p)
2962 {
2963         struct sockaddr *addr = p;
2964
2965         /* Setting the MAC address at the hardware level is not possible
2966          * without disabling the UniMAC RX/TX enable bits.
2967          */
2968         if (netif_running(dev))
2969                 return -EBUSY;
2970
2971         ether_addr_copy(dev->dev_addr, addr->sa_data);
2972
2973         return 0;
2974 }
2975
2976 static const struct net_device_ops bcmgenet_netdev_ops = {
2977         .ndo_open               = bcmgenet_open,
2978         .ndo_stop               = bcmgenet_close,
2979         .ndo_start_xmit         = bcmgenet_xmit,
2980         .ndo_tx_timeout         = bcmgenet_timeout,
2981         .ndo_set_rx_mode        = bcmgenet_set_rx_mode,
2982         .ndo_set_mac_address    = bcmgenet_set_mac_addr,
2983         .ndo_do_ioctl           = bcmgenet_ioctl,
2984         .ndo_set_features       = bcmgenet_set_features,
2985 #ifdef CONFIG_NET_POLL_CONTROLLER
2986         .ndo_poll_controller    = bcmgenet_poll_controller,
2987 #endif
2988 };
2989
2990 /* Array of GENET hardware parameters/characteristics */
2991 static struct bcmgenet_hw_params bcmgenet_hw_params[] = {
2992         [GENET_V1] = {
2993                 .tx_queues = 0,
2994                 .tx_bds_per_q = 0,
2995                 .rx_queues = 0,
2996                 .rx_bds_per_q = 0,
2997                 .bp_in_en_shift = 16,
2998                 .bp_in_mask = 0xffff,
2999                 .hfb_filter_cnt = 16,
3000                 .qtag_mask = 0x1F,
3001                 .hfb_offset = 0x1000,
3002                 .rdma_offset = 0x2000,
3003                 .tdma_offset = 0x3000,
3004                 .words_per_bd = 2,
3005         },
3006         [GENET_V2] = {
3007                 .tx_queues = 4,
3008                 .tx_bds_per_q = 32,
3009                 .rx_queues = 0,
3010                 .rx_bds_per_q = 0,
3011                 .bp_in_en_shift = 16,
3012                 .bp_in_mask = 0xffff,
3013                 .hfb_filter_cnt = 16,
3014                 .qtag_mask = 0x1F,
3015                 .tbuf_offset = 0x0600,
3016                 .hfb_offset = 0x1000,
3017                 .hfb_reg_offset = 0x2000,
3018                 .rdma_offset = 0x3000,
3019                 .tdma_offset = 0x4000,
3020                 .words_per_bd = 2,
3021                 .flags = GENET_HAS_EXT,
3022         },
3023         [GENET_V3] = {
3024                 .tx_queues = 4,
3025                 .tx_bds_per_q = 32,
3026                 .rx_queues = 0,
3027                 .rx_bds_per_q = 0,
3028                 .bp_in_en_shift = 17,
3029                 .bp_in_mask = 0x1ffff,
3030                 .hfb_filter_cnt = 48,
3031                 .hfb_filter_size = 128,
3032                 .qtag_mask = 0x3F,
3033                 .tbuf_offset = 0x0600,
3034                 .hfb_offset = 0x8000,
3035                 .hfb_reg_offset = 0xfc00,
3036                 .rdma_offset = 0x10000,
3037                 .tdma_offset = 0x11000,
3038                 .words_per_bd = 2,
3039                 .flags = GENET_HAS_EXT | GENET_HAS_MDIO_INTR |
3040                          GENET_HAS_MOCA_LINK_DET,
3041         },
3042         [GENET_V4] = {
3043                 .tx_queues = 4,
3044                 .tx_bds_per_q = 32,
3045                 .rx_queues = 0,
3046                 .rx_bds_per_q = 0,
3047                 .bp_in_en_shift = 17,
3048                 .bp_in_mask = 0x1ffff,
3049                 .hfb_filter_cnt = 48,
3050                 .hfb_filter_size = 128,
3051                 .qtag_mask = 0x3F,
3052                 .tbuf_offset = 0x0600,
3053                 .hfb_offset = 0x8000,
3054                 .hfb_reg_offset = 0xfc00,
3055                 .rdma_offset = 0x2000,
3056                 .tdma_offset = 0x4000,
3057                 .words_per_bd = 3,
3058                 .flags = GENET_HAS_40BITS | GENET_HAS_EXT |
3059                          GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET,
3060         },
3061 };
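
/* Editor's note (worked example, assuming TOTAL_DESC == 256 as the
 * descriptor comments near the top of the file suggest): GENET_V2..V4
 * reserve tx_queues * tx_bds_per_q = 4 * 32 = 128 TX descriptors for
 * the priority rings, so GENET_Q16_TX_BD_CNT = 256 - 128 = 128 BDs
 * remain for the default ring 16; GENET_V1 keeps all 256 BDs there.
 */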
3062
3063 /* Infer hardware parameters from the detected GENET version */
3064 static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
3065 {
3066         struct bcmgenet_hw_params *params;
3067         u32 reg;
3068         u8 major;
3069         u16 gphy_rev;
3070
3071         if (GENET_IS_V4(priv)) {
3072                 bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
3073                 genet_dma_ring_regs = genet_dma_ring_regs_v4;
3074                 priv->dma_rx_chk_bit = DMA_RX_CHK_V3PLUS;
3075                 priv->version = GENET_V4;
3076         } else if (GENET_IS_V3(priv)) {
3077                 bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
3078                 genet_dma_ring_regs = genet_dma_ring_regs_v123;
3079                 priv->dma_rx_chk_bit = DMA_RX_CHK_V3PLUS;
3080                 priv->version = GENET_V3;
3081         } else if (GENET_IS_V2(priv)) {
3082                 bcmgenet_dma_regs = bcmgenet_dma_regs_v2;
3083                 genet_dma_ring_regs = genet_dma_ring_regs_v123;
3084                 priv->dma_rx_chk_bit = DMA_RX_CHK_V12;
3085                 priv->version = GENET_V2;
3086         } else if (GENET_IS_V1(priv)) {
3087                 bcmgenet_dma_regs = bcmgenet_dma_regs_v1;
3088                 genet_dma_ring_regs = genet_dma_ring_regs_v123;
3089                 priv->dma_rx_chk_bit = DMA_RX_CHK_V12;
3090                 priv->version = GENET_V1;
3091         }
3092
3093         /* enum genet_version starts at 1 */
3094         priv->hw_params = &bcmgenet_hw_params[priv->version];
3095         params = priv->hw_params;
3096
3097         /* Read GENET HW version */
3098         reg = bcmgenet_sys_readl(priv, SYS_REV_CTRL);
3099         major = (reg >> 24 & 0x0f);
3100         if (major == 5)
3101                 major = 4;
3102         else if (major == 0)
3103                 major = 1;
3104         if (major != priv->version) {
3105                 dev_err(&priv->pdev->dev,
3106                         "GENET version mismatch, got: %d, configured for: %d\n",
3107                         major, priv->version);
3108         }
3109
3110         /* Print the GENET core version */
3111         dev_info(&priv->pdev->dev, "GENET " GENET_VER_FMT,
3112                  major, (reg >> 16) & 0x0f, reg & 0xffff);
3113
3114         /* Store the integrated PHY revision so the MDIO probing function
3115          * can pass it on to the PHY driver. The PHY driver expects to
3116          * find the PHY major revision in bits 15:8, while the GENET
3117          * register stores it in bits 7:0; account for that.
3118          *
3119          * On newer chips, starting with PHY revision G0, a new scheme is
3120          * used, similar to the Starfighter 2 switch, with the GPHY major
3121          * revision in bits 15:8 and the patch level in bits 7:0. Major
3122          * revision 0 is reserved, as is the special value 0x01ff, so a
3123          * small heuristic checks for the new GPHY revision scheme and
3124          * re-arranges things so the GPHY driver is happy.
3125          */
3126         gphy_rev = reg & 0xffff;
3127
3128         /* This is the good old scheme, just GPHY major, no minor nor patch */
3129         if ((gphy_rev & 0xf0) != 0)
3130                 priv->gphy_rev = gphy_rev << 8;
3131
3132         /* This is the new scheme, GPHY major rolls over with 0x10 = rev G0 */
3133         else if ((gphy_rev & 0xff00) != 0)
3134                 priv->gphy_rev = gphy_rev;
3135
3136         /* These values are reserved and require special treatment */
3137         else if (gphy_rev == 0 || gphy_rev == 0x01ff) {
3138                 pr_warn("Invalid GPHY revision detected: 0x%04x\n", gphy_rev);
3139                 return;
3140         }
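
        /* Editor's note, with hypothetical register values: 0x00a0
         * matches the old scheme and is shifted up to 0xa000 so the
         * major revision lands in bits 15:8; 0x1002 (major 0x10 = G0,
         * patch level 2) matches the new scheme and passes through
         * unchanged; 0x0000 and 0x01ff are reserved and rejected above.
         */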
3141
3142 #ifdef CONFIG_PHYS_ADDR_T_64BIT
3143         if (!(params->flags & GENET_HAS_40BITS))
3144                 pr_warn("GENET does not support 40-bit PA\n");
3145 #endif
3146
3147         pr_debug("Configuration for version: %d\n"
3148                 "TXq: %1d, TXqBDs: %1d, RXq: %1d, RXqBDs: %1d\n"
3149                 "BP << en: %2d, BP msk: 0x%05x\n"
3150                 "HFB count: %2d, QTAQ msk: 0x%05x\n"
3151                 "TBUF: 0x%04x, HFB: 0x%04x, HFBreg: 0x%04x\n"
3152                 "RDMA: 0x%05x, TDMA: 0x%05x\n"
3153                 "Words/BD: %d\n",
3154                 priv->version,
3155                 params->tx_queues, params->tx_bds_per_q,
3156                 params->rx_queues, params->rx_bds_per_q,
3157                 params->bp_in_en_shift, params->bp_in_mask,
3158                 params->hfb_filter_cnt, params->qtag_mask,
3159                 params->tbuf_offset, params->hfb_offset,
3160                 params->hfb_reg_offset,
3161                 params->rdma_offset, params->tdma_offset,
3162                 params->words_per_bd);
3163 }
3164
3165 static const struct of_device_id bcmgenet_match[] = {
3166         { .compatible = "brcm,genet-v1", .data = (void *)GENET_V1 },
3167         { .compatible = "brcm,genet-v2", .data = (void *)GENET_V2 },
3168         { .compatible = "brcm,genet-v3", .data = (void *)GENET_V3 },
3169         { .compatible = "brcm,genet-v4", .data = (void *)GENET_V4 },
3170         { },
3171 };
3172 MODULE_DEVICE_TABLE(of, bcmgenet_match);
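
/* Editor's sketch: a minimal, purely hypothetical device-tree node that
 * this match table would bind against; the address, size, interrupt
 * specifiers and MAC address are illustrative only:
 *
 *      ethernet@f0b60000 {
 *              compatible = "brcm,genet-v4";
 *              reg = <0xf0b60000 0x10000>;
 *              interrupts = <0x0 0x18 0x0>, <0x0 0x19 0x0>, <0x0 0x1a 0x0>;
 *              local-mac-address = [ 00 10 18 00 00 01 ];
 *      };
 *
 * bcmgenet_probe() below expects one memory resource and up to three
 * IRQs: intrl2_0, intrl2_1 and an optional wake-on-LAN interrupt.
 */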
3173
3174 static int bcmgenet_probe(struct platform_device *pdev)
3175 {
3176         struct bcmgenet_platform_data *pd = pdev->dev.platform_data;
3177         struct device_node *dn = pdev->dev.of_node;
3178         const struct of_device_id *of_id = NULL;
3179         struct bcmgenet_priv *priv;
3180         struct net_device *dev;
3181         const void *macaddr;
3182         struct resource *r;
3183         int err = -EIO;
3184
3185         if (dn) {
3186                 of_id = of_match_node(bcmgenet_match, dn);
3187                 if (!of_id)
3188                         return -EINVAL;
3189         }
3190
3191         /* Up to GENET_MAX_MQ_CNT + 1 TX queues and RX queues */
3192         dev = alloc_etherdev_mqs(sizeof(*priv), GENET_MAX_MQ_CNT + 1,
3193                                  GENET_MAX_MQ_CNT + 1);
3194         if (!dev) {
3195                 dev_err(&pdev->dev, "can't allocate net device\n");
3196                 return -ENOMEM;
3197         }
3198
3199         priv = netdev_priv(dev);
3200         priv->irq0 = platform_get_irq(pdev, 0);
3201         priv->irq1 = platform_get_irq(pdev, 1);
3202         priv->wol_irq = platform_get_irq(pdev, 2);
3203         if (!priv->irq0 || !priv->irq1) {
3204                 dev_err(&pdev->dev, "can't find IRQs\n");
3205                 err = -EINVAL;
3206                 goto err;
3207         }
3208
3209         if (dn) {
3210                 macaddr = of_get_mac_address(dn);
3211                 if (!macaddr) {
3212                         dev_err(&pdev->dev, "can't find MAC address\n");
3213                         err = -EINVAL;
3214                         goto err;
3215                 }
3216         } else {
3217                 macaddr = pd->mac_address;
3218         }
3219
3220         r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
3221         priv->base = devm_ioremap_resource(&pdev->dev, r);
3222         if (IS_ERR(priv->base)) {
3223                 err = PTR_ERR(priv->base);
3224                 goto err;
3225         }
3226
3227         SET_NETDEV_DEV(dev, &pdev->dev);
3228         dev_set_drvdata(&pdev->dev, dev);
3229         ether_addr_copy(dev->dev_addr, macaddr);
3230         dev->watchdog_timeo = 2 * HZ;
3231         dev->ethtool_ops = &bcmgenet_ethtool_ops;
3232         dev->netdev_ops = &bcmgenet_netdev_ops;
3233
3234         priv->msg_enable = netif_msg_init(-1, GENET_MSG_DEFAULT);
3235
3236         /* Set hardware features */
3237         dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM |
3238                 NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM;
3239
3240         /* Request the WOL interrupt and advertise suspend if available */
3241         priv->wol_irq_disabled = true;
3242         err = devm_request_irq(&pdev->dev, priv->wol_irq, bcmgenet_wol_isr, 0,
3243                                dev->name, priv);
3244         if (!err)
3245                 device_set_wakeup_capable(&pdev->dev, 1);
3246
3247         /* Set the needed headroom to account for any possible
3248          * features being enabled or disabled at runtime
3249          */
3250         dev->needed_headroom += 64;
3251
3252         netdev_boot_setup_check(dev);
3253
3254         priv->dev = dev;
3255         priv->pdev = pdev;
3256         if (of_id)
3257                 priv->version = (enum bcmgenet_version)of_id->data;
3258         else
3259                 priv->version = pd->genet_version;
3260
3261         priv->clk = devm_clk_get(&priv->pdev->dev, "enet");
3262         if (IS_ERR(priv->clk)) {
3263                 dev_warn(&priv->pdev->dev, "failed to get enet clock\n");
3264                 priv->clk = NULL;
3265         }
3266
3267         clk_prepare_enable(priv->clk);
3268
3269         bcmgenet_set_hw_params(priv);
3270
3271         /* MII wait queue */
3272         init_waitqueue_head(&priv->wq);
3273         /* Always use RX_BUF_LENGTH (2KB) buffer for all chips */
3274         priv->rx_buf_len = RX_BUF_LENGTH;
3275         INIT_WORK(&priv->bcmgenet_irq_work, bcmgenet_irq_task);
3276
3277         priv->clk_wol = devm_clk_get(&priv->pdev->dev, "enet-wol");
3278         if (IS_ERR(priv->clk_wol)) {
3279                 dev_warn(&priv->pdev->dev, "failed to get enet-wol clock\n");
3280                 priv->clk_wol = NULL;
3281         }
3282
3283         priv->clk_eee = devm_clk_get(&priv->pdev->dev, "enet-eee");
3284         if (IS_ERR(priv->clk_eee)) {
3285                 dev_warn(&priv->pdev->dev, "failed to get enet-eee clock\n");
3286                 priv->clk_eee = NULL;
3287         }
3288
3289         err = reset_umac(priv);
3290         if (err)
3291                 goto err_clk_disable;
3292
3293         err = bcmgenet_mii_init(dev);
3294         if (err)
3295                 goto err_clk_disable;
3296
3297         /* Set up the number of real queues + 1 (GENET_V1 has 0 hardware
3298          * queues, just the ring 16 descriptor-based TX queue).
3299          */
3300         netif_set_real_num_tx_queues(priv->dev, priv->hw_params->tx_queues + 1);
3301         netif_set_real_num_rx_queues(priv->dev, priv->hw_params->rx_queues + 1);
3302
3303         /* libphy will determine the link state */
3304         netif_carrier_off(dev);
3305
3306         /* Turn off the main clock, WOL clock is handled separately */
3307         clk_disable_unprepare(priv->clk);
3308
3309         err = register_netdev(dev);
3310         if (err)
3311                 goto err;
3312
3313         return err;
3314
3315 err_clk_disable:
3316         clk_disable_unprepare(priv->clk);
3317 err:
3318         free_netdev(dev);
3319         return err;
3320 }
3321
3322 static int bcmgenet_remove(struct platform_device *pdev)
3323 {
3324         struct bcmgenet_priv *priv = dev_to_priv(&pdev->dev);
3325
3326         dev_set_drvdata(&pdev->dev, NULL);
3327         unregister_netdev(priv->dev);
3328         bcmgenet_mii_exit(priv->dev);
3329         free_netdev(priv->dev);
3330
3331         return 0;
3332 }
3333
3334 #ifdef CONFIG_PM_SLEEP
3335 static int bcmgenet_suspend(struct device *d)
3336 {
3337         struct net_device *dev = dev_get_drvdata(d);
3338         struct bcmgenet_priv *priv = netdev_priv(dev);
3339         int ret;
3340
3341         if (!netif_running(dev))
3342                 return 0;
3343
3344         bcmgenet_netif_stop(dev);
3345
3346         phy_suspend(priv->phydev);
3347
3348         netif_device_detach(dev);
3349
3350         /* Disable MAC receive */
3351         umac_enable_set(priv, CMD_RX_EN, false);
3352
3353         ret = bcmgenet_dma_teardown(priv);
3354         if (ret)
3355                 return ret;
3356
3357         /* Disable MAC transmit; TX DMA must be disabled before this */
3358         umac_enable_set(priv, CMD_TX_EN, false);
3359
3360         /* Reclaim all pending TX buffers */
3361         bcmgenet_tx_reclaim_all(dev);
3362         bcmgenet_fini_dma(priv);
3363
3364         /* Prepare the device for Wake-on-LAN and switch to the slow clock */
3365         if (device_may_wakeup(d) && priv->wolopts) {
3366                 ret = bcmgenet_power_down(priv, GENET_POWER_WOL_MAGIC);
3367                 clk_prepare_enable(priv->clk_wol);
3368         } else if (priv->internal_phy) {
3369                 ret = bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
3370         }
3371
3372         /* Turn off the clocks */
3373         clk_disable_unprepare(priv->clk);
3374
3375         return ret;
3376 }
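
/* Editor's note: the teardown order in bcmgenet_suspend() above is
 * deliberate: MAC RX is disabled first so no new frames can enter the
 * DMA, the DMA rings are then torn down, and only afterwards is MAC TX
 * disabled and the outstanding TX buffers reclaimed.
 */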
3377
3378 static int bcmgenet_resume(struct device *d)
3379 {
3380         struct net_device *dev = dev_get_drvdata(d);
3381         struct bcmgenet_priv *priv = netdev_priv(dev);
3382         unsigned long dma_ctrl;
3383         int ret;
3384         u32 reg;
3385
3386         if (!netif_running(dev))
3387                 return 0;
3388
3389         /* Turn on the clock */
3390         ret = clk_prepare_enable(priv->clk);
3391         if (ret)
3392                 return ret;
3393
3394         /* If this is an internal GPHY, power it back on now, before UniMAC
3395          * is brought out of reset, as no UniMAC activity is allowed yet
3396          */
3397         if (priv->internal_phy)
3398                 bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
3399
3400         bcmgenet_umac_reset(priv);
3401
3402         ret = init_umac(priv);
3403         if (ret)
3404                 goto out_clk_disable;
3405
3406         /* From WOL-enabled suspend, switch to regular clock */
3407         if (priv->wolopts)
3408                 clk_disable_unprepare(priv->clk_wol);
3409
3410         phy_init_hw(priv->phydev);
3411         /* Speed settings must be restored */
3412         bcmgenet_mii_config(priv->dev);
3413
3414         /* disable ethernet MAC while updating its registers */
3415         umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, false);
3416
3417         bcmgenet_set_hw_addr(priv, dev->dev_addr);
3418
3419         if (priv->internal_phy) {
3420                 reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
3421                 reg |= EXT_ENERGY_DET_MASK;
3422                 bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
3423         }
3424
3425         if (priv->wolopts)
3426                 bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC);
3427
3428         /* Disable RX/TX DMA and flush TX queues */
3429         dma_ctrl = bcmgenet_dma_disable(priv);
3430
3431         /* Reinitialize TDMA and RDMA and SW housekeeping */
3432         ret = bcmgenet_init_dma(priv);
3433         if (ret) {
3434                 netdev_err(dev, "failed to initialize DMA\n");
3435                 goto out_clk_disable;
3436         }
3437
3438         /* Always enable ring 16, the default descriptor-based ring */
3439         bcmgenet_enable_dma(priv, dma_ctrl);
3440
3441         netif_device_attach(dev);
3442
3443         phy_resume(priv->phydev);
3444
3445         if (priv->eee.eee_enabled)
3446                 bcmgenet_eee_enable_set(dev, true);
3447
3448         bcmgenet_netif_start(dev);
3449
3450         return 0;
3451
3452 out_clk_disable:
3453         clk_disable_unprepare(priv->clk);
3454         return ret;
3455 }
3456 #endif /* CONFIG_PM_SLEEP */
3457
3458 static SIMPLE_DEV_PM_OPS(bcmgenet_pm_ops, bcmgenet_suspend, bcmgenet_resume);
3459
3460 static struct platform_driver bcmgenet_driver = {
3461         .probe  = bcmgenet_probe,
3462         .remove = bcmgenet_remove,
3463         .driver = {
3464                 .name   = "bcmgenet",
3465                 .of_match_table = bcmgenet_match,
3466                 .pm     = &bcmgenet_pm_ops,
3467         },
3468 };
3469 module_platform_driver(bcmgenet_driver);
3470
3471 MODULE_AUTHOR("Broadcom Corporation");
3472 MODULE_DESCRIPTION("Broadcom GENET Ethernet controller driver");
3473 MODULE_ALIAS("platform:bcmgenet");
3474 MODULE_LICENSE("GPL");