/*******************************************************************************
  This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
  ST Ethernet IPs are built around a Synopsys IP Core.

        Copyright(C) 2007-2011 STMicroelectronics Ltd

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>

  Documentation available at:
        http://www.stlinux.com
  Support available at:
        https://bugzilla.stlinux.com/
*******************************************************************************/

#include <linux/clk.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/prefetch.h>
#include <linux/pinctrl/consumer.h>
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#endif /* CONFIG_DEBUG_FS */
#include <linux/net_tstamp.h>
#include "stmmac_ptp.h"
#include "stmmac.h"
#include <linux/reset.h>
#include <linux/of_mdio.h>
#include "dwmac1000.h"

#define STMMAC_ALIGN(x) L1_CACHE_ALIGN(x)
#define TSO_MAX_BUFF_SIZE       (SZ_16K - 1)

/* Module parameters */
#define TX_TIMEO        5000
static int watchdog = TX_TIMEO;
module_param(watchdog, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");

static int debug = -1;
module_param(debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");

static int phyaddr = -1;
module_param(phyaddr, int, S_IRUGO);
MODULE_PARM_DESC(phyaddr, "Physical device address");

#define STMMAC_TX_THRESH        (DMA_TX_SIZE / 4)
#define STMMAC_RX_THRESH        (DMA_RX_SIZE / 4)

static int flow_ctrl = FLOW_OFF;
module_param(flow_ctrl, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");

static int pause = PAUSE_TIME;
module_param(pause, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(pause, "Flow Control Pause Time");

#define TC_DEFAULT 64
static int tc = TC_DEFAULT;
module_param(tc, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(tc, "DMA threshold control value");

#define DEFAULT_BUFSIZE 1536
static int buf_sz = DEFAULT_BUFSIZE;
module_param(buf_sz, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(buf_sz, "DMA buffer size");

#define STMMAC_RX_COPYBREAK     256

static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
                                      NETIF_MSG_LINK | NETIF_MSG_IFUP |
                                      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);

#define STMMAC_DEFAULT_LPI_TIMER        1000
static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
module_param(eee_timer, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
#define STMMAC_LPI_T(x) (jiffies + msecs_to_jiffies(x))

/* By default the driver uses the ring mode to manage tx and rx descriptors,
 * but this parameter can be passed to force using the chain mode instead of
 * the ring mode.
 */
static int chain_mode;
module_param(chain_mode, int, S_IRUGO);
MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
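
/* These are standard module parameters: assuming the driver is built as a
 * module named "stmmac", they can be overridden at load time, or on the
 * kernel command line when the driver is built in (values below are
 * hypothetical examples):
 *
 *   modprobe stmmac eee_timer=2000 chain_mode=1
 *   stmmac.eee_timer=2000 stmmac.chain_mode=1
 */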

static irqreturn_t stmmac_interrupt(int irq, void *dev_id);

#ifdef CONFIG_DEBUG_FS
static int stmmac_init_fs(struct net_device *dev);
static void stmmac_exit_fs(struct net_device *dev);
#endif

#define STMMAC_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))

/**
 * stmmac_verify_args - verify the driver parameters.
 * Description: it checks the driver parameters and sets a default in case
 * of errors.
 */
static void stmmac_verify_args(void)
{
        if (unlikely(watchdog < 0))
                watchdog = TX_TIMEO;
        if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
                buf_sz = DEFAULT_BUFSIZE;
        if (unlikely(flow_ctrl > 1))
                flow_ctrl = FLOW_AUTO;
        else if (likely(flow_ctrl < 0))
                flow_ctrl = FLOW_OFF;
        if (unlikely((pause < 0) || (pause > 0xffff)))
                pause = PAUSE_TIME;
        if (eee_timer < 0)
                eee_timer = STMMAC_DEFAULT_LPI_TIMER;
}

/**
 * stmmac_clk_csr_set - dynamically set the MDC clock
 * @priv: driver private structure
 * Description: this is to dynamically set the MDC clock according to the csr
 * clock input.
 * Note:
 *      If a specific clk_csr value is passed from the platform
 *      this means that the CSR Clock Range selection cannot be
 *      changed at run-time and it is fixed (as reported in the driver
 *      documentation). Vice versa, the driver will try to set the MDC
 *      clock dynamically according to the actual clock input.
 */
static void stmmac_clk_csr_set(struct stmmac_priv *priv)
{
        u32 clk_rate;

        clk_rate = clk_get_rate(priv->stmmac_clk);

        /* The platform-provided default clk_csr is assumed to be valid in
         * all cases except the ones mentioned below. For values higher than
         * the IEEE 802.3 specified frequency we cannot estimate the proper
         * divider, since the frequency of clk_csr_i is not known, so we do
         * not change the default divider.
         */
        if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
                if (clk_rate < CSR_F_35M)
                        priv->clk_csr = STMMAC_CSR_20_35M;
                else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
                        priv->clk_csr = STMMAC_CSR_35_60M;
                else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
                        priv->clk_csr = STMMAC_CSR_60_100M;
                else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
                        priv->clk_csr = STMMAC_CSR_100_150M;
                else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
                        priv->clk_csr = STMMAC_CSR_150_250M;
                else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
                        priv->clk_csr = STMMAC_CSR_250_300M;
        }
}

static void print_pkt(unsigned char *buf, int len)
{
        pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
        print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
}

static inline u32 stmmac_tx_avail(struct stmmac_priv *priv)
{
        unsigned int avail;

        if (priv->dirty_tx > priv->cur_tx)
                avail = priv->dirty_tx - priv->cur_tx - 1;
        else
                avail = DMA_TX_SIZE - priv->cur_tx + priv->dirty_tx - 1;

        return avail;
}

static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv)
{
        unsigned int dirty;

        if (priv->dirty_rx <= priv->cur_rx)
                dirty = priv->cur_rx - priv->dirty_rx;
        else
                dirty = DMA_RX_SIZE - priv->dirty_rx + priv->cur_rx;

        return dirty;
}

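/* A worked example of the ring accounting above, assuming DMA_TX_SIZE == 512:
 * with cur_tx == 10 and dirty_tx == 5 (no wrap, dirty_tx <= cur_tx), the
 * second branch of stmmac_tx_avail() gives 512 - 10 + 5 - 1 = 506 free
 * descriptors; with cur_tx == 2 and dirty_tx == 500 (cur_tx has wrapped),
 * the first branch gives 500 - 2 - 1 = 497. One slot is always kept unused
 * so that a completely full ring can be told apart from an empty one.
 */
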
/**
 * stmmac_hw_fix_mac_speed - callback for speed selection
 * @priv: driver private structure
 * Description: on some platforms (e.g. ST), some HW system configuration
 * registers have to be set according to the negotiated link speed.
 */
static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv)
{
        struct phy_device *phydev = priv->phydev;

        if (likely(priv->plat->fix_mac_speed))
                priv->plat->fix_mac_speed(priv->plat->bsp_priv, phydev->speed);
}

/**
 * stmmac_enable_eee_mode - check and enter LPI mode
 * @priv: driver private structure
 * Description: this function verifies the conditions and enters LPI mode
 * in case of EEE.
 */
static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
{
        /* Check and enter LPI mode */
        if ((priv->dirty_tx == priv->cur_tx) &&
            !priv->tx_path_in_lpi_mode)
                priv->hw->mac->set_eee_mode(priv->hw);
}

/**
 * stmmac_disable_eee_mode - disable and exit from LPI mode
 * @priv: driver private structure
 * Description: this function disables EEE and exits the LPI state if it is
 * active. It is called from the xmit path.
 */
void stmmac_disable_eee_mode(struct stmmac_priv *priv)
{
        priv->hw->mac->reset_eee_mode(priv->hw);
        del_timer_sync(&priv->eee_ctrl_timer);
        priv->tx_path_in_lpi_mode = false;
}

/**
 * stmmac_eee_ctrl_timer - EEE TX SW timer.
 * @arg : data hook
 * Description:
 *  if there is no data transfer and if we are not in LPI state,
 *  then the MAC Transmitter can be moved to LPI state.
 */
static void stmmac_eee_ctrl_timer(unsigned long arg)
{
        struct stmmac_priv *priv = (struct stmmac_priv *)arg;

        stmmac_enable_eee_mode(priv);
        mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
}

/**
 * stmmac_eee_init - init EEE
 * @priv: driver private structure
 * Description:
 *  if the GMAC supports the EEE (from the HW cap reg) and the phy device
 *  can also manage EEE, this function enables the LPI state and starts the
 *  related timer.
 */
bool stmmac_eee_init(struct stmmac_priv *priv)
{
        unsigned long flags;
        bool ret = false;

        /* Using PCS we cannot deal with the phy registers at this stage
         * so we do not support extra features like EEE.
         */
        if ((priv->pcs == STMMAC_PCS_RGMII) || (priv->pcs == STMMAC_PCS_TBI) ||
            (priv->pcs == STMMAC_PCS_RTBI))
                goto out;

        /* MAC core supports the EEE feature. */
        if (priv->dma_cap.eee) {
                int tx_lpi_timer = priv->tx_lpi_timer;

                /* Check if the PHY supports EEE */
                if (phy_init_eee(priv->phydev, 1)) {
                        /* To manage at run-time if the EEE cannot be supported
                         * anymore (for example because the lp caps have been
                         * changed).
                         * In that case the driver disables its own timers.
                         */
                        spin_lock_irqsave(&priv->lock, flags);
                        if (priv->eee_active) {
                                pr_debug("stmmac: disable EEE\n");
                                del_timer_sync(&priv->eee_ctrl_timer);
                                priv->hw->mac->set_eee_timer(priv->hw, 0,
                                                             tx_lpi_timer);
                        }
                        priv->eee_active = 0;
                        spin_unlock_irqrestore(&priv->lock, flags);
                        goto out;
                }
                /* Activate the EEE and start timers */
                spin_lock_irqsave(&priv->lock, flags);
                if (!priv->eee_active) {
                        priv->eee_active = 1;
                        setup_timer(&priv->eee_ctrl_timer,
                                    stmmac_eee_ctrl_timer,
                                    (unsigned long)priv);
                        mod_timer(&priv->eee_ctrl_timer,
                                  STMMAC_LPI_T(eee_timer));

                        priv->hw->mac->set_eee_timer(priv->hw,
                                                     STMMAC_DEFAULT_LIT_LS,
                                                     tx_lpi_timer);
                }
                /* Set HW EEE according to the speed */
                priv->hw->mac->set_eee_pls(priv->hw, priv->phydev->link);

                ret = true;
                spin_unlock_irqrestore(&priv->lock, flags);

                pr_debug("stmmac: Energy-Efficient Ethernet initialized\n");
        }
out:
        return ret;
}

/* stmmac_get_tx_hwtstamp - get HW TX timestamps
 * @priv: driver private structure
 * @entry : descriptor index to be used.
 * @skb : the socket buffer
 * Description :
 * This function reads the timestamp from the descriptor, performs some
 * sanity checks, and passes it to the stack.
 */
static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
                                   unsigned int entry, struct sk_buff *skb)
{
        struct skb_shared_hwtstamps shhwtstamp;
        u64 ns;
        void *desc = NULL;

        if (!priv->hwts_tx_en)
                return;

        /* exit if skb doesn't support hw tstamp */
        if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
                return;

        if (priv->adv_ts)
                desc = (priv->dma_etx + entry);
        else
                desc = (priv->dma_tx + entry);

        /* check tx tstamp status */
        if (!priv->hw->desc->get_tx_timestamp_status((struct dma_desc *)desc))
                return;

        /* get the valid tstamp */
        ns = priv->hw->desc->get_timestamp(desc, priv->adv_ts);

        memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
        shhwtstamp.hwtstamp = ns_to_ktime(ns);
        /* pass tstamp to stack */
        skb_tstamp_tx(skb, &shhwtstamp);
}

/* stmmac_get_rx_hwtstamp - get HW RX timestamps
 * @priv: driver private structure
 * @entry : descriptor index to be used.
 * @skb : the socket buffer
 * Description :
 * This function reads the received packet's timestamp from the descriptor
 * and passes it to the stack. It also performs some sanity checks.
 */
static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv,
                                   unsigned int entry, struct sk_buff *skb)
{
        struct skb_shared_hwtstamps *shhwtstamp = NULL;
        u64 ns;
        void *desc = NULL;

        if (!priv->hwts_rx_en)
                return;

        if (priv->adv_ts)
                desc = (priv->dma_erx + entry);
        else
                desc = (priv->dma_rx + entry);

        /* exit if rx tstamp is not valid */
        if (!priv->hw->desc->get_rx_timestamp_status(desc, priv->adv_ts))
                return;

        /* get valid tstamp */
        ns = priv->hw->desc->get_timestamp(desc, priv->adv_ts);
        shhwtstamp = skb_hwtstamps(skb);
        memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
        shhwtstamp->hwtstamp = ns_to_ktime(ns);
}

/**
 *  stmmac_hwtstamp_ioctl - control hardware timestamping.
 *  @dev: device pointer.
 *  @ifr: An IOCTL specific structure, that can contain a pointer to
 *  a proprietary structure used to pass information to the driver.
 *  Description:
 *  This function configures the MAC to enable/disable both outgoing (TX)
 *  and incoming (RX) packet time stamping based on user input.
 *  Return Value:
 *  0 on success and an appropriate -ve integer on failure.
 */
static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
{
        struct stmmac_priv *priv = netdev_priv(dev);
        struct hwtstamp_config config;
        struct timespec64 now;
        u64 temp = 0;
        u32 ptp_v2 = 0;
        u32 tstamp_all = 0;
        u32 ptp_over_ipv4_udp = 0;
        u32 ptp_over_ipv6_udp = 0;
        u32 ptp_over_ethernet = 0;
        u32 snap_type_sel = 0;
        u32 ts_master_en = 0;
        u32 ts_event_en = 0;
        u32 value = 0;
        u32 sec_inc;

        if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
                netdev_alert(priv->dev, "No support for HW time stamping\n");
                priv->hwts_tx_en = 0;
                priv->hwts_rx_en = 0;

                return -EOPNOTSUPP;
        }

        if (copy_from_user(&config, ifr->ifr_data,
                           sizeof(struct hwtstamp_config)))
                return -EFAULT;

        pr_debug("%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
                 __func__, config.flags, config.tx_type, config.rx_filter);

        /* reserved for future extensions */
        if (config.flags)
                return -EINVAL;

        if (config.tx_type != HWTSTAMP_TX_OFF &&
            config.tx_type != HWTSTAMP_TX_ON)
                return -ERANGE;

        if (priv->adv_ts) {
                switch (config.rx_filter) {
                case HWTSTAMP_FILTER_NONE:
                        /* time stamp no incoming packet at all */
                        config.rx_filter = HWTSTAMP_FILTER_NONE;
                        break;

                case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
                        /* PTP v1, UDP, any kind of event packet */
                        config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
                        /* take time stamp for all event messages */
                        snap_type_sel = PTP_TCR_SNAPTYPSEL_1;

                        ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
                        ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
                        break;

                case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
                        /* PTP v1, UDP, Sync packet */
                        config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
                        /* take time stamp for SYNC messages only */
                        ts_event_en = PTP_TCR_TSEVNTENA;

                        ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
                        ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
                        break;

                case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
                        /* PTP v1, UDP, Delay_req packet */
                        config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
                        /* take time stamp for Delay_Req messages only */
                        ts_master_en = PTP_TCR_TSMSTRENA;
                        ts_event_en = PTP_TCR_TSEVNTENA;

                        ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
                        ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
                        break;

                case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
                        /* PTP v2, UDP, any kind of event packet */
                        config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
                        ptp_v2 = PTP_TCR_TSVER2ENA;
                        /* take time stamp for all event messages */
                        snap_type_sel = PTP_TCR_SNAPTYPSEL_1;

                        ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
                        ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
                        break;

                case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
                        /* PTP v2, UDP, Sync packet */
                        config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
                        ptp_v2 = PTP_TCR_TSVER2ENA;
                        /* take time stamp for SYNC messages only */
                        ts_event_en = PTP_TCR_TSEVNTENA;

                        ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
                        ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
                        break;

                case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
                        /* PTP v2, UDP, Delay_req packet */
                        config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
                        ptp_v2 = PTP_TCR_TSVER2ENA;
                        /* take time stamp for Delay_Req messages only */
                        ts_master_en = PTP_TCR_TSMSTRENA;
                        ts_event_en = PTP_TCR_TSEVNTENA;

                        ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
                        ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
                        break;

                case HWTSTAMP_FILTER_PTP_V2_EVENT:
                        /* PTP v2/802.1AS, any layer, any kind of event packet */
                        config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
                        ptp_v2 = PTP_TCR_TSVER2ENA;
                        /* take time stamp for all event messages */
                        snap_type_sel = PTP_TCR_SNAPTYPSEL_1;

                        ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
                        ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
                        ptp_over_ethernet = PTP_TCR_TSIPENA;
                        break;

                case HWTSTAMP_FILTER_PTP_V2_SYNC:
                        /* PTP v2/802.1AS, any layer, Sync packet */
                        config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
                        ptp_v2 = PTP_TCR_TSVER2ENA;
                        /* take time stamp for SYNC messages only */
                        ts_event_en = PTP_TCR_TSEVNTENA;

                        ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
                        ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
                        ptp_over_ethernet = PTP_TCR_TSIPENA;
                        break;

                case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
                        /* PTP v2/802.1AS, any layer, Delay_req packet */
                        config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
                        ptp_v2 = PTP_TCR_TSVER2ENA;
                        /* take time stamp for Delay_Req messages only */
                        ts_master_en = PTP_TCR_TSMSTRENA;
                        ts_event_en = PTP_TCR_TSEVNTENA;

                        ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
                        ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
                        ptp_over_ethernet = PTP_TCR_TSIPENA;
                        break;

                case HWTSTAMP_FILTER_ALL:
                        /* time stamp any incoming packet */
                        config.rx_filter = HWTSTAMP_FILTER_ALL;
                        tstamp_all = PTP_TCR_TSENALL;
                        break;

                default:
                        return -ERANGE;
                }
        } else {
                switch (config.rx_filter) {
                case HWTSTAMP_FILTER_NONE:
                        config.rx_filter = HWTSTAMP_FILTER_NONE;
                        break;
                default:
                        /* PTP v1, UDP, any kind of event packet */
                        config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
                        break;
                }
        }
        priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
        priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;

        if (!priv->hwts_tx_en && !priv->hwts_rx_en) {
                priv->hw->ptp->config_hw_tstamping(priv->ioaddr, 0);
        } else {
                value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
                         tstamp_all | ptp_v2 | ptp_over_ethernet |
                         ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
                         ts_master_en | snap_type_sel);
                priv->hw->ptp->config_hw_tstamping(priv->ioaddr, value);

                /* program Sub Second Increment reg */
                sec_inc = priv->hw->ptp->config_sub_second_increment(
                        priv->ioaddr, priv->clk_ptp_rate);
                temp = div_u64(1000000000ULL, sec_inc);

                /* calculate the default addend:
                 * formula is:
                 * addend = (2^32)/freq_div_ratio;
                 * where, freq_div_ratio = 1e9ns/sec_inc
                 */
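                /* A hedged numeric example of the formula above: assuming
                 * clk_ptp_rate == 100 MHz and a programmed sec_inc of 20 ns,
                 * freq_div_ratio = 1e9 / 20 = 50,000,000 and
                 * addend = 2^32 * 50e6 / 100e6 = 0x80000000: the 32-bit
                 * accumulator then overflows (advancing the counter by
                 * sec_inc) once every two PTP clock cycles.
                 */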
                temp = (u64)(temp << 32);
                priv->default_addend = div_u64(temp, priv->clk_ptp_rate);
                priv->hw->ptp->config_addend(priv->ioaddr,
                                             priv->default_addend);

                /* initialize system time */
                ktime_get_real_ts64(&now);

                /* lower 32 bits of tv_sec are safe until y2106 */
                priv->hw->ptp->init_systime(priv->ioaddr, (u32)now.tv_sec,
                                            now.tv_nsec);
        }

        return copy_to_user(ifr->ifr_data, &config,
                            sizeof(struct hwtstamp_config)) ? -EFAULT : 0;
}

/**
 * stmmac_init_ptp - init PTP
 * @priv: driver private structure
 * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
 * This is done by looking at the HW cap. register.
 * This function also registers the ptp driver.
 */
static int stmmac_init_ptp(struct stmmac_priv *priv)
{
        if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
                return -EOPNOTSUPP;

        /* Fall back to the main clock if no PTP ref clock is passed */
        priv->clk_ptp_ref = devm_clk_get(priv->device, "clk_ptp_ref");
        if (IS_ERR(priv->clk_ptp_ref)) {
                priv->clk_ptp_rate = clk_get_rate(priv->stmmac_clk);
                priv->clk_ptp_ref = NULL;
        } else {
                clk_prepare_enable(priv->clk_ptp_ref);
                priv->clk_ptp_rate = clk_get_rate(priv->clk_ptp_ref);
        }

        priv->adv_ts = 0;
        if (priv->dma_cap.atime_stamp && priv->extend_desc)
                priv->adv_ts = 1;

        if (netif_msg_hw(priv) && priv->dma_cap.time_stamp)
                pr_debug("IEEE 1588-2002 Time Stamp supported\n");

        if (netif_msg_hw(priv) && priv->adv_ts)
                pr_debug("IEEE 1588-2008 Advanced Time Stamp supported\n");

        priv->hw->ptp = &stmmac_ptp;
        priv->hwts_tx_en = 0;
        priv->hwts_rx_en = 0;

        return stmmac_ptp_register(priv);
}

static void stmmac_release_ptp(struct stmmac_priv *priv)
{
        if (priv->clk_ptp_ref)
                clk_disable_unprepare(priv->clk_ptp_ref);
        stmmac_ptp_unregister(priv);
}

/**
 * stmmac_adjust_link - adjusts the link parameters
 * @dev: net device structure
 * Description: this is the helper called by the physical abstraction layer
 * drivers to communicate the phy link status. According to the speed and
 * duplex this driver can invoke registered glue-logic as well.
 * It also invokes the EEE initialization because this can change when
 * switching to a different (EEE-capable) network.
 */
static void stmmac_adjust_link(struct net_device *dev)
{
        struct stmmac_priv *priv = netdev_priv(dev);
        struct phy_device *phydev = priv->phydev;
        unsigned long flags;
        int new_state = 0;
        unsigned int fc = priv->flow_ctrl, pause_time = priv->pause;

        if (!phydev)
                return;

        spin_lock_irqsave(&priv->lock, flags);

        if (phydev->link) {
                u32 ctrl = readl(priv->ioaddr + MAC_CTRL_REG);

                /* Now we make sure that we can be in full duplex mode.
                 * If not, we operate in half-duplex mode.
                 */
                if (phydev->duplex != priv->oldduplex) {
                        new_state = 1;
                        if (!(phydev->duplex))
                                ctrl &= ~priv->hw->link.duplex;
                        else
                                ctrl |= priv->hw->link.duplex;
                        priv->oldduplex = phydev->duplex;
                }
                /* Flow Control operation */
                if (phydev->pause)
                        priv->hw->mac->flow_ctrl(priv->hw, phydev->duplex,
                                                 fc, pause_time);

                if (phydev->speed != priv->speed) {
                        new_state = 1;
                        switch (phydev->speed) {
                        case 1000:
                                if (likely((priv->plat->has_gmac) ||
                                           (priv->plat->has_gmac4)))
                                        ctrl &= ~priv->hw->link.port;
                                stmmac_hw_fix_mac_speed(priv);
                                break;
                        case 100:
                        case 10:
                                if (likely((priv->plat->has_gmac) ||
                                           (priv->plat->has_gmac4))) {
                                        ctrl |= priv->hw->link.port;
                                        if (phydev->speed == SPEED_100)
                                                ctrl |= priv->hw->link.speed;
                                        else
                                                ctrl &= ~(priv->hw->link.speed);
                                } else {
                                        ctrl &= ~priv->hw->link.port;
                                }
                                stmmac_hw_fix_mac_speed(priv);
                                break;
                        default:
                                if (netif_msg_link(priv))
                                        pr_warn("%s: Speed (%d) not 10/100/1000\n",
                                                dev->name, phydev->speed);
                                break;
                        }

                        priv->speed = phydev->speed;
                }

                writel(ctrl, priv->ioaddr + MAC_CTRL_REG);

                if (!priv->oldlink) {
                        new_state = 1;
                        priv->oldlink = 1;
                }
        } else if (priv->oldlink) {
                new_state = 1;
                priv->oldlink = 0;
                priv->speed = 0;
                priv->oldduplex = -1;
        }

        if (new_state && netif_msg_link(priv))
                phy_print_status(phydev);

        spin_unlock_irqrestore(&priv->lock, flags);

        if (phydev->is_pseudo_fixed_link)
                /* Stop the PHY layer from calling the link adjustment hook
                 * in case a switch is attached to the stmmac driver.
                 */
                phydev->irq = PHY_IGNORE_INTERRUPT;
        else
                /* At this stage, init the EEE if supported.
                 * Never called in case of fixed_link.
                 */
                priv->eee_enabled = stmmac_eee_init(priv);
}

/**
 * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
 * @priv: driver private structure
 * Description: this is to verify if the HW supports the PCS.
 * The Physical Coding Sublayer (PCS) interface can be used when the MAC is
 * configured for the TBI, RTBI, or SGMII PHY interface.
 */
static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
{
        int interface = priv->plat->interface;

        if (priv->dma_cap.pcs) {
                if ((interface == PHY_INTERFACE_MODE_RGMII) ||
                    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
                    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
                    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
                        pr_debug("STMMAC: PCS RGMII support enabled\n");
                        priv->pcs = STMMAC_PCS_RGMII;
                } else if (interface == PHY_INTERFACE_MODE_SGMII) {
                        pr_debug("STMMAC: PCS SGMII support enabled\n");
                        priv->pcs = STMMAC_PCS_SGMII;
                }
        }
}

/**
 * stmmac_init_phy - PHY initialization
 * @dev: net device structure
 * Description: it initializes the driver's PHY state and attaches the PHY
 * to the mac driver.
 *  Return value:
 *  0 on success
 */
static int stmmac_init_phy(struct net_device *dev)
{
        struct stmmac_priv *priv = netdev_priv(dev);
        struct phy_device *phydev;
        char phy_id_fmt[MII_BUS_ID_SIZE + 3];
        char bus_id[MII_BUS_ID_SIZE];
        int interface = priv->plat->interface;
        int max_speed = priv->plat->max_speed;

        priv->oldlink = 0;
        priv->speed = 0;
        priv->oldduplex = -1;

        if (priv->plat->phy_node) {
                phydev = of_phy_connect(dev, priv->plat->phy_node,
                                        &stmmac_adjust_link, 0, interface);
        } else {
                snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
                         priv->plat->bus_id);

                snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
                         priv->plat->phy_addr);
                pr_debug("stmmac_init_phy:  trying to attach to %s\n",
                         phy_id_fmt);

                phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link,
                                     interface);
        }

        if (IS_ERR_OR_NULL(phydev)) {
                pr_err("%s: Could not attach to PHY\n", dev->name);
                if (!phydev)
                        return -ENODEV;

                return PTR_ERR(phydev);
        }

        /* Stop advertising 1000BASE-T capability if interface is not GMII */
        if ((interface == PHY_INTERFACE_MODE_MII) ||
            (interface == PHY_INTERFACE_MODE_RMII) ||
            (max_speed < 1000 && max_speed > 0))
                phydev->advertising &= ~(SUPPORTED_1000baseT_Half |
                                         SUPPORTED_1000baseT_Full);

        /*
         * Broken HW is sometimes missing the pull-up resistor on the
         * MDIO line, which results in reads to non-existent devices returning
         * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
         * device as well.
         * Note: phydev->phy_id is the result of reading the UID PHY registers.
         */
        if (!priv->plat->phy_node && phydev->phy_id == 0) {
                phy_disconnect(phydev);
                return -ENODEV;
        }

        pr_debug("stmmac_init_phy:  %s: attached to PHY (UID 0x%x) Link = %d\n",
                 dev->name, phydev->phy_id, phydev->link);

        priv->phydev = phydev;

        return 0;
}

static void stmmac_display_rings(struct stmmac_priv *priv)
{
        void *head_rx, *head_tx;

        if (priv->extend_desc) {
                head_rx = (void *)priv->dma_erx;
                head_tx = (void *)priv->dma_etx;
        } else {
                head_rx = (void *)priv->dma_rx;
                head_tx = (void *)priv->dma_tx;
        }

        /* Display Rx ring */
        priv->hw->desc->display_ring(head_rx, DMA_RX_SIZE, true);
        /* Display Tx ring */
        priv->hw->desc->display_ring(head_tx, DMA_TX_SIZE, false);
}

static int stmmac_set_bfsize(int mtu, int bufsize)
{
        int ret = bufsize;

        if (mtu >= BUF_SIZE_4KiB)
                ret = BUF_SIZE_8KiB;
        else if (mtu >= BUF_SIZE_2KiB)
                ret = BUF_SIZE_4KiB;
        else if (mtu > DEFAULT_BUFSIZE)
                ret = BUF_SIZE_2KiB;
        else
                ret = DEFAULT_BUFSIZE;

        return ret;
}

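/* For reference, stmmac_set_bfsize() maps common MTUs as follows (assuming
 * the usual BUF_SIZE_* constants of 2/4/8 KiB and DEFAULT_BUFSIZE == 1536):
 * mtu == 1500 -> DEFAULT_BUFSIZE (1536), mtu == 3000 -> BUF_SIZE_4KiB,
 * mtu == 5000 -> BUF_SIZE_8KiB.
 */
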
/**
 * stmmac_clear_descriptors - clear descriptors
 * @priv: driver private structure
 * Description: this function is called to clear the tx and rx descriptors,
 * whether basic or extended descriptors are in use.
 */
static void stmmac_clear_descriptors(struct stmmac_priv *priv)
{
        int i;

        /* Clear the Rx/Tx descriptors */
        for (i = 0; i < DMA_RX_SIZE; i++)
                if (priv->extend_desc)
                        priv->hw->desc->init_rx_desc(&priv->dma_erx[i].basic,
                                                     priv->use_riwt, priv->mode,
                                                     (i == DMA_RX_SIZE - 1));
                else
                        priv->hw->desc->init_rx_desc(&priv->dma_rx[i],
                                                     priv->use_riwt, priv->mode,
                                                     (i == DMA_RX_SIZE - 1));
        for (i = 0; i < DMA_TX_SIZE; i++)
                if (priv->extend_desc)
                        priv->hw->desc->init_tx_desc(&priv->dma_etx[i].basic,
                                                     priv->mode,
                                                     (i == DMA_TX_SIZE - 1));
                else
                        priv->hw->desc->init_tx_desc(&priv->dma_tx[i],
                                                     priv->mode,
                                                     (i == DMA_TX_SIZE - 1));
}

/**
 * stmmac_init_rx_buffers - init the RX descriptor buffer.
 * @priv: driver private structure
 * @p: descriptor pointer
 * @i: descriptor index
 * @flags: gfp flag.
 * Description: this function is called to allocate a receive buffer, perform
 * the DMA mapping and init the descriptor.
 */
static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
                                  int i, gfp_t flags)
{
        struct sk_buff *skb;

        skb = __netdev_alloc_skb_ip_align(priv->dev, priv->dma_buf_sz, flags);
        if (!skb) {
                pr_err("%s: Rx init fails; skb is NULL\n", __func__);
                return -ENOMEM;
        }
        priv->rx_skbuff[i] = skb;
        priv->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
                                                priv->dma_buf_sz,
                                                DMA_FROM_DEVICE);
        if (dma_mapping_error(priv->device, priv->rx_skbuff_dma[i])) {
                pr_err("%s: DMA mapping error\n", __func__);
                dev_kfree_skb_any(skb);
                return -EINVAL;
        }

        if (priv->synopsys_id >= DWMAC_CORE_4_00)
                p->des0 = priv->rx_skbuff_dma[i];
        else
                p->des2 = priv->rx_skbuff_dma[i];

        if ((priv->hw->mode->init_desc3) &&
            (priv->dma_buf_sz == BUF_SIZE_16KiB))
                priv->hw->mode->init_desc3(p);

        return 0;
}

static void stmmac_free_rx_buffers(struct stmmac_priv *priv, int i)
{
        if (priv->rx_skbuff[i]) {
                dma_unmap_single(priv->device, priv->rx_skbuff_dma[i],
                                 priv->dma_buf_sz, DMA_FROM_DEVICE);
                dev_kfree_skb_any(priv->rx_skbuff[i]);
        }
        priv->rx_skbuff[i] = NULL;
}

/**
 * init_dma_desc_rings - init the RX/TX descriptor rings
 * @dev: net device structure
 * @flags: gfp flag.
 * Description: this function initializes the DMA RX/TX descriptors
 * and allocates the socket buffers. It supports the chained and ring
 * modes.
 */
static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
{
        int i;
        struct stmmac_priv *priv = netdev_priv(dev);
        unsigned int bfsize = 0;
        int ret = -ENOMEM;

        if (priv->hw->mode->set_16kib_bfsize)
                bfsize = priv->hw->mode->set_16kib_bfsize(dev->mtu);

        if (bfsize < BUF_SIZE_16KiB)
                bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);

        priv->dma_buf_sz = bfsize;

        if (netif_msg_probe(priv)) {
                pr_debug("(%s) dma_rx_phy=0x%08x dma_tx_phy=0x%08x\n", __func__,
                         (u32) priv->dma_rx_phy, (u32) priv->dma_tx_phy);

                /* RX INITIALIZATION */
                pr_debug("\tSKB addresses:\nskb\t\tskb data\tdma data\n");
        }
        for (i = 0; i < DMA_RX_SIZE; i++) {
                struct dma_desc *p;

                if (priv->extend_desc)
                        p = &((priv->dma_erx + i)->basic);
                else
                        p = priv->dma_rx + i;

                ret = stmmac_init_rx_buffers(priv, p, i, flags);
                if (ret)
                        goto err_init_rx_buffers;

                if (netif_msg_probe(priv))
                        pr_debug("[%p]\t[%p]\t[%x]\n", priv->rx_skbuff[i],
                                 priv->rx_skbuff[i]->data,
                                 (unsigned int)priv->rx_skbuff_dma[i]);
        }
        priv->cur_rx = 0;
        priv->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);
        buf_sz = bfsize;

        /* Setup the chained descriptor addresses */
        if (priv->mode == STMMAC_CHAIN_MODE) {
                if (priv->extend_desc) {
                        priv->hw->mode->init(priv->dma_erx, priv->dma_rx_phy,
                                             DMA_RX_SIZE, 1);
                        priv->hw->mode->init(priv->dma_etx, priv->dma_tx_phy,
                                             DMA_TX_SIZE, 1);
                } else {
                        priv->hw->mode->init(priv->dma_rx, priv->dma_rx_phy,
                                             DMA_RX_SIZE, 0);
                        priv->hw->mode->init(priv->dma_tx, priv->dma_tx_phy,
                                             DMA_TX_SIZE, 0);
                }
        }

        /* TX INITIALIZATION */
        for (i = 0; i < DMA_TX_SIZE; i++) {
                struct dma_desc *p;

                if (priv->extend_desc)
                        p = &((priv->dma_etx + i)->basic);
                else
                        p = priv->dma_tx + i;

                if (priv->synopsys_id >= DWMAC_CORE_4_00) {
                        p->des0 = 0;
                        p->des1 = 0;
                        p->des2 = 0;
                        p->des3 = 0;
                } else {
                        p->des2 = 0;
                }

                priv->tx_skbuff_dma[i].buf = 0;
                priv->tx_skbuff_dma[i].map_as_page = false;
                priv->tx_skbuff_dma[i].len = 0;
                priv->tx_skbuff_dma[i].last_segment = false;
                priv->tx_skbuff[i] = NULL;
        }

        priv->dirty_tx = 0;
        priv->cur_tx = 0;
        netdev_reset_queue(priv->dev);

        stmmac_clear_descriptors(priv);

        if (netif_msg_hw(priv))
                stmmac_display_rings(priv);

        return 0;
err_init_rx_buffers:
        while (--i >= 0)
                stmmac_free_rx_buffers(priv, i);
        return ret;
}

static void dma_free_rx_skbufs(struct stmmac_priv *priv)
{
        int i;

        for (i = 0; i < DMA_RX_SIZE; i++)
                stmmac_free_rx_buffers(priv, i);
}

static void dma_free_tx_skbufs(struct stmmac_priv *priv)
{
        int i;

        for (i = 0; i < DMA_TX_SIZE; i++) {
                struct dma_desc *p;

                if (priv->extend_desc)
                        p = &((priv->dma_etx + i)->basic);
                else
                        p = priv->dma_tx + i;

                if (priv->tx_skbuff_dma[i].buf) {
                        if (priv->tx_skbuff_dma[i].map_as_page)
                                dma_unmap_page(priv->device,
                                               priv->tx_skbuff_dma[i].buf,
                                               priv->tx_skbuff_dma[i].len,
                                               DMA_TO_DEVICE);
                        else
                                dma_unmap_single(priv->device,
                                                 priv->tx_skbuff_dma[i].buf,
                                                 priv->tx_skbuff_dma[i].len,
                                                 DMA_TO_DEVICE);
                }

                if (priv->tx_skbuff[i] != NULL) {
                        dev_kfree_skb_any(priv->tx_skbuff[i]);
                        priv->tx_skbuff[i] = NULL;
                        priv->tx_skbuff_dma[i].buf = 0;
                        priv->tx_skbuff_dma[i].map_as_page = false;
                }
        }
}

/**
 * alloc_dma_desc_resources - alloc TX/RX resources.
 * @priv: private structure
 * Description: according to which descriptor can be used (extended or basic)
 * this function allocates the resources for the TX and RX paths. In case of
 * reception, for example, it pre-allocates the RX socket buffers in order to
 * allow the zero-copy mechanism.
 */
static int alloc_dma_desc_resources(struct stmmac_priv *priv)
{
        int ret = -ENOMEM;

        priv->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE, sizeof(dma_addr_t),
                                            GFP_KERNEL);
        if (!priv->rx_skbuff_dma)
                return -ENOMEM;

        priv->rx_skbuff = kmalloc_array(DMA_RX_SIZE, sizeof(struct sk_buff *),
                                        GFP_KERNEL);
        if (!priv->rx_skbuff)
                goto err_rx_skbuff;

        priv->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE,
                                            sizeof(*priv->tx_skbuff_dma),
                                            GFP_KERNEL);
        if (!priv->tx_skbuff_dma)
                goto err_tx_skbuff_dma;

        priv->tx_skbuff = kmalloc_array(DMA_TX_SIZE, sizeof(struct sk_buff *),
                                        GFP_KERNEL);
        if (!priv->tx_skbuff)
                goto err_tx_skbuff;

        if (priv->extend_desc) {
                priv->dma_erx = dma_zalloc_coherent(priv->device, DMA_RX_SIZE *
                                                    sizeof(struct dma_extended_desc),
                                                    &priv->dma_rx_phy,
                                                    GFP_KERNEL);
                if (!priv->dma_erx)
                        goto err_dma;

                priv->dma_etx = dma_zalloc_coherent(priv->device, DMA_TX_SIZE *
                                                    sizeof(struct dma_extended_desc),
                                                    &priv->dma_tx_phy,
                                                    GFP_KERNEL);
                if (!priv->dma_etx) {
                        dma_free_coherent(priv->device, DMA_RX_SIZE *
                                          sizeof(struct dma_extended_desc),
                                          priv->dma_erx, priv->dma_rx_phy);
                        goto err_dma;
                }
        } else {
                priv->dma_rx = dma_zalloc_coherent(priv->device, DMA_RX_SIZE *
                                                   sizeof(struct dma_desc),
                                                   &priv->dma_rx_phy,
                                                   GFP_KERNEL);
                if (!priv->dma_rx)
                        goto err_dma;

                priv->dma_tx = dma_zalloc_coherent(priv->device, DMA_TX_SIZE *
                                                   sizeof(struct dma_desc),
                                                   &priv->dma_tx_phy,
                                                   GFP_KERNEL);
                if (!priv->dma_tx) {
                        dma_free_coherent(priv->device, DMA_RX_SIZE *
                                          sizeof(struct dma_desc),
                                          priv->dma_rx, priv->dma_rx_phy);
                        goto err_dma;
                }
        }

        return 0;

err_dma:
        kfree(priv->tx_skbuff);
err_tx_skbuff:
        kfree(priv->tx_skbuff_dma);
err_tx_skbuff_dma:
        kfree(priv->rx_skbuff);
err_rx_skbuff:
        kfree(priv->rx_skbuff_dma);
        return ret;
}

static void free_dma_desc_resources(struct stmmac_priv *priv)
{
        /* Release the DMA TX/RX socket buffers */
        dma_free_rx_skbufs(priv);
        dma_free_tx_skbufs(priv);

        /* Free DMA regions of consistent memory previously allocated */
        if (!priv->extend_desc) {
                dma_free_coherent(priv->device,
                                  DMA_TX_SIZE * sizeof(struct dma_desc),
                                  priv->dma_tx, priv->dma_tx_phy);
                dma_free_coherent(priv->device,
                                  DMA_RX_SIZE * sizeof(struct dma_desc),
                                  priv->dma_rx, priv->dma_rx_phy);
        } else {
                dma_free_coherent(priv->device, DMA_TX_SIZE *
                                  sizeof(struct dma_extended_desc),
                                  priv->dma_etx, priv->dma_tx_phy);
                dma_free_coherent(priv->device, DMA_RX_SIZE *
                                  sizeof(struct dma_extended_desc),
                                  priv->dma_erx, priv->dma_rx_phy);
        }
        kfree(priv->rx_skbuff_dma);
        kfree(priv->rx_skbuff);
        kfree(priv->tx_skbuff_dma);
        kfree(priv->tx_skbuff);
}

/**
 *  stmmac_dma_operation_mode - HW DMA operation mode
 *  @priv: driver private structure
 *  Description: it is used for configuring the DMA operation mode register in
 *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
 */
static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
{
        int rxfifosz = priv->plat->rx_fifo_size;

        if (priv->plat->force_thresh_dma_mode) {
                priv->hw->dma->dma_mode(priv->ioaddr, tc, tc, rxfifosz);
        } else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
                /*
                 * In case of GMAC, SF mode can be enabled
                 * to perform the TX COE in HW. This depends on:
                 * 1) TX COE being actually supported;
                 * 2) there being no buggy Jumbo frame support
                 *    that requires not inserting the csum in the TDES.
                 */
                priv->hw->dma->dma_mode(priv->ioaddr, SF_DMA_MODE, SF_DMA_MODE,
                                        rxfifosz);
                priv->xstats.threshold = SF_DMA_MODE;
        } else {
                priv->hw->dma->dma_mode(priv->ioaddr, tc, SF_DMA_MODE,
                                        rxfifosz);
        }
}

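/* With the defaults above (tc == TC_DEFAULT == 64, no forced mode, no TX
 * COE) the last branch is taken, i.e. the TX path runs in threshold mode
 * with a 64-byte threshold while the RX path stays in store-and-forward:
 *
 *   priv->hw->dma->dma_mode(priv->ioaddr, 64, SF_DMA_MODE, rxfifosz);
 */
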
1286 /**
1287  * stmmac_tx_clean - to manage the transmission completion
1288  * @priv: driver private structure
1289  * Description: it reclaims the transmit resources after transmission completes.
1290  */
1291 static void stmmac_tx_clean(struct stmmac_priv *priv)
1292 {
1293         unsigned int bytes_compl = 0, pkts_compl = 0;
1294         unsigned int entry = priv->dirty_tx;
1295
1296         spin_lock(&priv->tx_lock);
1297
1298         priv->xstats.tx_clean++;
1299
1300         while (entry != priv->cur_tx) {
1301                 struct sk_buff *skb = priv->tx_skbuff[entry];
1302                 struct dma_desc *p;
1303                 int status;
1304
1305                 if (priv->extend_desc)
1306                         p = (struct dma_desc *)(priv->dma_etx + entry);
1307                 else
1308                         p = priv->dma_tx + entry;
1309
1310                 status = priv->hw->desc->tx_status(&priv->dev->stats,
1311                                                       &priv->xstats, p,
1312                                                       priv->ioaddr);
1313                 /* Check if the descriptor is owned by the DMA */
1314                 if (unlikely(status & tx_dma_own))
1315                         break;
1316
1317                 /* Just consider the last segment and ...*/
1318                 if (likely(!(status & tx_not_ls))) {
1319                         /* ... verify the status error condition */
1320                         if (unlikely(status & tx_err)) {
1321                                 priv->dev->stats.tx_errors++;
1322                         } else {
1323                                 priv->dev->stats.tx_packets++;
1324                                 priv->xstats.tx_pkt_n++;
1325                         }
1326                         stmmac_get_tx_hwtstamp(priv, entry, skb);
1327                 }
1328
1329                 if (likely(priv->tx_skbuff_dma[entry].buf)) {
1330                         if (priv->tx_skbuff_dma[entry].map_as_page)
1331                                 dma_unmap_page(priv->device,
1332                                                priv->tx_skbuff_dma[entry].buf,
1333                                                priv->tx_skbuff_dma[entry].len,
1334                                                DMA_TO_DEVICE);
1335                         else
1336                                 dma_unmap_single(priv->device,
1337                                                  priv->tx_skbuff_dma[entry].buf,
1338                                                  priv->tx_skbuff_dma[entry].len,
1339                                                  DMA_TO_DEVICE);
1340                         priv->tx_skbuff_dma[entry].buf = 0;
1341                         priv->tx_skbuff_dma[entry].len = 0;
1342                         priv->tx_skbuff_dma[entry].map_as_page = false;
1343                 }
1344
1345                 if (priv->hw->mode->clean_desc3)
1346                         priv->hw->mode->clean_desc3(priv, p);
1347
1348                 priv->tx_skbuff_dma[entry].last_segment = false;
1349                 priv->tx_skbuff_dma[entry].is_jumbo = false;
1350
1351                 if (likely(skb != NULL)) {
1352                         pkts_compl++;
1353                         bytes_compl += skb->len;
1354                         dev_consume_skb_any(skb);
1355                         priv->tx_skbuff[entry] = NULL;
1356                 }
1357
1358                 priv->hw->desc->release_tx_desc(p, priv->mode);
1359
1360                 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
1361         }
1362         priv->dirty_tx = entry;
1363
1364         netdev_completed_queue(priv->dev, pkts_compl, bytes_compl);
1365
1366         if (unlikely(netif_queue_stopped(priv->dev) &&
1367                      stmmac_tx_avail(priv) > STMMAC_TX_THRESH)) {
1368                 netif_tx_lock(priv->dev);
1369                 if (netif_queue_stopped(priv->dev) &&
1370                     stmmac_tx_avail(priv) > STMMAC_TX_THRESH) {
1371                         if (netif_msg_tx_done(priv))
1372                                 pr_debug("%s: restart transmit\n", __func__);
1373                         netif_wake_queue(priv->dev);
1374                 }
1375                 netif_tx_unlock(priv->dev);
1376         }
1377
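             /* The TX path is idle again: let the MAC re-enter its
              * low-power (LPI) state and re-arm the EEE control timer.
              */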
1378         if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
1379                 stmmac_enable_eee_mode(priv);
1380                 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
1381         }
1382         spin_unlock(&priv->tx_lock);
1383 }
1384
1385 static inline void stmmac_enable_dma_irq(struct stmmac_priv *priv)
1386 {
1387         priv->hw->dma->enable_dma_irq(priv->ioaddr);
1388 }
1389
1390 static inline void stmmac_disable_dma_irq(struct stmmac_priv *priv)
1391 {
1392         priv->hw->dma->disable_dma_irq(priv->ioaddr);
1393 }
1394
1395 /**
1396  * stmmac_tx_err - to manage the tx error
1397  * @priv: driver private structure
1398  * Description: it cleans the descriptors and restarts the transmission
1399  * in case of transmission errors.
1400  */
1401 static void stmmac_tx_err(struct stmmac_priv *priv)
1402 {
1403         int i;
1404         netif_stop_queue(priv->dev);
1405
1406         priv->hw->dma->stop_tx(priv->ioaddr);
1407         dma_free_tx_skbufs(priv);
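             /* Re-initialize every TX descriptor, marking the last one as
              * the end of ring, so the DMA restarts from a clean state.
              */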
1408         for (i = 0; i < DMA_TX_SIZE; i++)
1409                 if (priv->extend_desc)
1410                         priv->hw->desc->init_tx_desc(&priv->dma_etx[i].basic,
1411                                                      priv->mode,
1412                                                      (i == DMA_TX_SIZE - 1));
1413                 else
1414                         priv->hw->desc->init_tx_desc(&priv->dma_tx[i],
1415                                                      priv->mode,
1416                                                      (i == DMA_TX_SIZE - 1));
1417         priv->dirty_tx = 0;
1418         priv->cur_tx = 0;
1419         netdev_reset_queue(priv->dev);
1420         priv->hw->dma->start_tx(priv->ioaddr);
1421
1422         priv->dev->stats.tx_errors++;
1423         netif_wake_queue(priv->dev);
1424 }
1425
1426 /**
1427  * stmmac_dma_interrupt - DMA ISR
1428  * @priv: driver private structure
1429  * Description: this is the DMA ISR. It is called by the main ISR.
1430  * It calls the dwmac DMA routine and schedules the poll method when
1431  * there is work to be done.
1432  */
1433 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
1434 {
1435         int status;
1436         int rxfifosz = priv->plat->rx_fifo_size;
1437
1438         status = priv->hw->dma->dma_interrupt(priv->ioaddr, &priv->xstats);
1439         if (likely((status & handle_rx)) || (status & handle_tx)) {
1440                 if (likely(napi_schedule_prep(&priv->napi))) {
1441                         stmmac_disable_dma_irq(priv);
1442                         __napi_schedule(&priv->napi);
1443                 }
1444         }
1445         if (unlikely(status & tx_hard_error_bump_tc)) {
1446                 /* Try to bump up the dma threshold on this failure */
1447                 if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
1448                     (tc <= 256)) {
1449                         tc += 64;
1450                         if (priv->plat->force_thresh_dma_mode)
1451                                 priv->hw->dma->dma_mode(priv->ioaddr, tc, tc,
1452                                                         rxfifosz);
1453                         else
1454                                 priv->hw->dma->dma_mode(priv->ioaddr, tc,
1455                                                         SF_DMA_MODE, rxfifosz);
1456                         priv->xstats.threshold = tc;
1457                 }
1458         } else if (unlikely(status == tx_hard_error))
1459                 stmmac_tx_err(priv);
1460 }
1461
1462 /**
1463  * stmmac_mmc_setup - setup the Mac Management Counters (MMC)
1464  * @priv: driver private structure
1465  * Description: this masks the MMC irq because the counters are managed in SW.
1466  */
1467 static void stmmac_mmc_setup(struct stmmac_priv *priv)
1468 {
1469         unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
1470                             MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
1471
1472         if (priv->synopsys_id >= DWMAC_CORE_4_00)
1473                 priv->mmcaddr = priv->ioaddr + MMC_GMAC4_OFFSET;
1474         else
1475                 priv->mmcaddr = priv->ioaddr + MMC_GMAC3_X_OFFSET;
1476
1477         dwmac_mmc_intr_all_mask(priv->mmcaddr);
1478
1479         if (priv->dma_cap.rmon) {
1480                 dwmac_mmc_ctrl(priv->mmcaddr, mode);
1481                 memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
1482         } else
1483                 pr_info(" No MAC Management Counters available\n");
1484 }
1485
1486 /**
1487  * stmmac_selec_desc_mode - to select among normal/alternate/extended descriptors
1488  * @priv: driver private structure
1489  * Description: select the Enhanced/Alternate or Normal descriptors.
1490  * In case of Enhanced/Alternate, it checks if the extended descriptors are
1491  * supported by the HW capability register.
1492  */
1493 static void stmmac_selec_desc_mode(struct stmmac_priv *priv)
1494 {
1495         if (priv->plat->enh_desc) {
1496                 pr_info(" Enhanced/Alternate descriptors\n");
1497
1498                 /* GMAC older than 3.50 has no extended descriptors */
1499                 if (priv->synopsys_id >= DWMAC_CORE_3_50) {
1500                         pr_info("\tEnabled extended descriptors\n");
1501                         priv->extend_desc = 1;
1502                 } else
1503                         pr_warn("Extended descriptors not supported\n");
1504
1505                 priv->hw->desc = &enh_desc_ops;
1506         } else {
1507                 pr_info(" Normal descriptors\n");
1508                 priv->hw->desc = &ndesc_ops;
1509         }
1510 }
1511
1512 /**
1513  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
1514  * @priv: driver private structure
1515  * Description:
1516  *  newer GMAC chip generations have a dedicated register to indicate the
1517  *  presence of optional features/functions.
1518  *  It can also be used to override the values passed through the
1519  *  platform, which are still necessary for old MAC10/100 and GMAC chips.
1520  */
1521 static int stmmac_get_hw_features(struct stmmac_priv *priv)
1522 {
1523         u32 ret = 0;
1524
1525         if (priv->hw->dma->get_hw_feature) {
1526                 priv->hw->dma->get_hw_feature(priv->ioaddr,
1527                                               &priv->dma_cap);
1528                 ret = 1;
1529         }
1530
1531         return ret;
1532 }
1533
1534 /**
1535  * stmmac_check_ether_addr - check if the MAC addr is valid
1536  * @priv: driver private structure
1537  * Description:
1538  * it verifies that the MAC address is valid; in case it is not, a
1539  * random MAC address is generated
1540  */
1541 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
1542 {
1543         if (!is_valid_ether_addr(priv->dev->dev_addr)) {
1544                 priv->hw->mac->get_umac_addr(priv->hw,
1545                                              priv->dev->dev_addr, 0);
1546                 if (!is_valid_ether_addr(priv->dev->dev_addr))
1547                         eth_hw_addr_random(priv->dev);
1548                 pr_info("%s: device MAC address %pM\n", priv->dev->name,
1549                         priv->dev->dev_addr);
1550         }
1551 }
1552
1553 /**
1554  * stmmac_init_dma_engine - DMA init.
1555  * @priv: driver private structure
1556  * Description:
1557  * It inits the DMA invoking the specific MAC/GMAC callback.
1558  * Some DMA parameters can be passed from the platform;
1559  * if they are not passed, a default is kept for the MAC or GMAC.
1560  */
1561 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
1562 {
1563         int pbl = DEFAULT_DMA_PBL, fixed_burst = 0, aal = 0;
1564         int mixed_burst = 0;
1565         int atds = 0;
1566         int ret = 0;
1567
1568         if (priv->plat->dma_cfg) {
1569                 pbl = priv->plat->dma_cfg->pbl;
1570                 fixed_burst = priv->plat->dma_cfg->fixed_burst;
1571                 mixed_burst = priv->plat->dma_cfg->mixed_burst;
1572                 aal = priv->plat->dma_cfg->aal;
1573         }
1574
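             /* Extended descriptors in ring mode require the DMA to be told
              * about the alternate descriptor size (atds), i.e. the larger
              * extended-descriptor layout.
              */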
1575         if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
1576                 atds = 1;
1577
1578         ret = priv->hw->dma->reset(priv->ioaddr);
1579         if (ret) {
1580                 dev_err(priv->device, "Failed to reset the dma\n");
1581                 return ret;
1582         }
1583
1584         priv->hw->dma->init(priv->ioaddr, pbl, fixed_burst, mixed_burst,
1585                             aal, priv->dma_tx_phy, priv->dma_rx_phy, atds);
1586
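             /* The GMAC4 DMA is tail-pointer driven: point the RX/TX tail
              * registers just past the last descriptor of each ring.
              */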
1587         if (priv->synopsys_id >= DWMAC_CORE_4_00) {
1588                 priv->rx_tail_addr = priv->dma_rx_phy +
1589                             (DMA_RX_SIZE * sizeof(struct dma_desc));
1590                 priv->hw->dma->set_rx_tail_ptr(priv->ioaddr, priv->rx_tail_addr,
1591                                                STMMAC_CHAN0);
1592
1593                 priv->tx_tail_addr = priv->dma_tx_phy +
1594                             (DMA_TX_SIZE * sizeof(struct dma_desc));
1595                 priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, priv->tx_tail_addr,
1596                                                STMMAC_CHAN0);
1597         }
1598
1599         if (priv->plat->axi && priv->hw->dma->axi)
1600                 priv->hw->dma->axi(priv->ioaddr, priv->plat->axi);
1601
1602         return ret;
1603 }
1604
1605 /**
1606  * stmmac_tx_timer - mitigation sw timer for tx.
1607  * @data: data pointer
1608  * Description:
1609  * This is the timer handler that directly invokes stmmac_tx_clean().
1610  */
1611 static void stmmac_tx_timer(unsigned long data)
1612 {
1613         struct stmmac_priv *priv = (struct stmmac_priv *)data;
1614
1615         stmmac_tx_clean(priv);
1616 }
1617
1618 /**
1619  * stmmac_init_tx_coalesce - init tx mitigation options.
1620  * @priv: driver private structure
1621  * Description:
1622  * This inits the transmit coalesce parameters: i.e. timer rate,
1623  * timer handler and default threshold used for enabling the
1624  * interrupt on completion bit.
1625  */
1626 static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
1627 {
1628         priv->tx_coal_frames = STMMAC_TX_FRAMES;
1629         priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
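             /* The IC bit is only set every tx_coal_frames descriptors, so
              * this SW timer acts as a fallback that periodically invokes
              * stmmac_tx_clean() to reclaim completed descriptors.
              */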
1630         init_timer(&priv->txtimer);
1631         priv->txtimer.expires = STMMAC_COAL_TIMER(priv->tx_coal_timer);
1632         priv->txtimer.data = (unsigned long)priv;
1633         priv->txtimer.function = stmmac_tx_timer;
1634         add_timer(&priv->txtimer);
1635 }
1636
1637 /**
1638  * stmmac_hw_setup - setup mac in a usable state.
1639  *  @dev : pointer to the device structure.
1640  *  Description:
1641  *  this is the main function to set up the HW in a usable state: the
1642  *  DMA engine is reset, the core registers are configured (e.g. AXI,
1643  *  Checksum features, timers) and the DMA is made ready to start
1644  *  receiving and transmitting.
1645  *  Return value:
1646  *  0 on success and an appropriate (-)ve integer as defined in errno.h
1647  *  file on failure.
1648  */
1649 static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
1650 {
1651         struct stmmac_priv *priv = netdev_priv(dev);
1652         int ret;
1653
1654         /* DMA initialization and SW reset */
1655         ret = stmmac_init_dma_engine(priv);
1656         if (ret < 0) {
1657                 pr_err("%s: DMA engine initialization failed\n", __func__);
1658                 return ret;
1659         }
1660
1661         /* Copy the MAC addr into the HW  */
1662         priv->hw->mac->set_umac_addr(priv->hw, dev->dev_addr, 0);
1663
1664         /* If required, perform hw setup of the bus. */
1665         if (priv->plat->bus_setup)
1666                 priv->plat->bus_setup(priv->ioaddr);
1667
1668         /* Initialize the MAC Core */
1669         priv->hw->mac->core_init(priv->hw, dev->mtu);
1670
1671         ret = priv->hw->mac->rx_ipc(priv->hw);
1672         if (!ret) {
1673                 pr_warn(" RX IPC Checksum Offload disabled\n");
1674                 priv->plat->rx_coe = STMMAC_RX_COE_NONE;
1675                 priv->hw->rx_csum = 0;
1676         }
1677
1678         /* Enable the MAC Rx/Tx */
1679         if (priv->synopsys_id >= DWMAC_CORE_4_00)
1680                 stmmac_dwmac4_set_mac(priv->ioaddr, true);
1681         else
1682                 stmmac_set_mac(priv->ioaddr, true);
1683
1684         /* Set the HW DMA mode and the COE */
1685         stmmac_dma_operation_mode(priv);
1686
1687         stmmac_mmc_setup(priv);
1688
1689         if (init_ptp) {
1690                 ret = stmmac_init_ptp(priv);
1691                 if (ret && ret != -EOPNOTSUPP)
1692                         pr_warn("%s: failed PTP initialisation\n", __func__);
1693         }
1694
1695 #ifdef CONFIG_DEBUG_FS
1696         ret = stmmac_init_fs(dev);
1697         if (ret < 0)
1698                 pr_warn("%s: failed debugFS registration\n", __func__);
1699 #endif
1700         /* Start the ball rolling... */
1701         pr_debug("%s: DMA RX/TX processes started...\n", dev->name);
1702         priv->hw->dma->start_tx(priv->ioaddr);
1703         priv->hw->dma->start_rx(priv->ioaddr);
1704
1705         /* Dump DMA/MAC registers */
1706         if (netif_msg_hw(priv)) {
1707                 priv->hw->mac->dump_regs(priv->hw);
1708                 priv->hw->dma->dump_regs(priv->ioaddr);
1709         }
1710         priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
1711
1712         if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) {
1713                 priv->rx_riwt = MAX_DMA_RIWT;
1714                 priv->hw->dma->rx_watchdog(priv->ioaddr, MAX_DMA_RIWT);
1715         }
1716
1717         if (priv->pcs && priv->hw->mac->ctrl_ane)
1718                 priv->hw->mac->ctrl_ane(priv->hw, 0);
1719
1720         /*  set TX ring length */
1721         if (priv->hw->dma->set_tx_ring_len)
1722                 priv->hw->dma->set_tx_ring_len(priv->ioaddr,
1723                                                (DMA_TX_SIZE - 1));
1724         /*  set RX ring length */
1725         if (priv->hw->dma->set_rx_ring_len)
1726                 priv->hw->dma->set_rx_ring_len(priv->ioaddr,
1727                                                (DMA_RX_SIZE - 1));
1728         /* Enable TSO */
1729         if (priv->tso)
1730                 priv->hw->dma->enable_tso(priv->ioaddr, 1, STMMAC_CHAN0);
1731
1732         return 0;
1733 }
1734
1735 /**
1736  *  stmmac_open - open entry point of the driver
1737  *  @dev : pointer to the device structure.
1738  *  Description:
1739  *  This function is the open entry point of the driver.
1740  *  Return value:
1741  *  0 on success and an appropriate (-)ve integer as defined in errno.h
1742  *  file on failure.
1743  */
1744 static int stmmac_open(struct net_device *dev)
1745 {
1746         struct stmmac_priv *priv = netdev_priv(dev);
1747         int ret;
1748
1749         stmmac_check_ether_addr(priv);
1750
1751         if (priv->pcs != STMMAC_PCS_RGMII && priv->pcs != STMMAC_PCS_TBI &&
1752             priv->pcs != STMMAC_PCS_RTBI) {
1753                 ret = stmmac_init_phy(dev);
1754                 if (ret) {
1755                         pr_err("%s: Cannot attach to PHY (error: %d)\n",
1756                                __func__, ret);
1757                         return ret;
1758                 }
1759         }
1760
1761         /* Extra statistics */
1762         memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
1763         priv->xstats.threshold = tc;
1764
1765         priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
1766         priv->rx_copybreak = STMMAC_RX_COPYBREAK;
1767
1768         ret = alloc_dma_desc_resources(priv);
1769         if (ret < 0) {
1770                 pr_err("%s: DMA descriptors allocation failed\n", __func__);
1771                 goto dma_desc_error;
1772         }
1773
1774         ret = init_dma_desc_rings(dev, GFP_KERNEL);
1775         if (ret < 0) {
1776                 pr_err("%s: DMA descriptors initialization failed\n", __func__);
1777                 goto init_error;
1778         }
1779
1780         ret = stmmac_hw_setup(dev, true);
1781         if (ret < 0) {
1782                 pr_err("%s: Hw setup failed\n", __func__);
1783                 goto init_error;
1784         }
1785
1786         stmmac_init_tx_coalesce(priv);
1787
1788         if (priv->phydev)
1789                 phy_start(priv->phydev);
1790
1791         /* Request the IRQ lines */
1792         ret = request_irq(dev->irq, stmmac_interrupt,
1793                           IRQF_SHARED, dev->name, dev);
1794         if (unlikely(ret < 0)) {
1795                 pr_err("%s: ERROR: allocating the IRQ %d (error: %d)\n",
1796                        __func__, dev->irq, ret);
1797                 goto init_error;
1798         }
1799
1800         /* Request the Wake IRQ in case another line is used for WoL */
1801         if (priv->wol_irq != dev->irq) {
1802                 ret = request_irq(priv->wol_irq, stmmac_interrupt,
1803                                   IRQF_SHARED, dev->name, dev);
1804                 if (unlikely(ret < 0)) {
1805                         pr_err("%s: ERROR: allocating the WoL IRQ %d (%d)\n",
1806                                __func__, priv->wol_irq, ret);
1807                         goto wolirq_error;
1808                 }
1809         }
1810
1811         /* Request the LPI IRQ in case a separate line is used */
1812         if (priv->lpi_irq > 0) {
1813                 ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
1814                                   dev->name, dev);
1815                 if (unlikely(ret < 0)) {
1816                         pr_err("%s: ERROR: allocating the LPI IRQ %d (%d)\n",
1817                                __func__, priv->lpi_irq, ret);
1818                         goto lpiirq_error;
1819                 }
1820         }
1821
1822         napi_enable(&priv->napi);
1823         netif_start_queue(dev);
1824
1825         return 0;
1826
1827 lpiirq_error:
1828         if (priv->wol_irq != dev->irq)
1829                 free_irq(priv->wol_irq, dev);
1830 wolirq_error:
1831         free_irq(dev->irq, dev);
1832
1833 init_error:
1834         free_dma_desc_resources(priv);
1835 dma_desc_error:
1836         if (priv->phydev)
1837                 phy_disconnect(priv->phydev);
1838
1839         return ret;
1840 }
1841
1842 /**
1843  *  stmmac_release - close entry point of the driver
1844  *  @dev : device pointer.
1845  *  Description:
1846  *  This is the stop entry point of the driver.
1847  */
1848 static int stmmac_release(struct net_device *dev)
1849 {
1850         struct stmmac_priv *priv = netdev_priv(dev);
1851
1852         if (priv->eee_enabled)
1853                 del_timer_sync(&priv->eee_ctrl_timer);
1854
1855         /* Stop and disconnect the PHY */
1856         if (priv->phydev) {
1857                 phy_stop(priv->phydev);
1858                 phy_disconnect(priv->phydev);
1859                 priv->phydev = NULL;
1860         }
1861
1862         netif_stop_queue(dev);
1863
1864         napi_disable(&priv->napi);
1865
1866         del_timer_sync(&priv->txtimer);
1867
1868         /* Free the IRQ lines */
1869         free_irq(dev->irq, dev);
1870         if (priv->wol_irq != dev->irq)
1871                 free_irq(priv->wol_irq, dev);
1872         if (priv->lpi_irq > 0)
1873                 free_irq(priv->lpi_irq, dev);
1874
1875         /* Stop TX/RX DMA and clear the descriptors */
1876         priv->hw->dma->stop_tx(priv->ioaddr);
1877         priv->hw->dma->stop_rx(priv->ioaddr);
1878
1879         /* Release and free the Rx/Tx resources */
1880         free_dma_desc_resources(priv);
1881
1882         /* Disable the MAC Rx/Tx */
1883         stmmac_set_mac(priv->ioaddr, false);
1884
1885         netif_carrier_off(dev);
1886
1887 #ifdef CONFIG_DEBUG_FS
1888         stmmac_exit_fs(dev);
1889 #endif
1890
1891         stmmac_release_ptp(priv);
1892
1893         return 0;
1894 }
1895
1896 /**
1897  *  stmmac_tso_allocator - fill TX descriptors with the TSO payload
1898  *  @priv: driver private structure
1899  *  @des: buffer start address
1900  *  @total_len: total length to fill in descriptors
1901  *  @last_segment: condition for the last descriptor
1902  *  Description:
1903  *  This function fills descriptors and requests new descriptors according
1904  *  to the buffer length to fill
1905  */
1906 static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des,
1907                                  int total_len, bool last_segment)
1908 {
1909         struct dma_desc *desc;
1910         int tmp_len;
1911         u32 buff_size;
1912
1913         tmp_len = total_len;
1914
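             /* Split the remaining payload into TSO_MAX_BUFF_SIZE chunks,
              * one descriptor per chunk; only the chunk that ends the
              * payload of the last segment is flagged as last.
              */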
1915         while (tmp_len > 0) {
1916                 priv->cur_tx = STMMAC_GET_ENTRY(priv->cur_tx, DMA_TX_SIZE);
1917                 desc = priv->dma_tx + priv->cur_tx;
1918
1919                 desc->des0 = des + (total_len - tmp_len);
1920                 buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
1921                             TSO_MAX_BUFF_SIZE : tmp_len;
1922
1923                 priv->hw->desc->prepare_tso_tx_desc(desc, 0, buff_size,
1924                         0, 1,
1925                         (last_segment) && (buff_size < TSO_MAX_BUFF_SIZE),
1926                         0, 0);
1927
1928                 tmp_len -= TSO_MAX_BUFF_SIZE;
1929         }
1930 }
1931
1932 /**
1933  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
1934  *  @skb : the socket buffer
1935  *  @dev : device pointer
1936  *  Description: this is the transmit function that is called on TSO frames
1937  *  (support available on GMAC4 and newer chips).
1938  *  Diagram below show the ring programming in case of TSO frames:
1939  *
1940  *  First Descriptor
1941  *   --------
1942  *   | DES0 |---> buffer1 = L2/L3/L4 header
1943  *   | DES1 |---> TCP Payload (can continue on next descr...)
1944  *   | DES2 |---> buffer 1 and 2 len
1945  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
1946  *   --------
1947  *      |
1948  *     ...
1949  *      |
1950  *   --------
1951  *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
1952  *   | DES1 | --|
1953  *   | DES2 | --> buffer 1 and 2 len
1954  *   | DES3 |
1955  *   --------
1956  *
1957  * The MSS is fixed per TSO run, so the TDES3 ctx field only needs programming when it changes.
1958  */
1959 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
1960 {
1961         u32 pay_len, mss;
1962         int tmp_pay_len = 0;
1963         struct stmmac_priv *priv = netdev_priv(dev);
1964         int nfrags = skb_shinfo(skb)->nr_frags;
1965         unsigned int first_entry, des;
1966         struct dma_desc *desc, *first, *mss_desc = NULL;
1967         u8 proto_hdr_len;
1968         int i;
1969
1970         spin_lock(&priv->tx_lock);
1971
1972         /* Compute header lengths */
1973         proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
1974
1975         /* Desc availability based on threshold should be safe enough */
1976         if (unlikely(stmmac_tx_avail(priv) <
1977                 (((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
1978                 if (!netif_queue_stopped(dev)) {
1979                         netif_stop_queue(dev);
1980                         /* This is a hard error, log it. */
1981                         pr_err("%s: Tx Ring full when queue awake\n", __func__);
1982                 }
1983                 spin_unlock(&priv->tx_lock);
1984                 return NETDEV_TX_BUSY;
1985         }
1986
1987         pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
1988
1989         mss = skb_shinfo(skb)->gso_size;
1990
1991         /* set new MSS value if needed */
1992         if (mss != priv->mss) {
1993                 mss_desc = priv->dma_tx + priv->cur_tx;
1994                 priv->hw->desc->set_mss(mss_desc, mss);
1995                 priv->mss = mss;
1996                 priv->cur_tx = STMMAC_GET_ENTRY(priv->cur_tx, DMA_TX_SIZE);
1997         }
1998
1999         if (netif_msg_tx_queued(priv)) {
2000                 pr_info("%s: tcphdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
2001                         __func__, tcp_hdrlen(skb), proto_hdr_len, pay_len, mss);
2002                 pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
2003                         skb->data_len);
2004         }
2005
2006         first_entry = priv->cur_tx;
2007
2008         desc = priv->dma_tx + first_entry;
2009         first = desc;
2010
2011         /* first descriptor: fill Headers on Buf1 */
2012         des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
2013                              DMA_TO_DEVICE);
2014         if (dma_mapping_error(priv->device, des))
2015                 goto dma_map_err;
2016
2017         priv->tx_skbuff_dma[first_entry].buf = des;
2018         priv->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
2019         priv->tx_skbuff[first_entry] = skb;
2020
2021         first->des0 = des;
2022
2023         /* Fill start of payload in buff2 of first descriptor */
2024         if (pay_len)
2025                 first->des1 =  des + proto_hdr_len;
2026
2027         /* If needed take extra descriptors to fill the remaining payload */
2028         tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
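             /* The first descriptor's second buffer is assumed to carry the
              * first TSO_MAX_BUFF_SIZE bytes of payload, hence the
              * subtraction; stmmac_tso_allocator() maps whatever remains
              * onto extra descriptors.
              */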
2029
2030         stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0));
2031
2032         /* Prepare fragments */
2033         for (i = 0; i < nfrags; i++) {
2034                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2035
2036                 des = skb_frag_dma_map(priv->device, frag, 0,
2037                                        skb_frag_size(frag),
2038                                        DMA_TO_DEVICE);
                     if (dma_mapping_error(priv->device, des))
                             goto dma_map_err; /* bail out as in stmmac_xmit() */
2039
2040                 stmmac_tso_allocator(priv, des, skb_frag_size(frag),
2041                                      (i == nfrags - 1));
2042
2043                 priv->tx_skbuff_dma[priv->cur_tx].buf = des;
2044                 priv->tx_skbuff_dma[priv->cur_tx].len = skb_frag_size(frag);
2045                 priv->tx_skbuff[priv->cur_tx] = NULL;
2046                 priv->tx_skbuff_dma[priv->cur_tx].map_as_page = true;
2047         }
2048
2049         priv->tx_skbuff_dma[priv->cur_tx].last_segment = true;
2050
2051         priv->cur_tx = STMMAC_GET_ENTRY(priv->cur_tx, DMA_TX_SIZE);
2052
2053         if (unlikely(stmmac_tx_avail(priv) <= (MAX_SKB_FRAGS + 1))) {
2054                 if (netif_msg_hw(priv))
2055                         pr_debug("%s: stop transmitting packets\n", __func__);
2056                 netif_stop_queue(dev);
2057         }
2058
2059         dev->stats.tx_bytes += skb->len;
2060         priv->xstats.tx_tso_frames++;
2061         priv->xstats.tx_tso_nfrags += nfrags;
2062
2063         /* Manage tx mitigation */
2064         priv->tx_count_frames += nfrags + 1;
2065         if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
2066                 mod_timer(&priv->txtimer,
2067                           STMMAC_COAL_TIMER(priv->tx_coal_timer));
2068         } else {
2069                 priv->tx_count_frames = 0;
2070                 priv->hw->desc->set_tx_ic(desc);
2071                 priv->xstats.tx_set_ic_bit++;
2072         }
2073
2074         if (!priv->hwts_tx_en)
2075                 skb_tx_timestamp(skb);
2076
2077         if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2078                      priv->hwts_tx_en)) {
2079                 /* declare that device is doing timestamping */
2080                 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2081                 priv->hw->desc->enable_tx_timestamp(first);
2082         }
2083
2084         /* Complete the first descriptor before granting the DMA */
2085         priv->hw->desc->prepare_tso_tx_desc(first, 1,
2086                         proto_hdr_len,
2087                         pay_len,
2088                         1, priv->tx_skbuff_dma[first_entry].last_segment,
2089                         tcp_hdrlen(skb) / 4, (skb->len - proto_hdr_len));
2090
2091         /* If context desc is used to change MSS */
2092         if (mss_desc)
2093                 priv->hw->desc->set_tx_owner(mss_desc);
2094
2095         /* The own bit must be the last setting done when preparing the
2096          * descriptor, and a barrier is then needed to make sure that
2097          * everything is coherent before granting control to the DMA engine.
2098          */
2099         smp_wmb();
2100
2101         if (netif_msg_pktdata(priv)) {
2102                 pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
2103                         __func__, priv->cur_tx, priv->dirty_tx, first_entry,
2104                         priv->cur_tx, first, nfrags);
2105
2106                 priv->hw->desc->display_ring((void *)priv->dma_tx, DMA_TX_SIZE,
2107                                              0);
2108
2109                 pr_info(">>> frame to be transmitted: ");
2110                 print_pkt(skb->data, skb_headlen(skb));
2111         }
2112
2113         netdev_sent_queue(dev, skb->len);
2114
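             /* Advancing the tail pointer notifies the GMAC4 DMA that new
              * descriptors are ready to be fetched.
              */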
2115         priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, priv->tx_tail_addr,
2116                                        STMMAC_CHAN0);
2117
2118         spin_unlock(&priv->tx_lock);
2119         return NETDEV_TX_OK;
2120
2121 dma_map_err:
2122         spin_unlock(&priv->tx_lock);
2123         dev_err(priv->device, "Tx dma map failed\n");
2124         dev_kfree_skb(skb);
2125         priv->dev->stats.tx_dropped++;
2126         return NETDEV_TX_OK;
2127 }
2128
2129 /**
2130  *  stmmac_xmit - Tx entry point of the driver
2131  *  @skb : the socket buffer
2132  *  @dev : device pointer
2133  *  Description : this is the tx entry point of the driver.
2134  *  It programs the chain or the ring and supports oversized frames
2135  * and the SG feature.
2136  */
2137 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
2138 {
2139         struct stmmac_priv *priv = netdev_priv(dev);
2140         unsigned int nopaged_len = skb_headlen(skb);
2141         int i, csum_insertion = 0, is_jumbo = 0;
2142         int nfrags = skb_shinfo(skb)->nr_frags;
2143         unsigned int entry, first_entry;
2144         struct dma_desc *desc, *first;
2145         unsigned int enh_desc;
2146         unsigned int des;
2147
2148         /* Manage oversized TCP frames for GMAC4 device */
2149         if (skb_is_gso(skb) && priv->tso) {
2150                 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2151                         return stmmac_tso_xmit(skb, dev);
2152         }
2153
2154         spin_lock(&priv->tx_lock);
2155
2156         if (unlikely(stmmac_tx_avail(priv) < nfrags + 1)) {
2157                 spin_unlock(&priv->tx_lock);
2158                 if (!netif_queue_stopped(dev)) {
2159                         netif_stop_queue(dev);
2160                         /* This is a hard error, log it. */
2161                         pr_err("%s: Tx Ring full when queue awake\n", __func__);
2162                 }
2163                 return NETDEV_TX_BUSY;
2164         }
2165
2166         if (priv->tx_path_in_lpi_mode)
2167                 stmmac_disable_eee_mode(priv);
2168
2169         entry = priv->cur_tx;
2170         first_entry = entry;
2171
2172         csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
2173
2174         if (likely(priv->extend_desc))
2175                 desc = (struct dma_desc *)(priv->dma_etx + entry);
2176         else
2177                 desc = priv->dma_tx + entry;
2178
2179         first = desc;
2180
2181         priv->tx_skbuff[first_entry] = skb;
2182
2183         enh_desc = priv->plat->enh_desc;
2184         /* To program the descriptors according to the size of the frame */
2185         if (enh_desc)
2186                 is_jumbo = priv->hw->mode->is_jumbo_frm(skb->len, enh_desc);
2187
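             /* On pre-4.00 cores, oversized frames are laid out by the
              * mode-specific (ring/chain) jumbo helper, which may spread
              * the linear part of the skb over several buffers.
              */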
2188         if (unlikely(is_jumbo) && likely(priv->synopsys_id <
2189                                          DWMAC_CORE_4_00)) {
2190                 entry = priv->hw->mode->jumbo_frm(priv, skb, csum_insertion);
2191                 if (unlikely(entry < 0))
2192                         goto dma_map_err;
2193         }
2194
2195         for (i = 0; i < nfrags; i++) {
2196                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2197                 int len = skb_frag_size(frag);
2198                 bool last_segment = (i == (nfrags - 1));
2199
2200                 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
2201
2202                 if (likely(priv->extend_desc))
2203                         desc = (struct dma_desc *)(priv->dma_etx + entry);
2204                 else
2205                         desc = priv->dma_tx + entry;
2206
2207                 des = skb_frag_dma_map(priv->device, frag, 0, len,
2208                                        DMA_TO_DEVICE);
2209                 if (dma_mapping_error(priv->device, des))
2210                         goto dma_map_err; /* should reuse desc w/o issues */
2211
2212                 priv->tx_skbuff[entry] = NULL;
2213
2214                 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) {
2215                         desc->des0 = des;
2216                         priv->tx_skbuff_dma[entry].buf = desc->des0;
2217                 } else {
2218                         desc->des2 = des;
2219                         priv->tx_skbuff_dma[entry].buf = desc->des2;
2220                 }
2221
2222                 priv->tx_skbuff_dma[entry].map_as_page = true;
2223                 priv->tx_skbuff_dma[entry].len = len;
2224                 priv->tx_skbuff_dma[entry].last_segment = last_segment;
2225
2226                 /* Prepare the descriptor and set the own bit too */
2227                 priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion,
2228                                                 priv->mode, 1, last_segment);
2229         }
2230
2231         entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
2232
2233         priv->cur_tx = entry;
2234
2235         if (netif_msg_pktdata(priv)) {
2236                 void *tx_head;
2237
2238                 pr_debug("%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
2239                          __func__, priv->cur_tx, priv->dirty_tx, first_entry,
2240                          entry, first, nfrags);
2241
2242                 if (priv->extend_desc)
2243                         tx_head = (void *)priv->dma_etx;
2244                 else
2245                         tx_head = (void *)priv->dma_tx;
2246
2247                 priv->hw->desc->display_ring(tx_head, DMA_TX_SIZE, false);
2248
2249                 pr_debug(">>> frame to be transmitted: ");
2250                 print_pkt(skb->data, skb->len);
2251         }
2252
2253         if (unlikely(stmmac_tx_avail(priv) <= (MAX_SKB_FRAGS + 1))) {
2254                 if (netif_msg_hw(priv))
2255                         pr_debug("%s: stop transmitting packets\n", __func__);
2256                 netif_stop_queue(dev);
2257         }
2258
2259         dev->stats.tx_bytes += skb->len;
2260
2261         /* According to the coalesce parameter, the IC bit for the latest
2262          * segment is reset and the timer is re-started to clean the tx status.
2263          * This approach takes the fragments into account: desc is the first
2264          * element in the no-SG case.
2265          */
2266         priv->tx_count_frames += nfrags + 1;
2267         if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
2268                 mod_timer(&priv->txtimer,
2269                           STMMAC_COAL_TIMER(priv->tx_coal_timer));
2270         } else {
2271                 priv->tx_count_frames = 0;
2272                 priv->hw->desc->set_tx_ic(desc);
2273                 priv->xstats.tx_set_ic_bit++;
2274         }
2275
2276         if (!priv->hwts_tx_en)
2277                 skb_tx_timestamp(skb);
2278
2279         /* Ready to fill the first descriptor and set the OWN bit w/o any
2280          * problems because all the descriptors are actually ready to be
2281          * passed to the DMA engine.
2282          */
2283         if (likely(!is_jumbo)) {
2284                 bool last_segment = (nfrags == 0);
2285
2286                 des = dma_map_single(priv->device, skb->data,
2287                                      nopaged_len, DMA_TO_DEVICE);
2288                 if (dma_mapping_error(priv->device, des))
2289                         goto dma_map_err;
2290
2291                 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) {
2292                         first->des0 = des;
2293                         priv->tx_skbuff_dma[first_entry].buf = first->des0;
2294                 } else {
2295                         first->des2 = des;
2296                         priv->tx_skbuff_dma[first_entry].buf = first->des2;
2297                 }
2298
2299                 priv->tx_skbuff_dma[first_entry].len = nopaged_len;
2300                 priv->tx_skbuff_dma[first_entry].last_segment = last_segment;
2301
2302                 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2303                              priv->hwts_tx_en)) {
2304                         /* declare that device is doing timestamping */
2305                         skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2306                         priv->hw->desc->enable_tx_timestamp(first);
2307                 }
2308
2309                 /* Prepare the first descriptor setting the OWN bit too */
2310                 priv->hw->desc->prepare_tx_desc(first, 1, nopaged_len,
2311                                                 csum_insertion, priv->mode, 1,
2312                                                 last_segment);
2313
2314                 /* The own bit must be the last setting done when preparing the
2315                  * descriptor, and a barrier is then needed to make sure that
2316                  * everything is coherent before granting control to the DMA engine.
2317                  */
2318                 smp_wmb();
2319         }
2320
2321         netdev_sent_queue(dev, skb->len);
2322
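             /* Kick the DMA: older cores use the TX poll demand mechanism,
              * while GMAC4 advances the ring tail pointer instead.
              */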
2323         if (priv->synopsys_id < DWMAC_CORE_4_00)
2324                 priv->hw->dma->enable_dma_transmission(priv->ioaddr);
2325         else
2326                 priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, priv->tx_tail_addr,
2327                                                STMMAC_CHAN0);
2328
2329         spin_unlock(&priv->tx_lock);
2330         return NETDEV_TX_OK;
2331
2332 dma_map_err:
2333         spin_unlock(&priv->tx_lock);
2334         dev_err(priv->device, "Tx dma map failed\n");
2335         dev_kfree_skb(skb);
2336         priv->dev->stats.tx_dropped++;
2337         return NETDEV_TX_OK;
2338 }
2339
2340 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
2341 {
2342         struct ethhdr *ehdr;
2343         u16 vlanid;
2344
2345         if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) ==
2346             NETIF_F_HW_VLAN_CTAG_RX &&
2347             !__vlan_get_tag(skb, &vlanid)) {
2348                 /* pop the vlan tag */
2349                 ehdr = (struct ethhdr *)skb->data;
2350                 memmove(skb->data + VLAN_HLEN, ehdr, ETH_ALEN * 2);
2351                 skb_pull(skb, VLAN_HLEN);
2352                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlanid);
2353         }
2354 }
2355
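     /* Returns 1 when the rx_zeroc_thresh counter has reached
      * STMMAC_RX_THRESH (e.g. after an skb allocation failure in the
      * refill path), hinting that frames should be copied out of the
      * preallocated buffers instead of being consumed zero-copy.
      */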
2357 static inline int stmmac_rx_threshold_count(struct stmmac_priv *priv)
2358 {
2359         if (priv->rx_zeroc_thresh < STMMAC_RX_THRESH)
2360                 return 0;
2361
2362         return 1;
2363 }
2364
2365 /**
2366  * stmmac_rx_refill - refill used skb preallocated buffers
2367  * @priv: driver private structure
2368  * Description: this reallocates the skbs for the reception process,
2369  * which is based on zero-copy.
2370  */
2371 static inline void stmmac_rx_refill(struct stmmac_priv *priv)
2372 {
2373         int bfsize = priv->dma_buf_sz;
2374         unsigned int entry = priv->dirty_rx;
2375         int dirty = stmmac_rx_dirty(priv);
2376
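             /* Re-arm each dirty descriptor: allocate a fresh skb where the
              * old one was consumed, restore the buffer address and hand
              * ownership back to the DMA.
              */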
2377         while (dirty-- > 0) {
2378                 struct dma_desc *p;
2379
2380                 if (priv->extend_desc)
2381                         p = (struct dma_desc *)(priv->dma_erx + entry);
2382                 else
2383                         p = priv->dma_rx + entry;
2384
2385                 if (likely(priv->rx_skbuff[entry] == NULL)) {
2386                         struct sk_buff *skb;
2387
2388                         skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);
2389                         if (unlikely(!skb)) {
2390                                 /* so for a while no zero-copy! */
2391                                 priv->rx_zeroc_thresh = STMMAC_RX_THRESH;
2392                                 if (unlikely(net_ratelimit()))
2393                                         dev_err(priv->device,
2394                                                 "fail to alloc skb entry %d\n",
2395                                                 entry);
2396                                 break;
2397                         }
2398
2399                         priv->rx_skbuff[entry] = skb;
2400                         priv->rx_skbuff_dma[entry] =
2401                             dma_map_single(priv->device, skb->data, bfsize,
2402                                            DMA_FROM_DEVICE);
2403                         if (dma_mapping_error(priv->device,
2404                                               priv->rx_skbuff_dma[entry])) {
2405                                 dev_err(priv->device, "Rx dma map failed\n");
2406                                 dev_kfree_skb(skb);
2407                                 break;
2408                         }
2409
2410                         if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) {
2411                                 p->des0 = priv->rx_skbuff_dma[entry];
2412                                 p->des1 = 0;
2413                         } else {
2414                                 p->des2 = priv->rx_skbuff_dma[entry];
2415                         }
2416                         if (priv->hw->mode->refill_desc3)
2417                                 priv->hw->mode->refill_desc3(priv, p);
2418
2419                         if (priv->rx_zeroc_thresh > 0)
2420                                 priv->rx_zeroc_thresh--;
2421
2422                         if (netif_msg_rx_status(priv))
2423                                 pr_debug("\trefill entry #%d\n", entry);
2424                 }
2425                 wmb();
2426
2427                 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
2428                         priv->hw->desc->init_rx_desc(p, priv->use_riwt, 0, 0);
2429                 else
2430                         priv->hw->desc->set_rx_owner(p);
2431
2432                 wmb();
2433
2434                 entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
2435         }
2436         priv->dirty_rx = entry;
2437 }
2438
2439 /**
2440  * stmmac_rx - manage the receive process
2441  * @priv: driver private structure
2442  * @limit: napi budget.
2443  * Description: this is the function called by the napi poll method.
2444  * It gets all the frames inside the ring.
2445  */
2446 static int stmmac_rx(struct stmmac_priv *priv, int limit)
2447 {
2448         unsigned int entry = priv->cur_rx;
2449         unsigned int next_entry;
2450         unsigned int count = 0;
2451         int coe = priv->hw->rx_csum;
2452
2453         if (netif_msg_rx_status(priv)) {
2454                 void *rx_head;
2455
2456                 pr_debug("%s: descriptor ring:\n", __func__);
2457                 if (priv->extend_desc)
2458                         rx_head = (void *)priv->dma_erx;
2459                 else
2460                         rx_head = (void *)priv->dma_rx;
2461
2462                 priv->hw->desc->display_ring(rx_head, DMA_RX_SIZE, true);
2463         }
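             /* Consume completed RX descriptors, stopping at the first one
              * still owned by the DMA or when the NAPI budget is exhausted.
              */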
2464         while (count < limit) {
2465                 int status;
2466                 struct dma_desc *p;
2467
2468                 if (priv->extend_desc)
2469                         p = (struct dma_desc *)(priv->dma_erx + entry);
2470                 else
2471                         p = priv->dma_rx + entry;
2472
2473                 /* read the status of the incoming frame */
2474                 status = priv->hw->desc->rx_status(&priv->dev->stats,
2475                                                    &priv->xstats, p);
2476                 /* check if managed by the DMA otherwise go ahead */
2477                 if (unlikely(status & dma_own))
2478                         break;
2479
2480                 count++;
2481
2482                 priv->cur_rx = STMMAC_GET_ENTRY(priv->cur_rx, DMA_RX_SIZE);
2483                 next_entry = priv->cur_rx;
2484
2485                 if (priv->extend_desc)
2486                         prefetch(priv->dma_erx + next_entry);
2487                 else
2488                         prefetch(priv->dma_rx + next_entry);
2489
2490                 if ((priv->extend_desc) && (priv->hw->desc->rx_extended_status))
2491                         priv->hw->desc->rx_extended_status(&priv->dev->stats,
2492                                                            &priv->xstats,
2493                                                            priv->dma_erx +
2494                                                            entry);
2495                 if (unlikely(status == discard_frame)) {
2496                         priv->dev->stats.rx_errors++;
2497                         if (priv->hwts_rx_en && !priv->extend_desc) {
2498                                 /* DESC2 & DESC3 will be overwritten by device
2499                                  * with timestamp value, hence reinitialize
2500                                  * them in stmmac_rx_refill() function so that
2501                                  * device can reuse it.
2502                                  */
2503                                 priv->rx_skbuff[entry] = NULL;
2504                                 dma_unmap_single(priv->device,
2505                                                  priv->rx_skbuff_dma[entry],
2506                                                  priv->dma_buf_sz,
2507                                                  DMA_FROM_DEVICE);
2508                         }
2509                 } else {
2510                         struct sk_buff *skb;
2511                         int frame_len;
2512                         unsigned int des;
2513
2514                         if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
2515                                 des = p->des0;
2516                         else
2517                                 des = p->des2;
2518
2519                         frame_len = priv->hw->desc->get_rx_frame_len(p, coe);
2520
2521                          /*  If frame length is greater than the skb buffer size
2522                          *  (preallocated during init) then the packet is
2523                          *  ignored
2524                          */
2525                         if (frame_len > priv->dma_buf_sz) {
2526                                 pr_err("%s: len %d larger than size (%d)\n",
2527                                        priv->dev->name, frame_len,
2528                                        priv->dma_buf_sz);
2529                                 priv->dev->stats.rx_length_errors++;
2530                                 break;
2531                         }
2532
2533                         /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
2534                          * Type frames (LLC/LLC-SNAP)
2535                          */
2536                         if (unlikely(status != llc_snap))
2537                                 frame_len -= ETH_FCS_LEN;
2538
2539                         if (netif_msg_rx_status(priv)) {
2540                                 pr_debug("\tdesc: %p [entry %d] buff=0x%x\n",
2541                                         p, entry, des);
2542                                 if (frame_len > ETH_FRAME_LEN)
2543                                         pr_debug("\tframe size %d, COE: %d\n",
2544                                                  frame_len, status);
2545                         }
2546
2547                         /* Zero-copy is always used, for all sizes,
2548                          * in case of GMAC4 because the used
2549                          * descriptors always need to be refilled.
2550                          */
2551                         if (unlikely(!priv->plat->has_gmac4 &&
2552                                      ((frame_len < priv->rx_copybreak) ||
2553                                      stmmac_rx_threshold_count(priv)))) {
2554                                 skb = netdev_alloc_skb_ip_align(priv->dev,
2555                                                                 frame_len);
2556                                 if (unlikely(!skb)) {
2557                                         if (net_ratelimit())
2558                                                 dev_warn(priv->device,
2559                                                          "packet dropped\n");
2560                                         priv->dev->stats.rx_dropped++;
2561                                         break;
2562                                 }
2563
2564                                 dma_sync_single_for_cpu(priv->device,
2565                                                         priv->rx_skbuff_dma
2566                                                         [entry], frame_len,
2567                                                         DMA_FROM_DEVICE);
2568                                 skb_copy_to_linear_data(skb,
2569                                                         priv->
2570                                                         rx_skbuff[entry]->data,
2571                                                         frame_len);
2572
2573                                 skb_put(skb, frame_len);
2574                                 dma_sync_single_for_device(priv->device,
2575                                                            priv->rx_skbuff_dma
2576                                                            [entry], frame_len,
2577                                                            DMA_FROM_DEVICE);
2578                         } else {
2579                                 skb = priv->rx_skbuff[entry];
2580                                 if (unlikely(!skb)) {
2581                                         pr_err("%s: Inconsistent Rx chain\n",
2582                                                priv->dev->name);
2583                                         priv->dev->stats.rx_dropped++;
2584                                         break;
2585                                 }
2586                                 prefetch(skb->data - NET_IP_ALIGN);
2587                                 priv->rx_skbuff[entry] = NULL;
2588                                 priv->rx_zeroc_thresh++;
2589
2590                                 skb_put(skb, frame_len);
2591                                 dma_unmap_single(priv->device,
2592                                                  priv->rx_skbuff_dma[entry],
2593                                                  priv->dma_buf_sz,
2594                                                  DMA_FROM_DEVICE);
2595                         }
2596
2597                         stmmac_get_rx_hwtstamp(priv, entry, skb);
2598
2599                         if (netif_msg_pktdata(priv)) {
2600                                 pr_debug("frame received (%dbytes)", frame_len);
2601                                 print_pkt(skb->data, frame_len);
2602                         }
2603
2604                         stmmac_rx_vlan(priv->dev, skb);
2605
2606                         skb->protocol = eth_type_trans(skb, priv->dev);
2607
2608                         if (unlikely(!coe))
2609                                 skb_checksum_none_assert(skb);
2610                         else
2611                                 skb->ip_summed = CHECKSUM_UNNECESSARY;
2612
2613                         napi_gro_receive(&priv->napi, skb);
2614
2615                         priv->dev->stats.rx_packets++;
2616                         priv->dev->stats.rx_bytes += frame_len;
2617                 }
2618                 entry = next_entry;
2619         }
2620
2621         stmmac_rx_refill(priv);
2622
2623         priv->xstats.rx_pkt_n += count;
2624
2625         return count;
2626 }
2627
2628 /**
2629  *  stmmac_poll - stmmac poll method (NAPI)
2630  *  @napi : pointer to the napi structure.
2631  *  @budget : maximum number of packets that the current CPU can receive from
2632  *            all interfaces.
2633  *  Description :
2634  *  To look at the incoming frames and clear the tx resources.
2635  */
2636 static int stmmac_poll(struct napi_struct *napi, int budget)
2637 {
2638         struct stmmac_priv *priv = container_of(napi, struct stmmac_priv, napi);
2639         int work_done = 0;
2640
2641         priv->xstats.napi_poll++;
2642         stmmac_tx_clean(priv);
2643
2644         work_done = stmmac_rx(priv, budget);
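             /* Less RX work than the budget means the ring is drained:
              * leave polling mode and re-enable the DMA interrupts.
              */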
2645         if (work_done < budget) {
2646                 napi_complete(napi);
2647                 stmmac_enable_dma_irq(priv);
2648         }
2649         return work_done;
2650 }
2651
2652 /**
2653  *  stmmac_tx_timeout
2654  *  @dev : Pointer to net device structure
2655  *  Description: this function is called when a packet transmission fails to
2656  *   complete within a reasonable time. The driver will mark the error in the
2657  *   netdev structure and arrange for the device to be reset to a sane state
2658  *   in order to transmit a new packet.
2659  */
2660 static void stmmac_tx_timeout(struct net_device *dev)
2661 {
2662         struct stmmac_priv *priv = netdev_priv(dev);
2663
2664         /* Clear Tx resources and restart transmitting again */
2665         stmmac_tx_err(priv);
2666 }
2667
2668 /**
2669  *  stmmac_set_rx_mode - entry point for multicast addressing
2670  *  @dev : pointer to the device structure
2671  *  Description:
2672  *  This function is a driver entry point which gets called by the kernel
2673  *  whenever multicast addresses must be enabled/disabled.
2674  *  Return value:
2675  *  void.
2676  */
2677 static void stmmac_set_rx_mode(struct net_device *dev)
2678 {
2679         struct stmmac_priv *priv = netdev_priv(dev);
2680
2681         priv->hw->mac->set_filter(priv->hw, dev);
2682 }
2683
2684 /**
2685  *  stmmac_change_mtu - entry point to change MTU size for the device.
2686  *  @dev : device pointer.
2687  *  @new_mtu : the new MTU size for the device.
2688  *  Description: the Maximum Transmission Unit (MTU) is used by the network layer
2689  *  to drive packet transmission. Ethernet has an MTU of 1500 octets
2690  *  (ETH_DATA_LEN). This value can be changed with ifconfig.
2691  *  Return value:
2692  *  0 on success, or a negative errno value (as defined in errno.h)
2693  *  on failure.
2694  */
2695 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
2696 {
2697         struct stmmac_priv *priv = netdev_priv(dev);
2698         int max_mtu;
2699
2700         if (netif_running(dev)) {
2701                 pr_err("%s: must be stopped to change its MTU\n", dev->name);
2702                 return -EBUSY;
2703         }
2704
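        /* Jumbo frames require enhanced (or GMAC4) descriptors; otherwise
         * the MTU is bounded by the largest linear skb head available.
         */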
2705         if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
2706                 max_mtu = JUMBO_LEN;
2707         else
2708                 max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
2709
2710         if (priv->plat->maxmtu < max_mtu)
2711                 max_mtu = priv->plat->maxmtu;
2712
2713         if ((new_mtu < 46) || (new_mtu > max_mtu)) {
2714                 pr_err("%s: invalid MTU, max MTU is: %d\n", dev->name, max_mtu);
2715                 return -EINVAL;
2716         }
2717
2718         dev->mtu = new_mtu;
2719
2720         netdev_update_features(dev);
2721
2722         return 0;
2723 }
2724
2725 static netdev_features_t stmmac_fix_features(struct net_device *dev,
2726                                              netdev_features_t features)
2727 {
2728         struct stmmac_priv *priv = netdev_priv(dev);
2729
2730         if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
2731                 features &= ~NETIF_F_RXCSUM;
2732
2733         if (!priv->plat->tx_coe)
2734                 features &= ~NETIF_F_CSUM_MASK;
2735
2736         /* Some GMAC devices have buggy Jumbo frame support that needs
2737          * the Tx COE disabled for oversized frames (due to limited
2738          * buffer sizes). In this case we disable the TX csum insertion
2739          * in the TDES and do not use the Store-and-Forward (SF) mode.
2740          */
2741         if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
2742                 features &= ~NETIF_F_CSUM_MASK;
2743
2744         /* Enable or disable TSO according to the ethtool request */
2745         if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
2746                 if (features & NETIF_F_TSO)
2747                         priv->tso = true;
2748                 else
2749                         priv->tso = false;
2750         }
2751
2752         return features;
2753 }
2754
2755 static int stmmac_set_features(struct net_device *netdev,
2756                                netdev_features_t features)
2757 {
2758         struct stmmac_priv *priv = netdev_priv(netdev);
2759
2760         /* Keep the COE type if RX checksum offload is supported */
2761         if (features & NETIF_F_RXCSUM)
2762                 priv->hw->rx_csum = priv->plat->rx_coe;
2763         else
2764                 priv->hw->rx_csum = 0;
2765         /* No check needed here: rx_coe has already been validated and
2766          * any issue will be fixed up by the fix_features callback.
2767          */
2768         priv->hw->mac->rx_ipc(priv->hw);
2769
2770         return 0;
2771 }
2772
2773 /**
2774  *  stmmac_interrupt - main ISR
2775  *  @irq: interrupt number.
2776  *  @dev_id: to pass the net device pointer.
2777  *  Description: this is the main driver interrupt service routine.
2778  *  It can call:
2779  *  o DMA service routine (to manage incoming frame reception and transmission
2780  *    status)
2781  *  o Core interrupts to manage: remote wake-up, management counter, LPI
2782  *    interrupts.
2783  */
2784 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
2785 {
2786         struct net_device *dev = (struct net_device *)dev_id;
2787         struct stmmac_priv *priv;
2788
2789         if (unlikely(!dev)) {
2790                 pr_err("%s: invalid dev pointer\n", __func__);
2791                 return IRQ_NONE;
2792         }
2793         priv = netdev_priv(dev);
2794         if (priv->irq_wake)
2795                 pm_wakeup_event(priv->device, 0);
2796
2797         /* Handle the GMAC core's own interrupts */
2798         if ((priv->plat->has_gmac) || (priv->plat->has_gmac4)) {
2799                 int status = priv->hw->mac->host_irq_status(priv->hw,
2800                                                             &priv->xstats);
2801                 if (unlikely(status)) {
2802                         /* For LPI we need to save the tx status */
2803                         if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
2804                                 priv->tx_path_in_lpi_mode = true;
2805                         if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
2806                                 priv->tx_path_in_lpi_mode = false;
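                        /* On an MTL RX FIFO overflow, restart the RX DMA by
                         * rewriting the tail pointer (only when the core
                         * provides set_rx_tail_ptr, e.g. GMAC4).
                         */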
2807                         if (status & CORE_IRQ_MTL_RX_OVERFLOW &&
2808                             priv->hw->dma->set_rx_tail_ptr)
2809                                 priv->hw->dma->set_rx_tail_ptr(priv->ioaddr,
2810                                                 priv->rx_tail_addr, STMMAC_CHAN0);
2811                 }
2812         }
2813
2814         /* Handle the DMA interrupts */
2815         stmmac_dma_interrupt(priv);
2816
2817         return IRQ_HANDLED;
2818 }
2819
2820 #ifdef CONFIG_NET_POLL_CONTROLLER
2821 /* Polling receive - used by NETCONSOLE and other diagnostic tools
2822  * to allow network I/O with interrupts disabled.
2823  */
2824 static void stmmac_poll_controller(struct net_device *dev)
2825 {
2826         disable_irq(dev->irq);
2827         stmmac_interrupt(dev->irq, dev);
2828         enable_irq(dev->irq);
2829 }
2830 #endif
2831
2832 /**
2833  *  stmmac_ioctl - Entry point for the Ioctl
2834  *  @dev: Device pointer.
2835  *  @rq: An IOCTL-specific structure that can contain a pointer to
2836  *  a proprietary structure used to pass information to the driver.
2837  *  @cmd: IOCTL command
2838  *  Description:
2839  *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
2840  */
2841 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2842 {
2843         struct stmmac_priv *priv = netdev_priv(dev);
2844         int ret = -EOPNOTSUPP;
2845
2846         if (!netif_running(dev))
2847                 return -EINVAL;
2848
2849         switch (cmd) {
2850         case SIOCGMIIPHY:
2851         case SIOCGMIIREG:
2852         case SIOCSMIIREG:
2853                 if (!priv->phydev)
2854                         return -EINVAL;
2855                 ret = phy_mii_ioctl(priv->phydev, rq, cmd);
2856                 break;
2857         case SIOCSHWTSTAMP:
2858                 ret = stmmac_hwtstamp_ioctl(dev, rq);
2859                 break;
2860         default:
2861                 break;
2862         }
2863
2864         return ret;
2865 }
2866
2867 #ifdef CONFIG_DEBUG_FS
2868 static struct dentry *stmmac_fs_dir;
2869
2870 static void sysfs_display_ring(void *head, int size, int extend_desc,
2871                                struct seq_file *seq)
2872 {
2873         int i;
2874         struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
2875         struct dma_desc *p = (struct dma_desc *)head;
2876
2877         for (i = 0; i < size; i++) {
2878                 u64 x;
2879                 if (extend_desc) {
2880                         x = *(u64 *) ep;
2881                         seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
2882                                    i, (unsigned int)virt_to_phys(ep),
2883                                    ep->basic.des0, ep->basic.des1,
2884                                    ep->basic.des2, ep->basic.des3);
2885                         ep++;
2886                 } else {
2887                         x = *(u64 *) p;
2888                         seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
2889                                    i, (unsigned int)virt_to_phys(p),
2890                                    p->des0, p->des1, p->des2, p->des3);
2891                         p++;
2892                 }
2893                 seq_printf(seq, "\n");
2894         }
2895 }
2896
2897 static int stmmac_sysfs_ring_read(struct seq_file *seq, void *v)
2898 {
2899         struct net_device *dev = seq->private;
2900         struct stmmac_priv *priv = netdev_priv(dev);
2901
2902         if (priv->extend_desc) {
2903                 seq_printf(seq, "Extended RX descriptor ring:\n");
2904                 sysfs_display_ring((void *)priv->dma_erx, DMA_RX_SIZE, 1, seq);
2905                 seq_printf(seq, "Extended TX descriptor ring:\n");
2906                 sysfs_display_ring((void *)priv->dma_etx, DMA_TX_SIZE, 1, seq);
2907         } else {
2908                 seq_printf(seq, "RX descriptor ring:\n");
2909                 sysfs_display_ring((void *)priv->dma_rx, DMA_RX_SIZE, 0, seq);
2910                 seq_printf(seq, "TX descriptor ring:\n");
2911                 sysfs_display_ring((void *)priv->dma_tx, DMA_TX_SIZE, 0, seq);
2912         }
2913
2914         return 0;
2915 }
2916
2917 static int stmmac_sysfs_ring_open(struct inode *inode, struct file *file)
2918 {
2919         return single_open(file, stmmac_sysfs_ring_read, inode->i_private);
2920 }
2921
2922 static const struct file_operations stmmac_rings_status_fops = {
2923         .owner = THIS_MODULE,
2924         .open = stmmac_sysfs_ring_open,
2925         .read = seq_read,
2926         .llseek = seq_lseek,
2927         .release = single_release,
2928 };
2929
2930 static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v)
2931 {
2932         struct net_device *dev = seq->private;
2933         struct stmmac_priv *priv = netdev_priv(dev);
2934
2935         if (!priv->hw_cap_support) {
2936                 seq_printf(seq, "DMA HW features not supported\n");
2937                 return 0;
2938         }
2939
2940         seq_printf(seq, "==============================\n");
2941         seq_printf(seq, "\tDMA HW features\n");
2942         seq_printf(seq, "==============================\n");
2943
2944         seq_printf(seq, "\t10/100 Mbps %s\n",
2945                    (priv->dma_cap.mbps_10_100) ? "Y" : "N");
2946         seq_printf(seq, "\t1000 Mbps %s\n",
2947                    (priv->dma_cap.mbps_1000) ? "Y" : "N");
2948         seq_printf(seq, "\tHalf duplex %s\n",
2949                    (priv->dma_cap.half_duplex) ? "Y" : "N");
2950         seq_printf(seq, "\tHash Filter: %s\n",
2951                    (priv->dma_cap.hash_filter) ? "Y" : "N");
2952         seq_printf(seq, "\tMultiple MAC address registers: %s\n",
2953                    (priv->dma_cap.multi_addr) ? "Y" : "N");
2954         seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
2955                    (priv->dma_cap.pcs) ? "Y" : "N");
2956         seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
2957                    (priv->dma_cap.sma_mdio) ? "Y" : "N");
2958         seq_printf(seq, "\tPMT Remote wake up: %s\n",
2959                    (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
2960         seq_printf(seq, "\tPMT Magic Frame: %s\n",
2961                    (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
2962         seq_printf(seq, "\tRMON module: %s\n",
2963                    (priv->dma_cap.rmon) ? "Y" : "N");
2964         seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
2965                    (priv->dma_cap.time_stamp) ? "Y" : "N");
2966         seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
2967                    (priv->dma_cap.atime_stamp) ? "Y" : "N");
2968         seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE) %s\n",
2969                    (priv->dma_cap.eee) ? "Y" : "N");
2970         seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
2971         seq_printf(seq, "\tChecksum Offload in TX: %s\n",
2972                    (priv->dma_cap.tx_coe) ? "Y" : "N");
2973         if (priv->synopsys_id >= DWMAC_CORE_4_00) {
2974                 seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
2975                            (priv->dma_cap.rx_coe) ? "Y" : "N");
2976         } else {
2977                 seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
2978                            (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
2979                 seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
2980                            (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
2981         }
2982         seq_printf(seq, "\tRX FIFO > 2048 bytes: %s\n",
2983                    (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
2984         seq_printf(seq, "\tNumber of additional RX channels: %d\n",
2985                    priv->dma_cap.number_rx_channel);
2986         seq_printf(seq, "\tNumber of additional TX channels: %d\n",
2987                    priv->dma_cap.number_tx_channel);
2988         seq_printf(seq, "\tEnhanced descriptors: %s\n",
2989                    (priv->dma_cap.enh_desc) ? "Y" : "N");
2990
2991         return 0;
2992 }
2993
2994 static int stmmac_sysfs_dma_cap_open(struct inode *inode, struct file *file)
2995 {
2996         return single_open(file, stmmac_sysfs_dma_cap_read, inode->i_private);
2997 }
2998
2999 static const struct file_operations stmmac_dma_cap_fops = {
3000         .owner = THIS_MODULE,
3001         .open = stmmac_sysfs_dma_cap_open,
3002         .read = seq_read,
3003         .llseek = seq_lseek,
3004         .release = single_release,
3005 };
3006
3007 static int stmmac_init_fs(struct net_device *dev)
3008 {
3009         struct stmmac_priv *priv = netdev_priv(dev);
3010
3011         /* Create per netdev entries */
3012         priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
3013
3014         if (!priv->dbgfs_dir || IS_ERR(priv->dbgfs_dir)) {
3015                 pr_err("ERROR %s/%s, debugfs create directory failed\n",
3016                        STMMAC_RESOURCE_NAME, dev->name);
3017
3018                 return -ENOMEM;
3019         }
3020
3021         /* Entry to report DMA RX/TX rings */
3022         priv->dbgfs_rings_status =
3023                 debugfs_create_file("descriptors_status", S_IRUGO,
3024                                     priv->dbgfs_dir, dev,
3025                                     &stmmac_rings_status_fops);
3026
3027         if (!priv->dbgfs_rings_status || IS_ERR(priv->dbgfs_rings_status)) {
3028                 pr_info("ERROR creating stmmac ring debugfs file\n");
3029                 debugfs_remove_recursive(priv->dbgfs_dir);
3030
3031                 return -ENOMEM;
3032         }
3033
3034         /* Entry to report the DMA HW features */
3035         priv->dbgfs_dma_cap = debugfs_create_file("dma_cap", S_IRUGO,
3036                                                   priv->dbgfs_dir,
3037                                                   dev, &stmmac_dma_cap_fops);
3038
3039         if (!priv->dbgfs_dma_cap || IS_ERR(priv->dbgfs_dma_cap)) {
3040                 pr_info("ERROR creating stmmac dma_cap debugfs file\n");
3041                 debugfs_remove_recursive(priv->dbgfs_dir);
3042
3043                 return -ENOMEM;
3044         }
3045
3046         return 0;
3047 }
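
/* With debugfs mounted (typically at /sys/kernel/debug), the entries created
 * above can be inspected from user space; e.g. for an interface named eth0:
 *
 *     cat /sys/kernel/debug/stmmaceth/eth0/descriptors_status
 *     cat /sys/kernel/debug/stmmaceth/eth0/dma_cap
 *
 * Here "stmmaceth" reflects STMMAC_RESOURCE_NAME and the interface name is
 * system-dependent (illustrative paths only).
 */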
3048
3049 static void stmmac_exit_fs(struct net_device *dev)
3050 {
3051         struct stmmac_priv *priv = netdev_priv(dev);
3052
3053         debugfs_remove_recursive(priv->dbgfs_dir);
3054 }
3055 #endif /* CONFIG_DEBUG_FS */
3056
3057 static const struct net_device_ops stmmac_netdev_ops = {
3058         .ndo_open = stmmac_open,
3059         .ndo_start_xmit = stmmac_xmit,
3060         .ndo_stop = stmmac_release,
3061         .ndo_change_mtu = stmmac_change_mtu,
3062         .ndo_fix_features = stmmac_fix_features,
3063         .ndo_set_features = stmmac_set_features,
3064         .ndo_set_rx_mode = stmmac_set_rx_mode,
3065         .ndo_tx_timeout = stmmac_tx_timeout,
3066         .ndo_do_ioctl = stmmac_ioctl,
3067 #ifdef CONFIG_NET_POLL_CONTROLLER
3068         .ndo_poll_controller = stmmac_poll_controller,
3069 #endif
3070         .ndo_set_mac_address = eth_mac_addr,
3071 };
3072
3073 /**
3074  *  stmmac_hw_init - Init the MAC device
3075  *  @priv: driver private structure
3076  *  Description: this function is to configure the MAC device according to
3077  *  some platform parameters or the HW capability register. It prepares the
3078  *  driver to use either ring or chain modes and to setup either enhanced or
3079  *  normal descriptors.
3080  */
3081 static int stmmac_hw_init(struct stmmac_priv *priv)
3082 {
3083         struct mac_device_info *mac;
3084
3085         /* Identify the MAC HW device */
3086         if (priv->plat->has_gmac) {
3087                 priv->dev->priv_flags |= IFF_UNICAST_FLT;
3088                 mac = dwmac1000_setup(priv->ioaddr,
3089                                       priv->plat->multicast_filter_bins,
3090                                       priv->plat->unicast_filter_entries,
3091                                       &priv->synopsys_id);
3092         } else if (priv->plat->has_gmac4) {
3093                 priv->dev->priv_flags |= IFF_UNICAST_FLT;
3094                 mac = dwmac4_setup(priv->ioaddr,
3095                                    priv->plat->multicast_filter_bins,
3096                                    priv->plat->unicast_filter_entries,
3097                                    &priv->synopsys_id);
3098         } else {
3099                 mac = dwmac100_setup(priv->ioaddr, &priv->synopsys_id);
3100         }
3101         if (!mac)
3102                 return -ENOMEM;
3103
3104         priv->hw = mac;
3105
3106         /* Select chain or ring descriptor mode */
3107         if (priv->synopsys_id >= DWMAC_CORE_4_00) {
3108                 priv->hw->mode = &dwmac4_ring_mode_ops;
3109         } else {
3110                 if (chain_mode) {
3111                         priv->hw->mode = &chain_mode_ops;
3112                         pr_info(" Chain mode enabled\n");
3113                         priv->mode = STMMAC_CHAIN_MODE;
3114                 } else {
3115                         priv->hw->mode = &ring_mode_ops;
3116                         pr_info(" Ring mode enabled\n");
3117                         priv->mode = STMMAC_RING_MODE;
3118                 }
3119         }
3120
3121         /* Get the HW capability register (on GMAC cores newer than 3.50a) */
3122         priv->hw_cap_support = stmmac_get_hw_features(priv);
3123         if (priv->hw_cap_support) {
3124                 pr_info(" DMA HW capability register supported\n");
3125
3126                 /* Override some GMAC/DMA configuration fields (e.g.
3127                  * enh_desc, tx_coe) that are passed through the
3128                  * platform data with the values from the HW capability
3129                  * register (if supported).
3130                  */
3131                 priv->plat->enh_desc = priv->dma_cap.enh_desc;
3132                 priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
3133
3134                 /* TXCOE doesn't work in thresh DMA mode */
3135                 if (priv->plat->force_thresh_dma_mode)
3136                         priv->plat->tx_coe = 0;
3137                 else
3138                         priv->plat->tx_coe = priv->dma_cap.tx_coe;
3139
3140                 /* In case of GMAC4 rx_coe is from HW cap register. */
3141                 priv->plat->rx_coe = priv->dma_cap.rx_coe;
3142
3143                 if (priv->dma_cap.rx_coe_type2)
3144                         priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
3145                 else if (priv->dma_cap.rx_coe_type1)
3146                         priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
3147
3148         } else
3149                 pr_info(" No HW DMA feature register supported\n");
3150
3151         /* To use alternate (extended), normal or GMAC4 descriptor structures */
3152         if (priv->synopsys_id >= DWMAC_CORE_4_00)
3153                 priv->hw->desc = &dwmac4_desc_ops;
3154         else
3155                 stmmac_selec_desc_mode(priv);
3156
3157         if (priv->plat->rx_coe) {
3158                 priv->hw->rx_csum = priv->plat->rx_coe;
3159                 pr_info(" RX Checksum Offload Engine supported\n");
3160                 if (priv->synopsys_id < DWMAC_CORE_4_00)
3161                         pr_info("\tCOE Type %d\n", priv->hw->rx_csum);
3162         }
3163         if (priv->plat->tx_coe)
3164                 pr_info(" TX Checksum insertion supported\n");
3165
3166         if (priv->plat->pmt) {
3167                 pr_info(" Wake-up On LAN supported\n");
3168                 device_set_wakeup_capable(priv->device, 1);
3169         }
3170
3171         if (priv->dma_cap.tsoen)
3172                 pr_info(" TSO supported\n");
3173
3174         return 0;
3175 }
3176
3177 /**
3178  * stmmac_dvr_probe
3179  * @device: device pointer
3180  * @plat_dat: platform data pointer
3181  * @res: stmmac resource pointer
3182  * Description: this is the main probe function, used to
3183  * call alloc_etherdev and allocate the private structure.
3184  * Return:
3185  * returns 0 on success, otherwise errno.
3186  */
3187 int stmmac_dvr_probe(struct device *device,
3188                      struct plat_stmmacenet_data *plat_dat,
3189                      struct stmmac_resources *res)
3190 {
3191         int ret = 0;
3192         struct net_device *ndev = NULL;
3193         struct stmmac_priv *priv;
3194
3195         ndev = alloc_etherdev(sizeof(struct stmmac_priv));
3196         if (!ndev)
3197                 return -ENOMEM;
3198
3199         SET_NETDEV_DEV(ndev, device);
3200
3201         priv = netdev_priv(ndev);
3202         priv->device = device;
3203         priv->dev = ndev;
3204
3205         stmmac_set_ethtool_ops(ndev);
3206         priv->pause = pause;
3207         priv->plat = plat_dat;
3208         priv->ioaddr = res->addr;
3209         priv->dev->base_addr = (unsigned long)res->addr;
3210
3211         priv->dev->irq = res->irq;
3212         priv->wol_irq = res->wol_irq;
3213         priv->lpi_irq = res->lpi_irq;
3214
3215         if (res->mac)
3216                 memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);
3217
3218         dev_set_drvdata(device, priv->dev);
3219
3220         /* Verify driver arguments */
3221         stmmac_verify_args();
3222
3223         /* Override with kernel parameters if supplied XXX CRS XXX
3224          * this needs to have multiple instances
3225          */
3226         if ((phyaddr >= 0) && (phyaddr <= 31))
3227                 priv->plat->phy_addr = phyaddr;
3228
3229         priv->stmmac_clk = devm_clk_get(priv->device, STMMAC_RESOURCE_NAME);
3230         if (IS_ERR(priv->stmmac_clk)) {
3231                 dev_warn(priv->device, "%s: warning: cannot get CSR clock\n",
3232                          __func__);
3233                 /* If we fail to obtain stmmac_clk and no specific clk_csr
3234                  * value is passed from the platform, the probe fails.
3235                  */
3236                 if (!priv->plat->clk_csr) {
3237                         ret = PTR_ERR(priv->stmmac_clk);
3238                         goto error_clk_get;
3239                 } else {
3240                         priv->stmmac_clk = NULL;
3241                 }
3242         }
3243         clk_prepare_enable(priv->stmmac_clk);
3244
3245         priv->pclk = devm_clk_get(priv->device, "pclk");
3246         if (IS_ERR(priv->pclk)) {
3247                 if (PTR_ERR(priv->pclk) == -EPROBE_DEFER) {
3248                         ret = -EPROBE_DEFER;
3249                         goto error_pclk_get;
3250                 }
3251                 priv->pclk = NULL;
3252         }
3253         clk_prepare_enable(priv->pclk);
3254
3255         priv->stmmac_rst = devm_reset_control_get(priv->device,
3256                                                   STMMAC_RESOURCE_NAME);
3257         if (IS_ERR(priv->stmmac_rst)) {
3258                 if (PTR_ERR(priv->stmmac_rst) == -EPROBE_DEFER) {
3259                         ret = -EPROBE_DEFER;
3260                         goto error_hw_init;
3261                 }
3262                 dev_info(priv->device, "no reset control found\n");
3263                 priv->stmmac_rst = NULL;
3264         }
3265         if (priv->stmmac_rst)
3266                 reset_control_deassert(priv->stmmac_rst);
3267
3268         /* Init MAC and get the capabilities */
3269         ret = stmmac_hw_init(priv);
3270         if (ret)
3271                 goto error_hw_init;
3272
3273         ndev->netdev_ops = &stmmac_netdev_ops;
3274
3275         ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3276                             NETIF_F_RXCSUM;
3277
3278         if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
3279                 ndev->hw_features |= NETIF_F_TSO;
3280                 priv->tso = true;
3281                 pr_info(" TSO feature enabled\n");
3282         }
3283         ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
3284         ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
3285 #ifdef STMMAC_VLAN_TAG_USED
3286         /* Both mac100 and gmac support receive VLAN tag detection */
3287         ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;
3288 #endif
3289         priv->msg_enable = netif_msg_init(debug, default_msg_level);
3290
3291         if (flow_ctrl)
3292                 priv->flow_ctrl = FLOW_AUTO;    /* RX/TX pause on */
3293
3294         /* The Rx watchdog is available on cores newer than 3.40.
3295          * In some cases, for example on buggy HW, this feature
3296          * has to be disabled; this can be done by passing the
3297          * riwt_off field from the platform data.
3298          */
3299         if ((priv->synopsys_id >= DWMAC_CORE_3_50) && (!priv->plat->riwt_off)) {
3300                 priv->use_riwt = 1;
3301                 pr_info(" Enable RX Mitigation via HW Watchdog Timer\n");
3302         }
3303
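        /* Register the NAPI poll handler with the default weight of 64 */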
3304         netif_napi_add(ndev, &priv->napi, stmmac_poll, 64);
3305
3306         spin_lock_init(&priv->lock);
3307         spin_lock_init(&priv->tx_lock);
3308
3309         ret = register_netdev(ndev);
3310         if (ret) {
3311                 pr_err("%s: ERROR %i registering the device\n", __func__, ret);
3312                 goto error_netdev_register;
3313         }
3314
3315         /* If a specific clk_csr value is passed from the platform,
3316          * the CSR Clock Range selection cannot be changed at
3317          * run-time and is fixed. Otherwise, the driver will try to
3318          * set the MDC clock dynamically according to the actual
3319          * CSR clock input.
3320          */
3321         if (!priv->plat->clk_csr)
3322                 stmmac_clk_csr_set(priv);
3323         else
3324                 priv->clk_csr = priv->plat->clk_csr;
3325
3326         stmmac_check_pcs_mode(priv);
3327
3328         if (priv->pcs != STMMAC_PCS_RGMII && priv->pcs != STMMAC_PCS_TBI &&
3329             priv->pcs != STMMAC_PCS_RTBI) {
3330                 /* MDIO bus Registration */
3331                 ret = stmmac_mdio_register(ndev);
3332                 if (ret < 0) {
3333                         pr_debug("%s: MDIO bus (id: %d) registration failed\n",
3334                                  __func__, priv->plat->bus_id);
3335                         goto error_mdio_register;
3336                 }
3337         }
3338
3339         return 0;
3340
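/* Error unwind: release resources in the reverse order of acquisition */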
3341 error_mdio_register:
3342         unregister_netdev(ndev);
3343 error_netdev_register:
3344         netif_napi_del(&priv->napi);
3345 error_hw_init:
3346         clk_disable_unprepare(priv->pclk);
3347 error_pclk_get:
3348         clk_disable_unprepare(priv->stmmac_clk);
3349 error_clk_get:
3350         free_netdev(ndev);
3351
3352         return ret;
3353 }
3354 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
3355
3356 /**
3357  * stmmac_dvr_remove
3358  * @dev: device pointer
3359  * Description: this function resets the TX/RX processes, disables the MAC RX/TX,
3360  * changes the link status and releases the DMA descriptor rings.
3361  */
3362 int stmmac_dvr_remove(struct device *dev)
3363 {
3364         struct net_device *ndev = dev_get_drvdata(dev);
3365         struct stmmac_priv *priv = netdev_priv(ndev);
3366
3367         pr_info("%s: removing driver\n", __func__);
3368
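        /* Quiesce the hardware: stop both DMA engines and disable the MAC */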
3369         priv->hw->dma->stop_rx(priv->ioaddr);
3370         priv->hw->dma->stop_tx(priv->ioaddr);
3371
3372         stmmac_set_mac(priv->ioaddr, false);
3373         netif_carrier_off(ndev);
3374         unregister_netdev(ndev);
3375         if (priv->stmmac_rst)
3376                 reset_control_assert(priv->stmmac_rst);
3377         clk_disable_unprepare(priv->pclk);
3378         clk_disable_unprepare(priv->stmmac_clk);
3379         if (priv->pcs != STMMAC_PCS_RGMII && priv->pcs != STMMAC_PCS_TBI &&
3380             priv->pcs != STMMAC_PCS_RTBI)
3381                 stmmac_mdio_unregister(ndev);
3382         free_netdev(ndev);
3383
3384         return 0;
3385 }
3386 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
3387
3388 /**
3389  * stmmac_suspend - suspend callback
3390  * @dev: device pointer
3391  * Description: this function suspends the device. It is called
3392  * by the platform driver to stop the network queue, program the
3393  * PMT register (for WoL) and release the driver resources.
3394  */
3395 int stmmac_suspend(struct device *dev)
3396 {
3397         struct net_device *ndev = dev_get_drvdata(dev);
3398         struct stmmac_priv *priv = netdev_priv(ndev);
3399         unsigned long flags;
3400
3401         if (!ndev || !netif_running(ndev))
3402                 return 0;
3403
3404         if (priv->phydev)
3405                 phy_stop(priv->phydev);
3406
3407         spin_lock_irqsave(&priv->lock, flags);
3408
3409         netif_device_detach(ndev);
3410         netif_stop_queue(ndev);
3411
3412         napi_disable(&priv->napi);
3413
3414         /* Stop TX/RX DMA */
3415         priv->hw->dma->stop_tx(priv->ioaddr);
3416         priv->hw->dma->stop_rx(priv->ioaddr);
3417
3418         /* Enable Power down mode by programming the PMT regs */
3419         if (device_may_wakeup(priv->device)) {
3420                 priv->hw->mac->pmt(priv->hw, priv->wolopts);
3421                 priv->irq_wake = 1;
3422         } else {
3423                 stmmac_set_mac(priv->ioaddr, false);
3424                 pinctrl_pm_select_sleep_state(priv->device);
3425                 /* Disable the clocks; the device is not wakeup-capable */
3426                 clk_disable(priv->pclk);
3427                 clk_disable(priv->stmmac_clk);
3428         }
3429         spin_unlock_irqrestore(&priv->lock, flags);
3430
3431         priv->oldlink = 0;
3432         priv->speed = 0;
3433         priv->oldduplex = -1;
3434         return 0;
3435 }
3436 EXPORT_SYMBOL_GPL(stmmac_suspend);
3437
3438 /**
3439  * stmmac_resume - resume callback
3440  * @dev: device pointer
3441  * Description: this function is invoked on resume to set up the DMA
3442  * and the core in a usable state.
3443  */
3444 int stmmac_resume(struct device *dev)
3445 {
3446         struct net_device *ndev = dev_get_drvdata(dev);
3447         struct stmmac_priv *priv = netdev_priv(ndev);
3448         unsigned long flags;
3449
3450         if (!netif_running(ndev))
3451                 return 0;
3452
3453         /* The Power-Down bit in the PMT register is cleared
3454          * automatically as soon as a magic packet or a Wake-up frame
3455          * is received. Nevertheless, it's better to clear this bit
3456          * manually because it can cause problems when resuming
3457          * from another device (e.g. a serial console).
3458          */
3459         if (device_may_wakeup(priv->device)) {
3460                 spin_lock_irqsave(&priv->lock, flags);
3461                 priv->hw->mac->pmt(priv->hw, 0);
3462                 spin_unlock_irqrestore(&priv->lock, flags);
3463                 priv->irq_wake = 0;
3464         } else {
3465                 pinctrl_pm_select_default_state(priv->device);
3466                 /* enable the clocks previously disabled */
3467                 clk_enable(priv->stmmac_clk);
3468                 clk_enable(priv->pclk);
3469                 /* reset the phy so that it's ready */
3470                 if (priv->mii)
3471                         stmmac_mdio_reset(priv->mii);
3472         }
3473
3474         netif_device_attach(ndev);
3475
3476         spin_lock_irqsave(&priv->lock, flags);
3477
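        /* Restart from a clean state: reset all RX/TX ring indexes */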
3478         priv->cur_rx = 0;
3479         priv->dirty_rx = 0;
3480         priv->dirty_tx = 0;
3481         priv->cur_tx = 0;
3482         /* Reset the private MSS value to force MSS context settings at
3483          * the next TSO transmit (only used for GMAC4).
3484          */
3485         priv->mss = 0;
3486
3487         stmmac_clear_descriptors(priv);
3488
3489         stmmac_hw_setup(ndev, false);
3490         stmmac_init_tx_coalesce(priv);
3491         stmmac_set_rx_mode(ndev);
3492
3493         napi_enable(&priv->napi);
3494
3495         netif_start_queue(ndev);
3496
3497         spin_unlock_irqrestore(&priv->lock, flags);
3498
3499         if (priv->phydev)
3500                 phy_start(priv->phydev);
3501
3502         return 0;
3503 }
3504 EXPORT_SYMBOL_GPL(stmmac_resume);
3505
3506 #ifndef MODULE
3507 static int __init stmmac_cmdline_opt(char *str)
3508 {
3509         char *opt;
3510
3511         if (!str || !*str)
3512                 return -EINVAL;
3513         while ((opt = strsep(&str, ",")) != NULL) {
3514                 if (!strncmp(opt, "debug:", 6)) {
3515                         if (kstrtoint(opt + 6, 0, &debug))
3516                                 goto err;
3517                 } else if (!strncmp(opt, "phyaddr:", 8)) {
3518                         if (kstrtoint(opt + 8, 0, &phyaddr))
3519                                 goto err;
3520                 } else if (!strncmp(opt, "buf_sz:", 7)) {
3521                         if (kstrtoint(opt + 7, 0, &buf_sz))
3522                                 goto err;
3523                 } else if (!strncmp(opt, "tc:", 3)) {
3524                         if (kstrtoint(opt + 3, 0, &tc))
3525                                 goto err;
3526                 } else if (!strncmp(opt, "watchdog:", 9)) {
3527                         if (kstrtoint(opt + 9, 0, &watchdog))
3528                                 goto err;
3529                 } else if (!strncmp(opt, "flow_ctrl:", 10)) {
3530                         if (kstrtoint(opt + 10, 0, &flow_ctrl))
3531                                 goto err;
3532                 } else if (!strncmp(opt, "pause:", 6)) {
3533                         if (kstrtoint(opt + 6, 0, &pause))
3534                                 goto err;
3535                 } else if (!strncmp(opt, "eee_timer:", 10)) {
3536                         if (kstrtoint(opt + 10, 0, &eee_timer))
3537                                 goto err;
3538                 } else if (!strncmp(opt, "chain_mode:", 11)) {
3539                         if (kstrtoint(opt + 11, 0, &chain_mode))
3540                                 goto err;
3541                 }
3542         }
3543         return 0;
3544
3545 err:
3546         pr_err("%s: ERROR broken module parameter conversion\n", __func__);
3547         return -EINVAL;
3548 }
3549
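/* Illustrative boot-time usage (example values only): passing
 *
 *     stmmaceth=debug:16,phyaddr:1,watchdog:4000
 *
 * on the kernel command line raises the message level, forces PHY address 1
 * and sets a 4000 ms transmit timeout.
 */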
3550 __setup("stmmaceth=", stmmac_cmdline_opt);
3551 #endif /* MODULE */
3552
3553 static int __init stmmac_init(void)
3554 {
3555 #ifdef CONFIG_DEBUG_FS
3556         /* Create debugfs main directory if it doesn't exist yet */
3557         if (!stmmac_fs_dir) {
3558                 stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
3559
3560                 if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) {
3561                         pr_err("ERROR %s, debugfs create directory failed\n",
3562                                STMMAC_RESOURCE_NAME);
3563
3564                         return -ENOMEM;
3565                 }
3566         }
3567 #endif
3568
3569         return 0;
3570 }
3571
3572 static void __exit stmmac_exit(void)
3573 {
3574 #ifdef CONFIG_DEBUG_FS
3575         debugfs_remove_recursive(stmmac_fs_dir);
3576 #endif
3577 }
3578
3579 module_init(stmmac_init)
3580 module_exit(stmmac_exit)
3581
3582 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
3583 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
3584 MODULE_LICENSE("GPL");